serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,601 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) for host-side timing.
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1e-6;
}
// Error-checking wrapper for CUDA runtime calls.
// Fix: the bare-block form expanded incorrectly inside un-braced if/else
// statements (the trailing semicolon terminated the `if` arm); the
// do { ... } while (0) idiom makes `CHECK(x);` behave as one statement.
#define CHECK(call) do { \
const cudaError_t error = (call); \
if( error != cudaSuccess ){ \
printf("Error: %s:%d\n", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} while (0)
// Compare host and device results element-wise within `epsilon`; prints a
// verdict and the first mismatching index, if any.
// Fix: the original used abs() — the *integer* overload — which truncated
// the float difference toward zero and silently accepted any mismatch
// smaller than 1.0. A signed comparison against +/- epsilon avoids both
// the truncation and a new <math.h> dependency.
void checkResult(float* hostRef, float* gpuRef, const int N){
    double epsilon = 1.0E-8;
    int match = 1;
    for( int idx = 0; idx != N; ++idx ){
        double diff = (double)gpuRef[idx] - (double)hostRef[idx];
        if(diff > epsilon || diff < -epsilon){
            match = 0;
            printf("Arrays don't match.\n");
            printf("gpu: %5.2f host: %5.2f at current %d\n", gpuRef[idx], hostRef[idx], idx);
            break;
        }
    }
    if(match){ printf("Arrays match.\n"); }
}
// Fill `ptr[0..size)` with pseudo-random floats in [0, 25.5].
// Fix: the original called srand(time(...)) on EVERY call, so two arrays
// initialized within the same second received identical data. Seed once.
void initializeData(float* ptr, const int size){
    static int seeded = 0;
    if(!seeded){
        srand( (unsigned) time(NULL) );
        seeded = 1;
    }
    for(int idx = 0; idx != size; ++idx ){
        ptr[idx] = (float)(rand() & 0xFF) / 10.0f;  // 0..255 scaled to 0..25.5
    }
}
// CPU reference: C[i] = A[i] + B[i] for i in [0, N).
void sumArraysOnHost(float* A, float* B, float* C, const int N){
    for(int i = 0; i < N; i++){
        C[i] = A[i] + B[i];
    }
}
// One element per thread: C[idx] = A[idx] + B[idx], flat 1D grid of 1D blocks.
// NOTE(review): there is no bounds guard. main() sizes the grid with
// ceil-division, so whenever block.x does not divide nElem the last block
// reads and writes past the end of the arrays. Guarding requires passing N
// as an extra kernel parameter — TODO confirm whether callers guarantee
// block.x always divides 1<<24.
__global__ void sumArraysOnDevice(float* A, float* B, float* C){
int idx = threadIdx.x + blockIdx.x * blockDim.x; // assuming 1D ?
C[idx] = A[idx] + B[idx];
}
// Vector-add benchmark: compares device and host sums of 1<<24 floats.
// Usage: ./prog <threads-per-block>
int main(int argc, char** argv){
    if(argc != 2){
        printf("Invalid arguments\n");
        exit(2);
    }
    printf("Starting...\n");
    double iStart, iElapse;
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("using device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    int nElem = 1<<24;
    printf("Vector size: %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);
    // Host buffers: two inputs, CPU reference result, GPU result.
    float* h_A = (float *) malloc(nBytes);
    float* h_B = (float *) malloc(nBytes);
    float* hostRef = (float *) malloc(nBytes);
    float* gpuRef = (float *) malloc(nBytes);
    initializeData(h_A, nElem);
    initializeData(h_B, nElem);
    // Fix: the original cleared only nElem BYTES — one quarter of each
    // float buffer. Clear the full allocation.
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float**) &d_A, nBytes));
    CHECK(cudaMalloc((float**) &d_B, nBytes));
    CHECK(cudaMalloc((float**) &d_C, nBytes));
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    // Block size from the command line; ceil-division grid covers nElem.
    dim3 block(atoi(argv[1]));
    dim3 grid((nElem + block.x - 1) / block.x);
    iStart = cpuSecond();
    sumArraysOnDevice<<< grid, block >>>(d_A, d_B, d_C);
    CHECK(cudaGetLastError());       // catch invalid launch configurations
    CHECK(cudaDeviceSynchronize());  // catch async execution errors
    iElapse = cpuSecond() - iStart;
    printf("sumArraysOnDevice() <<< %d, %d >>> time: %5.6f sec\n", grid.x, block.x, iElapse);
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost ));
    iStart = cpuSecond();
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    iElapse = cpuSecond() - iStart;
    printf("sumArraysOnHost(): time: %5.6f sec\n", iElapse);
    checkResult(hostRef, gpuRef, nElem);
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    return(0);
}
|
2,602 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Enumerate CUDA devices and print basic memory specs for each.
int main()
{
    int nDevices = 0;
    // All CUDA API calls return a status; kernel errors are harder to
    // observe because kernels execute asynchronously w.r.t. the host.
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess) {
        // Fix: on failure the original fell through and looped over an
        // uninitialized nDevices — bail out instead.
        printf("%s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop; // Note: this struct has many other fields
        err = cudaGetDeviceProperties(&prop, i);
        if (err != cudaSuccess) {
            printf("%s\n", cudaGetErrorString(err));
            continue;
        }
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // Theoretical peak bandwidth: 2 (DDR) * clock * bus width in bytes.
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
        2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
    }
    return 0;
}
|
2,603 | #include <stdio.h>
#include <time.h>
#define virtualCores 1000
#define intervals 1000000000
#define intervalsPerCore ((intervals)/(virtualCores))
#define intervalBase ((1.0)/(intervals))
// Each thread integrates 4/(1+x^2) over its own sub-range of [0,1] with
// the rectangle rule and stores its partial sum in acums[threadIdx.x].
// Fix: the original accumulated directly into acums[coreNum], but
// cudaMalloc does NOT zero device memory, so the result included whatever
// garbage was already there. Accumulate locally (in double, for precision)
// and write the slot exactly once.
__global__ void calculatePi(float* acums) {
    int coreNum = threadIdx.x;
    int currentInterval = coreNum * intervalsPerCore;
    int lastInterval = currentInterval + intervalsPerCore;
    double x = currentInterval * intervalBase;
    double acum = 0.0;
    for (; currentInterval < lastInterval; currentInterval++) {
        double fdx = 4.0 / (1.0 + x * x);
        acum += fdx * intervalBase;
        x += intervalBase;
    }
    acums[coreNum] = (float)acum;
}
// Host driver: launch one block of `virtualCores` threads, each integrating
// its slice of [0,1], then reduce the per-thread partials on the CPU.
int main() {
    // Initialize host variables
    float *h_acums;
    int size = sizeof(float) * virtualCores;
    h_acums = (float *)malloc(size);
    if (h_acums == NULL) {
        printf("host allocation failed\n");
        return 1;
    }
    // Initialize device variables
    float *d_acums;
    if (cudaMalloc((void**)&d_acums, size) != cudaSuccess) {
        printf("cudaMalloc failed\n");
        free(h_acums);
        return 1;
    }
    // Fix: cudaMalloc does not zero memory, but the kernel accumulates
    // into this buffer — clear it before the launch.
    cudaMemset(d_acums, 0, size);
    clock_t start, end;
    start = clock();
    calculatePi <<<1, virtualCores >>> (d_acums);
    // Wait for device
    cudaDeviceSynchronize();
    cudaMemcpy(h_acums, d_acums, size, cudaMemcpyDeviceToHost);
    double acum = 0.0;
    for (int coreNum = 0; coreNum < virtualCores; coreNum++) {
        acum += h_acums[coreNum];
    }
    end = clock();
    printf("Result = %20.18lf (%ld)\n", acum, end - start);
    // Fix: release device and host buffers (both were leaked).
    cudaFree(d_acums);
    free(h_acums);
    return 0;
}
2,604 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
//#include "benchmark.h"
//Macros
#define min(a, b) ( (a)<(b)? (a): (b) )
#define max(a, b) ( (a)>(b)? (a): (b) )
//Constants
#define MAX_VECTOR_COUNT 5
//Vector structure
typedef struct {
float e[3];
}Vec3f;
//Global array
Vec3f vecArray[MAX_VECTOR_COUNT];
Vec3f newvecArray[MAX_VECTOR_COUNT];
//forward declarations
__global__ void reduce(Vec3f *input, Vec3f *output);
// Build five test vectors, sum-reduce them on the device, print the result.
int
main(int argc, char** argv){
    vecArray[0].e[0] = 1.0; vecArray[0].e[1] = 2.0; vecArray[0].e[2] = 3.0;
    vecArray[1].e[0] = 4.0; vecArray[1].e[1] = 5.0; vecArray[1].e[2] = 6.0;
    vecArray[2].e[0] = 7.0; vecArray[2].e[1] = 8.0; vecArray[2].e[2] = 9.0;
    vecArray[3].e[0] = 10.0; vecArray[3].e[1] = 11.0; vecArray[3].e[2] = 12.0;
    vecArray[4].e[0] = 13.0; vecArray[4].e[1] = 14.0; vecArray[4].e[2] = 15.0;
    //--------------------------------------------------------------------------------
    //allocate device mem
    Vec3f *ddata, *dbuffer;
    cudaMalloc( &ddata, MAX_VECTOR_COUNT * sizeof(Vec3f) );
    cudaMalloc( &dbuffer, MAX_VECTOR_COUNT * sizeof(Vec3f) );
    cudaMemset( dbuffer, 0, MAX_VECTOR_COUNT * sizeof(Vec3f) );
    cudaMemcpy( ddata, vecArray, MAX_VECTOR_COUNT * sizeof(Vec3f), cudaMemcpyHostToDevice );
    dim3 gridDim(1,1);
    dim3 blockDim(5,1);
    //Check vecArray values going into kernel function
    for (int i = 0 ; i < 5 ; i++){
        for (int j = 0 ; j < 3 ; j ++)
            printf("vecArray[%d][%d] = %.3f\n", j,i,vecArray[i].e[j]);
    }
    printf("\n\n");
    // Fix: reduce() declares `extern __shared__ Vec3f sdata[]`, so the
    // launch must pass the dynamic shared-memory byte count as the third
    // configuration argument — the original passed nothing (0 bytes),
    // making every sdata[] access out of bounds.
    reduce<<< gridDim, blockDim, MAX_VECTOR_COUNT * sizeof(Vec3f) >>> ( ddata, dbuffer );
    //ZERO out newvecArray, then copy the reduction result back
    memset(newvecArray, 0, MAX_VECTOR_COUNT * sizeof(Vec3f));
    cudaMemcpy( newvecArray, dbuffer, MAX_VECTOR_COUNT * sizeof(Vec3f), cudaMemcpyDeviceToHost );
    printf("Check to see if copied over to newvecArry\n");
    for (int i = 0 ; i < 5 ; i++){
        for (int j = 0 ; j < 3 ; j ++)
            printf("newvecArray[%d][%d] = %.3f\n", j,i,newvecArray[i].e[j]);
    }
    // Fix: cudaFree takes the device pointer itself, not its address
    // (the original passed &ddata); also free dbuffer, which was leaked.
    cudaFree( ddata );
    cudaFree( dbuffer );
    //--------------------------------------------------------------------------------
    return 0;
}
// Block-level sum reduction of Vec3f elements using dynamic shared memory.
// Launch as: reduce<<<grid, block, block.x * sizeof(Vec3f)>>>(in, out);
// each block writes its partial sum to output[blockIdx.x].
__global__ void
reduce(Vec3f *input, Vec3f *output){
    extern __shared__ Vec3f sdata[];
    // Each thread loads one element from global to shared memory.
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    sdata[tid] = input[i];
    __syncthreads();
    // Interleaved-addressing tree reduction in shared memory.
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        // Fix: the partner index must be guarded. With the non-power-of-two
        // blockDim used here (5), tid==4 at s==1 read sdata[5] — past the
        // end of the shared buffer.
        if(tid % (2*s) == 0 && tid + s < blockDim.x){
            sdata[tid].e[0] += sdata[tid + s].e[0]; //summing
            sdata[tid].e[1] += sdata[tid + s].e[1];
            sdata[tid].e[2] += sdata[tid + s].e[2];
        }
        __syncthreads();
    }
    // Thread 0 writes this block's result to global memory.
    if(tid == 0) output[blockIdx.x] = sdata[0];
}
|
2,605 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// BST node with parent links. `sema` is a per-node spinlock word
// (0 = free, 1 = held), manipulated with atomicExch by lock()/unlock().
typedef struct node {
int data;
struct node *parent;
struct node *left;
struct node *right;
int sema;
} node;
// Try-lock: atomically set sema to 1; returns nonzero iff the previous
// value was 0, i.e. this thread acquired the lock. Non-blocking.
__device__ int lock(node* n) {
return !atomicExch(&n->sema, 1);
}
// Release: atomically clear sema back to 0.
__device__ void unlock(node* n) {
atomicExch(&n->sema, 0);
}
// Allocate a tree node on the device heap and initialize all fields
// (children NULL, lock word cleared).
__device__ node* new_node(int val, node* parent) {
    node* fresh = (node *) malloc(sizeof(node));
    fresh->data = val;
    fresh->parent = parent;
    fresh->left = NULL;
    fresh->right = NULL;
    fresh->sema = 0;
    return fresh;
}
// Standard BST lookup; returns the node holding `key`, or NULL when absent.
// Iterative form — equivalent to the tail-recursive search.
__device__ node* find(node* root, int key) {
    node* cur = root;
    while (cur != NULL && cur->data != key) {
        cur = (cur->data > key) ? cur->left : cur->right;
    }
    return cur;
}
// Insert `key` into the BST rooted at `root`, using the per-node
// try-lock (lock/unlock): a node is locked only while its child pointer
// is inspected/updated; on lock failure the thread retries via recursion.
// NOTE(review): the empty-tree branch assigns the new node to the LOCAL
// parameter `root`, so the allocation is lost to the caller — callers must
// never pass NULL, or the signature needs node** — TODO confirm intent.
// NOTE(review): the retry in the else-branch is unbounded busy recursion;
// under heavy contention this can overflow the device stack.
__device__ void insert(node* root, int key) {
if (root == NULL) { // Empty Tree
root = new_node(key, NULL);
return;
}
int acquired = lock(root);
if (acquired) {
if (key < root->data) {
if (root->left == NULL) { // Can be inserted to the immediate left
root->left = new_node(key, root);
unlock(root);
return;
} else { // Release this Node and proceed
unlock(root);
insert(root->left, key);
}
} else {
if (root->right == NULL) { // Can be inserted to the immediate right
root->right = new_node(key, root);
unlock(root);
return;
} else {
unlock(root); // Release this Node and proceed
insert(root->right, key);
}
}
} else {
// Lock not acquired: retry the same node.
insert(root, key);
}
}
// Pre-order traversal (root, left, right), printing each key.
__device__ void pre_order(node* root)
{
    if (root == NULL)
        return;
    printf("%d ", root->data);
    pre_order(root->left);
    pre_order(root->right);
}
// In-order traversal (left, root, right) — prints keys in sorted order.
__device__ void in_order(node* root)
{
    if (root == NULL)
        return;
    in_order(root->left);
    printf("%d ", root->data);
    in_order(root->right);
}
// Smallest node in root's RIGHT subtree — the in-order successor used by
// bst_delete. Returns NULL when root has no right child.
__device__ node* min_BST(node* root) {
    node* cur = root->right;
    while (cur != NULL && cur->left != NULL) {
        cur = cur->left;
    }
    return cur;
}
// Delete `key` from the BST rooted at `root`: find the node, lock its
// parent, then either splice out a node with at most one (left) child or
// copy the in-order successor's key down and splice the successor out.
// NOTE(review): root locking is commented out (root_acquired forced to 1),
// so only the parent is protected — concurrent deletes near the same node
// can still race. Deleting the tree root itself is explicitly unhandled.
__device__ void bst_delete(node* root, int key) {
if (root == NULL) return;
// printf("del key= %d\n", key);
//int root_acquired = lock(root);
int root_acquired = 1;
if (root_acquired) {
node* node2delete = find(root, key);
if (node2delete) {
//printf("Delete Node %d\n",node2delete->data);
node* parent = node2delete->parent;
if (parent) {
//unlock(root);
int parent_acquired = lock(parent);
if (parent_acquired) {
// Successor = min of right subtree; NULL means no right child.
node* successor = min_BST(node2delete);
if (successor == NULL) { // Leaf Node (or left-child-only node)
// Re-link parent to the (possibly NULL) left child.
if (node2delete->data < parent->data) {
parent->left = node2delete->left;
} else {
parent->right = node2delete->left;
}
if(node2delete->left)
node2delete->left->parent = parent;
free(node2delete);
}
else if (successor != NULL) {
// Copy the successor's key down, then splice the successor out
// (it has no left child by construction).
node* parent_of_successor = successor->parent;
node2delete->data = successor->data;
if (successor->data < parent_of_successor->data) {
parent_of_successor->left = successor->right;
} else {
parent_of_successor->right = successor->right;
}
if(successor->right)
successor->right->parent = parent_of_successor;
free(successor);
}
unlock(parent);
} else {
// Parent lock contended: retry the whole delete.
//printf("recall %d\n", key);
bst_delete(root, key);
}
} else { // ROOT of tree involved!
// not handled!
}
} else {
// Key not present: nothing to do.
//unlock(root);
}
} else {
//printf("recall %d\n", key);
bst_delete(root, key);
}
}
2,606 | #include <thrust/sequence.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <sys/time.h>
#define pi_f 3.14159265358979f // Greek pi in single precision
// Integrand f(x) = x*sin(x) for the Romberg quadrature below.
// Fix: plain sin() promoted the computation to double on every call;
// sinf keeps it in single precision, matching the float pipeline.
struct sin_functor
{
    __host__ __device__
    float operator()(float x) const
    {
        return x * sinf(x);
    }
};
// Romberg integration of f(x) = x*sin(x) over [a, b]:
// column 0 of R holds trapezoid estimates at halving step sizes (computed
// with Thrust on the device); the remaining columns are filled on the host
// by Richardson extrapolation. Timing brackets the whole computation.
int main(void)
{
int M = 12; // --- Maximum number of Romberg iterations
float a = 0.f; // --- Lower integration limit
float b = 1000.f; // --- Upper integration limit
float hmin = (b-a)/pow(2.f,M-1); // --- Minimum integration step size
// --- Define the matrix for Romberg approximations and initialize to 1.f
timeval t;
double t1,t2,t3,t4; // NOTE(review): t3 and t4 are never used
gettimeofday(&t, NULL);
t1 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
thrust::host_vector<float> R(M*M,1.f);
for (int k=0; k<M; k++) {
// NOTE(review): at k==0 this yields h = hmin/2, a step FINER than
// hmin — confirm the intended row ordering of the Romberg table.
float h = pow(2.f,k-1)*hmin; // --- Step size for the k-th row of the Romberg matrix
// --- Define integration nodes
int N = (int)((b - a)/h) + 1;
thrust::device_vector<float> d_x(N);
thrust::sequence(d_x.begin(), d_x.end(), a, h);
// --- Calculate function values
thrust::device_vector<float> d_y(N);
thrust::transform(d_x.begin(), d_x.end(), d_y.begin(), sin_functor());
// --- Trapezoid rule: endpoints weighted 1, interior nodes weighted 2.
R[k*M] = (.5f*h) * (d_y[0] + 2.f*thrust::reduce(d_y.begin() + 1, d_y.begin() + N - 1, 0.0f) + d_y[N-1]);
}
// --- Compute the k-th column of the Romberg matrix
for (int k=1; k<M; k++) {
// --- The matrix of Romberg approximations is triangular!
for (int kk=0; kk<(M-k+1); kk++) {
// --- Richardson extrapolation step of the Romberg algorithm
R[kk*M+k] = R[kk*M+k-1] + (R[kk*M+k-1] - R[(kk+1)*M+k-1])/(pow(4.f,k)-1.f);
}
}
gettimeofday(&t, NULL);
t2 = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
// --- Define the vector Rnum for numerical approximations
thrust::host_vector<float> Rnum(M);
thrust::copy(R.begin(), R.begin() + M, Rnum.begin());
printf("TIME : %lf ms\n",t2-t1);
for (int i=0; i<M; i++) printf("%i %f\n",i,Rnum[i]);
//getchar();
return 0;
}
|
2,607 | /*
cuda implementation of RecPF algorithm based on matlab function version
[U,Out] = RecPF(m,n,aTV,aL1,picks,B,TVtype,opts,PsiT,Psi,URange,uOrg) - deklaracja funkcji w Matlabie
poniżej przykładowe wywołanie funkcji RecPF w Matlabie ze skryptu sart_tv
[UU,Out_RecPF] = RecPF(nn,nn,aTV,aL1,picks,B,2,opts,PsiT,Psi,range(U(:)),U);
U - macierz
U(:) - wektor, dodaje kolejno kolumny macierzy U
range - dla wektora, zwraca różnicę między najbardziej skrajnymi wartościami (najmniejszą i największą)
innymi słowy, zwraca największą różnicę między dowolnymi dwoma elementami
opis parametrw:
czy parametr jest skalarem, wektorem, macierz, typ danych itp.
m - nn - warto typu int
n - nn - warto typu int
aTV - staa double
aL1 - staa double
picks - wektor zawierajcy indeksy, patrz: picks = find(abs(FB)>thresh);
B - B = FB(picks); - tworzy wektor B z wartociami z macierzy FB odpowiadajcymi indeksom picks
TVtype -- 2 (isotropic) or 1 (anisotropic) (w przykadzie warto 2)
opts
Psit - chyba funkcja
Psi - chyba funkcja
URange - range(U(:))
uOrg -- (optional) true image - macierz
-----w sart_tv: ----------
fb = FB(:);
U = reshape(xx,nn,nn);
FB = fft2(U)/nn;%sqrt(n);
thresh = var(abs(fb))*median(abs(fb(2:end)))*max(10+k,10+K);%(K-k+1);
picks = find(abs(FB)>thresh);
B = FB(picks);
----------------------------
tresh - ta warto jest obliczana w sposb do skomplikowany
picks, B oraz U, ktre s argumentami w wywoaniu RecPF
B = FB(picks); - tworzy wektor B z wartociami z macierzy FB odpowiadajcymi indeksom picks
*/
/*
algorytm do obliczania Denom2 = abs(psf2otf([prd,-prd],[m,n])).^2 + abs(psf2otf([prd;-prd],[m,n])).^2;
abs(psf2otf([prd,-prd],[m,n])).^2 = abs(fft([prd,-prd], n)).^2 (gdzie fft daje tylko wiersz, ktry naley powieli)
abs(psf2otf([prd;-prd],[m,n])).^2 = abs(fft([prd;-prd], m)).^2 (gdzie fft daje tylko kolumn, ktr naley powieli)
*/
|
2,608 | #include <stdio.h>
#define DIM 1000
// Minimal complex-number helper for the CPU Julia-set renderer:
// real/imaginary parts with squared magnitude, product and sum.
struct cppComplex {
float r;
float i;
cppComplex( float a, float b ) : r(a), i(b) {}
// Squared modulus |z|^2 — avoids the sqrt of the true magnitude.
float magnitude2( void ) {
return r * r + i * i;
}
cppComplex operator*(const cppComplex& a) {
return cppComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
cppComplex operator+(const cppComplex& a) {
return cppComplex(r+a.r, i+a.i);
}
};
// Returns 1 when pixel (x,y) — mapped to the complex plane around the
// image center, scaled by 1.5 — stays bounded under z <- z^2 + c for
// 200 iterations with c = -0.8 + 0.156i; returns 0 once |z|^2 exceeds 1000.
int julia_cpu( int x, int y ) {
    const float scale = 1.5;
    float jx = scale * (float)(DIM/2 - x)/(DIM/2);
    float jy = scale * (float)(DIM/2 - y)/(DIM/2);
    cppComplex c(-0.8, 0.156);
    cppComplex z(jx, jy);
    for (int iter = 0; iter < 200; ++iter) {
        z = z * z + c;
        if (z.magnitude2() > 1000)
            return 0;
    }
    return 1;
}
// Render the Julia set on the CPU and write it as a P6 PPM where set
// membership lights the red channel (green/blue are zero).
void julia_set_cpu() {
    unsigned char *pixels = new unsigned char[DIM * DIM];
    for (int x = 0; x < DIM; ++x) {
        for (int y = 0; y < DIM; ++y) {
            pixels[x + y * DIM] = 255 * julia_cpu(x, y);
        }
    }
    FILE *f = fopen("julia_cpu.ppm", "wb");
    // Fix: fopen was unchecked — a NULL stream made the fprintf/fputc
    // calls below undefined behavior.
    if (f == NULL) {
        fprintf(stderr, "julia_set_cpu: cannot open julia_cpu.ppm\n");
        delete [] pixels;
        return;
    }
    fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
    for (int y = 0; y < DIM; y++) {
        for (int x = 0; x < DIM; x++) {
            fputc(pixels[(y * DIM + x)], f); // red channel = membership
            fputc(0, f);
            fputc(0, f);
        }
    }
    fclose(f);
    delete [] pixels;
}
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
/*Begin the GPU part*/
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
// Exercise skeleton: intended to compute one Julia-set pixel per thread,
// storing 255*julia(x,y) into ptr[x + y*DIM] — mirror of the CPU path above.
__global__ void kernel( unsigned char *ptr ) {
/*
write your kernel code here
*/
}
// Exercise skeleton for the GPU renderer: allocate a device bitmap, launch
// kernel(), copy it back into `pixels`, then write julia_gpu.ppm.
// NOTE(review): until the device code and the copy-back are filled in,
// `pixels` is written to the file uninitialized — the output is garbage
// by design of the exercise, not a finished program.
void julia_set_gpu() {
unsigned char *pixels = new unsigned char[DIM * DIM];
/*
write the host code here
*/
//kernel<<<1,1>>>(dev_bitmap);
/*
write the code to copy the data back
*/
FILE *f = fopen("julia_gpu.ppm", "wb");
fprintf(f, "P6\n%i %i 255\n", DIM, DIM);
for (int y = 0; y < DIM; y++) {
for (int x = 0; x < DIM; x++) {
fputc(pixels[(y * DIM + x)], f); // 0 .. 255
fputc(0, f);
fputc(0, f);
}
}
fclose(f);
delete [] pixels;
}
// Render the Julia set with both implementations; each writes its own PPM.
int main( void ) {
    julia_set_cpu();
    julia_set_gpu();
    return 0;
}
|
2,609 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#define ENOUGH_SMALL 0.00001f
// Evaluate the degree-d polynomial k[0] + k[1]*x + ... + k[d]*x^d.
// Fix/improvement: Horner's scheme replaces d+1 calls to double pow()
// with d fused multiply-adds — faster and numerically better behaved,
// with no double promotion in this float routine.
__host__ __device__
static float func(int d, float *k, float x)
{
    float ans = k[d];
    for (int i = d - 1; i >= 0; i--) ans = ans * x + k[i];
    return ans;
}
// qsort-style three-way float comparison through void pointers:
// negative / zero / positive for less / equal / greater.
__host__ __device__
static int compare(const void *arg1, const void *arg2)
{
    float a = *(const float*)arg1;
    float b = *(const float*)arg2;
    if (a > b) return 1;
    if (a < b) return -1;
    return 0;
}
// In-place exchange sort for float arrays, usable from device code where
// library qsort is unavailable. `width` is accepted for qsort-signature
// compatibility but elements are known to be floats.
// Fix: the original inner loop started at j = i, which is NOT a valid
// bubble pass and left inputs such as {2, 3, 1} as {2, 1, 3}. A classic
// bubble sort (inner pass from 0, shrinking tail) sorts correctly.
__host__ __device__
void qsort2(float* base, size_t num, size_t width, int(*compare)(const void *, const void *))
{
    (void)width;
    if (num < 2)
        return;
    for (size_t i = 0; i + 1 < num; i++) {
        for (size_t j = 0; j + 1 < num - i; j++) {
            if (compare(base + j, base + j + 1) > 0) {
                float tmp = base[j];
                base[j] = base[j + 1];
                base[j + 1] = tmp;
            }
        }
    }
}
// Find a root of the degree-d polynomial k[] inside [min, max] by
// bisection, assuming the endpoint values have opposite signs. Stops when
// the interval or the function change drops below ENOUGH_SMALL, or after
// ~100 iterations (deliberate safety valve — see the "CPU bug" comment).
// NOTE(review): when f and g have the SAME sign the error branch is
// commented out and the walk proceeds anyway — result is then unreliable.
__host__ __device__
static float walk(int d, float *k, float min, float max)
{
float a, b, f, g, x, y;
long l = 0;
// Endpoints that are already roots short-circuit the search.
if ((f = func(d, k, max)) == 0.0f) return max;
if ((g = func(d, k, min)) == 0.0f) return min;
if (f * g > 0) {
//MessageBox ( NULL, "Error!", "Solve_Polynomial", MB_OK );
//exit(0);
}
a = min, b = max;
while (1) {
if (fabs(a - b) < ENOUGH_SMALL)
break;
x = (a + b) / 2; // CPU error!?
y = func(d, k, x);
// Branch on the sign at `max` to decide which half brackets the root.
if (f > 0) {
if (func(d, k, b) - y < ENOUGH_SMALL)
break;
if (y < 0)
a = x;
else
b = x;
}
else {
if (func(d, k, a) - y < ENOUGH_SMALL)
break;
if (y > 0)
a = x;
else
b = x;
}
if (l++ > 100)
break; // for CPU bug
}
return x;
}
// Find all real roots of the degree-d polynomial k[] inside [min, max];
// roots are written in ascending order to r[] and their count returned.
// Degrees 1 and 2 are solved in closed form; higher degrees recurse on the
// derivative to bracket extrema, then bisect (walk) each sign-change
// interval. Near-duplicate roots (within ENOUGH_SMALL) are merged.
// Fix: the scratch arrays were allocated with new[] but released with
// plain `delete`, which is undefined behavior — use delete[].
__host__ __device__
int Solve_Polynomial(int d, float *k, float min, float max, float *r)
{
    int i, j, N, ans;
    float D;
    float *K, *R, *y;
    switch (d)
    {
    case 1:
        // Linear: single root -k0/k1 when k1 != 0.
        if (k[1] == 0.0f)
            return 0;
        r[0] = -k[0] / k[1];
        return (min <= r[0] && r[0] <= max) ? 1 : 0;
    case 2:
        // Quadratic: degenerate-to-linear, double root, or two real roots.
        if (k[2] == 0.0f)
        {
            if (k[1] == 0.0f) return 0;
            r[0] = -k[0] / k[1];
            return (min <= r[0] && r[0] <= max) ? 1 : 0;
        }
        D = k[1] * k[1] - 4 * k[2] * k[0];
        if (D == 0.0f)
        {
            r[0] = -k[1] / (2.0f * k[2]);
            return (min <= r[0] && r[0] <= max) ? 1 : 0;
        }
        else if (D > 0)
        {
            r[0] = (-k[1] + sqrt(D)) / (2.0f * k[2]);
            r[1] = (-k[1] - sqrt(D)) / (2.0f * k[2]);
            if (r[0] > r[1])
                D = r[0], r[0] = r[1], r[1] = D;
            i = ((min <= r[0] && r[0] <= max) ? 1 : 0) + ((min <= r[1] && r[1] <= max) ? 1 : 0);
            return i;
        }
        return 0;
    }
    // Degree >= 3: bracket roots between extrema of the derivative.
    K = new float[2 * d];
    R = new float[2 * d + 4];
    y = new float[2 * d + 4];
    for (i = 1; i <= d; i++)
        K[i - 1] = i * k[i] / k[d] / d;   // normalized derivative coefficients
    // Count the extrema (roots of the derivative).
    N = Solve_Polynomial(d - 1, K, min, max, R + 1);
    R[0] = min, R[N + 1] = max;
    for (i = 0; i <= N + 1; i++)
        y[i] = func(d, k, R[i]);
    for (ans = i = 0; i <= N; i++)
    {
        // Skip intervals with no sign change (no root there).
        if (y[i] * y[i + 1] > 0) continue;
        r[ans++] = walk(d, k, R[i], R[i + 1]);
    }
    qsort2(r, (size_t)ans, sizeof(float), compare);
    // Merge roots closer than ENOUGH_SMALL (duplicates from bisection).
    for (i = 0; i < ans - 1; i++)
    {
        if (r[i + 1] - r[i] <= ENOUGH_SMALL)
        {
            for (j = i; j < ans - 1; j++)
                r[j] = r[j + 1];
            i--;
            ans--;
        }
    }
    delete[] K;
    delete[] R;
    delete[] y;
    return ans;
}
|
2,610 | // Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p4.cu -o assignment5-p4
#include <cmath>
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
const uint64_t N = (1 << 12);
const uint64_t BLOCK_SIZE = (1 << 4);
const uint64_t TILE_SIZE = (1 << 5);
using namespace std;
// Abort-on-error helper: wrap every CUDA runtime call as gpuErrchk(call).
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the CUDA error string with source location; exits (with the error
// code as status) when abort is true.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Wall-clock seconds via gettimeofday, used for coarse host-side timing.
double rtclock() {
  struct timeval tv;
  struct timezone tz;
  int rc = gettimeofday(&tv, &tz);
  if (rc != 0) {
    std::cout << "Error return from gettimeofday: " << rc << "\n";
  }
  return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// Naive matmul C = A * B (N x N, row-major): one output element per thread.
// Robustness: bounds guard added so launches whose ceil-div grid overshoots
// N (when BLOCK_SIZE does not divide N) stay in bounds.
__global__ void kernel1(uint64_t* A, uint64_t* B, uint64_t* C) {
  uint64_t i = blockIdx.y * blockDim.y + threadIdx.y;
  uint64_t j = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= N || j >= N) return;
  uint64_t sum = 0;
  for (uint64_t k = 0; k < N; k++) {
    sum += A[i * N + k] * B[k * N + j];
  }
  C[i * N + j] = sum;
}
// Tiled matmul C = A * B using TILE_SIZE x TILE_SIZE shared-memory tiles.
// Assumes blockDim.x == blockDim.y == TILE_SIZE and TILE_SIZE divides N
// (both hold for the launch in main()).
// Improvement: the original hand-unrolled exactly 32 product terms, which
// silently breaks if TILE_SIZE is ever changed; a #pragma unroll loop over
// TILE_SIZE produces the same unrolled code and stays correct for any tile.
__global__ void kernel2(uint64_t* A, uint64_t* B, uint64_t* C) {
  uint64_t i = blockIdx.y * blockDim.y + threadIdx.y;
  uint64_t j = blockIdx.x * blockDim.x + threadIdx.x;
  uint64_t sum = 0;
  __shared__ uint64_t A_t[TILE_SIZE][TILE_SIZE];
  __shared__ uint64_t B_t[TILE_SIZE][TILE_SIZE];
  for (uint64_t t = 0; t < N / TILE_SIZE; t++) {
    // Cooperative load of one tile of A and one tile of B.
    A_t[threadIdx.y][threadIdx.x] = A[i * N + t * TILE_SIZE + threadIdx.x];
    B_t[threadIdx.y][threadIdx.x] = B[(t * TILE_SIZE + threadIdx.y) * N + j];
    __syncthreads();  // tiles fully loaded before any thread reads them
#pragma unroll
    for (int k = 0; k < (int)TILE_SIZE; k++) {
      sum += A_t[threadIdx.y][k] * B_t[k][threadIdx.x];
    }
    __syncthreads();  // all reads done before the next iteration overwrites
  }
  C[i * N + j] = sum;
}
// Reference CPU implementation: C = A * B for N x N row-major matrices.
__host__ void cpumatMul(uint64_t* A, uint64_t* B, uint64_t* C) {
  for (uint64_t row = 0; row < N; row++) {
    for (uint64_t col = 0; col < N; col++) {
      uint64_t acc = 0;
      for (uint64_t k = 0; k < N; k++)
        acc += A[row * N + k] * B[k * N + col];
      C[row * N + col] = acc;
    }
  }
}
// Compare two N x N result matrices element-wise; exits with failure on
// the first mismatch, otherwise reports success.
__host__ void check_result(uint64_t* w_ref, uint64_t* w_opt) {
  for (uint64_t idx = 0; idx < N * N; idx++) {
    if (w_ref[idx] != w_opt[idx]) {
      cout << "Difference found\n";
      exit(EXIT_FAILURE);
    }
  }
  cout << "No differences found between base and test versions\n";
}
// Driver: time the naive (kernel1) and tiled (kernel2) matmuls and compare
// their outputs. The CPU reference run and its checks are commented out
// (a 4096^3 serial matmul is impractically slow), so correctness here
// means "both GPU kernels agree".
// NOTE(review): each cudaEventElapsedTime interval also covers the H2D
// and D2H cudaMemcpy calls recorded between the events, not just the
// kernel execution.
int main() {
int SIZE = N * N;
cudaEvent_t start, end;
gpuErrchk( cudaEventCreate(&start) );
gpuErrchk( cudaEventCreate(&end) );
uint64_t *h_A, *h_B, *h_C1, *h_C2, *cpuResult;
h_A = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_B = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_C1 = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
h_C2 = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
cpuResult = (uint64_t*)malloc(SIZE * sizeof(uint64_t));
for (uint64_t i = 0; i < N; i++) {
for (uint64_t j = 0; j < N; j++) {
h_A[i * N + j] = random();
h_B[i * N + j] = random();
h_C1[i * N + j] = 0;
h_C2[i * N + j] = 0;
cpuResult[i * N + j] = 0;
}
}
double clkbegin, clkend;
double t;
clkbegin = rtclock();
// cpumatMul(h_A, h_B, cpuResult);
clkend = rtclock();
t = clkend - clkbegin;
cout << "Serial Time (ms): " << t * 1000 << "\n";
uint64_t *d_A, *d_B, *d_C1, *d_C2;
gpuErrchk( cudaMalloc((void**)&d_A, SIZE * sizeof(uint64_t)) );
gpuErrchk( cudaMalloc((void**)&d_B, SIZE * sizeof(uint64_t)) );
gpuErrchk( cudaMalloc((void**)&d_C1, SIZE * sizeof(uint64_t)) );
gpuErrchk( cudaMalloc((void**)&d_C2, SIZE * sizeof(uint64_t)) );
// Kernel 1: 16x16 blocks, ceil-div grid.
dim3 threadsPerBlock1(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocksPerGrid1((N + threadsPerBlock1.x - 1)/threadsPerBlock1.x, (N + threadsPerBlock1.y - 1)/threadsPerBlock1.y);
gpuErrchk( cudaEventRecord(start, 0) );
gpuErrchk( cudaMemcpy(d_A, h_A, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_B, h_B, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice) );
kernel1<<<blocksPerGrid1, threadsPerBlock1>>>(d_A, d_B, d_C1);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(h_C1, d_C1, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaEventRecord(end, 0) );
gpuErrchk( cudaDeviceSynchronize() );
float kernel_time = 0;
gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
// Kernel 2: 32x32 blocks matching TILE_SIZE; inputs re-copied so both
// kernels are timed under identical conditions.
dim3 threadsPerBlock2(TILE_SIZE, TILE_SIZE);
dim3 blocksPerGrid2((N + threadsPerBlock2.x - 1)/threadsPerBlock2.x, (N + threadsPerBlock2.y - 1)/threadsPerBlock2.y);
gpuErrchk( cudaEventRecord(start, 0) );
gpuErrchk( cudaMemcpy(d_A, h_A, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_B, h_B, SIZE * sizeof(uint64_t), cudaMemcpyHostToDevice) );
kernel2<<<blocksPerGrid2, threadsPerBlock2>>>(d_A, d_B, d_C2);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(h_C2, d_C2, SIZE * sizeof(uint64_t), cudaMemcpyDeviceToHost) );
gpuErrchk( cudaEventRecord(end, 0) );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
gpuErrchk( cudaFree(d_A) );
gpuErrchk( cudaFree(d_B) );
gpuErrchk( cudaFree(d_C1) );
gpuErrchk( cudaFree(d_C2) );
free(h_A);
free(h_B);
// check_result(cpuResult, h_C1);
// check_result(cpuResult, h_C2);
check_result(h_C1, h_C2);
free(cpuResult);
free(h_C1);
free(h_C2);
return EXIT_SUCCESS;
}
|
2,611 | #include "includes.h"
// Copy the boundary ROW pairs of each tile of the 3D volume `dst` into the
// staging buffer `shared_rows` (global memory, despite the name), two rows
// per (blockIdx.y, slice): the tile's base row and the row after it.
// Pitches (d_* for dst, s_* for the staging buffer) decouple logical extent
// from allocated stride. shared_cols/shared_slices are accepted for
// signature symmetry but not touched here.
// NOTE(review): grid/tile layout (one blockIdx.y per tile_y rows, tile_z
// slices walked per thread) is inferred from the index arithmetic below —
// confirm against the host-side launch code, which is not in this file.
__global__ void gpu_stencil37_hack2_cp_rows(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch,int d_ypitch,int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows: begin\n");
printf("copy rows: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy rows: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy rows: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy rows: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
// Origin of this block's tile in the global volume.
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x*blockIdx.x;
//int dst_area = n_rows*n_cols;
//int s_area = gridDim.y*n_cols*2;
// Per-slice footprints: dst uses its own pitches; the staging buffer
// holds 2 rows per blockIdx.y per slice.
int dst_area = d_ypitch*d_xpitch;
int s_area = gridDim.y*s_xpitch*2;
//int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int base_global_idx = base_global_slice*dst_area + base_global_row * d_xpitch + base_global_col;
int nextRow = base_global_row+1;
bool legalNextRow = nextRow<n_rows;
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
// Walk tile_z slices, copying the two boundary rows of each.
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
int idx_dst =base_global_idx + tz*dst_area+ tx ;
//int idx = (base_global_slice+tz)*s_area + blockIdx.y*n_cols*2+blockIdx.x*blockDim.x+ tx ;
int idx = (base_global_slice+tz)*s_area + blockIdx.y*s_xpitch*2+blockIdx.x*blockDim.x+ tx ;
if(legalCurCol && legalCurSlice){
shared_rows[idx] = dst[idx_dst];
}
if(legalCurCol && legalCurSlice && legalNextRow){
//shared_rows[idx+n_cols] = dst[idx_dst+n_cols];
shared_rows[idx+s_xpitch] = dst[idx_dst+d_xpitch];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int d_addr0 = base_global_idx+0*dst_area+threadIdx.x;
int d_addr1 = base_global_idx+1*dst_area+threadIdx.x;
int s_addr00 = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int s_addr01 = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x+s_xpitch;
int s_addr02 = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x+s_xpitch*2;
int s_addr10 = s_area*(base_global_slice+1)+blockIdx.x*blockDim.x+ threadIdx.x;
int s_addr11 = s_area*(base_global_slice+1)+blockIdx.x*blockDim.x+ threadIdx.x+s_xpitch;
int s_addr12 = s_area*(base_global_slice+1)+blockIdx.x*blockDim.x+ threadIdx.x+s_xpitch*2;
int s_addr20 = s_area*(base_global_slice+2)+blockIdx.x*blockDim.x+ threadIdx.x;
int s_addr21 = s_area*(base_global_slice+2)+blockIdx.x*blockDim.x+ threadIdx.x+s_xpitch;
int s_addr22 = s_area*(base_global_slice+2)+blockIdx.x*blockDim.x+ threadIdx.x+s_xpitch*2;
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,d_addr0,dst[d_addr0]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,d_addr1,dst[d_addr1]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,s_addr00,shared_rows[s_addr00]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,s_addr01,shared_rows[s_addr01]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,s_addr00,shared_rows[s_addr02]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,s_addr10,shared_rows[s_addr10]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,s_addr11,shared_rows[s_addr11]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,s_addr12,shared_rows[s_addr12]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,s_addr20,shared_rows[s_addr20]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,s_addr21,shared_rows[s_addr21]);
printf("copy rows: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,s_addr22,shared_rows[s_addr22]);
}
if(threadIdx.x==0 && threadIdx.y==0){
int addr = 2*s_area+n_cols+256;
int addr1 = 2*dst_area+n_cols+256;
printf("copy rows: shared_rows: addr:%d, val:%f\n", addr, shared_rows[addr]);
printf("copy rows: dst : addr:%d, val:%f\n", addr1, dst[addr1]);
}
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows end!\n");
}
#endif
}
2,612 | #include "includes.h"
/*****************************************************************************/
// nvcc -O1 -o bpsw bpsw.cu -lrt -lm
// Assertion to check for errors
// Device kernel: for each candidate n in nArray, search Selfridge-style for
// the first d in the sequence 5, -7, 9, -11, ... whose Jacobi symbol (d/n)
// is -1, and store that d into dArray (used by a BPSW-style primality test).
// One thread per element; launched with TILE_WIDTH-wide 1D blocks.
// NOTE(review): the search loop never terminates if no such d exists (e.g.
// when n is a perfect square) - confirm inputs are pre-screened by the caller.
__global__ void kernel_jacobi(long* nArray, long* dArray, long len) {
int bx = blockIdx.x; // ID thread
int tx = threadIdx.x;
int result, t;
long d, dAbs, sign, temp, n1, d1;
// Identify the row and column of the Pd element to work on
long memIndex = bx*TILE_WIDTH + tx;
if (memIndex < len) //out of bounds checking - some threads will be doing nothing
{
result = 0;
dAbs = 5;
sign = 1;
while (result != -1) //if result != -1, increment d and try again
{
n1 = nArray[memIndex]; //reinitialize n1 to n
d = dAbs*sign;
t = 1;
d1 = d; //reinitialize d1 to d
d1 = d1 % n1;
// Binary Jacobi-symbol computation of (d1/n1) into t: strip factors of
// two (flipping the sign per n1 mod 8), then swap operands per
// quadratic reciprocity until d1 reaches 0.
while (d1 != 0)
{
while (d1 % 2 == 0) //while d is even
{
d1 = d1 / 2;
if (n1 % 8 == 3 || n1 % 8 == 5) t = -t;
}
temp = d1;
d1 = n1;
n1 = temp;
if ((d1 % 4 == 3) && (n1 % 4 == 3)) t = -t;
d1 = d1 % n1;
}
if (n1 == 1) result = t;
else result = 0; // gcd(d, n) != 1 -> symbol is 0; keep searching
dAbs = dAbs + 2;
sign = sign * -1;
}
}
// NOTE(review): these barriers are not needed for correctness (each thread
// touches a disjoint element) but are harmless - all threads reach them.
__syncthreads();
if (memIndex < len)
dArray[memIndex] = d;
__syncthreads();
}
2,613 |
// small program to print some Gpu properties
// compile with nvcc GetGpuProps.cu -o build/GetGpuProps
#include <cstdio>
// Queries every CUDA device and prints its thread/shared-memory limits.
// Returns -1 when the runtime reports an error or no device is present.
int main()
{
    int count = 0;
    if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;
    if (count == 0) return -1;
    // Fix: report the actual device count; the old code printed count-1,
    // under-reporting by one (a single-GPU machine showed "0 devices").
    std::printf("\nNumber of Cuda Gpu devices: %d\n\n", count);
    cudaDeviceProp prop;
    for (int device = 0; device < count; ++device)
    {
        std::printf("Device %d:\n", device);
        if (cudaSuccess == cudaGetDeviceProperties(&prop, device))
            std::printf("Max threads per block: %d\nShared memory per block: %d\n\n",
                        (int)prop.maxThreadsPerBlock, (int)prop.sharedMemPerBlock);
    }
    return 0;
}
|
2,614 | #include <stdio.h>
#include <time.h>
// Kernel: each thread in the block reports its own index.
// Launched as <<<b, n>>>; interleaving of output lines is unspecified.
__global__ void gpu_loop()
{
    const int tid = threadIdx.x;
    printf("GPU::This is iteration number %d\n", tid);
}
// Host-side counterpart of gpu_loop: prints n sequential iteration messages.
__host__ void cpu_loop(int n)
{
    int i = 0;
    while (i < n) {
        printf("CPU::This is iteration number %d\n", i);
        ++i;
    }
}
// Reads thread count n and block count b from stdin, times gpu_loop on the
// GPU (CUDA events) and cpu_loop on the CPU (clock()), and prints both times.
int main()
{
    int n, b;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    printf("n, b?\n");
    // Fix: validate input - using n/b uninitialized after a failed scanf
    // was undefined behavior.
    if (scanf("%d %d", &n, &b) != 2) {
        printf("invalid input\n");
        return 1;
    }
    cudaEventRecord(start, 0);
    gpu_loop<<<b, n>>>();
    // Fix: a launch with n above the per-block thread limit failed silently;
    // surface launch-configuration errors explicitly.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop); // wait for the kernel to finish
    float et;
    cudaEventElapsedTime(&et, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    clock_t begin = clock();
    cpu_loop(n);
    clock_t end = clock();
    double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC; // ms
    printf("GPU time: %f\n", et);
    printf("CPU time: %f", time_spent);
    return 0;
}
|
2,615 | #include "includes.h"
// Kernel: expand per-query gateway cells into a flat seed list.
// One thread per query entry; each thread writes `cellSize` seeds starting
// at its precomputed write offset, pairing target (ID, index) records with
// this thread's query (ID, index). Supports a 2D grid of 1D blocks.
__global__ void writeSeedList( const int idxLimit, const int* gatewayIndexArray, const int* indexArray, const int* seedWriteIndexArray, const int* cellSizeArray, const int* tIDArray, const int* tIndexArray, const int* qIDArray, const int* qIndexArray, int* target_IDArray, int* target_indexArray, int* query_IDArray, int* query_indexArray) {
    const int blockLinear = blockIdx.y * gridDim.x + blockIdx.x; // 2D grid -> linear block id
    const int globalIdx   = blockDim.x * blockLinear + threadIdx.x;
    if (globalIdx >= idxLimit) return; // grid-tail guard

    const int queryID     = qIDArray[globalIdx];
    const int queryIndex  = qIndexArray[globalIdx];
    const int writeBase   = seedWriteIndexArray[globalIdx];
    const int numSeeds    = cellSizeArray[globalIdx];
    const int gatewayBase = gatewayIndexArray[globalIdx];

    for (int s = 0; s < numSeeds; ++s) {
        const int outPos    = writeBase + s;
        const int targetRef = indexArray[gatewayBase + s];
        target_IDArray[outPos]    = tIDArray[targetRef];
        target_indexArray[outPos] = tIndexArray[targetRef];
        query_IDArray[outPos]     = queryID;
        query_indexArray[outPos]  = queryIndex;
    }
}
2,616 | #include <stdio.h>
#include <stdint.h>
const int MILLION = 1000000; //define the constant million
const int thread_per_block = 1000;
// Start a timing interval: create an event and record it on the default stream.
#define time_record_begin(start){ \
cudaEventCreate(&start); \
cudaEventRecord(start, 0); \
}
// End a timing interval: record and synchronize the stop event, then compute
// the elapsed milliseconds into `time`.
// Fix: destroy both events afterwards - time_record_begin creates a fresh
// event every call, so without these destroys each measurement leaked an
// event pair.
#define time_record_end(start, stop, time){ \
cudaEventCreate(&stop); \
cudaEventRecord(stop, 0); \
cudaEventSynchronize(stop); \
cudaEventElapsedTime(&time, start, stop);\
cudaEventDestroy(start); \
cudaEventDestroy(stop); \
}
// Kernel: element-wise add of `target` to A[0..numElements).
__global__ void arradd(float *A, float target, int numElements){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= numElements) return; // guard the grid tail
    A[i] += target;
}
// Kernel: element-wise add of `target` to A[0..numElements), double precision.
__global__ void darradd(double *A, double target, int numElements){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= numElements) return; // guard the grid tail
    A[i] += target;
}
// Kernel: element-wise add of `target` to A[0..numElements), 32-bit integers.
__global__ void iarradd(int32_t *A, int32_t target, int numElements){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= numElements) return; // guard the grid tail
    A[i] += target;
}
// Kernel: add `target` to each element of A[0..numElements), repeated
// `times` times (used to scale per-element arithmetic work).
__global__ void xarradd(float *A, float target, int times, int numElements){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= numElements) return; // guard the grid tail
    for (int t = 0; t < times; ++t) {
        A[i] += target;
    }
}
/*** Time measure for single precision floating-point numbers.
 * For 1M..256M elements: times H2D copy, kernel, and D2H copy with CUDA
 * events, writing one row per size to stdout and "outA.txt". ***/
void measure_float(){
    printf("Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    float *A;
    float X = 10.0f;
    float *d_A;
    cudaEvent_t start, stop;
    float time_cpu_to_gpu, time_kernel, time_gpu_to_cpu;
    FILE* fp = fopen("outA.txt", "w+");
    if (fp == NULL) {
        // Fix: fopen can fail; writing through a NULL FILE* is undefined behavior.
        perror("outA.txt");
        return;
    }
    fprintf(fp,"Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    for(int j = 1; j <= 256; j *= 2){
        int size = j * MILLION;
        // Host input array, filled with arbitrary data.
        A = (float *) malloc(size * sizeof(float));
        for(int i = 0; i < size; i++){
            A[i] = (float)i / 3.0f;
        }
        cudaMalloc((void**)&d_A, size * sizeof(float));
        // One thread per element, ceil-divided into blocks.
        dim3 gridDim((size + thread_per_block - 1)/thread_per_block);
        dim3 blockDim(thread_per_block);
        // Time the host-to-device copy.
        time_record_begin(start);
        cudaMemcpy(d_A, A, size * sizeof(float), cudaMemcpyHostToDevice);
        time_record_end(start, stop, time_cpu_to_gpu);
        // Time the kernel (synchronize so the stop event brackets real work).
        time_record_begin(start);
        arradd<<<gridDim, blockDim>>>(d_A, X, size);
        cudaDeviceSynchronize();
        time_record_end(start, stop, time_kernel);
        // Time the device-to-host copy.
        time_record_begin(start);
        cudaMemcpy(A, d_A, size * sizeof(float), cudaMemcpyDeviceToHost);
        time_record_end(start, stop, time_gpu_to_cpu);
        fprintf(fp,"%d\t\t %f\t %f\t %f\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        printf("%d\t\t %f\t %f\t %f\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        // Free per-iteration buffers on GPU and CPU.
        cudaFree(d_A);
        free(A);
    }
    fclose(fp);
}
/*** Time measure for double precision floating-point numbers.
 * Same protocol as measure_float: for 1M..256M elements, time H2D copy,
 * kernel, and D2H copy with CUDA events; rows go to stdout and "outB.txt". ***/
void measure_double(){
    printf("Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    double *A;
    double X = 10.0;
    double *d_A;
    cudaEvent_t start, stop;
    float time_cpu_to_gpu, time_kernel, time_gpu_to_cpu;
    FILE* fp = fopen("outB.txt", "w+");
    if (fp == NULL) {
        // Fix: fopen can fail; writing through a NULL FILE* is undefined behavior.
        perror("outB.txt");
        return;
    }
    fprintf(fp,"Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    for(int j = 1; j <= 256; j *= 2){
        int size = j * MILLION;
        A = (double *) malloc(size * sizeof(double));
        for(int i = 0; i < size; i++){
            A[i] = (double)i / 3.0;
        }
        cudaMalloc((void**)&d_A, size * sizeof(double));
        // One thread per element, ceil-divided into blocks.
        dim3 gridDim((size + thread_per_block - 1)/thread_per_block);
        dim3 blockDim(thread_per_block);
        // Time the host-to-device copy.
        time_record_begin(start);
        cudaMemcpy(d_A, A, size * sizeof(double), cudaMemcpyHostToDevice);
        time_record_end(start, stop, time_cpu_to_gpu);
        // Time the kernel.
        time_record_begin(start);
        darradd<<<gridDim, blockDim>>>(d_A, X, size);
        cudaDeviceSynchronize();
        time_record_end(start, stop, time_kernel);
        // Time the device-to-host copy.
        time_record_begin(start);
        cudaMemcpy(A, d_A, size * sizeof(double), cudaMemcpyDeviceToHost);
        time_record_end(start, stop, time_gpu_to_cpu);
        fprintf(fp,"%d\t\t %f\t %f\t %f\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        printf("%d\t\t %f\t %f\t %f\t\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        cudaFree(d_A);
        free(A);
    }
    fclose(fp);
}
/*** Time measure for 32-bit integers.
 * Same protocol as measure_float: for 1M..256M elements, time H2D copy,
 * kernel, and D2H copy with CUDA events; rows go to stdout and "outC.txt". ***/
void measure_int32(){
    printf("Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    int32_t *A;
    int32_t X = 10;
    int32_t *d_A;
    cudaEvent_t start, stop;
    float time_cpu_to_gpu, time_kernel, time_gpu_to_cpu;
    FILE* fp = fopen("outC.txt", "w+");
    if (fp == NULL) {
        // Fix: fopen can fail; writing through a NULL FILE* is undefined behavior.
        perror("outC.txt");
        return;
    }
    fprintf(fp,"Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    for(int j = 1; j <= 256; j *= 2){
        int size = j * MILLION;
        A = (int32_t *) malloc(size * sizeof(int32_t));
        for(int i = 0; i < size; i++){
            A[i] = (int32_t)(i / 3);
        }
        cudaMalloc((void**)&d_A, size * sizeof(int32_t));
        // One thread per element, ceil-divided into blocks.
        dim3 gridDim((size + thread_per_block - 1)/thread_per_block);
        dim3 blockDim(thread_per_block);
        // Time the host-to-device copy.
        time_record_begin(start);
        cudaMemcpy(d_A, A, size * sizeof(int32_t), cudaMemcpyHostToDevice);
        time_record_end(start, stop, time_cpu_to_gpu);
        // Time the kernel.
        time_record_begin(start);
        iarradd<<<gridDim, blockDim>>>(d_A, X, size);
        cudaDeviceSynchronize();
        time_record_end(start, stop, time_kernel);
        // Time the device-to-host copy.
        time_record_begin(start);
        cudaMemcpy(A, d_A, size * sizeof(int32_t), cudaMemcpyDeviceToHost);
        time_record_end(start, stop, time_gpu_to_cpu);
        fprintf(fp,"%d\t\t %f\t %f\t %f\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        printf("%d\t\t %f\t %f\t %f\t\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        cudaFree(d_A);
        free(A);
    }
    fclose(fp);
}
/*** Time measure for different numbers of repeated additions.
 * Array size is fixed at 128M floats; the kernel's per-element add count
 * doubles from 1 to 256. Rows go to stdout and "outD.txt". ***/
void measure_xaddtimes(){
    printf("XaddedTimes\t Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    float *A;
    float X = 10.0f;
    float *d_A;
    cudaEvent_t start, stop;
    float time_cpu_to_gpu, time_kernel, time_gpu_to_cpu;
    int size = 128 * MILLION;
    // One thread per element, ceil-divided into blocks (size is fixed,
    // so the launch configuration is loop-invariant).
    dim3 gridDim((size + thread_per_block - 1)/thread_per_block);
    dim3 blockDim(thread_per_block);
    FILE* fp = fopen("outD.txt", "w+");
    if (fp == NULL) {
        // Fix: fopen can fail; writing through a NULL FILE* is undefined behavior.
        perror("outD.txt");
        return;
    }
    fprintf(fp,"XaddedTimes\t Elements(M)\t CPUtoGPU(ms)\t Kernel(ms)\t GPUtoCPU(ms)\n");
    for(int j = 1; j <= 256; j *= 2){
        A = (float *) malloc(size * sizeof(float));
        for(int i = 0; i < size; i++){
            A[i] = (float) i / 3.0f;
        }
        cudaMalloc((void**)&d_A, size * sizeof(float));
        // Time the host-to-device copy.
        time_record_begin(start);
        cudaMemcpy(d_A, A, size * sizeof(float), cudaMemcpyHostToDevice);
        time_record_end(start, stop, time_cpu_to_gpu);
        // Time the kernel with j repeated adds per element.
        time_record_begin(start);
        xarradd<<<gridDim, blockDim>>>(d_A, X, j, size);
        cudaDeviceSynchronize();
        time_record_end(start, stop, time_kernel);
        // Time the device-to-host copy.
        time_record_begin(start);
        cudaMemcpy(A, d_A, size * sizeof(float), cudaMemcpyDeviceToHost);
        time_record_end(start, stop, time_gpu_to_cpu);
        fprintf(fp,"%d\t\t .\t\t %f\t %f\t %f\t\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        printf("%d\t\t .\t\t %f\t %f\t %f\t\n", j, time_cpu_to_gpu, time_kernel, time_gpu_to_cpu);
        cudaFree(d_A);
        free(A);
    }
    fclose(fp);
}
// Entry point: runs the four measurement suites (float, double, int32,
// repeated-add) in sequence; each writes its own output file.
int main(int argc, char ** argv){
printf("Part A\n");
measure_float();
printf("Part B\n");
measure_double();
printf("Part C\n");
measure_int32();
printf("Part D\n");
measure_xaddtimes();
printf("Done.\n");
}
|
2,617 | #include<stdio.h>
#define ARRAY_SIZE 16
// Debug kernel: computes a global thread id from a 2D grid of 2D blocks and
// prints the intermediate index arithmetic plus the data element it maps to.
// Assumes the launch exactly covers `data` (no bounds check on gid).
__global__ void print_index_and_data(int * data) {
int tid = threadIdx.y * blockDim.x + threadIdx.x; // id within the block
int number_of_threads_in_block = blockDim.x * blockDim.y;
int block_offeset = blockIdx.x * number_of_threads_in_block; // offset of this block within its grid row
int number_of_threads_in_row = number_of_threads_in_block * gridDim.x;
int row_offset = number_of_threads_in_row * blockIdx.y; // offset of this grid row
int gid = tid + block_offeset + row_offset; // global thread id == data index
printf("tid: %d, block_offset: (%d * %d = %d), row_offset (%d * %d = %d) = %d\n", tid, number_of_threads_in_block, blockIdx.x, block_offeset, number_of_threads_in_row, blockIdx.y, row_offset, gid);
printf("threadIdx.x: %d, threadIdx.y: %d, blockDim.x: %d, tid: %d, gid: %d, blockIdx.x: %d,, blockIdx.y: %d,, blockDim.x: %d,, blockDim.y: %d, gridDim.x: %d, gridDim.y: %d, data: %d\n",
threadIdx.x, threadIdx.y, blockDim.x, tid, gid, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gridDim.y, data[gid]);
}
// Fills a 16-element array, copies it to the device, and launches the index
// demo kernel on a 2x2 grid of 2x2 blocks (16 threads, one per element).
int main() {
    int array_size_bytes = sizeof(int) * ARRAY_SIZE;
    int h_data[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_data[i] = i;
        printf("%d ", h_data[i]);
    }
    printf("\n\n");
    // 2x2 grid of 2x2 blocks -> exactly ARRAY_SIZE threads
    dim3 block(2, 2);
    dim3 grid(2, 2);
    int * d_data;
    cudaMalloc((void**)&d_data, array_size_bytes);
    cudaMemcpy(d_data, h_data, array_size_bytes, cudaMemcpyHostToDevice);
    print_index_and_data<<<grid, block>>>(d_data);
    cudaDeviceSynchronize();
    cudaFree(d_data); // Fix: device buffer was never freed
    cudaDeviceReset();
    return 0;
}
2,618 | #include<iostream>
#include <cuda.h>
#include <math.h>
#include<stdio.h>
#include<stdlib.h>
#include <sys/types.h>
#include <time.h>
using namespace std;
// Device helper: minimum of two ints.
__device__ int mymin(int a, int b)
{
    return (a <= b) ? a : b;
}
// Device helper: minimum of three ints.
__device__ int min1(int a,int b,int c)
{
    int best = (a <= b) ? a : b;
    return (best <= c) ? best : c;
}
// Host helper: minimum of three ints (host-side twin of the device min1).
int min2(int a,int b,int c)
{
    int best = a;
    if (b < best) best = b;
    if (c < best) best = c;
    return best;
}
// Exchange two diagonal buffers (prev <-> current) by swapping the pointers.
void swapDiagnolPointers(int **prev,int **current)
{
    int *hold = *current;
    *current = *prev;
    *prev = hold;
}
// Rotate three diagonal buffers one step:
// (prevprev, prev, current) <- (prev, current, prevprev).
void swapDiagnolPointersForG(int **prevprev, int **prev,int **current)
{
    int *hold = *prevprev;
    *prevprev = *prev;
    *prev = *current;
    *current = hold;
}
// Anti-diagonal DP step for diagonals k <= n of the affine-gap alignment
// recurrence (gap open gi, gap extend ge). One thread per diagonal cell;
// diagonal k has numElements = k+1 cells. D/I/G hold deletion/insertion/
// match scores; suffix 1 is the previous diagonal, 2 the one being written,
// and G0 is two diagonals back. Sequences s1/s2 are 1-based.
__global__ void less_than_n(int k,int numElements,int * D1,int * D2,int * I1,int * I2,int * G0,int *G1,int * G2,char *s1,char *s2, int gi, int ge)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int s =0;
// NOTE(review): guard uses > rather than >=, so a thread with
// i == numElements slips through; it writes nothing here because none of
// the branches below match i == k+1, but >= would be the conventional guard.
if( i > numElements ) return ;
if(i == 0)  // top boundary cell of the diagonal: pure gap
{
G2[i]=gi+ge*k;
I2[i]=G2[i]+ge;
}
if(i == k)  // bottom boundary cell of the diagonal: pure gap
{
G2[i]=gi+ge*k;
D2[i]=G2[i]+ge;
}
if( i>0 && i<k)  // interior cell: standard affine-gap recurrence
{
D2[i]=min(D1[i],G1[i]+gi)+ge;
I2[i]=min(I1[i-1],G1[i-1]+gi)+ge;
if(s1[i]!=s2[k-i])  // substitution cost: 0 on match, 1 on mismatch
s=1;
else
s=0;
G2[i]=min1(D2[i],I2[i],G0[i-1]+s);
}
}
// Anti-diagonal DP step for diagonals k > n of the affine-gap alignment
// recurrence. One thread per cell; the diagonal has
// numElements = n-(k-n)+1 cells. Unlike less_than_n, every surviving
// thread writes unconditionally, so the index guard must be exact.
__global__ void greater_than_n(int k,int numElements,int * D1,int * D2,int * I1,int * I2,int * G0,int *G1,int * G2,char *s1,char *s2,int n, int gi, int ge)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int s = 0;
    // Fix: the guard must be >=, not >. With `i > numElements` a thread
    // whose index equals numElements fell through and read/wrote one
    // element past the end of the diagonal arrays.
    if (i >= numElements) return;
    D2[i] = min(D1[i+1], G1[i+1] + gi) + ge;
    I2[i] = min(I1[i],   G1[i]   + gi) + ge;
    if (s1[i+(k-n)] != s2[k-(i+k-n)]) // substitution cost: 0 on match, 1 on mismatch
        s = 1;
    else
        s = 0;
    // G0 indexing shifts by one except on the first diagonal past n.
    if ((k-n) == 1)
        G2[i] = min1(D2[i], I2[i], G0[i]   + s);
    else
        G2[i] = min1(D2[i], I2[i], G0[i+1] + s);
}
// Host driver: reads n and two length-n sequences from stdin, then computes
// the optimal global alignment cost (gap open gi=2, gap extend ge=1) by
// sweeping the 2n anti-diagonals of the DP matrix on the GPU, one kernel
// launch per diagonal, rotating the diagonal buffers between launches.
int main(int argc, char** argv)
{
int i,j,n,k,l; // NOTE(review): j and l are declared but never used
time_t t0, t1; // timing variables used only by the commented-out code below
clock_t c0,c1;
char skip;
scanf("%d",&n);
printf("%d \n",n);
// consume the remainder of the first input line up to the newline
while(1)
{
scanf("%c",&skip);
if(skip == '\n')
break;
}
size_t size = (n+1) * sizeof(char);
size_t size1 = (n+1)*sizeof(int);
// sequences are stored 1-based; index 0 is left uninitialized
char* h_s1 = (char*)malloc(size);
char* h_s2 = (char*)malloc(size);
for(i=1; i<=n; i++)
{
scanf("%c",&h_s1[i]);
}
// skip to the end of the line between the two sequences
while(1)
{
scanf("%c",&skip);
if(skip == '\n')
break;
}
for(i=1; i<=n; i++)
{
scanf("%c",&h_s2[i]);
}
/* t0=time(NULL);
c0=clock();
printf ("\tbegin (wall): %ld\n", (long) t0);
printf ("\tbegin (CPU): %d\n", (int) c0);
*/
// Host copies of the three DP diagonal families:
// D (deletion), G (match/mismatch), I (insertion).
int* h_D0 = (int*)malloc(size1);
int* h_D1 = (int*)malloc(size1);
int* h_D2 = (int*)malloc(size1);
int* h_G0 = (int*)malloc(size1);
int* h_G1 = (int*)malloc(size1);
int* h_G2 = (int*)malloc(size1);
int* h_I0 = (int*)malloc(size1);
int* h_I1 = (int*)malloc(size1);
int* h_I2 = (int*)malloc(size1);
// int D0[n+1],D1[n+1],D2[n+1],G0[n+1],G1[n+1],G2[n+1],I0[n+1],I1[n+1],I2[n+1]; //--- declaration of functions
int gi,ge,s,cost;
gi=2;ge=1; // affine gap penalties: open 2, extend 1
// Device-side diagonal buffers.
int *d_D0, *d_D1, *d_D2;
cudaMalloc(&d_D0, size1);
cudaMalloc(&d_D1, size1);
cudaMalloc(&d_D2, size1);
int *d_G0, *d_G1, *d_G2;
cudaMalloc(&d_G0, size1);
cudaMalloc(&d_G1, size1);
cudaMalloc(&d_G2, size1);
int *d_I0, *d_I1, *d_I2;
cudaMalloc(&d_I0, size1);
cudaMalloc(&d_I1, size1);
cudaMalloc(&d_I2, size1);
char *d_s1, *d_s2;
cudaMalloc(&d_s1, size);
cudaMalloc(&d_s2, size);
cudaMemcpy(d_s1, h_s1, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_s2, h_s2, size, cudaMemcpyHostToDevice);
h_G1[0]=0; // seed the DP: cost of aligning two empty prefixes
// NOTE(review): this copies the whole (mostly uninitialized) h_G1 buffer;
// only element 0 is meaningful at this point.
cudaMemcpy(d_G1, h_G1, size1, cudaMemcpyHostToDevice);
int num_threads;
int num_blocks;
int numElements;
// Sweep all 2n anti-diagonals, one kernel launch per diagonal.
for(k=1;k<=2*n;k++)
{
if(k <=n)
{
numElements = k+1;
if( numElements <= 256) {
num_threads = numElements;
num_blocks =1 ;
}
else
{
num_threads = 256 ;
// NOTE(review): the ternary tests the quotient, not the remainder, so an
// extra block is launched even when numElements divides evenly.
num_blocks = numElements/num_threads + ((numElements/num_threads == 0) ? 0 : 1) ;
}
less_than_n<<<num_blocks,num_threads>>>( k, numElements, d_D1, d_D2, d_I1,d_I2, d_G0, d_G1, d_G2, d_s1, d_s2, gi, ge);
}
if(k > n)
{
numElements = n-(k-n)+1;
if( numElements <= 256) {
num_threads = numElements;
num_blocks =1 ;
}
else
{
num_threads = 256 ;
num_blocks = numElements/num_threads + ((numElements/num_threads == 0) ? 0 : 1) ;
}
greater_than_n<<<num_blocks,num_threads>>>( k, numElements, d_D1, d_D2, d_I1,d_I2, d_G0, d_G1, d_G2, d_s1, d_s2, n, gi, ge);
}
// Rotate the diagonal buffers for the next iteration.
swapDiagnolPointers(&d_D1,&d_D2);
swapDiagnolPointers(&d_I1,&d_I2);
swapDiagnolPointersForG(&d_G0,&d_G1, &d_G2);
}
// Copy the final diagonals back; after the last rotation the result cell
// lives in the "1" buffers.
cudaMemcpy(h_D2, d_D2, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_G2, d_G2, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_I2, d_I2, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_D1, d_D1, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_G1, d_G1, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_I1, d_I1, size1, cudaMemcpyDeviceToHost);
cost=min2(h_D1[0],h_I1[0],h_G1[0]); //--- allignment cost
cout<<"Optimal Allignment cost: "<<cost<<endl;
// NOTE(review): all cleanup below is commented out, so every device and
// host allocation leaks at exit (benign for a one-shot process, but worth
// restoring if this becomes a library routine).
/*
t1=time(NULL);
c1=clock();
printf ("\telapsed wall clock time: %ld\n", (long) (t1 - t0));
printf ("\telapsed CPU time: %f\n", (float) (c1 - c0)/CLOCKS_PER_SEC);
cudaFree(d_G0);
cudaFree(d_G1);
cudaFree(d_G2);
cudaFree(d_D0);
cudaFree(d_D1);
cudaFree(d_D2);
cudaFree(d_I0);
cudaFree(d_I1);
cudaFree(d_I2);
cudaFree(d_s1);
cudaFree(d_s2);
free(h_s1);
free(h_s2);
free(h_G1);
free(h_G2);
free(h_D1);
free(h_D2);
free(h_I1);
free(h_I2);
*/
return 0;
}
|
2,619 | /* Project: ECE 408 Final Project
* File Name: cuda_test.cu
* Calls: None
* Called by: None
* Associated Header: None
* Date created: Sat Nov 7 2015
* Engineers: Conor Gardner
* Compiler: nvcc
* Target OS: Ubuntu Linux 14.04
* Target architecture: CPU: x86 (64 bit)
* GPU: GeForce GTX 970 (compute capability 5.2)
* Description: Simple vector add program designed to verify that your cuda
* toolchain and drivers are properly installed */
#include <cstdio>
// cuda kernel executed on the GPU
/* Device kernel: element-wise vector addition.
 * output_d[ix] = input_a_d[ix] + input_b_d[ix] for every ix < vector_size;
 * surplus threads in the last block are masked off by the bounds check. */
__global__ void vector_add(
    unsigned vector_size,
    const float* input_a_d,
    const float* input_b_d,
    float* output_d
){
    const unsigned ix = blockIdx.x * blockDim.x + threadIdx.x;
    if (ix >= vector_size)
        return;
    output_d[ix] = input_a_d[ix] + input_b_d[ix];
}
// CPU code which sends the GPU work and verifies the output
// Host driver: fills two vectors, adds them on the GPU, and verifies the
// result against a CPU sum within a 1% relative tolerance.
int main()
{
// feel free to customize these parameters
const unsigned vector_size = 1024;
const unsigned threads_per_block = 256;
const float tolerance = 0.01; // maximum floating-point error when verifying (1%)
// initialize memory on the host (just initilize with arbitrary data)
float* input_a_h = new float[vector_size];
float* input_b_h = new float[vector_size];
float* output_h = new float[vector_size];
for (unsigned ix = 0; ix < vector_size; ix++)
{
input_a_h[ix] = ix * (ix / 3.3f);
input_b_h[ix] = 7.8f * ix + 8.1f;
}
// allocate memory on the device and perform host
float* input_a_d;
float* input_b_d;
float* output_d;
cudaMalloc(&input_a_d, vector_size * sizeof(float));
cudaMalloc(&input_b_d, vector_size * sizeof(float));
cudaMalloc(&output_d, vector_size * sizeof(float));
// blocking memory copy host --> device
cudaMemcpy(input_a_d, input_a_h, vector_size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(input_b_d, input_b_h, vector_size * sizeof(float), cudaMemcpyHostToDevice);
// NOTE(review): output_h is uninitialized here, so this copy only moves
// garbage the kernel immediately overwrites - it could be dropped.
cudaMemcpy(output_d, output_h, vector_size * sizeof(float), cudaMemcpyHostToDevice);
// invoke the GPU - it will perform the vector addition after the previous memcpy
unsigned blocks_per_grid = (vector_size - 1) / threads_per_block + 1; // ceil-div
vector_add<<<blocks_per_grid, threads_per_block>>>(vector_size, input_a_d, input_b_d, output_d);
// blocking memory copy device --> host
cudaMemcpy(input_a_h, input_a_d, vector_size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(input_b_h, input_b_d, vector_size * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(output_h, output_d, vector_size * sizeof(float), cudaMemcpyDeviceToHost);
// de-allocate device memory ASAP
cudaFree(input_a_d);
cudaFree(input_b_d);
cudaFree(output_d);
// verify the data received from the device
float max_percent_err = 0.0f;
for (unsigned ix = 0; ix < vector_size; ix++)
{
float host_result = input_a_h[ix] + input_b_h[ix];
float device_result = output_h[ix];
// NOTE(review): divides by host_result; safe for this data (input_b_h is
// always >= 8.1f, so the sum is never 0) but fragile for other inputs.
float percent_err = (host_result - device_result) / host_result;
if (percent_err < 0.0f)
percent_err = -percent_err;
if (percent_err > max_percent_err)
max_percent_err = percent_err;
// print error message and abort on the first incorrect result
if (percent_err > tolerance)
{
printf
(
"FAILED to verify element %u: %f + %f = %f. GPU returned %f\n",
ix,
input_a_h[ix],
input_b_h[ix],
host_result,
device_result
);
return -1;
}
}
printf("SUCCESS with max error = %3.2f%%\n", max_percent_err * 100.0);
// de-allocate host memory
delete[] input_a_h;
delete[] input_b_h;
delete[] output_h;
return 0;
}
|
2,620 | /**
*
* This is a cuda version of the array addition program as created from the
* tutorial from here:
*
* https://devblogs.nvidia.com/even-easier-introduction-cuda/
*
* Any adjustments made are made from suggestions from Programming Massively
* Parallel Processors, 3rd Edition:
*
* https://www.amazon.com/Programming-Massively-Parallel-Processors-Hands/dp/0128119861/ref=dp_ob_title_bk
*
* */
#include <cuda.h>
#include <iostream>
#include <math.h>
// Device kernel: accumulate src into dst element-wise (dst[i] += src[i]).
__global__ // Runs on the device
void deviceAdd(int n, float *dst, float *src)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) return; // guard the grid tail
    dst[i] += src[i];
}
// Host wrapper: copies both arrays to the device, launches deviceAdd with
// 256-thread blocks, and copies the accumulated sum back into h_dst.
void add(int n, float* h_dst, float* h_src) {
    const int bytes = n * sizeof(float);
    float *d_dst;
    float *d_src;
    // Allocate device buffers and stage the host data onto the device.
    cudaMalloc((void **) &d_src, bytes);
    cudaMalloc((void **) &d_dst, bytes);
    cudaMemcpy(d_src, h_src, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_dst, h_dst, bytes, cudaMemcpyHostToDevice);
    // Ceil-divide n into 256-thread blocks.
    deviceAdd<<<ceil(n / 256.0), 256>>>(n, d_dst, d_src);
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(h_dst, d_dst, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_src);
    cudaFree(d_dst);
}
// Adds two ~100M-element vectors on the GPU and reports the max deviation
// from the expected value 3.0f.
int main(void)
{
    int N = 100<<20; // ~100M elements (100 * 2^20)
    float* x = (float*) malloc(N * sizeof(float));
    float* y = (float*) malloc(N * sizeof(float));
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // add(n, dst, src) accumulates src into dst, so x receives the sums.
    add(N, x, y);
    // Check for errors (all values should be 3.0f).
    // Fix: verify the destination array x - the old code checked y, which
    // is never written and still holds 2.0f, so it always reported a max
    // error of 1.0 even when the GPU result was correct.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(x[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;
    free(x);
    free(y);
    return 0;
}
|
2,621 | #include <time.h>
#include <cuda.h>
#include <stdio.h>
#define STOP 0
#define START 1
#define BLOCKSIZE 256
extern "C" void chrono (int kind, float *time);
// 5-point box convolution over an n x n grid stored row-major in gpu_a,
// writing the result to gpu_b. Border cells are copied through unchanged.
// One thread per cell; surplus threads in the last block return early.
__global__ void kconvol (float *gpu_a, float *gpu_b, int n) {
    int i, j, l;
    // global 1D index of the current thread
    l = blockDim.x*blockIdx.x+threadIdx.x;
    // recover 2D coordinates from the linear index (i = column, j = row)
    j = l/n;
    i = l%n;
    if ((i >= n) || (j >= n)) return;
    if ((i == 0) || (j == 0) || (i == n-1) || (j == n-1)) {
        gpu_b[l] = gpu_a[l]; // edges are untouched
    }
    else
        // Fix: use a float constant (0.2f) instead of the double literal
        // (1./5.), which promoted the whole expression to double arithmetic
        // in an otherwise single-precision kernel.
        gpu_b[l] = 0.2f*(gpu_a[l-n] + gpu_a[l-1] + gpu_a[l] + gpu_a[l+1] + gpu_a[l+n]);
}
// Host wrapper: copies the n x n field to the device, runs kconvol, and
// copies the result back into b. Timing via the external chrono() helper
// excludes allocation and transfer overhead.
extern "C" void gpu_convol (float *a, float *b, int n) {
    float *gpu_a;
    float *gpu_b;
    cudaError_t err;
    float time;
    err = cudaMalloc (&gpu_a, n*n*sizeof(float));
    if (err != 0) {
        printf ("Error allocating gpu_a: %s\n", cudaGetErrorString (err));
        exit (1);
    }
    err = cudaMalloc (&gpu_b, n*n*sizeof(float));
    if (err != 0) {
        printf ("Error allocating gpu_b: %s\n", cudaGetErrorString (err));
        exit (1);
    }
    cudaMemcpy (gpu_a, a, n*n*sizeof(float), cudaMemcpyHostToDevice);
    // NOTE : the chronometer below does not contemplate overhead of memory
    // allocation and memory transfer.
    chrono (START, &time);
    // One extra block covers the remainder when n*n is not a multiple of
    // BLOCKSIZE; the kernel bounds-checks its index.
    kconvol <<<n*n/BLOCKSIZE+1,BLOCKSIZE>>> (gpu_a, gpu_b, n);
    err = cudaDeviceSynchronize ();
    chrono (STOP, &time);
    // Fix: report kernel/synchronization errors before printing the timing
    // and copying back the (possibly invalid) result - the old code checked
    // err only after the device-to-host memcpy had already consumed it.
    if (err != 0) {
        printf ("%s\n", cudaGetErrorString (err));
        exit (1);
    }
    printf ("Convolution took %f sec. on GPU\n", time);
    cudaMemcpy (b, gpu_b, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree (gpu_a);
    cudaFree (gpu_b);
}
|
2,622 | #include <string.h>
#include <math.h>
#ifndef RESTRICT
#define restrict __restrict__
#endif /* RESTRICT */
//ldoc on
/**
* ## Implementation
*
* The actually work of computing the fluxes and speeds is done
* by local (`static`) helper functions that take as arguments
* pointers to all the individual fields. This is helpful to the
* compilers, since by specifying the `restrict` keyword, we are
* promising that we will not access the field data through the
* wrong pointer. This lets the compiler do a better job with
* vectorization.
*/
__constant__ static const float g = 9.8;
// total number of cells (ncells) = nx_all * ny_all
// Per-cell flux kernel for the 2D shallow-water equations: computes the
// x-direction (f*) and y-direction (g*) momentum fluxes from the conserved
// fields h, hu, hv. Assumes the 2D launch exactly covers the field - there
// is no bounds guard on tid.
// NOTE(review): fh and gh are accepted but never written here; the caller
// (shallow2d_flux_cu) fills them by device-to-device copies of hu and hv
// before the launch. The parameter `g` shadows the file-scope constant g.
__global__ static
void shallow2dv_flux(float* restrict fh,
float* restrict fhu,
float* restrict fhv,
float* restrict gh,
float* restrict ghu,
float* restrict ghv,
const float* restrict h,
const float* restrict hu,
const float* restrict hv,
float g)
{
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
// linearize to 1D
const unsigned int tid = ((gridDim.x * blockDim.x) * idy) + idx;
float hi = h[tid], hui = hu[tid], hvi = hv[tid];
float inv_h = 1/hi; // NOTE(review): no guard against hi == 0 (dry cell)
fhu[tid] = hui*hui*inv_h + (0.5f*g)*hi*hi;
fhv[tid] = hui*hvi*inv_h;
ghu[tid] = hui*hvi*inv_h;
ghv[tid] = hvi*hvi*inv_h + (0.5f*g)*hi*hi;
}
// Wave-speed kernel: computes each cell's characteristic speeds
// |u| + sqrt(g*h) and |v| + sqrt(g*h) and folds running maxima into
// cxy[0] (x direction) and cxy[1] (y direction).
// NOTE(review): every thread read-modify-writes cxy[0]/cxy[1] with no
// atomics or synchronization - this is a grid-wide data race, and the
// final maxima are not guaranteed correct. A proper max-reduction (or
// atomic max on the float bit pattern) is required; flagged rather than
// rewritten because the fix changes the kernel's structure.
__global__ static
void shallow2dv_speed(float* restrict cxy,
const float* restrict h,
const float* restrict hu,
const float* restrict hv,
float g)
{
float cx = cxy[0];
float cy = cxy[1];
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
// linearize to 1D
const unsigned int tid = ((gridDim.x * blockDim.x) * idy) + idx;
float hi = h[tid];
float inv_hi = 1.0f/h[tid]; // NOTE(review): no guard against h == 0
float root_gh = sqrtf(g * hi);
float cxi = fabsf(hu[tid] * inv_hi) + root_gh;
float cyi = fabsf(hv[tid] * inv_hi) + root_gh;
if (cx < cxi) cx = cxi;
if (cy < cyi) cy = cyi;
cxy[0] = cx;
cxy[1] = cy;
}
// Host wrapper for the flux computation. U holds three stacked fields
// (h, hu, hv) separated by field_stride; FU/GU receive (hu, fhu, fhv) and
// (hv, ghu, ghv) respectively. The first field of each output is seeded by
// a device-to-device copy; the kernel fills the remaining two.
// NOTE(review): launched as <<<ny, nx>>>, i.e. ny blocks of nx threads -
// this only covers the field one-thread-per-cell if nx fits in a single
// block (nx <= 1024); confirm grid sizes against callers.
// NOTE(review): g is declared __constant__ at file scope yet passed by
// value from host code here; this relies on the const initializer (9.8f)
// being visible to the host compiler - verify it is not read as garbage.
extern "C"
void shallow2d_flux_cu(float* FU, float* GU, const float* U,
int nx, int ny, int field_stride)
{
cudaMemcpy(FU, U+field_stride, nx * ny * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(GU, U+2*field_stride, nx * ny * sizeof(float), cudaMemcpyDeviceToDevice);
shallow2dv_flux<<<ny, nx>>>(FU, FU+field_stride, FU+2*field_stride,
GU, GU+field_stride, GU+2*field_stride,
U, U +field_stride, U +2*field_stride,
g);
}
// Host wrapper for the wave-speed kernel: cxy receives the running maxima
// of the x/y characteristic speeds over the whole field.
// NOTE(review): inherits the <<<ny, nx>>> launch-shape caveat and the
// cxy data race documented on shallow2dv_speed.
extern "C"
void shallow2d_speed_cu(float* cxy, const float* U,
int nx, int ny, int field_stride)
{
shallow2dv_speed<<<ny, nx>>>(cxy, U, U+field_stride, U+2*field_stride, g);
}
|
2,623 | extern "C" __global__ void saxpy(float* S, float A, float* X, float* Y) {
// Single-precision S = A*X + Y, one element per thread.
// NOTE(review): no bounds guard - the launch configuration must cover
// exactly the array length (gridDim.x * blockDim.x == element count).
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
S[idx] = A * X[idx] + Y[idx];
}
|
2,624 | #include <cuda.h>
#include <stdio.h>
#define mega 1048576
// Busy-work kernel used between allocations to keep the GPU occupied.
// n1 starts at 0 and pow(0, i) stays 0, so the arithmetic result is never
// meaningful and is never read back.
// NOTE(review): because n1 is unused after the loop, the compiler may
// eliminate the whole computation - confirm if the busy-wait matters.
__global__ void fdcalc(int n)
{
long n1 = 0;
for (int j=0; j < 100000; j++) {
for(int i=2; i < n; ++i) {
n1=pow(n1,i);
//n1=n1*i; (GF 730)
}
}
}
// Main function (runs on the CPU): probes GPU global-memory allocation
// limits. First grows a single contiguous allocation by 10 MB steps until
// cudaMalloc fails, then repeats with six equal non-contiguous allocations.
// Starting sizes (in MB) come from argv.
int main(int argc, char const *argv[]) {
unsigned long *dev_a;
unsigned long *dev_b;
unsigned long *dev_c;
unsigned long *dev_d;
unsigned long *dev_e;
unsigned long *dev_f;
// error holds the last cudaMalloc status (cudaSuccess == 0); the probe
// loops run until an allocation fails.
int error=0;
int i=0, mem=0, mem2=0;
if (argc == 3) {
mem = atoi(argv[1]);   // starting size for the contiguous probe (MB)
mem2 = atoi(argv[2]);  // total starting size for the 6-way probe (MB)
} else {
printf ("./06_mem <mem_contigua> <mem_N_contigua>`\n");
printf ("Titan = ./06_mem 2040 12000\n");
printf ("730 = ./06_mem 1930 1970\n");
exit(1);
}
printf ("\n ##### ##### \n Alocação de Memória Global\n");
i = mem;
while (!error) {
// memory allocation on the GPU; the request grows until it fails
error = cudaMalloc ((void **) &dev_a, mega*i);
printf ("\t cudaMalloc (%d MB)\tError Status %d\n", i, error);
fdcalc <<<1,1>>>(20);
cudaDeviceSynchronize();
cudaFree(dev_a);
i+=10;
}
printf ("\n ##### ##### \n Alocação de Memória Global\n");
error=0;
i = (int)mem2/6; // per-buffer size so six buffers total ~mem2 MB
while (!error) {
// six separate allocations on the GPU; error keeps only the last status
error = cudaMalloc ((void **) &dev_a, mega*i);
error = cudaMalloc ((void **) &dev_b, mega*i);
error = cudaMalloc ((void **) &dev_c, mega*i);
error = cudaMalloc ((void **) &dev_d, mega*i);
error = cudaMalloc ((void **) &dev_e, mega*i);
error = cudaMalloc ((void **) &dev_f, mega*i);
printf ("\t cudaMalloc (%d MB)\tError Status %d\n", i*6, error);
fdcalc <<<1,1>>>(20);
cudaDeviceSynchronize();
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaFree(dev_d);
cudaFree(dev_e);
cudaFree(dev_f);
i+=10;
}
printf("\n");
return 0;
}
|
2,625 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#define THREADS_PER_BLOCK 1024
// Reference CPU implementation: c = a * b for square width x width
// row-major matrices (classic triple loop).
void matrixMultiplyCPU(float *a, float *b, float *c, int width) {
    for (int row = 0; row < width; row++) {
        for (int col = 0; col < width; col++) {
            float acc = 0;
            for (int k = 0; k < width; k++) {
                acc += a[row * width + k] * b[k * width + col];
            }
            c[row * width + col] = acc;
        }
    }
}
// Naive GPU matrix multiply: one thread computes one element of c.
// Threads mapped past the matrix edge return without writing.
__global__ void matrixMultiplySimple(float *a, float *b, float *c, int width) {
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= width || row >= width)
        return;
    float acc = 0;
    for (int k = 0; k < width; k++) {
        acc += a[row * width + k] * b[k * width + col];
    }
    c[row * width + col] = acc;
}
const int TILE_WIDTH = 16;
// Tiled GPU matrix multiply using shared memory.
// Requires blockDim.x == blockDim.y == TILE_WIDTH.
__global__ void matrixMultiplyOptimised(float *a, float *b, float *c, int width) {
    // Allocate 2D tiles in shared memory
    __shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
    __shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
    // Calculate row and column index of this thread's output element
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float result = 0;
    // Fix: ceil-divide the phase count and guard every global access so the
    // kernel is correct when width is not a multiple of TILE_WIDTH and for
    // blocks extending past the matrix edge - the old version truncated the
    // phase count and read/wrote out of bounds in those cases.
    int numPhases = (width + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int p = 0; p < numPhases; p++) {
        int aCol = p*TILE_WIDTH + threadIdx.x;
        int bRow = p*TILE_WIDTH + threadIdx.y;
        // Collaboratively load tiles, zero-padding out-of-range elements
        s_a[threadIdx.y][threadIdx.x] = (row < width && aCol < width) ? a[row*width + aCol] : 0.0f;
        s_b[threadIdx.y][threadIdx.x] = (bRow < width && col < width) ? b[bRow*width + col] : 0.0f;
        // Wait until all data is loaded before any thread reads the tiles
        __syncthreads();
        // Dot product between row of s_a and column of s_b
        for (int i = 0; i < TILE_WIDTH; i++) {
            result += s_a[threadIdx.y][i] * s_b[i][threadIdx.x];
        }
        // Wait until all reads finish before the tiles are overwritten
        __syncthreads();
    }
    // Guarded write (threads past the edge produce nothing)
    if (row < width && col < width)
        c[row * width + col] = result;
}
// Benchmarks three matrix multiplies (naive GPU, CPU reference, tiled GPU)
// on width x width matrices and cross-checks the GPU results against the CPU.
int main() {
    int width = 2000; // width of the square matrices

    // Launch configuration for the simple kernel: square blocks holding
    // THREADS_PER_BLOCK threads in total.
    int sqrtThreads = sqrt(THREADS_PER_BLOCK);
    int nBlocks = width/sqrtThreads;
    if (width % sqrtThreads != 0) { // add an extra block for the remainder
        nBlocks++;
    }
    dim3 grid(nBlocks, nBlocks, 1);
    dim3 block(sqrtThreads, sqrtThreads, 1);

    // Host pointers (a,b inputs; c GPU result; d CPU reference) and device pointers
    float *a_h;
    float *b_h;
    float *c_h; // GPU results
    float *d_h; // CPU results
    float *a_d;
    float *b_d;
    float *c_d;
    int size; // number of bytes per matrix

    // Timing events
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsed1, elapsed2, elapsed3;

    printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
    printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);

    // Dynamically allocate host memory
    size = width * width * sizeof(float);
    a_h = (float*) malloc(size);
    b_h = (float*) malloc(size);
    c_h = (float*) malloc(size);
    d_h = (float*) malloc(size);

    // Load host arrays with data
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            a_h[i * width + j] = i;
            b_h[i * width + j] = i;
        }
    }

    // Allocate device memory and copy the inputs over.
    // (Dropped the old copy of uninitialized c_h to the device - every
    // kernel below overwrites c_d completely.)
    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);
    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);

    // Time the naive GPU kernel
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matrixMultiplySimple<<<grid, block>>>(a_d, b_d, c_d, width);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed1, start, stop);
    printf("Time to calculate results on GPU: %f ms\n", elapsed1);
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Time the CPU reference
    cudaEventRecord(start, 0);
    matrixMultiplyCPU(a_h, b_h, d_h, width);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed2, start, stop);
    printf("Time to calculate results on CPU: %f ms\n", elapsed2);

    // Compare results (NOTE(review): exact float comparison - summation
    // order differences can flag spurious mismatches)
    for (int i = 0; i < width*width; i++) {
        if (c_h[i] != d_h[i]) {
            printf("Error: CPU and GPU results do not match\n");
            break;
        }
    }

    // Fix: the optimised kernel was launched with HOST pointers (a_h, b_h,
    // c_h) - an illegal device address - and with 32x32 blocks, although its
    // shared-memory tiles require blockDim == TILE_WIDTH. Launch it with the
    // device buffers and TILE_WIDTH x TILE_WIDTH blocks instead.
    dim3 blockOpt(TILE_WIDTH, TILE_WIDTH, 1);
    dim3 gridOpt((width + TILE_WIDTH - 1)/TILE_WIDTH,
                 (width + TILE_WIDTH - 1)/TILE_WIDTH, 1);
    cudaEventRecord(start, 0);
    matrixMultiplyOptimised<<<gridOpt, blockOpt>>>(a_d, b_d, c_d, width);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed3, start, stop);
    printf("Time to calculate results on GPU (optimised): %f ms\n", elapsed3);
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    // Compare optimised results against the CPU reference
    for (int i = 0; i < width*width; i++) {
        if (c_h[i] != d_h[i]) {
            printf("Error: CPU and GPU (optimised) results do not match\n");
            break;
        }
    }

    // Free memory
    free(a_h);
    free(b_h);
    free(c_h);
    free(d_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
2,626 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
/* Time */
#include <sys/time.h>
#include <sys/resource.h>
static struct timeval tv0;
/* Return the current wall-clock time expressed in microseconds. */
double getMicroSeconds()
{
    gettimeofday(&tv0, (struct timezone *)0);
    return (double)tv0.tv_sec * 1000000 + (double)tv0.tv_usec;
}
/*
 * Seed the C PRNG from /dev/urandom.
 * BUG FIX: the original dereferenced the FILE* without checking fopen's
 * result and ignored fread's return value; now we fall back to a fixed
 * seed (1) when the device cannot be opened or read.
 */
void init_seed()
{
    int seedi = 1; /* fallback seed if /dev/urandom is unavailable */
    FILE *fd;

    fd = fopen("/dev/urandom", "r");
    if (fd != NULL)
    {
        if (fread(&seedi, sizeof(int), 1, fd) != 1)
            seedi = 1;
        fclose(fd);
    }
    srand(seedi);
}
/*
 * Fill an n x n matrix with uniform random values in [-500, 500].
 * BUG FIX: the original computed 500*u - 500 with u in [0,1], which yields
 * [-500, 0] despite the comment promising [-500, 500]; the span must be
 * 1000 to cover the documented range.
 */
void init2Drand(float **buffer, int n)
{
    int i, j;
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            buffer[i][j] = 1000.0f * (float(rand()) / RAND_MAX) - 500.0f; /* [-500 500] */
}
/*
 * Allocate a zero-initialised 1D array of nx floats.
 * Returns NULL (after printing an error) on allocation failure.
 * BUG FIX: the original requested nx*sizeof(float *) bytes - the size of a
 * POINTER, not of a float - over-allocating 2x on 64-bit platforms.
 */
float *getmemory1D( int nx )
{
    int i;
    float *buffer;

    if( (buffer=(float *)malloc(nx*sizeof(float)))== NULL )
    {
        fprintf( stderr, "ERROR in memory allocation\n" );
        return( NULL );
    }
    for( i=0; i<nx; i++ )
        buffer[i] = 0.0;
    return( buffer );
}
/*
 * Allocate an nx x ny float matrix as a row-pointer table over ONE
 * contiguous data block, zero-initialised. Returns NULL (after printing an
 * error) if either allocation fails.
 */
float **getmemory2D(int nx, int ny)
{
    float **rows = (float **)malloc(nx * sizeof(float *));
    if (rows == NULL)
    {
        fprintf( stderr, "ERROR in memory allocation\n" );
        return( NULL );
    }

    rows[0] = (float *)malloc(nx * ny * sizeof(float));
    if (rows[0] == NULL)
    {
        fprintf( stderr, "ERROR in memory allocation\n" );
        free( rows );
        return( NULL );
    }

    /* Point every row into the contiguous block. */
    for (int r = 1; r < nx; r++)
        rows[r] = rows[0] + r * ny;

    /* Zero every element. */
    for (int r = 0; r < nx; r++)
        for (int c = 0; c < ny; c++)
            rows[r][c] = 0.0;

    return( rows );
}
/********************************************************************************/
/********************************************************************************/
/*
* Traspose 2D version
*/
/*
 * Transpose 2D version: out = in^T for an n x n matrix held as
 * row-pointer arrays.
 */
void transpose2D(float **in, float **out, int n)
{
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
            out[c][r] = in[r][c];
}
/*
* Traspose 1D version
*/
/*
 * Transpose 1D version: out = in^T for an n x n matrix stored row-major in
 * a flat array.
 */
void transpose1D(float *in, float *out, int n)
{
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
            out[c * n + r] = in[r * n + c];
}
/*
* Traspose CUDA version
*/
#define TILE_DIM 16
/*
 * Tiled matrix transpose: out = in^T (in is rows x cols, out is cols x rows).
 * Launch with TILE_DIM x TILE_DIM blocks on a 2D grid covering the input:
 * dim3 grid(ceil(cols/TILE_DIM), ceil(rows/TILE_DIM)).
 *
 * BUG FIX: the original indexed the TILE_DIM x TILE_DIM shared tile with
 * GLOBAL indices (out-of-bounds shared memory for any grid larger than one
 * block), placed __syncthreads() inside a divergent branch, and wrote the
 * output without any block offset, so only block (0,0) produced valid data.
 * The tile's inner dimension is padded (+1) to avoid shared-memory bank
 * conflicts on the transposed read.
 */
__global__ void transpose_device_v3(float *in, float *out, int rows, int cols)
{
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];

    int x = blockIdx.x * TILE_DIM + threadIdx.x; // column in 'in'
    int y = blockIdx.y * TILE_DIM + threadIdx.y; // row in 'in'
    if (x < cols && y < rows)
        tile[threadIdx.y][threadIdx.x] = in[y * cols + x];

    // Barrier must be reached by ALL threads of the block, so it sits
    // outside the bounds checks.
    __syncthreads();

    // Block (bx,by) of the input becomes block (by,bx) of the output.
    x = blockIdx.y * TILE_DIM + threadIdx.x; // column in 'out'
    y = blockIdx.x * TILE_DIM + threadIdx.y; // row in 'out'
    if (x < rows && y < cols)
        out[y * rows + x] = tile[threadIdx.x][threadIdx.y];
}
/*
 * Compare a flat, row-major GPU result with a row-pointer CPU result.
 * Returns 1 on the first mismatch, 0 if all n x n elements are equal.
 */
int check(float *GPU, float **CPU, int n)
{
    for (int r = 0; r < n; r++)
    {
        for (int c = 0; c < n; c++)
        {
            if (GPU[r * n + c] != CPU[r][c])
                return (1);
        }
    }
    return (0);
}
/* Print an hM x wM matrix stored row-major in M, one row per output line. */
void print_matrix(float *M, int hM, int wM)
{
    for (int r = 0; r < hM; r++)
    {
        for (int c = 0; c < wM; c++)
            printf("%4.1f ", M[r * wM + c]);
        printf("\n");
    }
}
int main(int argc, char **argv)
{
    int n;
    float **array2D, **array2D_trans;
    float *array2D_trans_GPU;
    double t0;
    float size_block = 16;

    if (argc==2)
        n = atoi(argv[1]);
    else {
        n = 4096;
        printf("./exec n (by default n=%i)\n", n);
    }

    /* Initialisation */
    init_seed();
    array2D = getmemory2D(n,n);
    array2D_trans = getmemory2D(n,n);
    init2Drand(array2D, n);

    /* Transpose 2D version */
    t0 = getMicroSeconds();
    transpose2D(array2D, array2D_trans, n);
    printf("Transpose version 2D: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);

    /* CUDA version */
    float *darray2D, *darray2D_trans;
    cudaMalloc((void**)&darray2D, n*n*sizeof(float));
    /* BUG FIX: the original copied 'array2D' itself - the row-POINTER table -
     * to the device; the contiguous data block is array2D[0]. */
    cudaMemcpy(darray2D, array2D[0], n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&darray2D_trans, n*n*sizeof(float));

    /* BUG FIX: the transpose kernel indexes in 2D, so the grid must be 2D;
     * the original 1D grid left most of the matrix untouched. */
    dim3 dimBlock(size_block, size_block);
    int blocks = ceil(n/size_block);
    dim3 dimGrid(blocks, blocks);

    t0 = getMicroSeconds();
    transpose_device_v3<<<dimGrid,dimBlock>>>(darray2D, darray2D_trans, n, n);
    cudaThreadSynchronize(); /* wait for the kernel before timing and copying */
    array2D_trans_GPU = (float *)malloc(n*n * sizeof(float));
    cudaMemcpy(array2D_trans_GPU, darray2D_trans, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    printf("Transpose kernel version: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);

    /* BUG FIX: check() expects the matrix DIMENSION, not the element count;
     * passing n*n made it read far out of bounds. The stray unconditional
     * 'return(1)' (the if had no braces) is fixed: 0 now means success.
     * The accidental full-matrix debug dumps (16M values for n=4096) were
     * removed. */
    int status = 0;
    if (check(array2D_trans_GPU, array2D_trans, n)) {
        printf("Transpose CPU-GPU differs!!\n");
        status = 1;
    }

    /* Release host and device resources */
    free(array2D_trans_GPU);
    free(array2D[0]);       free(array2D);
    free(array2D_trans[0]); free(array2D_trans);
    cudaFree(darray2D);
    cudaFree(darray2D_trans);

    return(status);
}
|
2,627 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define N 10000000
/*
 * Element-wise vector addition: out[i] = a[i] + b[i] for i in [0, n).
 * Generalised to a grid-stride loop: behaviour is identical under the
 * <<<1,1>>> launch used in main() (one thread walks i = 0..n-1), but the
 * kernel is now also correct for any multi-block/multi-thread launch.
 */
__global__ void vector_add(float *out, float *a, float *b, int n){
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        out[i] = a[i] + b[i];
    }
}
int main(){
    // Host and device buffers
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    size_t bytes = sizeof(float) * N;

    // Allocate and fill the host input vectors
    a   = (float*)malloc(bytes);
    b   = (float*)malloc(bytes);
    out = (float*)malloc(bytes);
    for (int i = 0; i < N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Allocate device buffers and upload the inputs
    cudaMalloc((void**)&d_a,   bytes);
    cudaMalloc((void**)&d_b,   bytes);
    cudaMalloc((void**)&d_out, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);

    // Single-thread launch; the blocking copy below synchronises with it
    vector_add<<<1,1>>>(d_out, d_a, d_b, N);
    cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost);

    printf("%f\n", out[0]);

    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);

    // Deallocate host memory
    free(a);
    free(b);
    free(out);
    return 0;
}
|
2,628 | #include <stdio.h>
// A macro for checking the error codes of cuda runtime calls
#define CUDA_ERROR_CHECK(expr) \
{ \
cudaError_t err = expr; \
if (err != cudaSuccess) \
{ \
printf("CUDA call failed!\n%s\n", cudaGetErrorString(err)); \
exit(1); \
} \
}
__global__
void swapChannel_kernel(uchar3 * device_inputImage, uchar3 * device_outputImage, int rows, int cols)
{
// Assignment stub (selected by filterNumber == 1 in filterImage): intended
// to write a channel-swapped copy of the rows x cols input image to the
// output buffer. Body intentionally not provided.
//----------------------------------------------------------------
// TODO: Implement this Kernel
//----------------------------------------------------------------
}
__global__
void blurImage_kernel(uchar3 * device_inputImage, uchar3 * device_outputImage, int rows, int cols)
{
// Assignment stub (selected by filterNumber == 2 in filterImage): intended
// to write a blurred copy of the rows x cols input image to the output
// buffer. Body intentionally not provided.
//----------------------------------------------------------------
// TODO: Implement this Kernel
//----------------------------------------------------------------
}
__global__
void inplaceFlip_kernel(uchar3 * device_outputImage, int rows, int cols)
{
// Assignment stub (selected by filterNumber == 3 in filterImage): intended
// to flip the rows x cols image IN PLACE - note the single buffer argument,
// and that filterImage copies the input buffer back to the host for this
// filter. Body intentionally not provided.
//----------------------------------------------------------------
// TODO: Implement this Kernel
//----------------------------------------------------------------
}
__global__
void creative_kernel(uchar3 * device_inputImage, uchar3 * device_outputImage, int rows, int cols)
{
// Assignment stub (selected by filterNumber == 4 in filterImage): a
// free-choice filter from the rows x cols input to the output buffer.
// Body intentionally not provided.
//----------------------------------------------------------------
// TODO: Implement this Kernel
//----------------------------------------------------------------
}
/*
 * Run the filter selected by filterNumber on the GPU and return the kernel
 * execution time in milliseconds.
 *
 * host_inputImage / host_outputImage: rows*cols pixel buffers on the host.
 * filterNumber: 1 = channel swap, 2 = blur, 3 = in-place flip (operates on
 * the input buffer), 4 = creative. Any other value runs no kernel.
 */
__host__
float filterImage(uchar3 *host_inputImage, uchar3 *host_outputImage, int rows, int cols, int filterNumber){
    int numPixels = rows * cols;

    //allocate memory on device (GPU)
    uchar3 *device_inputImage;
    uchar3 *device_outputImage;
    CUDA_ERROR_CHECK(cudaMalloc(&device_inputImage, sizeof(uchar3) * numPixels));
    CUDA_ERROR_CHECK(cudaMalloc(&device_outputImage, sizeof(uchar3) * numPixels));
    CUDA_ERROR_CHECK(cudaMemset(device_outputImage, 0, sizeof(uchar3) * numPixels)); //make sure no memory is left laying around

    //copy input image to the device (GPU)
    CUDA_ERROR_CHECK(cudaMemcpy(device_inputImage, host_inputImage, sizeof(uchar3) * numPixels, cudaMemcpyHostToDevice));

    //start timing to measure length of kernel call
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    //----------------------------------------------------------------
    // TODO: Fill in the parameters for the kernel calls
    //----------------------------------------------------------------
    // Each of the parameters are as follows:
    // 1. Number of thread blocks, can be either int or dim3 (see CUDA manual)
    // 2. Number of threads per thread block, can be either int or dim3 (see CUDA manual)
    // Also note that you pass the pointers to the device memory to the kernel call
    int gridSize = 1;
    int blockSize = 1;
    switch(filterNumber){
    case 1:
        swapChannel_kernel<<<gridSize,blockSize>>>(device_inputImage, device_outputImage, rows, cols);
        break;
    case 2:
        blurImage_kernel<<<gridSize,blockSize>>>(device_inputImage, device_outputImage, rows, cols);
        break;
    case 3:
        inplaceFlip_kernel<<<gridSize,blockSize>>>(device_inputImage, rows, cols);
        break;
    case 4:
        creative_kernel<<<gridSize,blockSize>>>(device_inputImage, device_outputImage, rows, cols);
        break;
    default:
        break;
    }
    //----------------------------------------------------------------
    // END KERNEL CALLS - Do not modify code beyond this point!
    //----------------------------------------------------------------

    //stop timing
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float timeElapsedInMs = 0;
    cudaEventElapsedTime(&timeElapsedInMs, start, stop);

    //synchronize
    cudaDeviceSynchronize(); CUDA_ERROR_CHECK(cudaGetLastError());

    //copy device output image back to host output image
    //special case for filter 3 (the in-place flip): it modifies the INPUT
    //buffer, so that is what must be copied back to the host output
    if (filterNumber==3){
        CUDA_ERROR_CHECK(cudaMemcpy(host_outputImage, device_inputImage, sizeof(uchar3) * numPixels, cudaMemcpyDeviceToHost));
    }else{
        CUDA_ERROR_CHECK(cudaMemcpy(host_outputImage, device_outputImage, sizeof(uchar3) * numPixels, cudaMemcpyDeviceToHost));
    }

    //free Memory
    CUDA_ERROR_CHECK(cudaFree(device_inputImage));
    CUDA_ERROR_CHECK(cudaFree(device_outputImage));
    //BUG FIX: destroy the timing events (the original leaked one pair per call)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return timeElapsedInMs;
}
|
2,629 | /*
* Rayhana ZIARA
* produit matrice vecteur
*/
#include <stdlib.h>
#include <stdio.h>
/*
* DESCRIPTION : kernel concernant le produit matrice vecteur
* PARAMETRES : matrice A, vecteur v, vecteur r et taille des vecteurs
* RETOUR : /
*/
/*
 * DESCRIPTION : matrix-vector product kernel; each thread computes one
 *               component r[idx] = sum_i A[i * size + idx] * v[i]
 *               (column idx of the row-major array dotted with v).
 * PARAMETERS  : matrix A (size x size), vector v, result vector r, size
 * RETURN      : /
 *
 * BUG FIX: the guard was 'index > size', so the thread with index == size
 * slipped through and read/wrote one element past the end of r (and past
 * every row of A). Out-of-range threads are expected whenever the grid
 * overshoots, so they simply return (the error printf was noise).
 */
__global__ void matVect(float *A, float *v, float *r, int size)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= size)
        return;

    float resultat = 0.0f;
    for (int i = 0; i < size; i++)
        resultat += A[i * size + index] * v[i];
    r[index] = resultat;
}
/*
* DESCRIPTION : fonction d'affichage de matrice et de vecteur
* PARAMETRES : matrice à afficher, nb ligne et nb colonne de A,
* RETOUR : /
*/
/*
 * DESCRIPTION : print a 'ligne' x 'colonne' matrix (or vector) stored
 *               row-major in M, one row per line, tab-separated.
 * PARAMETERS  : matrix to print, row count, column count
 * RETURN      : /
 *
 * BUG FIX: elements were indexed with M[i * ligne + j]; the row stride of a
 * row-major matrix is the COLUMN count. The old indexing only worked when
 * ligne == colonne or ligne == 1 (the cases used in main).
 */
void affichage(float *M, int ligne, int colonne)
{
    for(int i = 0; i < ligne; i++)
    {
        for(int j = 0; j < colonne; j++)
            fprintf(stdout, "%lf\t", M[i * colonne + j]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");
}
int main(int argc, char **argv)
{
    // Host-side data
    float *A, *v, *r;
    int n; // size of the matrix (n x n) and of the vectors
    // Device-side data
    float *d_A, *d_v, *d_r;

    if(argc != 2)
    {
        fprintf(stderr, "ERREUR - Veuillez entrez la taille de A en parametre d'execution. Merci'\n./exam_rz n \n");
        return -1;
    }
    n = atoi(argv[1]);

    // Host allocation for A (n*n), v (n) and the result r (n)
    A = (float*)malloc(n * n * sizeof(float));
    v = (float*)malloc(n * sizeof(float));
    r = (float*)malloc(n * sizeof(float));

    // Initialise the row-major matrix A and the vector v
    for(int i = 0; i < n; i++)
    {
        v[i] = i * n;
        for(int j = 0; j < n; j++)
            A[i * n + j] = i * n + j;
    }

    // Device allocation for the counterparts of A, v and r
    cudaMalloc((void**)&d_A, n * n * sizeof(float));
    cudaMalloc((void**)&d_v, n * sizeof(float));
    cudaMalloc((void**)&d_r, n * sizeof(float));

    // Upload A and v to the device
    cudaMemcpy(d_A, A, n * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_v, v, n * sizeof(float), cudaMemcpyHostToDevice);

    // Kernel launch. BUG FIX: matVect indexes purely in 1D (blockIdx.x /
    // threadIdx.x), so a 1D launch is used; the original 4x4 block + 2D
    // grid made every component be computed redundantly by all threads
    // differing only in their .y coordinates.
    int threadsPerBlock = 256;
    int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
    matVect<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_v, d_r, n);

    // Wait for the kernel (cudaThreadSynchronize is deprecated)
    cudaDeviceSynchronize();

    // Copy the result vector back to the host
    cudaMemcpy(r, d_r, n * sizeof(float), cudaMemcpyDeviceToHost);

    fprintf(stdout, "Matrice A\n");
    affichage(A, n, n);
    fprintf(stdout, "Vecteur v\n");
    affichage(v, 1, n);
    fprintf(stdout, "Vecteur r\n");
    affichage(r, 1, n);

    // Free host memory
    free(A);
    free(v);
    free(r);
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_v);
    cudaFree(d_r);

    return 0;
}
|
2,630 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs.
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
////////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
////////////////////////////////////////////////////////////////////////////////
// Each thread strides across d_Input and writes its private partial sum to
// d_Result[tid]; no cross-thread combination happens in this kernel.
__global__ static void reduceKernel(float *d_Result, float *d_Input, int N){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;

    float partial = 0;
    for (int idx = tid; idx < N; idx += stride)
        partial += d_Input[idx];

    d_Result[tid] = partial;
}
// Host-side wrapper with C linkage: launches reduceKernel with BLOCK_N
// blocks of THREAD_N threads on stream 's' (no dynamic shared memory).
// d_Result must hold at least BLOCK_N * THREAD_N floats, since the kernel
// writes one partial sum per thread.
extern "C"
void launch_reduceKernel(float *d_Result, float *d_Input, int N, int BLOCK_N, int THREAD_N, cudaStream_t &s)
{
reduceKernel<<<BLOCK_N, THREAD_N, 0, s>>>(d_Result, d_Input, N);
}
|
2,631 | #include <stdio.h>
#include <stdlib.h>
#define NUMBER_OF_THREADS 100
#define NUMBER_OF_QUERY_IPS 100
#define NUMBER_OF_DATABASE_IPS 1000000
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/*
 * Brute-force descriptor matcher. Launched as <<<NUMBER_OF_THREADS, 1>>>:
 * each block processes its slice of the query interest points against its
 * slice of the database and counts, in anzahlMatches[blockIdx.x], how many
 * queries pass the 0.6 ratio test on squared descriptor distances.
 *
 * BUG FIX: the second-best distance was only updated when a new BEST was
 * found. A distance falling between best and second-best must also replace
 * the second-best, otherwise the ratio test compares against a stale value
 * and accepts too many matches.
 */
__global__ void bruteForceMatcher(float *x1, float *y1, float *strength1, float *trace1, bool *sign1, float *scale1, float *descriptors1,
        float *x2, float *y2, float *strength2, float *trace2, bool *sign2, float *scale2, float *descriptors2, int *anzahlMatches) {

    float distance, delta;

    // Per-block slices of the query and database index ranges
    int blockSizeQuery = NUMBER_OF_QUERY_IPS / NUMBER_OF_THREADS;
    int blockSizeDB = NUMBER_OF_DATABASE_IPS / NUMBER_OF_THREADS;
    int startIndexQuery = blockIdx.x * blockSizeQuery;
    int endIndexQuery = startIndexQuery + blockSizeQuery;
    int startIndexDB = blockIdx.x * blockSizeDB;
    int endIndexDB = startIndexDB + blockSizeDB;

    for (int i = startIndexQuery; i < endIndexQuery; i++) {
        float bestDistance = 999999999.0f;
        float secondBestDistance = 999999999.0f;

        for (int a = startIndexDB; a < endIndexDB; a++) {
            // Only compare interest points with matching sign
            if (sign1[i] == sign2[a]) {
                // Squared Euclidean distance over the 64-float descriptors
                distance = 0;
                for (int b = 0; b < 64; b++) {
                    delta = descriptors1[b + (i * 64)] - descriptors2[b + (a * 64)];
                    distance += (delta * delta);
                }
                if (distance < bestDistance) {
                    secondBestDistance = bestDistance;
                    bestDistance = distance;
                } else if (distance < secondBestDistance) {
                    secondBestDistance = distance;
                }
            }
        }

        // Ratio test on the best and second-best distances
        if (bestDistance < 0.6f * secondBestDistance) {
            anzahlMatches[blockIdx.x]++;
        }
    }
}
//__global__ void add(int *a, int *b, int *c, int **d) {
// c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
//}
/* Fill an int array of length 'anzahl' with zeros (randomisation is
 * currently disabled - the rand() call is commented out by design). */
void random_ints(int* a, int anzahl){
    for (int idx = 0; idx < anzahl; ++idx)
        a[idx] = 0;
}
/* Fill a float array of length 'anzahl' with the constant 0.0005f
 * (randomisation is currently disabled - rand() is commented out). */
void random_floats(float* a, int anzahl){
    for (int idx = 0; idx < anzahl; ++idx)
        a[idx] = 0.0005f;
}
/* Set every element of a bool array of length 'anzahl' to true. */
void random_bools(bool* a, int anzahl){
    for (int idx = 0; idx < anzahl; ++idx)
        a[idx] = true;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
/**
 * Host function that prepares the data arrays and passes them to the CUDA
 * matcher kernel.
 * BUG FIX: the original leaked every host allocation and never freed
 * d_anzahlMatches; all allocations are now released before exit.
 */
int main(void) {
    printf("Start ...\n");

    // Host variables
    float *x1, *y1, *strength1, *trace1, *scale1, *descriptors1, *x2, *y2, *strength2, *trace2, *scale2, *descriptors2;
    bool *sign1, *sign2;
    int *anzahlMatches;

    // Device variables
    float *d_x1, *d_y1, *d_strength1, *d_trace1, *d_scale1, *d_descriptors1, *d_x2, *d_y2, *d_strength2, *d_trace2, *d_scale2, *d_descriptors2;
    bool *d_sign1, *d_sign2;
    int *d_anzahlMatches;

    // Allocate cuda space
    printf("Start allocating space for cuda...\n");
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_x1, NUMBER_OF_QUERY_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_y1, NUMBER_OF_QUERY_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_strength1, NUMBER_OF_QUERY_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_trace1, NUMBER_OF_QUERY_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_scale1, NUMBER_OF_QUERY_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_descriptors1, NUMBER_OF_QUERY_IPS * sizeof(float) * 64));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_sign1, NUMBER_OF_QUERY_IPS * sizeof(bool)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_x2, NUMBER_OF_DATABASE_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_y2, NUMBER_OF_DATABASE_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_strength2, NUMBER_OF_DATABASE_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_trace2, NUMBER_OF_DATABASE_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_scale2, NUMBER_OF_DATABASE_IPS * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_descriptors2, NUMBER_OF_DATABASE_IPS * sizeof(float) * 64));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_sign2, NUMBER_OF_DATABASE_IPS * sizeof(bool)));
    CUDA_CHECK_RETURN(cudaMalloc((void** ) &d_anzahlMatches, NUMBER_OF_THREADS * sizeof(int)));
    printf("Allocating space for cuda finished!\n");

    // Create space for the query-data
    printf("Start allocating space for host...\n");
    x1 = (float *)malloc(NUMBER_OF_QUERY_IPS * sizeof(float)); random_floats(x1, NUMBER_OF_QUERY_IPS);
    y1 = (float *)malloc(NUMBER_OF_QUERY_IPS * sizeof(float)); random_floats(y1, NUMBER_OF_QUERY_IPS);
    strength1 = (float *)malloc(NUMBER_OF_QUERY_IPS * sizeof(float)); random_floats(strength1, NUMBER_OF_QUERY_IPS);
    trace1 = (float *)malloc(NUMBER_OF_QUERY_IPS * sizeof(float)); random_floats(trace1, NUMBER_OF_QUERY_IPS);
    scale1 = (float *)malloc(NUMBER_OF_QUERY_IPS * sizeof(float)); random_floats(scale1, NUMBER_OF_QUERY_IPS);
    descriptors1 = (float *)malloc(NUMBER_OF_QUERY_IPS * sizeof(float) * 64); random_floats(descriptors1, NUMBER_OF_QUERY_IPS * 64);
    sign1 = (bool *)malloc(NUMBER_OF_QUERY_IPS * sizeof(bool)); random_bools(sign1, NUMBER_OF_QUERY_IPS);
    x2 = (float *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(float)); random_floats(x2, NUMBER_OF_DATABASE_IPS);
    y2 = (float *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(float)); random_floats(y2, NUMBER_OF_DATABASE_IPS);
    strength2 = (float *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(float)); random_floats(strength2, NUMBER_OF_DATABASE_IPS);
    trace2 = (float *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(float)); random_floats(trace2, NUMBER_OF_DATABASE_IPS);
    scale2 = (float *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(float)); random_floats(scale2, NUMBER_OF_DATABASE_IPS);
    descriptors2 = (float *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(float) * 64); random_floats(descriptors2, NUMBER_OF_DATABASE_IPS * 64);
    sign2 = (bool *)malloc(NUMBER_OF_DATABASE_IPS * sizeof(bool)); random_bools(sign2, NUMBER_OF_DATABASE_IPS);
    anzahlMatches = (int *)malloc(NUMBER_OF_THREADS * sizeof(int));

    // Zero-fill the current match count
    for(int i = 0; i < NUMBER_OF_THREADS; i++){
        anzahlMatches[i] = 0;
    }
    printf("Start allocating space for host finished!\n");

    // Copy data to device
    printf("Start copying stuff to the gpu...\n");
    CUDA_CHECK_RETURN(cudaMemcpy(d_x1, x1, NUMBER_OF_QUERY_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_y1, y1, NUMBER_OF_QUERY_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_strength1, strength1, NUMBER_OF_QUERY_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_trace1, trace1, NUMBER_OF_QUERY_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_scale1, scale1, NUMBER_OF_QUERY_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_descriptors1, descriptors1, NUMBER_OF_QUERY_IPS * sizeof(float) * 64, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_sign1, sign1, NUMBER_OF_QUERY_IPS * sizeof(bool), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_x2, x2, NUMBER_OF_DATABASE_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_y2, y2, NUMBER_OF_DATABASE_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_strength2, strength2, NUMBER_OF_DATABASE_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_trace2, trace2, NUMBER_OF_DATABASE_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_scale2, scale2, NUMBER_OF_DATABASE_IPS * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_descriptors2, descriptors2, NUMBER_OF_DATABASE_IPS * sizeof(float) * 64, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_sign2, sign2, NUMBER_OF_DATABASE_IPS * sizeof(bool), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_anzahlMatches, anzahlMatches, NUMBER_OF_THREADS * sizeof(int), cudaMemcpyHostToDevice));
    printf("Copying stuff to the gpu finished!\n");

    printf("Start image-recognition ...\n");
    bruteForceMatcher<<<NUMBER_OF_THREADS, 1>>>(d_x1, d_y1, d_strength1, d_trace1, d_sign1, d_scale1, d_descriptors1,
            d_x2, d_y2, d_strength2, d_trace2, d_sign2, d_scale2, d_descriptors2, d_anzahlMatches);
    printf("Image-recognition ready!");
    CUDA_CHECK_RETURN(cudaThreadSynchronize()); // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(cudaGetLastError());
    CUDA_CHECK_RETURN(cudaMemcpy(anzahlMatches, d_anzahlMatches, NUMBER_OF_THREADS * sizeof(int), cudaMemcpyDeviceToHost));

    for(int i = 0; i < NUMBER_OF_THREADS; i++){
        printf("Ergebnis: %d\n", anzahlMatches[i]);
    }

    // Release device memory (d_anzahlMatches was previously never freed)
    CUDA_CHECK_RETURN(cudaFree(d_x1));
    CUDA_CHECK_RETURN(cudaFree(d_y1));
    CUDA_CHECK_RETURN(cudaFree(d_strength1));
    CUDA_CHECK_RETURN(cudaFree(d_trace1));
    CUDA_CHECK_RETURN(cudaFree(d_scale1));
    CUDA_CHECK_RETURN(cudaFree(d_descriptors1));
    CUDA_CHECK_RETURN(cudaFree(d_sign1));
    CUDA_CHECK_RETURN(cudaFree(d_x2));
    CUDA_CHECK_RETURN(cudaFree(d_y2));
    CUDA_CHECK_RETURN(cudaFree(d_strength2));
    CUDA_CHECK_RETURN(cudaFree(d_trace2));
    CUDA_CHECK_RETURN(cudaFree(d_scale2));
    CUDA_CHECK_RETURN(cudaFree(d_descriptors2));
    CUDA_CHECK_RETURN(cudaFree(d_sign2));
    CUDA_CHECK_RETURN(cudaFree(d_anzahlMatches));

    // Release host memory (previously leaked)
    free(x1); free(y1); free(strength1); free(trace1); free(scale1); free(descriptors1); free(sign1);
    free(x2); free(y2); free(strength2); free(trace2); free(scale2); free(descriptors2); free(sign2);
    free(anzahlMatches);

    CUDA_CHECK_RETURN(cudaDeviceReset());
    printf("End!\n");
    return 0;
}
|
2,632 | #include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cstdio>
#include <cstdlib>
#define ARR_I_J(arr, i, j) arr[(i) * (K) + (j)]
#define ARR_I_J_W(arr, i, j, W) arr[(i) * (L) + (j)]
//#define N_DEBUG
// One cell of the dynamic-programming lattice built by detect_cuda_vwii:
// records which video this candidate belongs to, the best accumulated path
// score ending at this cell, and the lattice coordinates of the predecessor
// cell on that path (-1 = no predecessor / path start).
struct PathNode
{
int prevTimestamp; // candidate rank 'k' of the predecessor cell (-1 = path start)
int prevQueryIdx; // query row 'l' of the predecessor cell (-1 = path start)
int videoId; // video index of this candidate (-1 = empty cell)
float score; // best accumulated score of a path ending here
public:
// Default: empty cell with no predecessor and zero score.
PathNode() : prevTimestamp(-1), prevQueryIdx(-1), videoId(-1), score(0)
{
}
// Path-start cell for a given video with its own match score.
PathNode(int _videoId, float _score) : prevTimestamp(-1), prevQueryIdx(-1), videoId(_videoId), score(_score)
{
}
};
// Dynamic-programming path detection over an L x K lattice (L query frames,
// K ranked reference candidates each). Each thread owns 'offset' consecutive
// columns; for every cell in its columns it scans the previous tmp_wnd query
// rows for the best same-video predecessor with a strictly smaller reference
// timestamp and extends that path's score. The per-video best path endpoint
// and score are tracked in d_last_queryidx_list / d_last_timestamp_list /
// d_res_score_list.
// NOTE(review): cells written for row i are read by OTHER threads at later
// rows, separated only by the per-row __syncthreads(), which is block-local;
// this appears to assume a single-block launch - confirm against callers.
// NOTE(review): the d_res_score_list read-compare-update is not atomic, so
// threads handling different columns of the same video may race - confirm
// this is acceptable for this workload.
__global__ void detect_cuda_vwii(const int* d_ref_index, const float* d_ref_score, int* const d_video_idx, PathNode* d_PN, const int L, const int K, int tmp_wnd, const int offset,
int* d_last_timestamp_list, int* d_last_queryidx_list, float* d_res_score_list
)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Column range [s_idx, e_idx) handled by this thread, clamped to K
int s_idx = tid * offset;
int e_idx = s_idx + offset;
if (e_idx > K)
e_idx = K;
#ifdef N_DEBUG
printf("[tid:%d] %d ~ %d(%d)\n", tid, s_idx, e_idx, offset);
#endif
if (tid >= K)
{
return;
}
// Row 0 is pre-initialised on the host; the recurrence starts at row 1
for (int i = 1; i < L; i++)
{
for (int j = s_idx; j < e_idx; j++)
{
int curr_videoidx = ARR_I_J(d_video_idx, i, j);
if (curr_videoidx == -1)
{
continue;
}
float curr_score = ARR_I_J(d_ref_score, i, j);
int curr_timpstamp = ARR_I_J(d_ref_index, i, j);
// Default: this cell starts a new path with its own score
float max_score = curr_score;
int prev_i = -1; //query idx
int prev_j = -1; //ref timestamp
// Search window: up to tmp_wnd preceding query rows
int start_idx = i - tmp_wnd >= 0 ? i - tmp_wnd : 0;
#ifdef N_DEBUG
//printf("start %d -> %d\n", start_idx, i);
#endif
for (int l = start_idx; l < i; l++)
{
for (int k = 0; k < K; k++)
{
int prev_videoidx = ARR_I_J(d_video_idx, l, k);
if (curr_videoidx != prev_videoidx)
{
continue;
}
if (prev_videoidx == -1)
{
continue;
}
float prev_score = ARR_I_J(d_PN, l, k).score; // score of the path reaching prev
int prev_timestamp = ARR_I_J(d_ref_index, l, k);
// Paths must move forward in reference time
if (prev_timestamp >= curr_timpstamp)
continue;
//if (prev_timestamp <= curr_timpstamp - tmp_wnd)
//continue;
if (max_score < prev_score + curr_score)
{
//printf("updeted(prev score %f -> %f\n", max_score, prev_score + curr_score);
max_score = prev_score + curr_score;
prev_i = l;
prev_j = k;
}
}
}
// Commit the best extension found for this cell
ARR_I_J(d_PN, i, j).prevQueryIdx = prev_i;
ARR_I_J(d_PN, i, j).prevTimestamp = prev_j;
ARR_I_J(d_PN, i, j).videoId = curr_videoidx;
ARR_I_J(d_PN, i, j).score = max_score;
// Track the per-video best path endpoint (non-atomic; see header note)
if (d_res_score_list[curr_videoidx] <= max_score)
{
d_last_queryidx_list[curr_videoidx] = i;
d_last_timestamp_list[curr_videoidx] = j;
d_res_score_list[curr_videoidx] = max_score;
}
}
__syncthreads();
}
#ifdef N_DEBUG
printf("==d_last_queryidx_list\n");
for (int j = 0 ; j < K; j++)
{
printf("%d ", d_last_queryidx_list[j]);
}
printf("\n\n");
printf("==d_last_timestamp_list\n");
for (int j = 0 ; j < K; j++)
{
printf("%d ", d_last_timestamp_list[j]);
}
printf("\n\n");
printf("==d_res_score_list\n");
for (int j = 0 ; j < K; j++)
{
printf("%f ", d_res_score_list[j]);
}
printf("\n\n");
#endif
}
// Backtracks the best path for each video t: starting from the endpoint in
// d_last_queryidx_list[t] / d_last_timestamp_list[t], follows the PathNode
// predecessor links, writing each step's score, query index and reference
// timestamp into d_res_scores / d_res_q / d_res_p, then reverses them into
// forward order; d_match[t] receives the path length. Each thread handles
// 'offset' consecutive video ids.
// NOTE(review): ARR_I_J_W expands to arr[(i)*(L)+(j)] and IGNORES its W
// argument, so the 'video_num' passed during the reversal below is silently
// treated as the stride L - confirm the result arrays are L-strided.
// NOTE(review): the early-exit guard compares tid against K, not against
// the number of videos - confirm K >= ceil(video_num/offset) at call sites.
__global__ void update_result(const int* d_ref_index, const float* d_ref_score, int* const d_video_idx, PathNode* d_PN, const int L, const int K, const int video_num,
int* d_last_timestamp_list, int* d_last_queryidx_list, float* d_res_score_list, const int offset, int* d_res_q, int* d_res_p, float* d_res_scores, int* d_match
)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Video-id range [s_idx, e_idx) handled by this thread, clamped to video_num
int s_idx = tid * offset;
int e_idx = s_idx + offset;
if (e_idx > video_num)
e_idx = video_num;
#ifdef N_DEBUG
printf("[tid:%d]update_result %d ~ %d(%d) %d\n", tid, s_idx, e_idx, offset , video_num);
#endif
if (tid >= K)
{
return;
}
for(int t = s_idx; t < e_idx; t++)
{
int last_timestamp = d_last_timestamp_list[t];
int last_queryidx = d_last_queryidx_list[t];
// -1 endpoint means no path was recorded for this video
if (last_queryidx == -1) continue;
//printf("[%d](%d %d),\n", t, last_queryidx, last_timestamp);
int p_i = last_queryidx;
int p_j = last_timestamp;
int idx = 0;
int match_cnt = 0;
// Walk the predecessor chain back to the path start (prevQueryIdx == -1)
while(p_i != -1)
{
//printf("(%d %d)->", p_i, p_j);
match_cnt++;
ARR_I_J_W(d_res_scores, t, idx, L) = ARR_I_J(d_ref_score, p_i, p_j);
ARR_I_J_W(d_res_q, t, idx, L) = p_i;
ARR_I_J_W(d_res_p, t, idx++, L) = ARR_I_J(d_ref_index, p_i, p_j);
int c_i = ARR_I_J(d_PN, p_i, p_j).prevQueryIdx;
int c_j = ARR_I_J(d_PN, p_i, p_j).prevTimestamp;
p_i = c_i;
p_j = c_j;
}
//printf("\n");
//reverse array (backtracking produced the path newest-first)
for(int i = 0; i < match_cnt / 2; i++)
{
int temp = ARR_I_J_W(d_res_q, t, match_cnt - 1 - i, video_num);
ARR_I_J_W(d_res_q, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_q, t, i, video_num);
ARR_I_J_W(d_res_q, t, i, video_num) = temp;
temp = ARR_I_J_W(d_res_p, t, match_cnt - 1 - i, video_num);
ARR_I_J_W(d_res_p, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_p, t, i, video_num);
ARR_I_J_W(d_res_p, t, i, video_num) = temp;
float f_temp = ARR_I_J_W(d_res_scores, t, match_cnt - 1 - i, video_num);
ARR_I_J_W(d_res_scores, t, match_cnt - 1 - i, video_num) = ARR_I_J_W(d_res_scores, t, i, video_num);
ARR_I_J_W(d_res_scores, t, i, video_num) = f_temp;
}
d_match[t] = match_cnt;
}
__syncthreads();
}
#ifdef __cplusplus
extern "C" {
#endif
void call_kernel(int* _ref_index, float* _ref_score, int* _video_idx, int L, int K, float score_thr, int tmp_wnd, int video_num, int n_block, int n_thread,
int* res_q, int* res_p, float* result_score_path, int* match, float* score)
{
//printf("call_kernel called! L : %d K : %d vn : %d\n",L, K, video_num);
int* h_listidx_list = (int*)calloc(K, sizeof(int));
int* h_queryidx_list = (int*)calloc(K, sizeof(int));
float* h_maxscore_list = (float*)calloc(K, sizeof(float));
memset(h_listidx_list, -1, K * sizeof(int));
memset(h_queryidx_list, -1, K * sizeof(int));
int* d_ref_index;
float* d_ref_score;
int* d_video_idx;
PathNode* d_PN;
int* d_lastidx_list;
int* d_last_queryidx_list;
float* d_maxscore_list;
int* d_res_q;
int* d_res_p;
float* d_res_scores;
int* d_match;
cudaMalloc((void**)&d_ref_index, L * K * sizeof(int));
cudaMalloc((void**)&d_ref_score, L * K * sizeof(float));
cudaMalloc((void**)&d_video_idx, L * K * sizeof(int));
cudaMalloc((void**)&d_PN, L * K * sizeof(PathNode));
cudaMalloc((void**)&d_lastidx_list, video_num * sizeof(int));
cudaMalloc((void**)&d_last_queryidx_list, video_num * sizeof(int));
cudaMalloc((void**)&d_maxscore_list, video_num * sizeof(float));
cudaMalloc((void**)&d_res_q, L * video_num * sizeof(int));
cudaMalloc((void**)&d_res_p, L * video_num * sizeof(int));
cudaMalloc((void**)&d_res_scores, L * video_num * sizeof(float));
cudaMalloc((void**)&d_match, video_num * sizeof(int));
/*
* set variables
*/
PathNode* h_PN = new PathNode[L * K];
//printf("update first row\n");
for (int i = 0; i < K; i++)
{
ARR_I_J(h_PN, 0, i).videoId = ARR_I_J(_video_idx, 0, i);
ARR_I_J(h_PN, 0, i).score = ARR_I_J(_ref_score, 0, i);
//printf("%f ", ARR_I_J(_ref_score, 0, i));
}
//printf("\n");
#ifdef N_DEBUG
printf("K : %d\n", K);
printf("vidnum : %d\n", video_num);
printf("====== ARR_I_J(_ref_score, i, j)\n");
for (int i = 0 ; i < L; i++)
{
printf("[%d]\t", i);
for (int j = 0 ; j < K; j++)
{
printf("%f ", ARR_I_J(_ref_score, i, j));
}
printf("\n");
}
#endif
cudaMemcpy(d_lastidx_list, h_listidx_list, K * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_last_queryidx_list, h_queryidx_list, K * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_maxscore_list, h_maxscore_list, K * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_ref_index, _ref_index, L * K * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_ref_score, _ref_score, L * K * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_video_idx, _video_idx, L * K * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_PN, h_PN, L * K * sizeof(PathNode), cudaMemcpyHostToDevice);
cudaMemcpy(d_res_q, res_q, L * video_num * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_res_p, res_p, L * video_num * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_res_scores, result_score_path, L * video_num * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_match, match, video_num * sizeof(int), cudaMemcpyHostToDevice);
int offset = K / (n_block * n_thread);
if (offset && K % (n_block * n_thread) != 0)
offset += 1;
offset = offset ? offset : 1;
cudaDeviceSynchronize();
detect_cuda_vwii<<<n_block, n_thread>>>(
d_ref_index,
d_ref_score,
d_video_idx,
d_PN,
L,
K,
tmp_wnd,
offset,
d_lastidx_list,
d_last_queryidx_list,
d_maxscore_list
);
cudaDeviceSynchronize();
#ifdef N_DEBUG
printf("=== ARR_I_J(h_PN, i, j).score\n");
for (int i = 0 ; i < L; i++)
{
for (int j = 0 ; j < K; j++)
{
printf("%f ", ARR_I_J(h_PN, i, j).score);
}
printf("\n");
}
printf("=====ARR_I_J(h_PN, i, j).prevTimestamp \n");
for (int i = 0 ; i < L; i++)
{
for (int j = 0 ; j < K; j++)
{
printf("%d ", ARR_I_J(h_PN, i, j).prevTimestamp);
}
printf("\n");
}
printf("=====ARR_I_J(h_PN, i, j).prevQueryIdx \n");
for (int i = 0 ; i < L; i++)
{
for (int j = 0 ; j < K; j++)
{
printf("%d ", ARR_I_J(h_PN, i, j).prevQueryIdx);
}
printf("\n");
}
printf("===match\n");
for (int j = 0 ; j < video_num; j++)
{
printf("%d(%d) ", match[j], j);
}
printf("\n");
printf("===res_p\n");
for (int i = 0 ; i < video_num; i++)
{
for (int j = 0 ; j < match[i]; j++)
{
printf("%d ", ARR_I_J_W(res_p, i, j, video_num));
}
printf("\n");
}
#endif
offset = video_num / (n_block * n_thread);
if (offset && video_num % (n_block * n_thread) != 0)
offset += 1;
offset = offset ? offset : 1;
update_result<<<n_block, n_thread>>>(
d_ref_index,
d_ref_score,
d_video_idx,
d_PN,
L,
K,
video_num,
d_lastidx_list,
d_last_queryidx_list,
d_maxscore_list,
offset,
d_res_q,
d_res_p,
d_res_scores,
d_match
);
cudaDeviceSynchronize();
//cudaMemcpy(_ref_index, d_ref_index, L * K * sizeof(int), cudaMemcpyDeviceToHost);
//cudaMemcpy(_ref_score, d_ref_score, L * K * sizeof(float), cudaMemcpyDeviceToHost);
//cudaMemcpy(_video_idx, d_video_idx, L * K * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_PN, d_PN, L * K * sizeof(PathNode), cudaMemcpyDeviceToHost);
cudaMemcpy(res_q, d_res_q, L * video_num * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(res_p, d_res_p, L * video_num * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(result_score_path, d_res_scores, L * video_num * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(score, d_maxscore_list, video_num * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(match, d_match, video_num * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_ref_index);
cudaFree(d_ref_score);
cudaFree(d_video_idx);
cudaFree(d_PN);
cudaFree(d_lastidx_list);
cudaFree(d_last_queryidx_list);
cudaFree(d_maxscore_list);
cudaFree(d_res_q);
cudaFree(d_res_p);
cudaFree(d_res_scores);
cudaFree(d_match);
}
// Debug helper: fills an L x K array (accessed via the ARR_I_J macro) with
// the constant 100 and echoes each element as it is written.
void foo(int* arr2d, int L, int K)
{
    printf("fun called!\n");
    for (int row = 0; row < L; ++row)
    {
        for (int col = 0; col < K; ++col)
        {
            ARR_I_J(arr2d, row, col) = 100;
            printf("%d ", ARR_I_J(arr2d, row, col));
        }
        printf("\n");
    }
}
#ifdef __cplusplus
}
#endif |
2,633 | /*
* HxUpdaterTM.cpp
*
* Created on: 11 янв. 2016 г.
* Author: aleksandr
*/
#include "HxUpdaterTM.h"
#include "SmartIndex.h"
#include <thrust/device_vector.h>
#include <thrust/functional.h>
// o o o o o
// o o o o o
// o o o o o
// o o o o o
// x x x x x
__host__ __device__
void HxUpdaterTM::operator() (const int indx) {
    // m and n are indices into the full-size field arrays;
    // sizeY is the size of the full array (translated from Russian comments).
    int m = indx / (sizeY - 1);
    int n = indx % (sizeY - 1);
    // Float literal: the original `S / 377.0` divided in double and then
    // narrowed, needlessly promoting the device arithmetic to double.
    // 377 ohms is (approximately) the impedance of free space.
    float Chxe = S / 377.0f;
    // Update Hx from the spatial difference of Ez (FDTD-style TM update).
    Hx(m, n) = Hx(m, n) - Chxe * (Ez(m, n + 1) - Ez(m, n));
}
|
2,634 | #include "cuda.h"
// Integer square; tiny helper, forced inline for device callers.
__device__ __forceinline__ int sqr(int const x) { int const squared = x * x; return squared; }
// Call with 2D thread-organization. Could be called with 1D arrangement but this is for exercise.
// Clears every pixel of a frame_width x frame_height uchar3 frame to black.
// Call with a 2D thread organization covering the frame. Could be called
// with a 1D arrangement but this is for exercise.
__global__ void clearFrame(uchar3 *const frame, int const frame_width, int const frame_height)
{
    int const x = blockIdx.x * blockDim.x + threadIdx.x;
    int const y = blockIdx.y * blockDim.y + threadIdx.y;
    if(x < frame_width && y < frame_height) {
        // Index with the frame's own row pitch. The original used
        // gridDim.x * blockDim.x as the row size, which addresses (and can
        // write) out of bounds whenever the launch grid is wider than the
        // frame, and scrambles pixel positions even when it is not.
        int const offset = y * frame_width + x;
        frame[offset] = make_uchar3(0u, 0u, 0u);
    }
}
// Draws a filled circle of the given radius centered at (x0, y0) into the
// frame. Threads are mapped onto the circle's bounding box, so the launch
// must cover at least (2*radius+1) x (2*radius+1) threads.
__global__ void drawCircle(int const x0, int const y0, int const radius, uchar3 const color,
                           uchar3 *const frame, int const frame_width, int const frame_height)
{
    int const tx = blockIdx.x * blockDim.x + threadIdx.x;
    int const ty = blockIdx.y * blockDim.y + threadIdx.y;
    // Shift thread coordinates so (tx, ty) == (radius, radius) lands on the center.
    int const px = x0 + (tx - radius);
    int const py = y0 + (ty - radius);
    bool const inside_frame = px >= 0 && px < frame_width && py >= 0 && py < frame_height;
    if (inside_frame) {
        int const dist_sqr = sqr(px - x0) + sqr(py - y0);
        // Compare squared distances to avoid a square root.
        if (dist_sqr <= sqr(radius)) {
            frame[py * frame_width + px] = color;
        }
    }
}
// Fills a rectangle anchored at (x0, y0) with the given color; each thread
// paints one pixel, so the block dimensions determine the painted extent.
// NOTE(review): x1/y1 are accepted but never used — presumably the intended
// bottom-right corner; confirm against the caller before relying on them.
__global__ void drawRect(int const x0, int const y0,
                         int const x1, int const y1,
                         uchar3 const color,
                         uchar3 *const frame, int const frame_width, int const frame_height)
{
    int const px = x0 + threadIdx.x;
    int const py = y0 + threadIdx.y;
    // Clip against the frame bounds.
    if (px < 0 || px >= frame_width || py < 0 || py >= frame_height)
        return;
    frame[py * frame_width + px] = color;
}
|
2,635 | /**
* Add 2 arrays of 100 elements on the device.
*/
#include <iostream>
#include <vector>
#include <algorithm>
// In-place element-wise addition: v0[tid] += v1[tid] for tid < size.
// Designed for a single-block launch (only threadIdx.x is used).
__global__ void vecadd( int * v0, int * v1, std::size_t size )
{
    auto tid = threadIdx.x;
    // Bounds guard: the original indexed unconditionally, reading/writing
    // out of range whenever the launch had more threads than elements.
    if( tid < size )
        v0[ tid ] += v1[ tid ];
}
// Host driver: adds two 100-element vectors on the device and prints the sum.
int main()
{
    constexpr std::size_t count = 100;
    std::vector< int > v0( count );
    std::vector< int > v1( count );
    // Both vectors hold 0..99, so v0[i] ends up as 2*i.
    for( std::size_t i = 0 ; i < count ; ++i )
    {
        v0[ i ] = static_cast< int >( i );
        v1[ i ] = static_cast< int >( i );
    }
    std::size_t const bytes = count * sizeof( int );
    int * v0_d = nullptr;
    int * v1_d = nullptr;
    cudaMalloc( &v0_d, bytes );
    cudaMalloc( &v1_d, bytes );
    cudaMemcpy( v0_d, v0.data(), bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( v1_d, v1.data(), bytes, cudaMemcpyHostToDevice );
    vecadd<<< 1, count >>>( v0_d, v1_d, count );
    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy( v0.data(), v0_d, bytes, cudaMemcpyDeviceToHost );
    for( auto const x : v0 )
    {
        std::cout << x << std::endl;
    }
    cudaFree( v0_d );
    cudaFree( v1_d );
    return 0;
}
2,636 | //compile with:
#include <stdio.h>
#define Blocksize 10
__global__ void compute( char*, char*);
__device__ __host__ void algorithm(char*, char*);
__host__
int main (void)
{
char* targets;
char* targets2;
char* result;
char* result2;
int size = Blocksize * sizeof(char);
//speicherreservieren
cudaMalloc((void **) &targets2, size);
cudaMalloc((void **) &result2, size);
targets = (char *) malloc(size);
result = (char *) malloc(size);
targets[0] = 'A';
targets[1] = 'B';
targets[2] = 'C';
targets[3] = 'D';
targets[4] = 'E';
targets[5] = 'F';
targets[6] = 'G';
targets[7] = 'H';
targets[8] = 'I';
targets[9] = 'J';
cudaMemcpy(targets2, targets, size, cudaMemcpyHostToDevice);
cudaMemcpy(result2, result, size, cudaMemcpyHostToDevice);
compute<<<1,Blocksize>>>(targets2,result2);
cudaMemcpy(result, result2, size, cudaMemcpyDeviceToHost);
cudaMemcpy(targets, targets2, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < Blocksize; ++i)
{
printf("%c\t",targets[i]);
printf("%c\n",result[i]);
}
cudaFree(targets2);
cudaFree(result2);
return 0;
}
// One thread per candidate character. Thread 0 publishes the solution
// character into shared memory; after a block-wide barrier every thread
// marks result[tid] with '+' if its target matches the solution, '-' otherwise.
__global__
void compute(char* target, char* result){
    __shared__ char solution[1];
    if (threadIdx.x == 0){
        solution[0] = 'D';
    }
    // Barrier OUTSIDE any divergent branch. The original called
    // __syncthreads() inside both sides of an if/else (undefined behavior:
    // not all threads reach the same barrier) and, worse, let threads read
    // solution[0] before thread 0 had written it — a shared-memory race.
    __syncthreads();
    if (target[threadIdx.x] == solution[0]){
        result[threadIdx.x] = '+';
    }
    else{
        result[threadIdx.x] = '-';
    }
    // Note: thread 0 now participates in the comparison too; the original
    // unconditionally wrote '-' for slot 0, which only happened to be right
    // because target[0] ('A') is not the solution.
}
// Placeholder algorithm (original note: "vorläufig" = preliminary);
// currently leaves the result buffer unchanged.
__device__ __host__
void algorithm(char* input, char* result){
    (void)input;                 // unused until a real algorithm exists
    result[0] = result[0];       // deliberate no-op self-assignment
}
|
2,637 | /*!
\file arrays.cu
\author Andrew Kerr <arkerr@gatech.edu>
\brief tests implementation of cudaMallocArray(), among other things
\date Feb 12, 2010
*/
#include <stdlib.h>
#include <stdio.h>
//////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
// Round-trips a 1024x512 float image through a cudaArray via
// cudaMemcpyToArray/cudaMemcpyFromArray and checks the data survives intact.
// Returns true when no element mismatched (within 0.001f); when `verbose`,
// prints up to five mismatches.
bool testMemcpy(bool verbose) {
    bool passed = true;
    int width = 1024, height = 512;
    int errors = 0;
    cudaChannelFormatDesc channel = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaArray *cuArray;
    cudaMallocArray(&cuArray, &channel, width, height);
    srand(7);  // fixed seed for reproducibility
    size_t bytes = sizeof(float) * width * height;
    float *hostSource = new float[width * height];
    float *hostDest = new float[width * height];
    // Fill the source with pseudo-random values; poison the destination with -1.
    for (int j = 0; j < height; j++) {
        float *ptr = hostSource + j * width;
        float *dstPtr = hostDest + j * width;
        for (int i = 0; i < width; i++) {
            float x = (float)( (rand() % 1024) / 125.0f);
            ptr[i] = x;
            dstPtr[i] = -1.0f;
        }
    }
    cudaMemcpyToArray(cuArray, 0, 0, hostSource, bytes, cudaMemcpyHostToDevice);
    cudaMemcpyFromArray(hostDest, cuArray, 0, 0, bytes, cudaMemcpyDeviceToHost);
    // Compare; stop reporting after five errors.
    for (int j = 0; j < height && errors < 5; j++) {
        float *srcPtr = hostSource + j * width;
        float *dstPtr = hostDest + j * width;
        for (int i = 0; i < width && errors < 5; i++) {
            float expected = srcPtr[i];
            float got = dstPtr[i];
            if (fabs(expected - got) > 0.001f) {
                ++errors;
                if (verbose) {
                    printf("ERROR: (%d, %d) - expected %f, got %f\n", i, j, expected, got);
                    fflush(stdout);
                }
            }
        }
    }
    // BUGFIX: the original counted errors but never updated `passed`,
    // so the test reported PASSED unconditionally.
    passed = (errors == 0);
    cudaFreeArray(cuArray);
    delete [] hostSource;
    delete [] hostDest;
    return passed;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// Entry point: run the memcpy round-trip test verbosely and report the verdict.
int main(int argc, char *arg[]) {
    bool const ok = testMemcpy(true);
    printf(ok ? "Test PASSED\n" : "Test FAILED\n");
    return 0;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
|
2,638 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define THREADS_PER_BLOCK 512
//function declarations
unsigned int getmax(unsigned int *, unsigned int);
__global__ void get_max(unsigned int *num, unsigned int size);
// Fills an array with random values, computes its maximum sequentially and
// with a GPU reduction, and prints both for comparison.
int main(int argc, char *argv[])
{
    unsigned int size = 0;   // The size of the array
    unsigned int i;          // loop index
    unsigned int * numbers;  // pointer to the array
    if(argc !=2)
    {
        printf("usage: maxseq num\n");
        printf("num = size of the array\n");
        exit(1);
    }
    size = atol(argv[1]);
    // BUGFIX: round the block count UP. The original integer division gave
    // 0 blocks for size < 512 (launch failure) and left the tail elements
    // untouched whenever size was not a multiple of THREADS_PER_BLOCK.
    unsigned int NUM_BLOCKS = (size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
    if( !numbers )
    {
        printf("Unable to allocate mem for an array of size %u\n", size);
        exit(1);
    }
    srand(time(NULL)); // seed the random number generator
    // Fill the array with random numbers from 0 to size-1.
    for( i = 0; i < size; i++){
        numbers[i] = rand() % size;
    }
    // Copy the array to device memory.
    unsigned int *d_numbers;
    cudaMalloc((void**) &d_numbers, size * sizeof(unsigned int));
    cudaMemcpy(d_numbers, numbers, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
    // Sequential reference result.
    printf(" The maximum number in the array is: %u\n", getmax(numbers, size));
    // Parallel in-place reduction; the result lands in numbers[0].
    get_max<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(d_numbers, size);
    cudaMemcpy(numbers, d_numbers, size * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    // Sanity sweep: report any element the reduction left above numbers[0].
    for( i = 0; i < size; i++){
        if(numbers[i] > numbers[0]){
            printf("element in %d: %u\n", i, numbers[i]);
        }
    }
    printf("The max integer in the array is: %d\n", numbers[0]);
    cudaFree(d_numbers);
    free(numbers);
    exit(0);
}
// In-place tree reduction: after the kernel, num[0] holds the maximum of
// num[0..size). NOTE: __syncthreads() only synchronizes threads WITHIN a
// block, so the result is only guaranteed correct for a single-block launch
// (size <= blockDim.x); with multiple blocks, threads in different blocks
// race on overlapping elements. A grid-wide reduction needs one kernel
// launch (or atomic merge) per level.
__global__ void get_max(unsigned int* num, unsigned int size){
    unsigned int index = threadIdx.x + (blockDim.x * blockIdx.x);
    unsigned int nTotalThreads = size;
    while(nTotalThreads > 1){
        // Round the half-point UP. The original truncated, which silently
        // dropped the middle element of every odd-length range from the
        // reduction (e.g. for size 5 the element at index 2 was never compared).
        unsigned int halfPoint = (nTotalThreads + 1) / 2;
        // Only the lower half of the active range folds in the upper half.
        if (index < nTotalThreads / 2){
            unsigned int temp = num[index + halfPoint];
            if (temp > num[index]) {
                num[index] = temp;
            }
        }
        __syncthreads();
        nTotalThreads = halfPoint;
    }
}
/*
input: pointer to an array of long int
number of elements in the array
output: the maximum number of the array
*/
/*
    input: pointer to an array of unsigned int
           number of elements in the array
    output: the maximum value in the array (requires size >= 1)
*/
unsigned int getmax(unsigned int num[], unsigned int size)
{
    unsigned int best = num[0];
    for (unsigned int i = 1; i < size; ++i) {
        if (num[i] > best)
            best = num[i];
    }
    return best;
}
|
// Exercises the different ways a uchar4 component can be produced and
// consumed: direct member write, copy-initialization, assignment, array
// element, and an element reached through the pointer argument. Each
// component value is then used as an index into `a`, so the accesses feed
// real address arithmetic. Looks like a compiler/translator test case
// rather than production code — `a` is written but never read back.
__global__ void test_uchar4(uchar4* const c)
{
    int a[5];
    uchar4 val;
    val.x = 10;           // NOTE(review): indexes a[10] on a 5-element array —
    a[val.x] = 42;        // out of bounds; presumably intentional for the test harness
    uchar4 val2 = val;    // copy-initialization
    a[val2.x] = 42;
    uchar4 val3;
    val3 = val;           // assignment after default construction
    a[val3.x] = 42;
    uchar4 val4[3];
    val4[1] = val;        // array-element copy
    a[val4[1].x] = 42;
    c[1].y = 9;           // component writes through the pointer argument
    c[1].w = 3;
    int val5 = c[1].y;    // component read back through the pointer argument
    a[val5] = 42;
}
2,640 |
#include <iostream>
#include "string.h"
#include <cuda.h>
#include "cuda_runtime_api.h"
//===========================================================================//
// Prints an annotated dump of every cudaDeviceProp field for one device,
// framed by separator lines. Silently prints only the header when
// cudaGetDeviceProperties fails (e.g. invalid device ordinal).
void describe ( int device )
{
    cudaDeviceProp device_properties;
    // Zero the struct so any field the driver does not fill reads as 0.
    ::memset( &device_properties, 0, sizeof(device_properties));
    std::cout << "***************************************"
              << "***************************************" << std::endl;
    std::cout << "Device number: " << device << std::endl;
    if ( cudaSuccess ==
         cudaGetDeviceProperties( &device_properties, device ) )
    {
        std::cout << "name: "
                  << "ASCII string identifying device: "
                  << device_properties.name << std::endl;
        std::cout << "totalGlobalMem: "
                  << "Global memory available on device in bytes: "
                  << device_properties.totalGlobalMem << std::endl;
        std::cout << "sharedMemPerBlock: "
                  << "Shared memory available per block in bytes: "
                  << device_properties.sharedMemPerBlock << std::endl;
        std::cout << "regsPerBlock: "
                  << "32-bit registers available per block: "
                  << device_properties.regsPerBlock << std::endl;
        std::cout << "warpSize: "
                  << "Warp size in threads: "
                  << device_properties.warpSize << std::endl;
        std::cout << "memPitch: "
                  << "Maximum pitch in bytes allowed by memory copies: "
                  << device_properties.memPitch << std::endl;
        std::cout << "maxThreadsPerBlock: "
                  << "Maximum number of threads per block: "
                  << device_properties.maxThreadsPerBlock << std::endl;
        std::cout << "maxThreadsDim[3]: "
                  << "Maximum size of each dimension of a block: "
                  << device_properties.maxThreadsDim[0] << " "
                  << device_properties.maxThreadsDim[1] << " "
                  << device_properties.maxThreadsDim[2] << std::endl;
        std::cout << "maxGridSize[3]: "
                  << "Maximum size of each dimension of a grid: "
                  << device_properties.maxGridSize[0] << " "
                  << device_properties.maxGridSize[1] << " "
                  << device_properties.maxGridSize[2] << std::endl;
        std::cout << "clockRate: "
                  << "Clock frequency in kilohertz: "
                  << device_properties.clockRate << std::endl;
        std::cout << "totalConstMem: "
                  << "Constant memory available on device in bytes: "
                  << device_properties.totalConstMem << std::endl;
        std::cout << "major: "
                  << "Major compute capability: "
                  << device_properties.major << std::endl;
        std::cout << "minor: "
                  << "Minor compute capability: "
                  << device_properties.minor << std::endl;
        std::cout << "textureAlignment: "
                  << "Alignment requirement for textures: "
                  << device_properties.textureAlignment << std::endl;
        std::cout << "deviceOverlap: "
                  << "Device can concurrently copy memory and execute a kernel: "
                  << device_properties.deviceOverlap << std::endl;
        std::cout << "multiProcessorCount: "
                  << "Number of multiprocessors on device: "
                  << device_properties.multiProcessorCount << std::endl;
        std::cout << "kernelExecTimeoutEnable: "
                  << "Specified whether there is a run time limit on kernels: "
                  << device_properties.kernelExecTimeoutEnabled << std::endl;
        std::cout << "integrated: "
                  << "Device is integrated as opposed to discrete: "
                  << device_properties.integrated << std::endl;
        std::cout << "canMapHostMemory: "
                  << "Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer: "
                  << device_properties.canMapHostMemory << std::endl;
        std::cout << "computeMode: "
                  << "Compute mode (See ::cudaComputeMode): "
                  << device_properties.computeMode << std::endl;
// Shorthand for the remaining fields: prints "<field>: <description> <value>".
#define OUTPUT(NAME,DESC) \
std::cout << #NAME << ": " << DESC << " " << device_properties.NAME << std::endl;
        OUTPUT(surfaceAlignment,"the alignment requirements for surfaces.")
        OUTPUT(concurrentKernels,"is 1 if the device supports executing multiple kernels within the same context simultaneously, or 0 if not. It is not guaranteed that multiple kernels will be resident on the device concurrently so this feature should not be relied upon for correctness")
        OUTPUT(ECCEnabled,"is 1 if the device has ECC support turned on, or 0 if not.")
        OUTPUT(pciBusID,"the PCI bus identifier of the device")
        OUTPUT(pciDeviceID,"the PCI device (sometimes called slot) identifier of the device")
        OUTPUT(pciDomainID,"the PCI domain identifier of the device")
        OUTPUT(tccDriver,"1 if the device is using a TCC driver or 0 if not")
        OUTPUT(asyncEngineCount,"1 when the device can concurrently copy memory between host and device while executing a kernel. It is 2 when the device can concurrently copy memory between host and device in both directions and execute a kernel at the same time. It is 0 if neither of these is supported.")
        OUTPUT(unifiedAddressing,"1 if the device shares a unified address space with the host and 0 otherwise")
        OUTPUT(memoryClockRate,"the peak memory clock frequency in kilohertz")
        OUTPUT(memoryBusWidth,"the memory bus width in bits")
        OUTPUT(l2CacheSize,"L2 cache size in bytes")
        OUTPUT(maxThreadsPerMultiProcessor,"the number of maximum resident threads per multiprocessor")
    }
    std::cout << "***************************************"
              << "***************************************" << std::endl;
}
//===========================================================================//
// Returns the number of CUDA devices visible to the runtime; the count
// stays 0 if cudaGetDeviceCount does not write it.
int get_count ()
{
    int device_count = 0;
    ::cudaGetDeviceCount( &device_count );
    return device_count;
}
//===========================================================================//
// Prints the properties of every CUDA device present.
// (get_count() is deliberately re-evaluated each iteration, matching the
// original loop structure.)
void describe ()
{
    for ( int dev = 0; dev < get_count(); ++dev )
        describe( dev );
}
//===========================================================================//
// Entry point: dump the properties of all CUDA devices.
int main ()
{
    describe();
    return 0;
}
//===========================================================================//
|
2,641 | /// basic functions
// two sum
// Knuth's branch-free two-sum: hi = fl(a + b) and lo is the exact rounding
// error, so hi + lo == a + b exactly. Correctness depends on the exact
// order of these floating-point operations — do not "simplify".
__device__ void two_sum(float a, float b, float &hi, float &lo){
    hi = a + b; // best guess
    float v = hi - a;
    lo = (a - (hi - v)) + (b - v);
}
// Three-operand two-sum: returns a + b + c as an (hi, lo) pair by chaining
// pairwise two-sums, with a final two_sum to renormalize the result.
__device__ void two_sum(float a, float b, float c, float &hi, float &lo){
    float s,t,u;
    two_sum(b,c,s,t);
    two_sum(a,s,hi,u);
    lo = u + t;
    two_sum(hi,lo,hi,lo); // renormalize so |lo| <= ulp(hi)/2
}
// Four-operand two-sum: accumulates a + b + c + d into (hi, lo) by chaining
// pairwise two-sums and summing the error terms. NOTE(review): unlike the
// 3-operand variant, no final renormalizing two_sum is applied here.
__device__ void two_sum(float a, float b, float c, float d, float &hi, float &lo){
    float t0,t1,t2,t3;
    two_sum(a,b,t0,t1);
    two_sum(t0,c,t0,t2);
    two_sum(t0,d,hi,t3);
    t0 = t1 + t2; // combine the accumulated error terms
    lo = t0 + t3;
}
/*
* Unchecked requirement
* |a| > |b|
*/
// Dekker's fast two-sum: same contract as two_sum (hi + lo == a + b exactly)
// but only valid under the unchecked precondition |a| >= |b|.
__device__ void quick_two_sum(float a, float b, float &hi, float &lo){
    hi = a + b; // floating point guess
    lo = b - (hi - a); // error calculation
}
/*
* Unchecked requirement
* |a| > |b| > |c|
*/
// Three-operand fast two-sum; unchecked precondition |a| > |b| > |c|.
// Chains pairwise quick_two_sums and renormalizes with a final pass.
__device__ void quick_two_sum(float a, float b,float c, float &hi, float &lo){
    float s,t,u;
    quick_two_sum(b,c,s,t);
    quick_two_sum(a,s,hi,u);
    lo = u + t;;
    quick_two_sum(hi,lo,hi,lo); // renormalize
}
/*
* Unchecked requirement
* |a| > |b| > |c| > |d|
*/
// Four-operand fast two-sum; unchecked precondition |a| > |b| > |c| > |d|.
// NOTE(review): like the 4-operand two_sum, no final renormalization.
__device__ void quick_two_sum(float a, float b, float c, float d, float &hi, float &lo){
    float t0,t1,t2,t3;
    quick_two_sum(a,b,t0,t1);
    quick_two_sum(t0,c,t0,t2);
    quick_two_sum(t0,d,hi,t3);
    t0 = t1 + t2; // combine the error terms
    lo = t0 + t3;
}
// Fast two-difference: hi = fl(a - b), lo = exact error, valid when
// |a| >= |b| (same unchecked precondition family as quick_two_sum).
__device__ void quick_two_diff(float a, float b, float &hi, float &lo) {
    hi = a - b;
    lo = (a - hi) - b;
}
// Branch-free two-difference: hi = fl(a - b) and lo is the exact rounding
// error (no magnitude precondition) — the subtraction analogue of two_sum.
__device__ void two_diff(float a, float b, float &hi, float &lo) {
    hi = a - b;
    float v = hi - a;
    lo = (a - (hi - v)) - (b + v);
    // Alternative formulation kept from the original for reference:
    /*hi = a - b;
    float a1 = hi + b;
    float b1 = hi - a1;
    lo = (a - a1) - (b + b1);*/
}
// Three-operand difference as an (hi, lo) pair: computes a + (-b - c),
// i.e. a - b - c, by a two_diff followed by a two_sum, then renormalizes.
__device__ void two_diff(float a, float b, float c, float &hi, float &lo){
    float s,t,u;
    two_diff(-b,c,s,t); // (s, t) ~ -b - c
    two_sum(a,s,hi,u);
    lo = u + t;;
    two_sum(hi,lo,hi,lo); // renormalize
}
// Two-product via FMA: hi = fl(a * b); fmaf(a, b, -hi) computes a*b - hi
// with a single rounding, so lo is the exact rounding error of the product.
__device__ void two_prod(float a, float b, float &hi, float &lo){
    hi = a * b;
    lo = fmaf(a, b, -hi);
}
2,642 | #include "saxpy.cuh"
// Integer "saxpy": C[i] = a * A[i] + B[i] for every i < N,
// one element per thread over a 1D grid.
__global__ void saxpy(const int *A, const int *B, int *C, int N, int a) {
    int const idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return; // guard the grid tail
    C[idx] = a * A[idx] + B[idx];
}
2,643 | #define max(a, b) ((a > b)?a:b)
#define THREADSPERDIM 16
#define FALSE 0
#define TRUE !FALSE
// mX has order rows x cols
// vectY has length rows
// Fits the "restricted" least-squares model for each row index m < county:
// builds the design matrix X (rows x cols, first column forced to 1 for the
// intercept), QR-factorizes it by classical Gram-Schmidt, and solves
// R * B = Q^t * Y by back substitution. Each thread handles one model
// independently; all buffers are strided per-thread via the *dim arguments.
// mX has order rows x cols, vY has length rows (column-major storage).
__global__ void getRestricted(int countx, int county, int rows, int cols,
    float * mX, int mXdim, float * vY, int vYdim, float * mQ, int mQdim,
    float * mR, int mRdim, float * vectB, int vectBdim) {
    int
        m = blockIdx.x * THREADSPERDIM + threadIdx.x, n,
        i, j, k;
    float
        sum, invnorm,
        * X, * Y, * Q, * R, * B,
        * coli, * colj,
        * colQ, * colX;
    if(m >= county) return;
    // NOTE(review): n selects which Y series to use and is 0 only when
    // m == 1 — looks suspicious (why only m == 1?); confirm against the
    // host-side layout before touching this.
    if(m == 1) n = 0;
    else n = 1;
    X = mX + (m * mXdim);
    // initialize the intercepts (first column of X)
    for(i = 0; i < rows; i++)
        X[i] = 1.f;
    Y = vY + (m * countx + n) * vYdim;
    B = vectB + m * vectBdim;
    Q = mQ + m * mQdim;
    R = mR + m * mRdim;
    // initialize Q with X ...
    for(i = 0; i < rows; i++) {
        for(j = 0; j < cols; j++)
            Q[i+j*rows] = X[i+j*rows];
    }
    // Gram-Schmidt process to orthonormalize the columns of Q.
    for(j = 0; j < cols; j++) {
        colj = Q+rows*j;
        for(i = 0; i < j; i++) {
            coli = Q+rows*i;
            // subtract the projection of column j onto column i
            sum = 0.f;
            for(k = 0; k < rows; k++)
                sum += coli[k] * colj[k];
            for(k = 0; k < rows; k++)
                colj[k] -= sum * coli[k];
        }
        // normalize column j
        sum = 0.f;
        for(i = 0; i < rows; i++)
            sum += colj[i] * colj[i];
        invnorm = 1.f / sqrtf(sum);
        for(i = 0; i < rows; i++)
            colj[i] *= invnorm;
    }
    // Walk the rows of R bottom-up so back substitution can run in the
    // same pass that fills R.
    for(i = cols-1; i > -1; i--) {
        colQ = Q+i*rows;
        // matmult Q^t * X -> row i of R
        for(j = 0; j < cols; j++) {
            colX = X+j*rows;
            sum = 0.f;
            for(k = 0; k < rows; k++)
                sum += colQ[k] * colX[k];
            R[i+j*cols] = sum;
        }
        sum = 0.f;
        // compute element i of the vector Q^t * Y
        for(j = 0; j < rows; j++)
            sum += colQ[j] * Y[j];
        // back substitution to find the x for Rx = B
        for(j = cols-1; j > i; j--)
            sum -= R[i+j*cols] * B[j];
        B[i] = sum / R[i+i*cols];
    }
}
// mX has order rows x cols
// vectY has length rows
// Fits the "unrestricted" least-squares model for every (m, n) cell of a
// county x countx grid: same QR-by-Gram-Schmidt + back-substitution scheme
// as getRestricted, but every buffer (X, Y, Q, R, B) is indexed per cell
// (m * countx + n) instead of per row, and the launch is 2D.
// mX has order rows x cols, vY has length rows (column-major storage).
__global__ void getUnrestricted(int countx, int county, int rows, int cols,
    float * mX, int mXdim, float * vY, int vYdim, float * mQ, int mQdim,
    float * mR, int mRdim, float * vectB, int vectBdim) {
    int
        n = blockIdx.x * THREADSPERDIM + threadIdx.x,
        m = blockIdx.y * THREADSPERDIM + threadIdx.y,
        i, j, k;
    float
        sum, invnorm,
        * X, * Y, * Q, * R, * B,
        * coli, * colj,
        * colQ, * colX;
    if((m >= county) || (n >= countx)) return;
    X = mX + (m * countx + n) * mXdim;
    // initialize the intercepts (first column of X)
    for(i = 0; i < rows; i++)
        X[i] = 1.f;
    Y = vY + (m*countx+n) * vYdim;
    B = vectB + (m*countx+n) * vectBdim;
    Q = mQ + (m*countx+n) * mQdim;
    R = mR + (m*countx+n) * mRdim;
    // initialize Q with X ...
    for(i = 0; i < rows; i++) {
        for(j = 0; j < cols; j++)
            Q[i+j*rows] = X[i+j*rows];
    }
    // Gram-Schmidt process to orthonormalize the columns of Q.
    for(j = 0; j < cols; j++) {
        colj = Q+rows*j;
        for(i = 0; i < j; i++) {
            coli = Q+rows*i;
            // subtract the projection of column j onto column i
            sum = 0.f;
            for(k = 0; k < rows; k++)
                sum += coli[k] * colj[k];
            for(k = 0; k < rows; k++)
                colj[k] -= sum * coli[k];
        }
        // normalize column j
        sum = 0.f;
        for(i = 0; i < rows; i++)
            sum += colj[i] * colj[i];
        invnorm = 1.f / sqrtf(sum);
        for(i = 0; i < rows; i++)
            colj[i] *= invnorm;
    }
    // Fill R bottom-up and back-substitute in the same pass.
    for(i = cols-1; i > -1; i--) {
        colQ = Q+i*rows;
        // matmult Q^t * X -> row i of R
        for(j = 0; j < cols; j++) {
            colX = X+j*rows;
            sum = 0.f;
            for(k = 0; k < rows; k++)
                sum += colQ[k] * colX[k];
            R[i+j*cols] = sum;
        }
        sum = 0.f;
        // compute element i of the vector Q^t * Y
        for(j = 0; j < rows; j++)
            sum += colQ[j] * Y[j];
        // back substitution to find the x for Rx = B
        for(j = cols-1; j > i; j--)
            sum -= R[i+j*cols] * B[j];
        B[i] = sum / R[i+i*cols];
    }
}
// Computes an F-type statistic per (i, j) cell comparing the restricted
// model (per-i coefficients) against the unrestricted model (per-(i, j)
// coefficients): residual sums of squares for both fits are accumulated
// over the rows, then combined as ((rSsq - unrSsq) * (rows - 2p - 1)) /
// (p * unrSsq). Non-finite scores and (when diagFlag is false) diagonal
// cells are zeroed. One thread per (i, j) cell over a 2D launch.
__global__ void ftest(int diagFlag, int p, int rows, int colsx, int colsy,
    int rCols, int unrCols, float * obs, int obsDim,
    float * rCoeffs, int rCoeffsDim, float * unrCoeffs, int unrCoeffsDim,
    float * rdata, int rdataDim, float * unrdata, int unrdataDim,
    float * dfStats) // float * dpValues)
{
    int
        j = blockIdx.x * THREADSPERDIM + threadIdx.x,
        i = blockIdx.y * THREADSPERDIM + threadIdx.y,
        idx = i*colsx + j, k, m;
    float
        kobs, fp = (float) p, frows = (float) rows,
        rSsq, unrSsq,
        rEst, unrEst,
        score = 0.f,
        * tObs, * tRCoeffs, * tUnrCoeffs,
        * tRdata, * tUnrdata;
    if((i >= colsy) || (j >= colsx)) return;
    // Skip self-comparisons unless diagFlag asks for them.
    if((!diagFlag) && (i == j)) {
        dfStats[idx] = 0.f;
        // dpValues[idx] = 0.f;
        return;
    }
    // Per-cell base pointers: restricted data is per-i, unrestricted per-(i, j).
    tObs = obs + (i*colsx+j)*obsDim;
    tRCoeffs = rCoeffs + i*rCoeffsDim;
    tRdata = rdata + i*rdataDim;
    tUnrCoeffs = unrCoeffs + (i*colsx+j)*unrCoeffsDim;
    tUnrdata = unrdata + (i*colsx+j)*unrdataDim;
    rSsq = unrSsq = 0.f;
    // Accumulate residual sums of squares for both model fits.
    for(k = 0; k < rows; k++) {
        unrEst = rEst = 0.f;
        kobs = tObs[k];
        for(m = 0; m < rCols; m++)
            rEst += tRCoeffs[m] * tRdata[k+m*rows];
        for(m = 0; m < unrCols; m++)
            unrEst += tUnrCoeffs[m] * tUnrdata[k+m*rows];
        rSsq += (kobs - rEst) * (kobs - rEst);
        unrSsq += (kobs - unrEst) * (kobs - unrEst);
    }
    score = ((rSsq - unrSsq)*(frows-2.f*fp-1.f)) / (fp*unrSsq);
    // Guard against division by zero / overflow producing inf/nan.
    if(!isfinite(score))
        score = 0.f;
    dfStats[idx] = score;
}
|
2,644 | #include <iostream>
#include <ctime>
#include <chrono>
using namespace std;
// Per-element update over n elements: increments approx[index], records the
// current path[index] into top_k, then post-increments path[index].
// BUGFIX: the original looped `for(i = 0; i < sizeof(path); i++)` —
// sizeof of a POINTER (8 bytes), so every guarded thread raced over the
// same first 8 slots of all three arrays instead of touching its own element.
__global__ void initArray(uint32_t * path, double *approx, uint32_t *top_k, int n){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < n){
        approx[index]++;
        top_k[index] = path[index]++;
    }
}
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
// Host driver for initArray: fills host buffers with random data, runs the
// kernel over separate DEVICE buffers, copies the result back, and prints it
// with a wall-clock timing.
// BUGFIXES vs. the original:
//  * cudaMalloc was called on the addresses of HOST stack arrays
//    (approx/path), overwriting their first bytes with device pointers;
//  * the host array top_k was passed straight to the kernel as an output;
//  * three inconsistent sizes were mixed (N = 4M for the launch, 1000 for
//    the arrays, sizeof(array) = 4000 for the print loop → out-of-bounds).
int main(){
    const int count = 1000;                     // actual element count
    const int Dsize = count * sizeof(double);
    const int Usize = count * sizeof(uint32_t);
    double approx[count];
    uint32_t path[count];
    uint32_t DK[count];                         // host copy of the device result
    for(int i = 0; i < count; i++){             // original stopped at 999, leaving
        path[i] = rand();                       // the last slot uninitialized
        approx[i] = rand();
    }
    chrono::time_point<chrono::system_clock> start, end;
    start = chrono::system_clock::now();
    // Device buffers, distinct from the host arrays.
    double *d_approx = NULL;
    uint32_t *d_path = NULL;
    uint32_t *d_top_k = NULL;
    cudaMalloc((void **)&d_approx, Dsize);
    cudaMalloc((void **)&d_path, Usize);
    cudaMalloc((void **)&d_top_k, Usize);
    cudaMemcpy(d_approx, approx, Dsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_path, path, Usize, cudaMemcpyHostToDevice);
    // Ceil-divide so the whole range is covered.
    int blocks = (count + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    initArray<<<blocks, THREADS_PER_BLOCK>>>(d_path, d_approx, d_top_k, count);
    cudaMemcpy(DK, d_top_k, Usize, cudaMemcpyDeviceToHost);
    end = chrono::system_clock::now();
    chrono::duration<double> timevar = end - start;
    for(int i = 0; i < count; i++){
        cout << DK[i] << " " << i << endl;
    }
    cout << endl << "time to complete: " << timevar.count() << " seconds" << endl;
    cudaFree(d_approx);
    cudaFree(d_path);
    cudaFree(d_top_k);
    return 0;
}
|
2,645 | // [header]
// A very basic raytracer example.
// [/header]
// [compile]
// c++ -o raytracer -O3 -Wall raytracer.cpp
// [/compile]
// [ignore]
// Copyright (C) 2012 www.scratchapixel.com
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// [/ignore]
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
#include <cstring>
#include <sys/time.h>
#include <iomanip>
#if defined __linux__ || defined __APPLE__
// "Compiled for Linux
#else
// Windows doesn't define these values by default, Linux does
#define M_PI 3.141592653589793
#define INFINITY 1e8
#endif
#ifndef TILE_WIDTH
#define TILE_WIDTH 32
#endif
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double rtclock()
{
    struct timeval tv;
    struct timezone tz;
    int rc = gettimeofday(&tv, &tz);
    if (rc != 0)
        printf("Error return from gettimeofday: %d", rc);
    return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// Base class whose heap instances (and arrays of them) are placed in CUDA
// unified (managed) memory, so the same pointer is valid on host and device.
class UnifiedMemoryClass
{
public:
    void* operator new(size_t len) {
        void *ptr;
        cudaMallocManaged(&ptr, len);
        return ptr;
    }
    void operator delete(void *ptr) {
        cudaFree(ptr);
    }
    void* operator new[](size_t len) {
        void *ptr;
        cudaMallocManaged(&ptr, len);
        return ptr;
    }
    // BUGFIX: the original declared operator new[] without a matching
    // operator delete[], so `delete[]` on such an array would invoke the
    // global deallocator on a cudaMallocManaged pointer (undefined behavior).
    void operator delete[](void *ptr) {
        cudaFree(ptr);
    }
};
// 3-component float vector usable on host and device; inherits managed-memory
// allocation from UnifiedMemoryClass. Most operators also exist as static
// named variants (mult/add/sub/...) with identical arithmetic.
class Vec3 : public UnifiedMemoryClass
{
public:
    float x, y, z;
    __device__ __host__ Vec3() : x(float(0)), y(float(0)), z(float(0)) {}
    __device__ __host__ Vec3(float xx) : x(xx), y(xx), z(xx) {}
    __device__ __host__ Vec3(float xx, float yy, float zz) : x(xx), y(yy), z(zz) {}
    // Normalizes in place; leaves the zero vector unchanged. Returns *this.
    __device__ Vec3& normalize()
    {
        float nor2 = length2();
        if (nor2 > 0) {
            float invNor = 1 / sqrt(nor2);
            x *= invNor, y *= invNor, z *= invNor;
        }
        return *this;
    }
    __device__ Vec3 operator * (const float &f) const { return Vec3(x * f, y * f, z * f); }
    __device__ static Vec3 mult (const float &f, const Vec3 v) {
        return Vec3(v.x * f, v.y * f, v.z * f);
    }
    // Component-wise (Hadamard) product, not a dot or cross product.
    __device__ Vec3 operator * (const Vec3 &v) const { return Vec3(x * v.x, y * v.y, z * v.z); }
    __device__ static Vec3 mult (const Vec3 &v1, const Vec3 &v2) {
        return Vec3(v1.x * v2.x, v1.y * v2.y, v1.z * v2.z);
    }
    __device__ float dot(const Vec3 &v) const { return x * v.x + y * v.y + z * v.z; }
    __device__ static float dot(const Vec3 &v1, const Vec3 &v2) {
        return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
    }
    __device__ Vec3 operator - (const Vec3 &v) const { return Vec3(x - v.x, y - v.y, z - v.z); }
    __device__ static Vec3 sub (const Vec3 &v1, const Vec3 &v2) {
        return Vec3(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z);
    }
    __device__ Vec3 operator + (const Vec3 &v) const { return Vec3(x + v.x, y + v.y, z + v.z); }
    __device__ static Vec3 add (const Vec3 &v1, const Vec3 &v2) {
        return Vec3(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z);
    }
    __device__ Vec3& operator += (const Vec3 &v) { x += v.x, y += v.y, z += v.z; return *this; }
    __device__ Vec3& operator *= (const Vec3 &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
    __device__ Vec3 operator - () const { return Vec3(-x, -y, -z); }
    __device__ static Vec3 neg (const Vec3 &v) {
        return Vec3(-v.x, -v.y, -v.z);
    }
    // Squared length — cheaper than length() when only comparisons are needed.
    __device__ float length2() const { return x * x + y * y + z * z; }
    __device__ static float length2(const Vec3 &v) {
        return v.x * v.x + v.y * v.y + v.z * v.z;
    }
    __device__ float length() const { return sqrt(length2()); }
    __device__ static float length(const Vec3 &v) {
        return sqrt(length2(v));
    }
    // Host-only stream output in the form "[x y z]".
    __host__ friend std::ostream & operator << (std::ostream &os, const Vec3 &v)
    {
        os << "[" << v.x << " " << v.y << " " << v.z << "]";
        return os;
    }
};
// Free-function component-wise subtraction: returns v1 - v2.
__device__ Vec3 sub (const Vec3 &v1, const Vec3 &v2) {
    Vec3 difference(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z);
    return difference;
}
typedef Vec3 Vec3f;
typedef Vec3 RGB;
// Scene sphere: geometry (center, radius) plus shading properties (surface
// color, emission, reflectivity). Lives in managed memory via
// UnifiedMemoryClass so the host can build the scene and the device can trace it.
class Sphere : public UnifiedMemoryClass
{
public:
    Vec3f center; /// position of the sphere
    float radius, radius2; /// sphere radius and radius^2 (cached for intersection tests)
    Vec3f surfaceColor, emissionColor; /// surface color and emission (light)
    float reflection; /// surface transparency and reflectivity
    __device__ __host__ Sphere(
        const Vec3f &c,
        const float &r,
        const Vec3f &sc,
        const float &refl = 0,
        const Vec3f &ec = 0) :
        center(c), radius(r), radius2(r * r), surfaceColor(sc), emissionColor(ec),
        reflection(refl)
    { /* empty */ }
    __device__ __host__ Sphere(){}
    //[comment]
    // Compute a ray-sphere intersection using the geometric solution.
    // raydir is assumed normalized; on a hit, t0/t1 receive the near/far
    // intersection distances along the ray and the function returns true.
    //[/comment]
    __device__ bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const
    {
        Vec3f l = center - rayorig;
        float tca = l.dot(raydir);      // projection of center onto the ray
        if (tca < 0) return false;      // sphere is behind the ray origin
        float d2 = l.dot(l) - tca * tca; // squared distance from center to ray
        if (d2 > radius2) return false; // ray misses the sphere
        float thc = sqrtf(radius2 - d2);
        t0 = tca - thc;
        t1 = tca + thc;
        return true;
    }
};
//[comment]
// This variable controls the maximum recursion depth
//[/comment]
#ifndef MAX_RAY_DEPTH
#define MAX_RAY_DEPTH 10
#endif
// Linear interpolation: returns a when mix == 0 and b when mix == 1.
__device__ float mix(const float &a, const float &b, const float &mix)
{
    return a * (1 - mix) + b * mix;
}
//[comment]
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object that
// is the color of the object at the intersection point, otherwise it returns
// the background color.
//[/comment]
// Main ray-tracing kernel: one thread per pixel on a 2-D grid/block layout
// (x = column, y = row). Requires n * sizeof(Sphere) bytes of dynamic shared
// memory; the whole sphere list is cooperatively staged into shared memory
// before tracing. Reflections are handled iteratively (no device recursion):
// each bounce pushes a multiply/add pair onto a fixed stack of depth
// MAX_RAY_DEPTH, unwound after the loop.
__global__ void trace(
const Sphere* spheres,
const unsigned n,
Vec3f *image,
unsigned width,
unsigned height)
{
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
// Flat thread index within the block, used only for the cooperative copy.
unsigned tid = threadIdx.y * blockDim.x + threadIdx.x;
unsigned blockSize = blockDim.x * blockDim.y;
extern __shared__ Sphere s_spheres[];
// Each thread copies one sphere; thread 0 mops up any remainder when the
// scene has more spheres than the block has threads.
if (tid < n) {
s_spheres[tid] = spheres[tid];
}
if (blockSize < n && tid == 0) {
for (unsigned i = blockSize; i < n; i++) {
s_spheres[i] = spheres[i];
}
}
__syncthreads();
if (x < width && y < height) {
// Build the primary camera ray for this pixel (pinhole camera at origin,
// looking down -z).
float fov = 30;
float aspectratio = width / float(height);
// NOTE(review): tan/M_PI and pow below are double-precision operations in
// a float kernel — confirm the precision/perf trade-off is intended.
float angle = tan(M_PI * 0.5 * fov / 180.);
float xx = (2 * ((x + 0.5) / float(width)) - 1) * angle * aspectratio;
float yy = (1 - 2 * ((y + 0.5) / float(height))) * angle;
Vec3f rayorig(0);
Vec3f raydir(xx, yy, -1);
raydir.normalize();
int depth = 0;
// Per-bounce colour transform: result = result * mult + add, applied in
// reverse bounce order after the loop.
Vec3f reflectionMultStack[MAX_RAY_DEPTH+1];
Vec3f reflectionAddStack[MAX_RAY_DEPTH+1];
Vec3f result;
while (true) {
//if (raydir.length() != 1) std::cerr << "Error " << raydir << std::endl;
float tnear = INFINITY;
const Sphere* sphere = NULL;
// find intersection of this ray with the sphere in the scene
for (unsigned i = 0; i < n; ++i) {
float t0 = INFINITY, t1 = INFINITY;
if (s_spheres[i].intersect(rayorig, raydir, t0, t1)) {
if (t0 < 0) t0 = t1;
if (t0 < tnear) {
tnear = t0;
sphere = &s_spheres[i];
}
}
}
// if there's no intersection return black or background color
if (!sphere) {
//return Vec3f(2);
result = Vec3f(2);
break;
}
Vec3f surfaceColor = 0; // color of the ray/surfaceof the object intersected by the ray
Vec3f phit = rayorig + raydir * tnear; // point of intersection
Vec3f nhit = phit - sphere->center; // normal at the intersection point
nhit.normalize(); // normalize normal direction
// If the normal and the view direction are not opposite to each other
// reverse the normal direction. That also means we are inside the sphere so set
// the inside bool to true. Finally reverse the sign of IdotN which we want
// positive.
float bias = 1e-4; // add some bias to the point from which we will be tracing
if (raydir.dot(nhit) > 0) nhit = -nhit;
// Reflective surface and budget left: record this bounce's colour
// transform and continue the loop with the reflected ray.
if (sphere->reflection > 0 && depth < MAX_RAY_DEPTH) {
float facingratio = -raydir.dot(nhit);
// change the mix value to tweak the effect
float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (not need to normalize because all vectors
// are already normalized)
Vec3f refldir = raydir - nhit * 2 * raydir.dot(nhit);
refldir.normalize();
reflectionMultStack[depth] = sphere->surfaceColor * fresneleffect;
reflectionAddStack[depth] = sphere->emissionColor;
rayorig = phit + nhit * bias;
raydir = refldir;
depth++;
}
else {
// it's a diffuse object, no need to raytrace any further
for (unsigned i = 0; i < n; ++i) {
if (s_spheres[i].emissionColor.x > 0) {
// this is a light
Vec3f transmission = 1;
Vec3f lightDirection = s_spheres[i].center - phit;
lightDirection.normalize();
// Shadow test: any other sphere between the hit point and the light
// blocks it completely (hard shadow).
for (unsigned j = 0; j < n; ++j) {
if (i != j) {
float t0, t1;
if (s_spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) {
transmission = 0;
break;
}
}
}
surfaceColor += sphere->surfaceColor * transmission *
fmaxf(float(0), nhit.dot(lightDirection)) * s_spheres[i].emissionColor;
// return surfaceColor + sphere->emissionColor;
}
}
result = surfaceColor + sphere->emissionColor;
break;
}
}
// Unwind the recorded bounces from the deepest to the primary ray.
for (depth = depth - 1; depth >= 0; depth--) {
result = result * reflectionMultStack[depth] + reflectionAddStack[depth];
}
image[y*width+x] = result;
}
}
//[comment]
// Main rendering function. We compute a camera ray for each pixel of the image
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
//[/comment]
// Launches the trace kernel over the whole image: one thread per pixel,
// TILE_WIDTH x TILE_WIDTH blocks, and n * sizeof(Sphere) bytes of dynamic
// shared memory for the per-block sphere cache. `image` and `spheres` must
// be device pointers.
void render(Vec3f* image, unsigned width, unsigned height, const Sphere* spheres, const unsigned n)
{
    // Integer ceiling division avoids the float round-trip of ceil().
    dim3 dimGrid((width + TILE_WIDTH - 1) / TILE_WIDTH,
                 (height + TILE_WIDTH - 1) / TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    trace<<<dimGrid, dimBlock, n * sizeof(Sphere)>>>(spheres, n, image, width, height);
    // A kernel launch returns no status directly; query the launch error and
    // then the asynchronous execution error explicitly (previously both were
    // silently ignored).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "trace launch failed: " << cudaGetErrorString(err) << '\n';
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        std::cerr << "trace execution failed: " << cudaGetErrorString(err) << '\n';
}
// Writes the framebuffer to `filename` as a binary PPM (P6) image.
// Each channel is clamped to at most 1.0 before being quantised to a byte.
void save(const char* filename, Vec3f* image, unsigned width, unsigned height) {
    std::ofstream ofs(filename, std::ios::out | std::ios::binary);
    ofs << "P6\n" << width << " " << height << "\n255\n";
    const unsigned total = width * height;
    for (unsigned px = 0; px < total; ++px) {
        unsigned char r = (unsigned char)(std::min(float(1), image[px].x) * 255);
        unsigned char g = (unsigned char)(std::min(float(1), image[px].y) * 255);
        unsigned char b = (unsigned char)(std::min(float(1), image[px].z) * 255);
        ofs << r << g << b;
    }
    ofs.close();
}
//[comment]
// In the main function, we will create the scene which is composed of some spheres
// and some light (which is also a sphere). Then, once the scene description is complete
// we render that scene, by calling the render() function.
//[/comment]
// Reads a scene description (argv[1]), renders it on the GPU and writes the
// result to a PPM file (argv[2]), then prints a CSV timing line.
// Scene format: "width height\n", "s l\n", then s sphere lines and l light
// lines (position, radius, colour, reflectivity[, emission]).
int main(int argc, char **argv)
{
    if (argc < 3) {
        std::cout << "No file detected" << '\n';
        return 1;
    }
    FILE* scene = fopen(argv[1], "r");
    if (scene == NULL) {
        std::cout << "Error when reading file" << '\n';
        return 1;   // BUG FIX: previously fell through and passed NULL to fscanf
    }
    unsigned width, height;
    unsigned s, l;  // sphere count, light count
    if (fscanf(scene, "%u %u\n", &width, &height) != 2 ||
        fscanf(scene, "%u %u\n", &s, &l) != 2) {
        std::cout << "Malformed scene header" << '\n';
        fclose(scene);
        return 1;
    }
    Sphere* spheres = new Sphere[s + l];
    // spheres: position, radius, surface color, reflectivity
    for (unsigned i = 0; i < s; i++) {
        float x, y, z, r, refl;
        fscanf(scene, "%f %f %f %f ", &x, &y, &z, &r);
        Vec3f position(x, y, z);
        fscanf(scene, "%f %f %f %f\n", &x, &y, &z, &refl);
        RGB color(x, y, z);
        spheres[i] = Sphere(position, r, color, refl);
    }
    // lights: as above plus an emission intensity
    for (unsigned i = s; i < s + l; i++) {
        float x, y, z, r, refl, ec;
        fscanf(scene, "%f %f %f %f ", &x, &y, &z, &r);
        Vec3f position(x, y, z);
        fscanf(scene, "%f %f %f %f %f\n", &x, &y, &z, &refl, &ec);
        RGB color(x, y, z);
        spheres[i] = Sphere(position, r, color, refl, ec);
    }
    fclose(scene);  // BUG FIX: the handle was never closed
    Vec3f *image = new Vec3f[width * height];
    // BUG FIX: the kernel was previously launched on the host `new`-allocated
    // arrays; it needs device memory, so copy the scene over and fetch the
    // framebuffer back after rendering.
    Sphere* d_spheres = NULL;
    Vec3f* d_image = NULL;
    cudaMalloc(&d_spheres, (s + l) * sizeof(Sphere));
    cudaMalloc(&d_image, width * height * sizeof(Vec3f));
    cudaMemcpy(d_spheres, spheres, (s + l) * sizeof(Sphere), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    float runTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    render(d_image, width, height, d_spheres, s + l);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&runTime, start, stop);
    runTime /= 1000.0f;  // ms -> s
    cudaMemcpy(image, d_image, width * height * sizeof(Vec3f), cudaMemcpyDeviceToHost);
    save(argv[2], image, width, height);
    std::cout << std::fixed << std::setprecision(4);
    std::cout << argv[1] << ",";
    std::cout << height << ",";
    std::cout << width << ",";
    std::cout << height << ",";  // NOTE(review): height printed twice — confirm CSV columns
    std::cout << s << ",";
    std::cout << runTime << '\n';
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_spheres);
    cudaFree(d_image);
    delete[] image;    // BUG FIX: arrays require delete[]
    delete[] spheres;
    return 0;
}
|
2,646 | #include "dot-tree.hh"
#include <fstream>
namespace utils
{
// Builds a node labelled `label` whose children are deep copies of `nodes`.
DotTree::DotTree(const std::string& label,
const std::vector<DotTree>& nodes)
: label_(label)
{
nodes_.reserve(nodes.size());
for (std::size_t i = 0; i < nodes.size(); ++i)
nodes_.push_back(new DotTree(nodes[i]));
}
// Copy constructor: deep-copies the whole subtree.
DotTree::DotTree(const DotTree& tree)
: label_(tree.label_)
{
nodes_.reserve(tree.nodes_.size());
for (const auto& child : tree.nodes_)
nodes_.push_back(new DotTree(*child));
}
// Children are owned by this node; release them (recursively, via their
// own destructors).
DotTree::~DotTree()
{
for (std::size_t i = 0; i < nodes_.size(); ++i)
delete nodes_[i];
}
// Deep-copy assignment.
// BUG FIX: guard against self-assignment — without it `t = t` deletes the
// children, clears nodes_, and then copies from the already-cleared vector,
// silently losing the entire subtree.
DotTree& DotTree::operator=(const DotTree& tree)
{
if (this == &tree)
return *this;
for (auto node : nodes_)
delete node;
nodes_.clear();
label_ = tree.label_;
for (const auto& node : tree.nodes_)
nodes_.push_back(new DotTree(*node));
return *this;
}
// Emits the whole tree as a single dot digraph on `os`.
void DotTree::write_dot(std::ostream& os) const
{
int next_id = 0;
os << "digraph tree\n"
<< "{\n";
write_(os, next_id);
os << "}\n";
}
// Serialises the dot representation straight into the file at `path`.
void DotTree::write_file(const std::string& path) const
{
std::ofstream out(path);
write_dot(out);
}
// Stub: PNG rendering (an external `dot -Tpng` invocation) is currently
// disabled; the parameter is consumed to silence unused warnings.
void DotTree::write_png(const std::string& path) const
{
static_cast<void>(path);
/*
write_file(TMP_DOT);
Command::exec("dot -Tpng " + std::string(TMP_DOT)
+ " -o " + path);
*/
}
// Writes this node (label escaped for a double-quoted dot string) and its
// subtree, allocating node ids in pre-order from `id`; returns this node's id
// so the caller can emit the parent -> child edge.
// BUG FIX: backslashes are now escaped too — previously a '\' in a label
// produced an invalid or corrupted dot label.
int DotTree::write_(std::ostream& os, int& id) const
{
int self = id++;
os << " n" << self << " [label=\"";
for (char c : label_)
if (c == '"' || c == '\\')
os << '\\' << c;
else
os << c;
os << "\"]\n";
for (auto node : nodes_)
{
int child = node->write_(os, id);
os << " n" << self << " -> n" << child << "\n";
}
return self;
}
}
|
2,647 | #include "includes.h"
using namespace std;
//Check for edges valid to be part of augmented path
//Update frontier
// BFS frontier-expansion step for augmenting-path search (max-flow style).
// Launch layout: one block per current frontier vertex, one thread per
// candidate neighbour column — assumes blockDim.x == N.
// NOTE(review): frontier[blockIdx.x+1] suggests frontier[0] holds metadata
// (e.g. the frontier size) — confirm against the host code.
__global__ void kernel(bool* adj_mat, const int N, bool* visited, int* frontier, bool* new_frontier, bool* par_mat, int* cap_mat, int* cap_max_mat) {
int row_idx = frontier[blockIdx.x+1];
// NOTE(review): N * row_idx is an int*int product assigned to long — for
// large N this can overflow before widening; confirm N limits.
long offset = N * row_idx;
int col_idx = threadIdx.x;
long offset2 = N * col_idx;
// Forward residual edge row->col: adjacency set and remaining capacity.
if(adj_mat[offset + col_idx] && (cap_mat[offset + col_idx] < cap_max_mat[offset + col_idx]) && !visited[col_idx]) {
new_frontier[col_idx] = true;
par_mat[offset2 + row_idx] = true;
}
// Backward residual edge col->row: existing flow that can be cancelled.
// Both branches mark par_mat[col*N + row], i.e. row as a parent of col.
if(adj_mat[offset2 + row_idx] && (cap_mat[offset2 + row_idx] > 0) && !visited[col_idx]) {
new_frontier[col_idx] = true;
par_mat[offset2 + row_idx] = true;
}
} |
2,648 | #include <iostream>
// Empty kernel; launched once from main() purely to verify that a CUDA
// kernel launch works on this machine.
__global__ void mkernel(void){}
// Smoke test: launch an empty kernel, report any CUDA failure, greet.
int main()
{
    mkernel <<<1,1>>>();
    // A launch is asynchronous and returns no status; synchronise so any
    // launch/execution failure surfaces before we claim success (previously
    // errors were silently dropped).
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;
    std::cout<<"Hello, World!"<<std::endl;
    // NOTE(review): system("pause") is Windows-only; it is a no-op failure
    // elsewhere but kept for compatibility with the original behaviour.
    system("pause");
    return 0;
}
|
2,649 | #include <algorithm>
#include <math.h>
#include <iostream>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#define USE_GPU 1
/*
enum Piece
{
empty,
white_reg,
white_reg_moved,
white_king,
white_king_moved,
black_reg,
black_reg_moved,
black_king,
black_king_moved
};*/
// Piece encoding (ordering is load-bearing: scoring and side tests compare
// against these ranges — 1..4 white, 5..8 black; 0 means empty square).
typedef uint8_t Piece;
const Piece empty = 0;
const Piece white_reg = empty + 1;
const Piece white_reg_moved = white_reg + 1;
const Piece white_king = white_reg_moved + 1;
const Piece white_king_moved = white_king + 1;
const Piece black_reg = white_king_moved + 1;
const Piece black_reg_moved = black_reg + 1;
const Piece black_king = black_reg_moved + 1;
const Piece black_king_moved = black_king + 1;
// Checkers board: only the 32 playable (dark) squares are stored, as a
// 4 x 8 grid — pieces[x][y] with x = 0..3 and y = 0..7; white advances
// toward y = 7, black toward y = 0.
struct Board {
Piece pieces[4][8];
//bool valid;
};
enum Turn
{
white,
black
};
// Pair of board coordinates. NOTE(review): appears unused in this file.
struct Pair {
unsigned char first;
unsigned char second;
};
// All-empty sentinel boards marking "invalid/unused" slots; one copy for
// host code and one in device constant memory.
const Board bad_board_host = {{empty}};//, false};
__constant__ Board bad_board = {{empty}};//, false};
#define BLOCK_SIZE 512
#define gpuErrChk(stmt) \
do\
{\
cudaError_t errCode = stmt; \
if(errCode != cudaSuccess)\
{ \
std::cerr << "gpuErrChk: " << cudaGetErrorString(errCode)\
<< " " << __FILE__ << " " << __LINE__ << " "\
<< std::endl;\
return -1;\
}\
} while(0)
__device__ Board outputBoard;
__host__ __device__ void makeMoves(Board * boards, Turn turn, unsigned int tx);
// Integer exponentiation by squaring: returns base**exp for exp >= 0.
// Overflow wraps silently (plain int arithmetic), same as the callers expect.
__host__ __device__ int ipow(int base, int exp)
{
    int acc = 1;
    for (; exp != 0; exp >>= 1)
    {
        if (exp & 1)
            acc *= base;
        base *= base;
    }
    return acc;
}
// True iff the two boards are identical square-by-square.
__host__ __device__ bool boardEquality(const Board *a, const Board *b)
{
    for (int col = 0; col < 4; col++)
    {
        for (int row = 0; row < 8; row++)
        {
            if (a->pieces[col][row] != b->pieces[col][row])
                return false;
        }
    }
    return true;
}
// Host-side validity test: a board is valid unless it equals the all-empty
// sentinel bad_board_host.
__host__ bool boardIsValid_host(const Board *a)
{
    bool isSentinel = boardEquality(a, &bad_board_host);
    return !isSentinel;
}
// Device-side validity test against the constant-memory sentinel bad_board.
__device__ bool boardIsValid_device(const Board *a)
{
    bool isSentinel = boardEquality(a, &bad_board);
    return !isSentinel;
}
// Static evaluation of a board from white's perspective: +1 per white man,
// +2 per white king, symmetric negative for black, plus a +/-10000 bonus
// when one side has no pieces left. Returns 1,000,000 for the all-empty
// (invalid/sentinel) board so callers can filter it out.
// NOTE(review): white_wins stays 1 only when NO white piece is found, yet it
// adds +10000 — the flag naming/sign looks inverted; confirm intended.
__host__ __device__ int analyseBoard(Board *board)
{
int score = 0;
int white_wins = 1;
int black_wins = 1;
for(int x = 0; x < 4; x++)
{
for(int y = 0; y < 8; y++)
{
//kings are worth 2, pieces are worth 1
Piece piece = board->pieces[x][y];
if (piece != empty && piece <= white_king_moved)
{
// (piece+1)/2 maps reg/reg_moved (1,2) -> 1 and king/king_moved (3,4) -> 2.
score += (piece+1)/2;
white_wins = 0;
}
else if (piece != empty)
{
// (piece-3)/2 maps black reg (5,6) -> 1 and black king (7,8) -> 2.
score -= (piece-3)/2;
black_wins = 0;
}
}
}
score = score + white_wins*10000 + black_wins*-10000;
//returns 1,000,000 if invalid board,
return score*(!(white_wins && black_wins)) + 1000000*(white_wins && black_wins);
}
//reduces by 1 turn, with scores at the leaf nodes
//works with 512 spawned threads
// Collapses one ply of pre-computed leaf scores: each block handles 512
// scores = 22 groups of 22 (the per-board move fan-out; slots 484..511 are
// padding). Step 1 takes the min inside each group (opponent minimises),
// step 2 the max over the 22 group minima (we maximise), written to
// output[blockNum]. Sentinels: 1000000 = invalid board, -100000000 = "no
// score yet". Launch with 512 threads per block.
__global__ void analyze_score_tree(int * input, int * output){
int tx = threadIdx.x;
// 2-D grid flattened to a linear block number.
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
__shared__ int scores[512];
__shared__ int mins[22];
scores[tx] = input[blockNum*blockDim.x+tx];
__syncthreads();
// Threads 0..21 each reduce one group of 22 scores to its minimum.
if(threadIdx.x < 22)
{
int min = 1000000;
for(int i = 0; i < 22; i++)
{
int temp = scores[threadIdx.x*22+i];
if (temp < min && temp != -100000000)
min = temp;
}
mins[threadIdx.x] = min;
}
__syncthreads();
// Thread 0 takes the maximum over the group minima, skipping groups that
// contained only invalid boards.
if(threadIdx.x == 0)
{
int max = -100000000;
for(int i = 0; i < 22; i++)
if(mins[i] > max && mins[i] != 1000000)
max = mins[i];
output[blockNum] = max;
}
}
//reduces by 1 turn, with boards at the leaf nodes
//works with 512 spawned threads
// Same min-then-max reduction as analyze_score_tree, but the leaves are
// boards: each thread first scores its board with analyseBoard (invalid
// boards evaluate to the 1000000 sentinel). Launch with 512 threads/block.
// NOTE(review): unlike analyze_score_tree, the min loop does not skip
// -100000000 — analyseBoard never returns that value, so this appears
// harmless, but confirm the asymmetry is intentional.
__global__ void analyze_board_tree(Board * input, int * output){
int tx = threadIdx.x;
unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
__shared__ int scores[512];
__shared__ int mins[22];
scores[tx] = analyseBoard(&input[blockNum*blockDim.x+threadIdx.x]);
__syncthreads();
// Threads 0..21: minimum over each group of 22 (opponent's best reply).
if(threadIdx.x < 22)
{
int min = 1000000;
for(int i = 0; i < 22; i++)
{
int temp = scores[threadIdx.x*22+i];
if (temp < min)
min = temp;
}
mins[threadIdx.x] = min;
}
__syncthreads();
// Thread 0: maximum over the group minima, skipping all-invalid groups.
if(threadIdx.x == 0)
{
int max = -100000000;
for(int i = 0; i < 22; i++)
if(mins[i] > max && mins[i] != 1000000)
max = mins[i];
output[blockNum] = max;
}
/*
for(int stride = 2; stride <= 32; stride *= 2)
{
if (board_from_base*(stride) + stride/2 < 22 && board_from_base%stride == 0)
if(scores[base_board+board_from_base*stride+stride/2] < scores[base_board+board_from_base*stride])
scores[base_board+board_from_base*stride] = scores[base_board+board_from_base*stride+stride/2];
__syncthreads();
}
for( int stride = 2; stride <= 32; stride *= 2)
{
int index1 = base_board*stride*22;
int index2 = base_board*stride*22+stride*11;
if(base_board*stride+stride/2 < 22 && base_board%stride == 0)
{
if( scores[index1] < scores[index2] && scores[index2] != 1000000)
scores[base_board*stride*22] = scores[index2];
if (scores[base_board*stride*22] == 1000000)
scores[base_board*stride*22] = -1000000;
}
__syncthreads();
}
if (threadIdx.x == 0)
output[blockNum] = scores[0];*/
}
// Expands each input board by one full turn (white move, then each black
// reply). Launch: one block per input board (2-D grid for large counts),
// BLOCK_SIZE (512) threads; boards are staged in shared memory. Slots that
// receive no move remain bad_board so downstream kernels skip them.
__global__ void expand(Board * input, Board * output, int len) {
    const int shared_size = 484; // 22 white moves x 22 black replies
    __shared__ Board B[shared_size];
    unsigned int tx = threadIdx.x;
    unsigned int blockNum = blockIdx.x+blockIdx.y*gridDim.x;
    // Thread 0 loads this block's board; the others clear the scratch slots.
    if (blockNum < len && tx == 0)
        B[0] = input[blockNum];
    else if (blockNum < len && tx < shared_size)
        B[tx] = bad_board;
    __syncthreads();
    // BUG FIX: the old guards used `~boardEquality(...)` — bitwise NOT of a
    // bool is non-zero for both true and false, so they were always taken.
    // Dropping the dead guards preserves behaviour exactly (makeMoves
    // generates nothing from an all-empty bad_board) and stops the code
    // claiming a check it never performed.
    if (tx == 0)
        makeMoves(B, white, tx);   // white moves land at B[0], B[22], ..., B[462]
    __syncthreads();
    if (tx < shared_size)
        makeMoves(B, black, tx);   // internally only tx < 22 act, one per white move
    __syncthreads();
    if (tx < shared_size && blockNum < len)
        output[blockDim.x*blockNum+tx] = B[tx];
    else if (blockNum < len)
        output[blockDim.x*blockNum+tx] = bad_board;
}
//TODO: deal with 22 move boundary
// Generates all single moves/captures for `turn` from a board staged in
// `boards`, scattering each resulting board back into `boards`:
//  * white: only tx == 0 acts; it reads boards[0] and writes result k at
//    boards[k * 22] (up to 22 moves).
//  * black: threads 0..21 act; thread tx reads boards[tx * 22] and writes
//    its results densely at boards[tx * 22 + k].
// Board layout: pieces[x][y] with x = 0..3 and y = 0..7; white advances
// toward y = 7 and black toward y = 0. Odd and even ranks are staggered,
// which is why every rule is duplicated for y%2.
// NOTE(review): multi-jump captures are still TODO, and more than 22 moves
// from one board would overwrite a neighbouring slot.
__host__ __device__
void makeMoves(Board * boards, Turn turn, unsigned int tx)
{
    // tx = 0 condition because only the first thread has a valid board to work on.
    if(turn == white && tx == 0)
    {
        int exp_rate = 22;
        int move_idx = 0;
        Board b = boards[tx];      // immutable reference copy
        Board temp = boards[tx];   // scratch copy, reset after each emitted move
        for(int x = 0; x < 4; x++)
            for(int y = 0; y < 8; y++)
            {
                if (b.pieces[x][y] == white_reg || b.pieces[x][y] == white_king)
                {
                    /* White pieces move (not take) */
                    if(y%2 && y < 6 && x != 3 && !b.pieces[x+1][y+1])
                    {
                        temp.pieces[x+1][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && y < 6 && !b.pieces[x][y+1])
                    {
                        temp.pieces[x][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && x != 0 && !b.pieces[x-1][y+1])
                    {
                        // reaching rank 7 promotes to a king
                        if (y == 6)
                            temp.pieces[x-1][y+1] = white_king;
                        else
                            temp.pieces[x-1][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && !b.pieces[x][y+1])
                    {
                        if (y == 6)
                            temp.pieces[x][y+1] = white_king;
                        else
                            temp.pieces[x][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    /* White piece captures a black piece */
                    if(y%2 && x!= 3 && b.pieces[x+1][y+1] > white_king_moved && !b.pieces[x+1][y+2])
                    {
                        //TODO add double takes here
                        if (y != 5)
                            temp.pieces[x+1][y+2] = temp.pieces[x][y];
                        else
                            temp.pieces[x+1][y+2] = white_king;
                        temp.pieces[x][y] = empty;
                        temp.pieces[x+1][y+1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && x != 0 && b.pieces[x][y+1] > white_king_moved && !b.pieces[x-1][y+2])
                    {
                        //TODO add double takes here
                        if (y != 5)
                            temp.pieces[x-1][y+2] = temp.pieces[x][y];
                        else
                            temp.pieces[x-1][y+2] = white_king; // BUG FIX: king previously landed on [x+1][y+2]
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y+1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y < 5 && x != 0 && b.pieces[x-1][y+1] > white_king_moved && !b.pieces[x-1][y+2])
                    {
                        //TODO add double takes here
                        temp.pieces[x-1][y+2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x-1][y+1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y < 5 && x != 3 && b.pieces[x][y+1] > white_king_moved && !b.pieces[x+1][y+2])
                    {
                        //TODO add double takes here
                        temp.pieces[x+1][y+2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y+1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                }
                if (b.pieces[x][y] == white_king)
                {
                    /* White king moves backwards (not take) */
                    if(y%2 && x != 3 && !b.pieces[x+1][y-1])
                    {
                        temp.pieces[x+1][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && !b.pieces[x][y-1])
                    {
                        temp.pieces[x][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y>0 && x != 0 && !b.pieces[x-1][y-1])
                    {
                        temp.pieces[x-1][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y>0 && !b.pieces[x][y-1])
                    {
                        temp.pieces[x][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    /* White king captures backwards */
                    if(y%2 && y>1 && x!= 3 && b.pieces[x+1][y-1] > white_king_moved && !b.pieces[x+1][y-2])
                    {
                        //TODO add double takes here
                        temp.pieces[x+1][y-2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x+1][y-1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && y>1 && x != 0 && b.pieces[x][y-1] > white_king_moved && !b.pieces[x-1][y-2])
                    {
                        //TODO add double takes here
                        temp.pieces[x-1][y-2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y-1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y>0 && x != 0 && b.pieces[x-1][y-1] > white_king_moved && !b.pieces[x-1][y-2])
                    {
                        //TODO add double takes here
                        temp.pieces[x-1][y-2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x-1][y-1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y>0 && x!=3 && b.pieces[x][y-1] > white_king_moved && !b.pieces[x+1][y-2])
                    {
                        //TODO add double takes here
                        temp.pieces[x+1][y-2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y-1] = empty;
                        boards[tx+move_idx*exp_rate] = temp;
                        move_idx++;
                        temp = b;
                    }
                }
            }
    }
    else if (tx < 22)
    {
        int move_idx = 0;
        Board b = boards[tx*22];
        Board temp = boards[tx*22];
        for(int x = 0; x < 4; x++)
            for(int y = 0; y < 8; y++)
            {
                if (b.pieces[x][y] == black_reg || b.pieces[x][y] == black_king)
                {
                    /* Black pieces move (not take) */
                    if(y%2 && x != 3 && !b.pieces[x+1][y-1])
                    {
                        // reaching rank 0 promotes to a king
                        if (y == 1)
                            temp.pieces[x+1][y-1] = black_king;
                        else
                            temp.pieces[x+1][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && !b.pieces[x][y-1])
                    {
                        if (y == 1)
                            temp.pieces[x][y-1] = black_king; // BUG FIX: the king was previously placed on [x+1][y-1]
                        else
                            temp.pieces[x][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y != 0 && x != 0 && !b.pieces[x-1][y-1])
                    {
                        temp.pieces[x-1][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    // BUG FIX: the mirror branch above guards y != 0; this one
                    // didn't and read pieces[x][-1] out of bounds for y == 0.
                    if(!(y%2) && y != 0 && !b.pieces[x][y-1])
                    {
                        temp.pieces[x][y-1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    /* Black piece captures a white piece */
                    if(y%2 && y>1 && x!= 3 && b.pieces[x+1][y-1] > 0 && b.pieces[x+1][y-1] <= white_king_moved && !b.pieces[x+1][y-2])
                    {
                        //TODO add double takes here
                        if (y != 2)
                            temp.pieces[x+1][y-2] = temp.pieces[x][y];
                        else
                            temp.pieces[x+1][y-2] = black_king; // BUG FIX: previously promoted to a WHITE king
                        temp.pieces[x][y] = empty;
                        temp.pieces[x+1][y-1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && y>1 && x != 0 && b.pieces[x][y-1] > 0 && b.pieces[x][y-1] <= white_king_moved && !b.pieces[x-1][y-2])
                    {
                        //TODO add double takes here
                        if (y != 2)
                            temp.pieces[x-1][y-2] = temp.pieces[x][y];
                        else
                            temp.pieces[x-1][y-2] = black_king; // BUG FIX: was a WHITE king placed on [x+1][y-2]
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y-1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y>2 && x != 0 && b.pieces[x-1][y-1] <= white_king_moved && b.pieces[x-1][y-1] > 0 && !b.pieces[x-1][y-2])
                    {
                        //TODO add double takes here
                        temp.pieces[x-1][y-2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x-1][y-1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y>2 && x!=3 && b.pieces[x][y-1] <= white_king_moved && b.pieces[x][y-1]>0 && !b.pieces[x+1][y-2])
                    {
                        //TODO add double takes here
                        temp.pieces[x+1][y-2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y-1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                }
                if (b.pieces[x][y] == black_king)
                {
                    /* Black king moves backwards (not take) */
                    if(y%2 && y<7 && x != 3 && !b.pieces[x+1][y+1])
                    {
                        temp.pieces[x+1][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && y<7 && !b.pieces[x][y+1])
                    {
                        temp.pieces[x][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && x != 0 && !b.pieces[x-1][y+1])
                    {
                        temp.pieces[x-1][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && !b.pieces[x][y+1])
                    {
                        temp.pieces[x][y+1] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    /* Black king captures backwards */
                    if(y%2 && y<6 && x!= 3 && b.pieces[x+1][y+1] <= white_king_moved && b.pieces[x+1][y+1] > 0 && !b.pieces[x+1][y+2])
                    {
                        //TODO add double takes here
                        temp.pieces[x+1][y+2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x+1][y+1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(y%2 && y<6 && x != 0 && b.pieces[x][y+1] <= white_king_moved && b.pieces[x][y+1] > 0 && !b.pieces[x-1][y+2])
                    {
                        //TODO add double takes here
                        temp.pieces[x-1][y+2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y+1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y<5 && x != 0 && b.pieces[x-1][y+1] <= white_king_moved && b.pieces[x-1][y+1] > 0 && !b.pieces[x-1][y+2])
                    {
                        //TODO add double takes here
                        temp.pieces[x-1][y+2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x-1][y+1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                    if(!(y%2) && y<5 && x!=3 && b.pieces[x][y+1] <= white_king_moved && b.pieces[x][y+1] > 0 && !b.pieces[x+1][y+2])
                    {
                        //TODO add double takes here
                        temp.pieces[x+1][y+2] = temp.pieces[x][y];
                        temp.pieces[x][y] = empty;
                        temp.pieces[x][y+1] = empty;
                        boards[22*tx+move_idx] = temp;
                        move_idx++;
                        temp = b;
                    }
                }
            }
    }
}
void printBoard(Board b);
int initBoard(Board *b);
int makeMove(Board *board);
int analyseBoard(Board *board, Turn player);
void reverse(Board * b);
// Plays 100 half-moves: makeMove() always chooses a move for "white", and
// reverse() then flips the board so the same code serves both colours.
int main(int argc, char **argv) {
    Board * gameBoards = (Board *)malloc(sizeof(Board)*512);
    initBoard(gameBoards);
    for (int turn = 0; turn < 100; turn++)
    {
        clock_t begin = clock();
        makeMove(gameBoards);
        // Print only every other half-move (i.e. once per full move).
        if (turn % 2)
            printBoard(gameBoards[0]);
        reverse(gameBoards);
        //printBoard(gameBoards[0]);
        clock_t elapsed = clock() - begin;
        int msec = elapsed * 1000 / CLOCKS_PER_SEC;
        (void)msec; // kept for the timing printf below
        //printf("Time taken %d seconds %d milliseconds\n", msec/1000, msec%1000);
    }
}
// Rotates the board 180 degrees (square [i][j] swaps with [3-i][7-j]) while
// flipping piece colours (white codes 1..4 <-> black codes 5..8 via +/-4).
// After this, the side to move is again encoded as "white", so makeMoves
// only ever needs white's move generator for the root player.
void reverse(Board * b)
{
Piece temp;
// i < 2 walks half the columns; each iteration swaps a mirrored pair.
for(int i = 0; i < 2; i++)
for(int j = 0; j < 8; j++)
{
temp = b->pieces[i][j];
// Colour-flip the mirrored square into [i][j]: >4 is black -> white,
// 1..4 is white -> black, 0 stays empty.
if(b->pieces[3-i][7-j] > 4)
b->pieces[i][j] = b->pieces[3-i][7-j]-4;
else if(b->pieces[3-i][7-j] <= 4 && b->pieces[3-i][7-j] > 0)
b->pieces[i][j] = b->pieces[3-i][7-j]+4;
else
b->pieces[i][j] = b->pieces[3-i][7-j];
// Same transform for the saved original into the mirrored square.
if(temp > 4)
b->pieces[3-i][7-j] = temp-4;
else if(temp <= 4 && temp > 0)
b->pieces[3-i][7-j] = temp+4;
else
b->pieces[3-i][7-j] = temp;
}
}
// Pretty-prints a board to stdout, top rank first. Each stored square
// occupies the dark half of a 2-character cell; odd ranks have the piece on
// the right ("_|w|"), even ranks on the left ("w|_|"). Unknown piece values
// print as "x|x|".
void printBoard(Board b)
{
    printf("Board: --------------------------------------\n");
    for (int row = 3; row >= 0; row--)
    {
        // half = 1 is the odd (upper) rank of the pair, half = 0 the even one.
        for (int half = 1; half >= 0; half--)
        {
            for (int col = 0; col < 4; col++)
            {
                Piece p = b.pieces[col][row * 2 + half];
                char glyph;
                switch (p)
                {
                case white_reg:
                case white_reg_moved:
                    glyph = 'w';
                    break;
                case white_king:
                case white_king_moved:
                    glyph = 'W';
                    break;
                case black_reg:
                case black_reg_moved:
                    glyph = 'b';
                    break;
                case black_king:
                case black_king_moved:
                    glyph = 'B';
                    break;
                case empty:
                    glyph = '_';
                    break;
                default:
                    glyph = 'x';
                    break;
                }
                if (p > black_king_moved)       // out-of-range value
                    printf("x|x|");
                else if (half)
                    printf("_|%c|", glyph);
                else
                    printf("%c|_|", glyph);
            }
            printf("\n");
        }
    }
}
// Sets up the standard opening position: white men on ranks 0-2, black men
// on ranks 5-7, everything else empty. Returns 0 on success, -1 on NULL.
int initBoard(Board *board)
{
if(!board)
{
return -1;
}
// BUG FIX: clear every square first. The caller may hand us uninitialized
// memory (main() uses malloc), and the two middle ranks (y = 3, 4) were
// previously never written, leaving garbage piece values on the board.
for(int y = 0; y < 8; y++)
{
for(int x = 0; x < 4; x++)
{
board->pieces[x][y] = empty;
}
}
for(int y = 0; y < 3; y++)
{
for(int x = 0; x < 4; x++)
{
board->pieces[x][y] = white_reg;
board->pieces[x][y + 5] = black_reg;
}
}
return 0;
}
// Chooses white's next move for board[0] and applies it in place.
// GPU path (USE_GPU): expands the game tree three plies with expand(),
// reduces leaf boards/scores back up with minimax (min over opponent
// replies, max over our moves), then replays white's moves on the host and
// picks boards[22 * index]. CPU path: an iterative expansion reading the
// search depth from stdin (max 4).
// NOTE(review): no CUDA call in this function checks its return status.
int makeMove(Board *board)
{
Board *host_output;
Board *host_input;
Board *device_output;
Board *device1_output;
Board *device2_output;
Board *device_input;
int inputSize = 1;
int outputSize = inputSize * 512;
host_input = board;
if(USE_GPU)
{
// cuda malloc
cudaMalloc(&device_input, inputSize * sizeof(Board));
cudaMalloc(&device1_output, outputSize * sizeof(Board));
cudaMalloc(&device2_output, outputSize * 512 * sizeof(Board));
// cuda memcpy
cudaMemcpy(device_input, host_input, inputSize * sizeof(*device_input), cudaMemcpyHostToDevice);
//launch kernel and check errors
//printf("initializing kernel with grid dim: %d and block dim: %d\n", inputSize, BLOCK_SIZE);
dim3 dimGrid(1);
dim3 dimBlock(BLOCK_SIZE);
// Ply 1: 1 board -> 512 slots.
expand<<<dimGrid, dimBlock>>>(device_input, device1_output, inputSize);
cudaPeekAtLastError();
cudaDeviceSynchronize();
//set up for second kernel launch
inputSize = outputSize;
outputSize = inputSize * 512;
//launch kernel and check errors
//printf("initializing kernel with grid dim: %d and block dim: %d\n", inputSize, BLOCK_SIZE);
dim3 dimGrid2(512);
// Ply 2: 512 boards -> 512*512 slots.
expand<<<dimGrid2, dimBlock>>>(device1_output, device2_output, inputSize);
cudaPeekAtLastError();
cudaDeviceSynchronize();
int expansion_rate = 512;
dim3 dimGrid3(1*expansion_rate);
dim3 dimGrid4(512*expansion_rate);
//Board *temp_device_output;
Board *third_level_output;
int * device_first_level_scores;
int * device_second_level_scores;
int * device_third_level_scores;
cudaMalloc(&device_second_level_scores, 512*512*sizeof(int));
cudaMalloc(&device_third_level_scores, 512*expansion_rate*sizeof(int));
cudaMalloc(&device_first_level_scores, 512*sizeof(int));
//gpuErrChk(cudaMalloc(&temp_device_output, 512*512*expansion_rate*sizeof(Board)));
gpuErrChk(cudaMalloc(&third_level_output, 512*expansion_rate*sizeof(Board)));
// Ply 3 is done in chunks of `expansion_rate` boards to bound memory;
// each chunk is expanded and immediately reduced to one score per board.
for(int i = 0; i < 512*512/expansion_rate; i++)
{
// NOTE(review): device_input now points INTO device2_output; the buffer
// cudaMalloc'd above for device_input is leaked, and the
// cudaFree(device_input) below frees an offset pointer — likely invalid.
device_input = &device2_output[i*expansion_rate];
expand<<<dimGrid3, dimBlock>>>(device_input, third_level_output, expansion_rate);
cudaPeekAtLastError();
cudaDeviceSynchronize();
//expand<<<dimGrid4, dimBlock>>>(third_level_output, temp_device_output, 512*expansion_rate);
//cudaPeekAtLastError();
//cudaDeviceSynchronize();
//analyze_board_tree<<<dimGrid4, dimBlock>>>(temp_device_output, device_third_level_scores);
//cudaPeekAtLastError();
//cudaDeviceSynchronize();
analyze_board_tree<<<dimGrid3, dimBlock>>>(third_level_output,
&device_second_level_scores[i*expansion_rate]);
cudaPeekAtLastError();
cudaDeviceSynchronize();
}
// Reduce 512*512 second-level scores to 512 first-level scores.
analyze_score_tree<<<dimGrid2, dimBlock>>>(device_second_level_scores,
device_first_level_scores);
cudaPeekAtLastError();
cudaDeviceSynchronize();
int * first_level_scores = (int*)malloc(512*sizeof(int));
Board * second_level_boards = (Board*)malloc(512*512*sizeof(Board));
cudaMemcpy(first_level_scores, device_first_level_scores, 512*sizeof(int), cudaMemcpyDeviceToHost);
int max = -100000;
int index = -1;
// Final host-side minimax over the 512 scores, viewed as 22 groups of 22
// (white move index i, black reply j). Sentinels as in the kernels.
for(int i = 0; i < 22; i++)
{
int min = 1000000;
for(int j = 0; j < 22; j++)
if(first_level_scores[22*i+j] < min && first_level_scores[22*i+j] != -100000000)
min = first_level_scores[22*i+j];
if (min > max && min != 1000000)
{
index = i;
max = min;
}
}
// Replay white's move generation on the host and commit move `index`
// (white move k is stored at boards[22*k]).
Board boards[512];
boards[0] = host_input[0];
makeMoves(boards, white, 0);
host_input[0] = boards[22*index];
cudaFree(device_second_level_scores);
cudaFree(device_third_level_scores);
cudaFree(device_first_level_scores);
cudaFree(third_level_output);
cudaFree(device_input);
cudaFree(device1_output);
cudaFree(device2_output);
free(first_level_scores);
free(second_level_boards);
return 0;
} else // iterative version
{
static int numTurns = 0;
int score = 0;
unsigned long size;
// Depth is read from stdin once and cached across calls.
if(!numTurns)
{
std::cin >> numTurns;
}
if(numTurns == 4)
{
size = ipow(512, 3);
} else if(numTurns <= 3)
{
size = ipow(512, numTurns);
} else
{
printf("max 4\n");
return -1;
}
host_output = new (std::nothrow) Board[size];
if(!host_output)
{
fprintf(stderr, "operator new failed on size %lu\n", size);
return -1;
}
host_output[0] = *board;
// Breadth-first expansion: each level multiplies the board count by 512.
for(int i = 0; i < numTurns && i < 3; i++)
{
Board *temp_output = new (std::nothrow) Board[size];
if(!temp_output)
{
fprintf(stderr, "new failed on size %lu\n", size);
return -1;
}
for( int j = 0; j < ipow(512, i); j++)
{
if(!boardIsValid_host(&host_output[j]))
{
continue;
}
Board b[512] = {empty};
b[0] = host_output[j];
makeMoves(b, white, 0);
for(int k = 0; k < 512; k++)
{
if(boardIsValid_host(&b[k]))
{
makeMoves(b, black, k);
}
temp_output[512 * j + k] = b[k];
}
}
delete[] host_output;
host_output = temp_output;
}
if(numTurns > 3)
{
for(int i = 0; i < ipow(512,3); i++)
{
Board b[512] = {empty};
//Board *temp_output = new (std::nothrow) Board[ipow(512,2)];
b[0] = host_output[i];
makeMoves(b, white, 0);
for(int j = 0; j < 512; j+=22)
{
if(boardIsValid_host(&host_output[i]))
{
makeMoves(b, black, j);
}
}
// NOTE(review): analyseBoard is applied to host_output[i] inside a
// j-loop — the loop variable j is never used; confirm b[j] was meant.
for(int j = 0; j < 512; j++)
{
if(boardIsValid_host(&host_output[i]))
{
score = std::max(score, analyseBoard(&host_output[i]));
}
}
//delete[] temp_output;
}
} else
{
int * scores = new int[ipow(512,numTurns - 1)];
int max = 0, idx = -1;
for(int i = numTurns; i > 0; i--)
{
for(int j = 0; j < ipow(512,i); j++)
{
// NOTE(review): indexes host_output[j] for validity but scores
// host_output[i] — confirm which index is intended.
if(boardIsValid_host(&host_output[j]))
{
score = std::max(score, analyseBoard(&host_output[i]));
}
if(!(j % 512))
{
scores[j/512] = score;
if(score > max)
{
max = score;
idx = j/512;
}
score = 0;
}
}
}
//printf("%d, %d\n", max, idx);
Board boards[512];
boards[0] = board[0];
makeMoves(boards, white, 0);
board[0] = boards[0];
}
//printf("Score: %d\n", score);
delete [] host_output;
/*
int sum = 0, last_idx;
for(int i = 0; i < size; i++)
{
if(boardIsValid_host(&host_output[i]))
{
sum++;
last_idx = i;
//printBoard(host_output[i]);
}
}
printf("%d %d\n", sum, last_idx);
printBoard(host_output[last_idx]);
*/
// NOTE(review): host_output was deleted just above — this read is a
// use-after-free on the iterative path.
*board = host_output[0];
}
return 0;
}
|
2,650 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
cudaError_t addWithCuda(int *c, int *a, unsigned int size, int gridx, int gridy, int dimBlock);
int * createArray(int amountToAdd, int arraySize, int * a);
// Per-block sum reduction with interleaved addressing: each block sums
// blockDim.x elements of `a` into c[blockId]. Requires dynamic shared
// memory of blockDim.x * sizeof(int); the input must cover the whole grid
// (there is no bounds guard on a[tid]). The s-loop pattern assumes
// blockDim.x is a power of two.
__global__ void addKernel(int *c, int *a)
{
//declares space in shared memory
extern __shared__ int sdata[];
//gets unique thread id
int tid = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
//each thread copies information from global into shared memory
sdata[threadIdx.x] = a[tid];
//threads are synced as all data must be copied over before any addition can be done
__syncthreads();
//TODO: improve efficiency by changing from interleaved addressing to sequential addressing
//This for loop adds two adjacent numbers together, then sums the combination until only one number remains per block
for (int s = 1; s < blockDim.x; s *= 2) {
int index = 2 * s * threadIdx.x;
if (index < blockDim.x) {
sdata[index] += sdata[index + s];
}
//threads are synced to ensure all addition has completed before moving on
__syncthreads();
}
//each block stores its final value in the array c at the position of its unique block id
if (threadIdx.x == 0) c[blockIdx.x + blockIdx.y * gridDim.x] = sdata[0];
}
// Sums one billion ones in chunks: 8 passes of 125M values, each reduced on
// the GPU, with partial sums gathered into `b` and reduced twice more.
// NOTE(review): the final result is read from a[0], which assumes
// addWithCuda copies the reduced values back into its `a` argument — the
// helper's body is defined below; verify.
int main()
{
//number of values we want to add (memory restrictions make it impossible to add 1 billion at once)
int amountToAdd = 125000000;
//TODO: Optimize by removing the need to have an array of a power of two
//array is set to a power of two to make the addition easier
const int arraySize = 134217728;
int* a = new int[arraySize];
int* b = new int[arraySize];
int* c = new int[arraySize];
//create the array up to 8 times and reduce to a manageable amount of numbers (set i to 1 for 125,000,000 numbers and 8 for 1,000,000,000)
for (int i = 0; i < 8; i++){
createArray(amountToAdd, arraySize, a);
// Add vectors in parallel. reduces 125,000,000 numbers (8 times for 1 billion) stores in an array for further reduction
cudaError_t cudaStatus = addWithCuda(c, a, arraySize, 32768, 4, 1024);
// Gather this pass's 131072 partial sums into the next slice of b.
// NOTE(review): only the first 8*131072 entries of b are ever written;
// the later reductions read the full arraySize, so the tail of b is
// uninitialized — confirm intended.
for (int j = 0; j < 32768 * 4; j++){
b[i*32768*4+j] = a[j];
//printf("{%d}", i*32768*4+j);
}
}
// NOTE(review): this leaks the original `a` allocation; a and b now alias.
a = b;
//reduces the values further until only a single value remains
cudaError_t cudaStatus = addWithCuda(c, a, arraySize, 1024, 1, 1024);
addWithCuda(c, a, arraySize, 1, 1, 1024);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
//Should print 1,000,000,000
printf("{%d}\n", a[0]);
//cudaDeviceReset must be called before exiting in order for profiling and
//tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Fills the first `amountToAdd` slots of `a` with 1 and zero-pads the rest
// up to `arraySize`, so the padding does not change the reduction's total.
// Returns `a` for convenience.
int * createArray(int amountToAdd, int arraySize, int * a){
    int idx = 0;
    // Ones contribute to the final sum.
    while (idx < amountToAdd) {
        a[idx++] = 1;
    }
    // Zero padding out to the power-of-two size.
    while (idx < arraySize) {
        a[idx++] = 0;
    }
    return a;
}
// Helper function for using CUDA to add vectors in parallel.
// Launches addKernel over a gridx x gridy grid of dimBlock-thread blocks and
// copies the per-block sums from the device BACK INTO `a` (note: `c` is not
// written; callers read results from `a`). Returns the first CUDA error
// encountered, or cudaSuccess.
cudaError_t addWithCuda(int *c, int *a, unsigned int size, int gridx, int gridy, int dimBlock)
{
    int *dev_a = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;
    // Single-exit structure: break out of the do/while on any failure so the
    // device buffers are always released below.
    do {
        // Choose which GPU to run on, change this on a multi-GPU system.
        cudaStatus = cudaSetDevice(0);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
            break;
        }
        // Device buffers: one input, one output.
        cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "c cudaMalloc failed!");
            break;
        }
        cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "a cudaMalloc failed!");
            break;
        }
        // Copy the input vector from host memory to the GPU.
        cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }
        // One dynamic-shared int per thread for the block reduction.
        addKernel<<<dim3(gridx,gridy,1), dimBlock, dimBlock *sizeof(int)>>>(dev_c, dev_a);
        // Launch-configuration errors surface here.
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            break;
        }
        // Wait for the kernel and surface any asynchronous execution errors.
        cudaStatus = cudaDeviceSynchronize();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            break;
        }
        // Per-block partial sums come back in `a` for the next reduction pass.
        cudaStatus = cudaMemcpy(a, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }
    } while (0);
    cudaFree(dev_c);
    cudaFree(dev_a);
    return cudaStatus;
}
|
2,651 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
// Aborts the process with `msg` if any earlier CUDA call left an error pending.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
#define BLOCKSIZE 1024
// Unrolled final-warp reduction (call with tid < 32): the `volatile` qualifier
// forces every shared-memory store to be visible to the other lanes without
// __syncthreads(), and the blockSize template guards compile away dead steps.
// NOTE(review): this pattern relies on implicit lockstep warp execution —
// valid on pre-Volta parts; Volta+ independent thread scheduling requires
// __syncwarp() between steps. Confirm the target architecture.
template <unsigned int blockSize> __device__ void warpReduce(volatile unsigned int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
// First-add-during-load shared-memory sum reduction (one pass).
// Each block sums up to 2*blockDim.x elements of dVec and writes its partial
// sum to dAux[blockIdx.x]. Requires blockSize == blockDim.x, a power of two
// no larger than BLOCKSIZE.
template <unsigned int blockSize> __global__ void reduce(unsigned int* dVec, unsigned int* dAux, size_t N)
{
    __shared__ unsigned int sdata[BLOCKSIZE];
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    // BUG FIX: N was accepted but never used, so both loads could read past
    // the end of dVec whenever the grid covered more than N elements.
    // Out-of-range lanes now contribute 0 (the additive identity).
    unsigned int v = 0;
    if (i < N) v = dVec[i];
    if (i + blockDim.x < N) v += dVec[i + blockDim.x];
    sdata[tid] = v;
    __syncthreads();
    if (blockSize >= 1024) { if (tid < 512) { sdata[tid] += sdata[tid + 512]; } __syncthreads(); }
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
    // Final 32 lanes use volatile accesses in warpReduce; no further barriers.
    if (tid < 32) warpReduce<blockSize>(sdata, tid);
    if (tid == 0) dAux[blockIdx.x] = sdata[0];
}
// Reduces 2^30 unsigned ints (vec[i] = i) on the GPU, timing the passes.
int main(int argc, char** argv)
{
    unsigned int *vec;
    unsigned int *dVec, *dAux;
    size_t N0 = 32768;
    size_t N = N0*N0;  // 2^30 elements
    vec = (unsigned int*) malloc (sizeof(unsigned int)*N);
    for (size_t i = 0; i < N; i++) vec[i] = i;
    cudaMalloc(&dVec, sizeof(unsigned int)*N); checkCUDAError("Error allocating dVec");
    cudaMalloc(&dAux, sizeof(unsigned int)*N); checkCUDAError("Error allocating dAux");
    cudaMemcpy(dVec, vec, sizeof(unsigned int)*N, cudaMemcpyHostToDevice); checkCUDAError("Error copying vec");
    auto startTime = std::chrono::system_clock::now();
    // Each pass shrinks the problem by a factor of 2*BLOCKSIZE; the input and
    // output buffers are ping-ponged at the bottom of the loop.
    for (size_t n = N; n > 1; n = n / BLOCKSIZE)
    {
        size_t bSize = BLOCKSIZE;
        size_t gSize = floor((double)n / (2.0*(double)BLOCKSIZE));
        // Tail pass: too few elements for a full block, shrink the block.
        if (gSize == 0) { gSize = 2; bSize = n/4; }
        printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
        // blockSize is a template parameter, so dispatch on the runtime value.
        switch (bSize)
        {
        case 1024: reduce<1024><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 512: reduce<512><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 256: reduce<256><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 128: reduce<128><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 64: reduce< 64><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 32: reduce< 32><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 16: reduce< 16><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 8: reduce< 8><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 4: reduce< 4><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 2: reduce< 2><<< gSize, bSize>>>(dVec, dAux, N); break;
        case 1: reduce< 1><<< gSize, bSize>>>(dVec, dAux, N); break;
        }
        unsigned int *tmp = dVec; dVec = dAux; dAux = tmp;
    }
    cudaDeviceSynchronize();
    auto endTime = std::chrono::system_clock::now();
    unsigned int result = 0;  // BUG FIX: was `= 0.0;` (double literal for an unsigned)
    cudaMemcpy(&result, dVec, sizeof(unsigned int), cudaMemcpyDeviceToHost); checkCUDAError("Error getting result");
    printf("[GPU] Result: %u - Elapsed Time: %fs\n", result, std::chrono::duration<double>(endTime-startTime).count());
    // BUG FIX: host and device buffers were leaked.
    cudaFree(dVec);
    cudaFree(dAux);
    free(vec);
    return 0;
}
|
2,652 | #include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
//Tamaño de matrices (cuadradas)
#define N 1024
//Kernel
// C = A * B for square N x N integer matrices: one block per row, one thread
// per column. Launch as mul<<<N, N>>>.
__global__ void mul(int * A, int * B, int * C){
    int i = blockIdx.x;  // row
    int j = threadIdx.x; // column
    // PERF FIX: accumulate the dot product in a register instead of doing
    // N read-modify-writes to global C; the integer result is identical.
    int acc = 0;
    for (int k = 0; k < N; k++){
        acc += A[i * N + k] * B[k * N + j];
    }
    C[i * N + j] = acc;
}
// Multiplies two random N x N matrices on the GPU, verifies against a CPU
// reference, and reports the kernel time in milliseconds.
int main(){
    struct timeval t1, t2;
    int *hA, *hB, *hC, *hC2; // host matrices
    int *dA, *dB, *dC;       // device matrices
    // Host allocations.
    hA = (int*)malloc(N*N*sizeof(int));
    hB = (int*)malloc(N*N*sizeof(int));
    hC = (int*)malloc(N*N*sizeof(int));
    hC2 = (int*)malloc(N*N*sizeof(int));
    // Random initialization.
    srand(time(NULL));
    for (int i = 0; i < N; i++){
        for (int j = 0; j < N; j++){
            hA[i*N+j] = rand();
            hB[i*N+j] = rand();
        }
    }
    // Device allocations.
    cudaMalloc((void **)&dA, N*N*sizeof(int));
    cudaMalloc((void **)&dB, N*N*sizeof(int));
    cudaMalloc((void **)&dC, N*N*sizeof(int));
    // Host -> device copies.
    cudaMemcpy(dA, hA, N*N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, N*N*sizeof(int), cudaMemcpyHostToDevice);
    gettimeofday(&t1, 0);
    // Kernel launch: one block per row, one thread per column.
    mul<<<N, N>>>(dA, dB, dC);
    cudaDeviceSynchronize();
    gettimeofday(&t2, 0);
    // Device -> host copy of the result.
    cudaMemcpy(hC, dC, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    // CPU reference multiplication.
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            hC2[i*N + j] = 0;
            for(int k = 0; k < N; k++){
                hC2[i*N + j] += hA[i*N + k] * hB[k*N + j];
            }
        }
    }
    // Compare GPU result with the reference (exact integer equality).
    bool error = false;
    for(int i = 0; i < N*N; i++){
        if(hC[i] != hC2[i]){
            error = true;
            break;
        }
    }
    if(error)
        printf("La multiplicación de matrices ha fallado.\n");
    else
        printf("Multiplicación de matrices correcta.\n");
    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Tiempo: %f ms\n", time);
    // Release memory.
    free(hA);
    free(hB);
    free(hC);
    free(hC2);  // BUG FIX: hC2 was leaked
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
|
2,653 | //
// kernel routine
//
|
// Minimal std::chrono::weekday-like type: stores days-since-Sunday in the
// canonical range [0, 6]; an input of 7 is accepted as an alias for Sunday.
class weekday {
private:
    unsigned char wd_;  // days since Sunday, canonical range [0, 6]
public:
    weekday() = default;
    // 7 normalizes to 0; other values are stored as given (truncated to uchar).
    inline explicit constexpr weekday(unsigned v) noexcept
        : wd_(static_cast<unsigned char>(v == 7 ? 0u : v)) {}
    // Raw C encoding: 0 == Sunday ... 6 == Saturday.
    inline constexpr unsigned c_encoding() const noexcept { return wd_; }
};
// Circular weekday difference: the result is in [0, 6] — the number of days
// forward from __rhs to __lhs.
constexpr int operator-(const weekday& __lhs, const weekday& __rhs) noexcept
{
    const int diff = __lhs.c_encoding() - __rhs.c_encoding();
    // Floor-divide diff by 7: C++ integer division truncates toward zero, so
    // negative values are biased by -6 first; subtracting weeks*7 yields the
    // non-negative remainder.
    const int weeks = (diff >= 0 ? diff : diff - 6) / 7;
    return diff - weeks * 7;
}
int main(void){
constexpr weekday w0{0};
constexpr weekday w6{6};
static_assert((w0 - w6) == 1, "");
}
|
2,655 | /*
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include "grasta_cuda_util.cuh"
#include "grasta_reduction.cuh"
using namespace std;
cudaError_t cudaSimpleReduction(float* data_to_sum, unsigned int num, float &accu){
float *dev_data_to_sum = 0; // array of elements to sum that reside on the device
float *dev_temp_sums = 0; // holds accumulations of elements between kernel calls
float *temp_sums = 0; // storage for temps sums from device
cudaError_t cudaStatus;
temp_sums = (float*) malloc(REDUCTION_BLOCK_SIZE * sizeof(float));
// This invariant makes the reduction algorithm easier to implement
assert(num % (REDUCTION_BLOCK_SIZE * 2) == 0);
// Allocate GPU buffers for element
cudaStatus = cudaMalloc((void**)&dev_data_to_sum, num * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_temp_sums, REDUCTION_BLOCK_SIZE * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_data_to_sum, data_to_sum, num * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU
const unsigned int kSMEM_BYTES = REDUCTION_BLOCK_SIZE * sizeof(float);
simple_reduce<REDUCTION_BLOCK_SIZE><<< REDUCTION_NUM_BLOCKS, REDUCTION_BLOCK_SIZE, kSMEM_BYTES >>>
(dev_data_to_sum, dev_temp_sums, num / (REDUCTION_BLOCK_SIZE * 2)); // kernel args
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(temp_sums, dev_temp_sums, REDUCTION_BLOCK_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_temp_sums);
cudaFree(dev_data_to_sum);
free(temp_sums);
return cudaStatus;
}
*/ |
2,656 | #include "includes.h"
// Scatters walkers around the starting point x0:
//   xx[t] = x0[i] + dlt * stn[t],  t = i + j*dim
// for parameter index i in [0, dim) and walker index j in [0, nwl).
// Expects a 2D launch: x covers parameters, y covers walkers.
__global__ void initializeAtRandom ( const int dim, const int nwl, const float dlt, const float *x0, const float *stn, float *xx ) {
    const int param  = threadIdx.x + blockDim.x * blockIdx.x;  // parameter index
    const int walker = threadIdx.y + blockDim.y * blockIdx.y;  // walker index
    if ( param < dim && walker < nwl ) {
        const int t = param + walker * dim;  // dimension-major flat index
        xx[t] = x0[param] + dlt * stn[t];
    }
}
2,657 | /***************************************************************************
**************************************************************************
Spherical Harmonic Transform Kit 2.7
Copyright 1997-2003 Sean Moore, Dennis Healy,
Dan Rockmore, Peter Kostelec
Copyright 2004 Peter Kostelec, Dan Rockmore
This file is part of SpharmonicKit.
SpharmonicKit is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
SpharmonicKit is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
See the accompanying LICENSE file for details.
************************************************************************
************************************************************************/
/* indextables.c - source code to hard code bit reverse permutations */
#include <string.h> /* for memcpy */
__device__ __constant__ int p2[2] = {0,1};
__device__ __constant__ int p4[4] = {0,2,1,3};
__device__ __constant__ int p8[8] = {0, 4, 2, 6, 1, 5, 3, 7};
__device__ __constant__ int p16[16] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15};
__device__ __constant__ int p32[32] =
{0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10, 26, 6, 22, 14, 30, 1,
17, 9, 25, 5, 21, 13, 29, 3, 19, 11, 27, 7, 23, 15, 31};
__device__ __constant__ int p64[64] =
{0, 32, 16, 48, 8, 40, 24, 56, 4, 36, 20, 52, 12, 44, 28,60,2,
34, 18, 50, 10, 42, 26, 58, 6, 38, 22, 54, 14, 46, 30, 62, 1, 33, 17, 49, 9,41,
25, 57, 5, 37, 21, 53, 13, 45, 29, 61, 3, 35, 19, 51, 11, 43, 27, 59, 7, 39,
23, 55, 15, 47, 31, 63};
__device__ __constant__ int p128[128] =
{0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24,
88, 56, 120, 4, 68, 36, 100, 20, 84, 52, 116, 12, 76, 44, 108, 28, 92, 60,
124, 2, 66, 34, 98, 18, 82, 50, 114, 10, 74, 42, 106, 26, 90, 58, 122, 6,
70, 38, 102, 22, 86, 54, 118, 14, 78, 46, 110, 30, 94, 62, 126, 1, 65, 33,
97, 17, 81, 49, 113, 9, 73, 41, 105, 25, 89, 57, 121, 5, 69, 37, 101, 21,
85, 53, 117, 13, 77, 45, 109, 29, 93, 61, 125, 3, 67, 35, 99, 19, 83, 51,
115, 11, 75, 43, 107, 27, 91, 59, 123, 7, 71, 39, 103, 23, 87, 55, 119, 15,
79, 47, 111, 31, 95, 63, 127};
__device__ int p256[256] =
{0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208,
48, 176, 112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216, 56,
184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148, 84, 212, 52, 180,
116, 244, 12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124,
252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82, 210, 50, 178, 114, 242,
10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6,
134, 70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246, 14, 142,
78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65,
193, 33, 161, 97, 225, 17, 145, 81, 209, 49, 177, 113, 241, 9, 137, 73, 201,
41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37,
165, 101, 229, 21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173,
109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195, 35, 163, 99,
227, 19, 147, 83, 211, 51, 179, 115, 243, 11, 139, 75, 203, 43, 171, 107, 235,
27, 155, 91, 219, 59, 187, 123, 251, 7, 135, 71, 199, 39, 167, 103, 231, 23,
151, 87, 215, 55, 183, 119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159,
95, 223, 63, 191, 127, 255};
/************************************************************************/
/* Returns the precomputed bit-reversal permutation table for transform     */
/* size n (a power of two in [2, 256]), or 0 when no table exists.          */
/* NOTE(review): p256 is declared plain __device__ while the smaller tables */
/* are __constant__ — presumably so a single int* can span both; confirm    */
/* the mixed address spaces behave as intended on the target architecture.  */
__device__ int *ptable(int n)
{
switch (n)
{
case 2: return p2;
case 4: return p4;
case 8: return p8;
case 16: return p16;
case 32: return p32;
case 64: return p64;
case 128: return p128;
case 256: return p256;
default: return 0;
}
}
/************************************************************************/
/* performs a bit-reversal on the data, a double array of size n */
/* Needs workspace of size n */
/* Permutes `data` (length n, a power of two <= 256 — sizes ptable knows)  */
/* into bit-reversed order, using `workspace` (also length n) as scratch.  */
__device__ void bitreverse(double *data,
			   int n,
			   double *workspace)
{
  const int *order = ptable(n);
  /* Gather into scratch in permuted order, then copy back in one pass. */
  for (int k = 0; k < n; k++)
    workspace[k] = data[order[k]];
  memcpy(data, workspace, sizeof(double) * n);
}
|
2,658 | #include "includes.h"
// Elementwise scale: d[i] = c * a[i] for i in [0, n). 1D launch.
__global__ void scaleWalkers ( const int n, const float c, const float *a, float *d ) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if ( idx >= n ) return;  // guard the grid tail
    d[idx] = c * a[idx];
}
2,659 | #include <iostream>
#include <string.h>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <vector>
#include <time.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
#include <ctime>
using namespace std;
//qsub -I -q coc-ice -l nodes=1:ppn=8:gpus=1,walltime=04:30:00,pmem=2gb
//qsub -I -q coc-ice -l nodes=1,walltime=02:30:00,pmem=2gb
const float PI = 3.14159265358979f;
//class for Complex number -------------------------------------------------------
// Lightweight complex number usable from both host and device code.
// All state is public (real/imag) and all arithmetic is value-returning.
class Complex {
public:
    __device__ __host__ Complex() : real(0.0f), imag(0.0f) {}
    __device__ __host__ Complex(float r, float i) : real(r), imag(i) {}
    // Implicit conversion from a real number (imag = 0).
    __device__ __host__ Complex(float r) : real(r), imag(0.0f) {}
    // Componentwise sum.
    __device__ __host__ Complex operator+ (const Complex& b) const {
        return Complex(real + b.real, imag + b.imag);
    }
    // Componentwise difference.
    __device__ __host__ Complex operator- (const Complex& b) const {
        return Complex(real - b.real, imag - b.imag);
    }
    // Complex product (ac - bd, ad + bc).
    __device__ __host__ Complex operator* (const Complex& b) const {
        const float re = real * b.real - imag * b.imag;
        const float im = real * b.imag + imag * b.real;
        return Complex(re, im);
    }
    // Magnitude, returned as a pure-real Complex.
    __device__ __host__ Complex Mag() const {
        return Complex(sqrt(real * real + imag * imag));
    }
    // Phase angle in degrees, returned as a pure-real Complex.
    __device__ __host__ Complex Angle() const {
        return Complex(atan2(imag, real) * 360 / (2 * PI));
    }
    // Complex conjugate.
    __device__ __host__ Complex Conj() const {
        return Complex(real, -imag);
    }
    // Debug print; pure-real values are printed bare (and without a newline,
    // matching the original behavior).
    void Print() const {
        if(imag == 0){
            cout << real;
        }else{
            cout << '(' << real << ',' << imag << ')' << endl;
        }
    }
    float real;
    float imag;
};
// Stream a Complex: components with magnitude below 1e-10 are flushed to
// zero; pure-real values print bare, otherwise "(re,im)".
ostream& operator<< (ostream& os, const Complex& rhs) {
    Complex v(rhs);
    if(fabsf(v.imag) < 1e-10) v.imag = 0.0f;
    if(fabsf(v.real) < 1e-10) v.real = 0.0f;
    if(v.imag == 0) {
        os << v.real;
    } else {
        os << "(" << v.real << "," << v.imag << ")";
    }
    return os;
}
//class for input and output image---------------------------------------------------
// Loads a whitespace-separated real-valued image ("w h" header followed by
// h*w values) into a Complex buffer, and writes results back out in several
// formats. The pixel buffer is heap-allocated and owned for the object's
// lifetime (never freed — NOTE(review): no destructor; confirm intentional).
class InputImage {
public:
InputImage(const char* filename);
int get_width() const;
int get_height() const;
//returns a pointer to the image data. Note the return is a 1D
//array which represents a 2D image. The data for row 1 is
//immediately following the data for row 0 in the 1D array
Complex* get_image_data() const;
//use this to save output from forward DFT
void save_image_data(const char* filename, Complex* d, int w, int h);
//use this to save output from reverse DFT
void save_image_data_real(const char* filename, Complex* d, int w, int h);
//use this to check mag
void save_image_data_mag(const char* filename, Complex* d, int w, int h);
private:
// Image width and height in pixels, read from the file header.
int w;
int h;
// Row-major w*h pixel buffer.
Complex* data;
};
// Opens `filename`, reads the "w h" header and then h*w real values into the
// owned Complex buffer. Exits the process if the file cannot be opened.
InputImage::InputImage(const char* filename) {
    std::ifstream ifs(filename);
    if(!ifs) {
        std::cout << "Can't open image file " << filename << std::endl;
        exit(1);
    }
    ifs >> w >> h;
    data = new Complex[w * h];
    for(int row = 0; row < h; ++row) {
        for(int col = 0; col < w; ++col) {
            float value;
            ifs >> value;
            data[row * w + col] = Complex(value);
        }
    }
}
// Trivial accessors for the image dimensions and the raw pixel buffer.
int InputImage::get_width() const { return w; }
int InputImage::get_height() const { return h; }
// Returns the owned row-major w*h buffer (not a copy).
Complex* InputImage::get_image_data() const { return data; }
// Writes complex image data to `filename`: a "w h" header line followed by
// h rows of w values formatted by Complex's operator<<.
void InputImage::save_image_data(const char *filename, Complex *d, int w, int h) {
    std::ofstream ofs(filename);
    if(!ofs) {
        std::cout << "Can't create output image " << filename << std::endl;
        return;
    }
    ofs << w << " " << h << std::endl;
    for(int row = 0; row < h; ++row) {
        for(int col = 0; col < w; ++col) {
            ofs << d[row * w + col] << " ";
        }
        ofs << std::endl;
    }
}
// Writes only the real components (used after the inverse transform, where
// the image should be purely real).
void InputImage::save_image_data_real(const char* filename, Complex* d, int w, int h) {
    std::ofstream ofs(filename);
    if(!ofs) {
        std::cout << "Can't create output image " << filename << std::endl;
        return;
    }
    ofs << w << " " << h << std::endl;
    for (int row = 0; row < h; ++row) {
        for (int col = 0; col < w; ++col) {
            ofs << d[row * w + col].real << " ";
        }
        ofs << std::endl;
    }
}
// Writes the magnitude of each value (useful for inspecting a spectrum).
void InputImage::save_image_data_mag(const char* newFileName, Complex* d,int w, int h){
    std::ofstream ofs(newFileName);
    if (!ofs) {
        std::cout << "Can't create output image " << newFileName << std::endl;
        return;
    }
    ofs << w << " " << h << endl;
    for (int row = 0; row < h; ++row) {
        for (int col = 0; col < w; ++col) {
            ofs << d[row * w + col].Mag() << " ";
        }
        ofs << std::endl;
    }
}
//test,test
//test,test
// Bit-reversal permutation of each row of the N x N matrix: element (x, y)
// of `a` is written to (x, bitrev(y)) in `b`, preparing rows for the
// iterative (Cooley-Tukey) FFT stages.
__global__ void reorder(Complex* a, Complex* b, int N){
long idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < N * N){
int x = idx / N;
int y = idx % N;
unsigned r = 0; // reversed index;
unsigned n = N;
unsigned num = y;
// After --n, n has log2(N) one-bits (N a power of two), so this loop runs
// log2(N) times, pulling one bit of y per iteration into r — assumes N is
// a power of two; TODO confirm at call sites.
for(--n; n > 0; n >>= 1){
r <<= 1;
r |= (num & 0x1);
num >>= 1;
}
y = r;
b[x * N + y] = a[idx];
}
}
// Precomputes forward-FFT twiddle factors: W[k] = exp(-2*pi*i*k/N) for
// k in [0, N/2); the upper half is the negated lower half.
__global__ void CountW(Complex* W, int N){
    const int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k >= N/2) return;
    W[k] = Complex(cos(2 * PI * k / N), -sin(2 * PI * k / N));
    W[k + N/2] = Complex(-1) * W[k];
}
// Precomputes inverse-FFT twiddle factors: W[k] = exp(+2*pi*i*k/N) for
// k in [0, N/2); the upper half is the negated lower half.
__global__ void CountWInverse(Complex* W, int N){
    const int k = threadIdx.x + blockIdx.x * blockDim.x;
    if(k >= N/2) return;
    W[k] = Complex(cos(2 * PI * k / N), sin(2 * PI * k / N));
    W[k + N/2] = Complex(-1) * W[k];
}
// In-place matrix transpose via pairwise swaps above the diagonal.
// NOTE(review): both strides use `width`, so this is only correct for square
// matrices (width == height); every call site in this file passes an N x N
// matrix — confirm before reusing with non-square data.
__global__ void Tmatrix(Complex* a, int width, int height){
long idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < width * height){
int x = idx / width;
int y = idx % width;
// Only threads strictly above the diagonal swap, so each pair is
// exchanged exactly once and the diagonal is untouched.
if(y > x){
Complex tmp = a[x * width + y];
a[x * width + y] = a[y * width + x];
a[y * width + x] = tmp;
}
}
//__syncthreads();
}
// __global__ void TransformAnArrray(Complex* a, Complex*b, int N, Complex* W){//b is all 0 initially
// long idx = threadIdx.x + blockIdx.x * blockDim.x;
// int x = idx / N;// x th row//temp[]->b; H[] -> a[x * width + 0] ~ a[x * width + width - 1]
// int y = idx % N;// y th element in x th row
// if(idx < N * N ){//
// int groupLen = 2;
// int groupNum = N / 2;
// while(groupLen <= N){//a or b should include x* width first or just use idx as index
// int i = y / groupLen;
// int j = y % groupLen;
// b[idx] = a[x * N + i * groupLen + j % (groupLen/2)] + a[x * N + i * groupLen + j%(groupLen/2) + groupLen/2] * W[N*j/groupLen];
// __syncthreads();
// a[idx] = b[idx];// should be ok?
// groupLen *= 2;
// groupNum /= 2;
// __syncthreads();
// }
// }//
// }
// One butterfly stage of the row-wise iterative FFT. Each row of `a` is
// treated as groups of `groupLen` elements; every output element combines the
// two halves of its group with a twiddle factor from W, writing into the
// ping-pong buffer `b` (ConvertAB copies b back into a between stages).
// `groupNum` is unused here — kept so the signature matches the call sites.
__global__ void TransformAnArrray(Complex* a, Complex*b, int N, Complex* W, int groupLen, int groupNum){
long idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < N * N){
int x = idx / N;// x th row//temp[]->b; H[] -> a[x * width + 0] ~ a[x * width + width - 1]
int y = idx % N;// y th element in x th row
int i = y / groupLen; // which group within the row
int j = y % groupLen; // position within the group
b[idx] = a[x * N + i * groupLen + j % (groupLen/2)] + a[x * N + i * groupLen + j%(groupLen/2) + groupLen/2] * W[N*j/groupLen];
}
}
// Copies the ping-pong scratch buffer back: a[i] = b[i] over all N*N cells.
__global__ void ConvertAB(Complex* a, Complex*b, int N){
    const long t = threadIdx.x + blockIdx.x * blockDim.x;
    if(t < N * N){
        a[t] = b[t];
    }
}
// Final inverse-transform step: divides every element by N (the 1/N
// normalization) and flushes values with magnitude below 1e-10 to zero.
__global__ void CmpleteInverseT(Complex*a, int N){
    const long t = threadIdx.x + blockIdx.x * blockDim.x;
    if(t >= N * N) return;
    a[t] = Complex(1.0 / N) * a[t];
    if(a[t].Mag().real < 1e-10){
        a[t] = Complex(0);
    }
}
// Entry point: argv = { "f"|other, input file, output file }.
// Runs a 2D FFT (forward when argv[1][0]=='f', otherwise forward + inverse)
// as: bit-reverse rows -> log2(N) butterfly stages -> transpose -> repeat for
// columns. Kernel-launch ORDER IS SIGNIFICANT; each stage ping-pongs d_a/d_b.
// Assumes a square power-of-two image — TODO confirm inputs satisfy this.
int main(int argc, const char * argv[]) {
clock_t startTime, endTime;
const char* type = argv[1];
const char* filename = argv[2];
const char* outputfile = argv[3];
startTime = clock();
InputImage Tower(filename);
int Tower_height = Tower.get_height();
int Tower_width = Tower.get_width();
Complex* TowerData = Tower.get_image_data();// data itself// one diamention array
//test-------------------------------------
// cout << "height = " << Tower_height << "\n" << "width = " << Tower_width << endl;
// for(long i = 0; i < Tower_width * Tower_height; i++){
// printf("thread:%ld, content:%f\n", i, TowerData[i].real);
// }
//test-------------------------------------
Complex* d_a;
Complex* d_b;
Complex* d_w;
chrono::steady_clock::time_point tStart;
tStart = chrono::steady_clock::now();
// Device buffers: image, ping-pong scratch, and twiddle-factor table.
cudaMalloc(&d_a, sizeof(Complex) * Tower_width * Tower_height);
cudaMalloc(&d_b, sizeof(Complex) * Tower_width * Tower_height);
cudaMalloc(&d_w, sizeof(Complex) * Tower_width);
cudaMemcpy(d_a, TowerData, sizeof(Complex)* Tower_width * Tower_height, cudaMemcpyHostToDevice);
// Row pass: bit-reverse each row, then run the butterfly stages.
reorder<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
CountW<<<(Tower_width + 511) / 512, 512>>>(d_w, Tower_width);
int groupLen = 2;
int groupNum = Tower_width / 2;
while(groupLen <= Tower_width){
TransformAnArrray<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width, d_w, groupLen, groupNum);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
groupLen *= 2;
groupNum /= 2;
}
//1D test
// cudaMemcpy(TowerData, d_a, sizeof(Complex) * Tower_width * Tower_height, cudaMemcpyDeviceToHost);
// Tower.save_image_data_mag(outputfile, TowerData, Tower_width, Tower_height);
//1D test correct
// Column pass: transpose so columns become rows, bit-reverse, transform.
//---------------------------------------------------------------------------------------------------------
Tmatrix<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, Tower_width, Tower_height);
reorder<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
//---------------------------------------------------------------------------------------------------------
groupLen = 2;
groupNum = Tower_width / 2;
while(groupLen <= Tower_width){
TransformAnArrray<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width, d_w, groupLen, groupNum);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
groupLen *= 2;
groupNum /= 2;
}
// Transpose back to the original orientation.
Tmatrix<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, Tower_width, Tower_height);
if(type[0] == 'f'){
// Forward-only: copy the spectrum back and save it.
cudaMemcpy(TowerData, d_a, sizeof(Complex) * Tower_width * Tower_height, cudaMemcpyDeviceToHost);
chrono::steady_clock::time_point tEnd = chrono::steady_clock::now();
chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double> >(tEnd - tStart);
cout << "Time ellipsed: " << time_span.count() << " seconds... \n";
Tower.save_image_data(outputfile, TowerData, Tower_width, Tower_height);
}else{
// Inverse transform: same structure with conjugate twiddles (CountWInverse)
// and a 1/N normalization (CmpleteInverseT) after each of the two passes.
reorder<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
CountWInverse<<<(Tower_width + 511) / 512, 512>>>(d_w, Tower_width);// different W
groupLen = 2;
groupNum = Tower_width / 2;
while(groupLen <= Tower_width){
TransformAnArrray<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width, d_w, groupLen, groupNum);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
groupLen *= 2;
groupNum /= 2;
}
CmpleteInverseT<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, Tower_width);
Tmatrix<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, Tower_width, Tower_height);
reorder<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
groupLen = 2;
groupNum = Tower_width / 2;
while(groupLen <= Tower_width){
TransformAnArrray<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width, d_w, groupLen, groupNum);
ConvertAB<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, d_b, Tower_width);
groupLen *= 2;
groupNum /= 2;
}
CmpleteInverseT<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, Tower_width);
Tmatrix<<<(Tower_width * Tower_height + 511) / 512, 512>>>(d_a, Tower_width, Tower_height);
cudaMemcpy(TowerData, d_a, sizeof(Complex) * Tower_width * Tower_height, cudaMemcpyDeviceToHost);
chrono::steady_clock::time_point tEnd = chrono::steady_clock::now();
chrono::duration<double> time_span = chrono::duration_cast<chrono::duration<double> >(tEnd - tStart);
cout << "Time ellipsed: " << time_span.count() << " seconds... \n";
Tower.save_image_data(outputfile, TowerData, Tower_width, Tower_height);
}
//z.Print();
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_w);
delete[] TowerData;
endTime = clock();
cout<<"Total time = "<<(double)(endTime - startTime)/CLOCKS_PER_SEC<<"s"<<endl;
//delete TowerData?
return 0;
}
|
2,660 | //Based on the work of Andrew Krepps
#include <stdio.h>
#define CONST_SIZE 1024
__constant__ unsigned int constDevA[CONST_SIZE];
__constant__ unsigned int constDevB[CONST_SIZE];
// Creates a CUDA event and records it on the default stream — a timestamp
// for later use with cudaEventElapsedTime. Caller owns the event.
__host__ cudaEvent_t get_time(void) {
    cudaEvent_t stamp;
    cudaEventCreate(&stamp);
    cudaEventRecord(stamp);
    return stamp;
}
// Following function pulled from register.cu class file
// Fills host_data_ptr[0 .. num_elem) with pseudo-random values in [0, 19].
__host__ void generate_rand_data(unsigned int * host_data_ptr, const unsigned int num_elem)
{
    for (unsigned int idx = 0; idx < num_elem; ++idx) {
        host_data_ptr[idx] = (unsigned int) (rand() % 20);
    }
}
// Computes ((a*b) + a + b)^2 elementwise, staging the operands in registers;
// the result overwrites A in place.
__global__ void gpuRegKern_registers(unsigned int *A, unsigned int *B, const int num_elem) {
    const unsigned int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (gid >= (unsigned int) num_elem) return;  // guard the grid tail
    // Stage the inputs in registers.
    const unsigned int regA = A[gid];
    const unsigned int regB = B[gid];
    // (a*b + a + b), then squared.
    unsigned int out = (regA * regB) + regA + regB;
    out = out * out;
    A[gid] = out;
}
// Computes ((a*b) + a + b)^2 elementwise, staging the operands in shared
// memory; the result overwrites A in place.
// BUG FIX: the original wrote concatCoeff[0] and concatCoeff[1] from EVERY
// thread in the block — a block-wide shared-memory data race. Each thread now
// owns its own pair of slots. Launch must supply at least
// 2 * blockDim.x * sizeof(unsigned int) dynamic shared bytes — confirm the
// caller's launch configuration matches.
__global__ void gpuRegKern_shared(unsigned int *A, unsigned int *B, const int num_elem) {
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(idx < num_elem) {
        extern __shared__ unsigned int concatCoeff[];
        const unsigned int s = 2 * threadIdx.x;  // this thread's slot pair
        concatCoeff[s] = A[idx];
        concatCoeff[s + 1] = B[idx];
        concatCoeff[s] = (concatCoeff[s]*concatCoeff[s + 1]) + concatCoeff[s] + concatCoeff[s + 1];
        A[idx] = concatCoeff[s]*concatCoeff[s];
    }
}
// Computes ((a*b) + a + b)^2 elementwise from the __constant__ input arrays
// constDevA/constDevB into C. Only valid while num_elem <= CONST_SIZE.
__global__ void gpuRegKern_const(unsigned int *C, const int num_elem) {
    const unsigned int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if(gid < num_elem) {
        // Evaluate the common subexpression once, then square it.
        const unsigned int term = constDevA[gid]*constDevB[gid] + constDevA[gid] + constDevB[gid];
        C[gid] = term * term;
    }
}
// Allocates host/device buffers, runs the register-memory kernel over random
// data, and reports the elapsed time (includes the H2D/D2H copies).
// NOTE: calls cudaDeviceReset() on exit, which tears down the whole context.
__host__ void kernelCaller_Reg(const unsigned int totalThreads, const unsigned int blockSize, const unsigned int numBlocks) {
    const unsigned int num_byte = totalThreads*sizeof(unsigned int); // byte size of each array
    unsigned int *h_A, *h_B, *h_C;
    unsigned int *d_a, *d_b;
    // Allocate host memory
    h_A = (unsigned int*)malloc(num_byte);
    h_B = (unsigned int*)malloc(num_byte);
    h_C = (unsigned int*)malloc(num_byte);
    // Allocate device memory
    cudaMalloc((void**)&d_a, num_byte);
    cudaMalloc((void**)&d_b, num_byte);
    // Fill host arrays with random data
    generate_rand_data(h_A, totalThreads);
    generate_rand_data(h_B, totalThreads);
    // Timestamp before copying host to device, running kernel, and copy back
    cudaEvent_t startT = get_time();
    // Copy data from host arrays to device arrays
    cudaMemcpy(d_a, h_A, num_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_B, num_byte, cudaMemcpyHostToDevice);
    // Perform call to kernel
    gpuRegKern_registers <<<numBlocks, blockSize>>>(d_a, d_b, totalThreads);
    // Copy resultant array from device memory to host array
    cudaMemcpy(h_C, d_a, num_byte, cudaMemcpyDeviceToHost);
    // Timestamp after the round trip
    cudaEvent_t stopT = get_time();
    cudaEventSynchronize(stopT);
    float delta = 0;
    cudaEventElapsedTime(&delta, startT, stopT);
    printf("Elapsed time for performing %d calculations using register memory: %f\n", totalThreads, delta);
    // Print results if you want screen spam
    /*printf("Operation being performed: (a*b)+a+b)^2\n\n");
    for( int i=0; i<totalThreads; i++) {
        printf("Idx: %7d A: %2d B: %2d Result: %d\n", i, h_A[i], h_B[i], h_C[i]);
    }*/
    cudaFree((void*) d_a);
    cudaFree((void*) d_b);
    // BUG FIX: the three host buffers were leaked on every call.
    free(h_A);
    free(h_B);
    free(h_C);
    cudaDeviceReset();
}
__host__ void kernelCaller_Shar(const unsigned int totalThreads, const unsigned int blockSize, const unsigned int numBlocks) {
    // Allocates, fills, and times the shared-memory kernel: the measured
    // interval covers H2D copies + kernel launch + D2H copy.
    const unsigned int num_byte = totalThreads*sizeof(unsigned int); // Calculate memory size of arrays
    unsigned int *h_A, *h_B, *h_C;
    unsigned int *d_a, *d_b;
    // Allocate host memory
    h_A = (unsigned int*)malloc(num_byte);
    h_B = (unsigned int*)malloc(num_byte);
    h_C = (unsigned int*)malloc(num_byte);
    // Allocate device memory
    cudaMalloc((void**)&d_a, num_byte);
    cudaMalloc((void**)&d_b, num_byte);
    // Fill host arrays with random data
    generate_rand_data(h_A, totalThreads);
    generate_rand_data(h_B, totalThreads);
    // Timestamp before copying to device, running kernel, and copying back
    cudaEvent_t startT = get_time();
    // Copy data from host arrays to device arrays
    cudaMemcpy(d_a, h_A, num_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_B, num_byte, cudaMemcpyHostToDevice);
    // Fix: dynamic shared memory is allocated PER BLOCK. The original
    // requested totalThreads*2 words per block, which exceeds the 48 KB
    // per-block limit for large inputs and makes the launch fail. Each block
    // only needs two words per thread.
    gpuRegKern_shared <<<numBlocks, blockSize, blockSize*2*sizeof(unsigned int)>>>(d_a, d_b, totalThreads);
    // Copy resultant array from device memory to host array
    cudaMemcpy(h_C, d_a, num_byte, cudaMemcpyDeviceToHost);
    // Timestamp after the copy back
    cudaEvent_t stopT = get_time();
    cudaEventSynchronize(stopT);
    float delta = 0;
    cudaEventElapsedTime(&delta, startT, stopT);
    printf("Elapsed time for performing %d calculations using shared memory: %f\n", totalThreads, delta);
    cudaFree((void*) d_a);
    cudaFree((void*) d_b);
    // Fix: the three host buffers were leaked on every call.
    free(h_A);
    free(h_B);
    free(h_C);
    cudaDeviceReset();
}
__host__ void kernelCaller_const(const unsigned int totalThreads, const unsigned int blockSize, const unsigned int numBlocks) {
    // Allocates, fills, and times the constant-memory kernel.
    // NOTE(review): this path always processes CONST_SIZE elements with a
    // fixed 256-thread block; the parameters are accepted for interface
    // parity with the other callers but do not affect the launch.
    const unsigned int num_byte = CONST_SIZE*sizeof(unsigned int);
    unsigned int *h_A, *h_B, *h_C;
    unsigned *d_c;
    // Allocate host memory
    h_A = (unsigned int*)malloc(num_byte);
    h_B = (unsigned int*)malloc(num_byte);
    h_C = (unsigned int*)malloc(num_byte);
    // Allocate device memory
    cudaMalloc((void**)&d_c, num_byte);
    // Fill host arrays with random data
    generate_rand_data(h_A, CONST_SIZE);
    generate_rand_data(h_B, CONST_SIZE);
    // Timestamp before uploading constants, running kernel, and copying back
    cudaEvent_t startT = get_time();
    // Copy random data into the __constant__ arrays
    cudaMemcpyToSymbol(constDevA, h_A, num_byte);
    cudaMemcpyToSymbol(constDevB, h_B, num_byte);
    gpuRegKern_const<<<CONST_SIZE/256, 256>>>(d_c, CONST_SIZE);
    cudaMemcpy(h_C, d_c, num_byte, cudaMemcpyDeviceToHost);
    // Timestamp after the copy back
    cudaEvent_t stopT = get_time();
    cudaEventSynchronize(stopT);
    float delta = 0;
    cudaEventElapsedTime(&delta, startT, stopT);
    printf("Elapsed time for performing %d calculations using constant memory: %f\n", CONST_SIZE, delta);
    cudaFree(d_c);
    // Fix: the three host buffers were leaked on every call.
    free(h_A);
    free(h_B);
    free(h_C);
    cudaDeviceReset();
}
int main(int argc, char** argv)
{
    // Launch configuration; defaults may be overridden on the command line:
    //   argv[1] = total thread count, argv[2] = threads per block.
    int totalThreads = 1 << 6;
    int blockSize = 16;
    if (argc >= 2) totalThreads = atoi(argv[1]);
    if (argc >= 3) blockSize = atoi(argv[2]);
    int numBlocks = totalThreads / blockSize;
    // Round the thread count up to a whole number of blocks when needed.
    if (totalThreads % blockSize != 0) {
        numBlocks++;
        totalThreads = numBlocks * blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }
    printf("====================================\nRunning with %d elements\n\n", totalThreads);
    // Run the same workload through registers, shared memory, and constant memory.
    kernelCaller_Reg(totalThreads, blockSize, numBlocks);
    kernelCaller_Shar(totalThreads, blockSize, numBlocks);
    kernelCaller_const(totalThreads, blockSize, numBlocks);
    printf("====================================\n\n\n");
    return EXIT_SUCCESS;
}
|
2,661 |
/* Assignment 2
Block Wise reduction
Author: Parth Tiwari
Roll: 16IM30025
Date: 26th Feb 2020
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Block-wise reduction: each block sums its contiguous segment of A in place
// (interleaved tree reduction) and writes the segment MEAN into B.
// NOTE(review): assumes blockDim.x == q (one segment per block) — confirm at
// the launch site, which passes dim3 block(q,1,1).
__global__ void reduce(float* A, float* B, int q)
{
int num_threads = blockDim.x;
// Linearize the 2D grid into a single block index.
int block_num = blockIdx.y * gridDim.x + blockIdx.x;
// Global element index handled by this thread.
int tid = block_num * num_threads + threadIdx.x;
for(int stride = 1; stride < num_threads; stride *= 2)
{
// Make the previous round's partial sums visible to all threads in the block.
__syncthreads();
if(tid % (2* stride) == 0)
{
A[tid] += A[tid + stride];
}
}
// The first element of each segment now holds the segment sum; emit its mean.
if(tid % q == 0)
{
B[block_num] = A[tid]/q;
}
}
int main()
{
    // Reads T test cases; each supplies exponents for p (element count) and
    // q (per-pass reduction factor), then p floats. The array is reduced by a
    // factor of q per kernel pass, ping-ponging between A_d and B_d, and the
    // final means are printed.
    int T, p, q;
    // Fix: scanf results were ignored; malformed input caused UB downstream.
    if (scanf("%d", &T) != 1) return 1;
    while(T--)
    {
        if (scanf("%d %d", &p, &q) != 2) return 1;
        p = pow(2, p);
        q = pow(2, q);
        size_t size = p*sizeof(float);
        float *A = (float*)malloc(size);
        float *B = (float*)malloc(size/q);
        for(int i = 0; i < p; i++)
        {
            if (scanf("%f", &A[i]) != 1) return 1;
        }
        cudaError_t err = cudaSuccess;
        // Declare arrays for the device
        float* A_d = NULL;
        float* B_d = NULL;
        err = cudaMalloc((void**)&A_d, size);
        if (err != cudaSuccess) return 1;
        err = cudaMalloc((void**)&B_d, size/q);
        if (err != cudaSuccess) return 1;
        // Copy the input to the device. Fix: the original also copied the
        // UNINITIALIZED host buffer B to B_d — B_d is written by the kernel,
        // so that copy transferred garbage and is dropped.
        err = cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
        if (err != cudaSuccess) return 1;
        int flag = 1;
        // Reduce by a factor of q per pass; flag tracks which buffer holds
        // the most recent result (-1 => B_d, +1 => A_d).
        while(p >= q)
        {
            dim3 grid(ceil(sqrt(p/q)),ceil(sqrt(p/q)),1);
            dim3 block(q,1,1);
            if(flag == 1)
                reduce<<<grid,block>>>(A_d, B_d, q);
            else
                reduce<<<grid,block>>>(B_d, A_d, q);
            p /= q;
            size /= q;   // size tracks the surviving element count
            flag *= -1;
        }
        if(flag == -1)
        {
            err = cudaMemcpy(B, B_d, size, cudaMemcpyDeviceToHost);
            for(int i = 0; i < p; i++)
                printf("%.2f ", B[i]);
        }
        else
        {
            err = cudaMemcpy(A, A_d, size, cudaMemcpyDeviceToHost);
            for(int i = 0; i < p; i++)
                printf("%.2f ", A[i]);
        }
        cudaFree(A_d);
        cudaFree(B_d);
        free(A);
        free(B);
        printf("\n");
    }
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vector_min(float *a, int n)
{
    // In-place tree reduction for the minimum: after the loop, each block's
    // minimum sits at the block's first element (index multiple of blockDim.x),
    // matching the host-side fold that reads a[i] with stride block_size.
    //
    // Fixes vs. original:
    //  * `thread_id % 2*s == 0` parsed as `(thread_id % 2) * s == 0` (C
    //    precedence), so the wrong set of threads participated each round.
    //  * Elements were addressed as a[thread_id * s], SCALING the index by the
    //    stride instead of offsetting by it, reading far out of bounds.
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    int s;
    for (s = 1; s < blockDim.x; s *= 2)
    {
        if (thread_id % (2 * s) == 0 && thread_id + s < n)
        {
            if (a[thread_id + s] < a[thread_id])
            {
                a[thread_id] = a[thread_id + s];
            }
        }
        __syncthreads();
    }
}
int main( int argc, char* argv[] )
{
    // Times: H2D copy + per-block min kernel + D2H copy + host-side fold of
    // the per-block minima (stored at each block's first element).
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    if (argc < 2)
    {
        printf("need exactly 1 argument\n");
        return 0;
    }
    const int vector_size = atoi(argv[1]);
    float* host_a = (float*)malloc(sizeof(float)*vector_size);
    float* device_a;
    cudaMalloc(&device_a, sizeof(float)*vector_size);
    // Random input in [0, vector_size).
    for (int i = 0; i < vector_size; ++i)
        host_a[i] = rand() % vector_size;
    cudaEventRecord(start, 0);
    cudaMemcpy(device_a, host_a, sizeof(float)*vector_size, cudaMemcpyHostToDevice);
    const int block_size = 1024;
    // Ceiling division so the tail of the vector is covered.
    int num_blocks = vector_size / block_size + (vector_size % block_size ? 1 : 0);
    vector_min<<<num_blocks, block_size>>>(device_a, vector_size);
    cudaMemcpy(host_a, device_a, sizeof(float)*vector_size, cudaMemcpyDeviceToHost);
    // Fold the per-block minima on the host; the final answer lands in host_a[0].
    for (int i = block_size; i < vector_size; i += block_size)
    {
        if (host_a[i] < host_a[0])
            host_a[0] = host_a[i];
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("%f\n", elapsedTime);
    cudaFree(device_a);
    free(host_a);
    return 0;
}
#include <iostream> // Needed to perform IO operations
using namespace std;
#define N 100000
__global__ void add(int n, int *a, int *b, int *c) {
    // Element-wise vector add c = a + b, using a grid-stride loop so any
    // launch configuration covers all n elements.
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
    for (int i = first; i < n; i += step)
        c[i] = a[i] + b[i];
}
int main(void) {
    // Vector add demo: initialize on the host, add on the device, print all results.
    const int blockSize = 256;
    const int numBlocks = (N + blockSize - 1) / blockSize;   // ceiling division
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    // Device buffers for the three vectors.
    cudaMalloc((void**)&dev_a, N*sizeof(int));
    cudaMalloc((void**)&dev_b, N*sizeof(int));
    cudaMalloc((void**)&dev_c, N*sizeof(int));
    // Host-side initialization: a[i] = -i, b[i] = i^2.
    for (int i = 0; i < N; ++i) {
        a[i] = -i;
        b[i] = i*i;
    }
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    add<<<numBlocks, blockSize>>>(N, dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();
    cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
#include "includes.h"
__global__ void kernel_add_wavelet ( float *g_u2, float wavelets, const int nx, const int ny, const int ngpus)
{
    // Injects a wavelet sample into a single cell of the (x,y) plane.
    // Row choice depends on the GPU count; presumably this places the source
    // inside this device's sub-domain — confirm against the decomposition.
    int ipos;
    if (ngpus == 2)
        ipos = ny - 10;
    else
        ipos = ny / 2 - 10;
    unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int idx = ipos * nx + ix;
    // Only the thread on the centre column adds the sample.
    if (ix == nx / 2)
        g_u2[idx] += wavelets;
}
void setGrid(int n, dim3 &blockDim, dim3 &gridDim)
{
    // Choose grid dimensions for an n x n problem where each thread covers a
    // 4x4 patch of elements (hence the factor of 4 per dimension).
    //
    // Fix: the original computed n/blockDim/4 and then bumped the grid only
    // when n % blockDim != 0. When n is a multiple of blockDim but NOT of
    // 4*blockDim (e.g. n = 5*blockDim), no bump happened and part of the
    // domain was left uncovered. Round up against the full per-block
    // coverage (blockDim * 4) instead.
    gridDim.x = (n + 4 * blockDim.x - 1) / (4 * blockDim.x);
    gridDim.y = (n + 4 * blockDim.y - 1) / (4 * blockDim.y);
}
|
2,666 |
/*
CPP_CONTEST=2017
CPP_PROBLEM=I
CPP_LANG=CUDA
CPP_PROCESSES_PER_NODE=saturno 1
*/
/* RECORD
Francisco Muñoz García
September 20, 2017
in CESGA
time 1520
speed-up 9.80
*/
#include <stdlib.h>
__device__ int count(int ld,int n,char *a,char *b) //Each CUDA thread does this work; called from the kernel, hence __device__
{
    // Compare the n x n mask b against the n x n window of a that starts at
    // `a` (rows of a are `ld` elements apart) and return the match count.
    int matches = 0;
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            if (a[row*ld + col] == b[row*n + col])
                matches++;
        }
    }
    return matches;
}
/*
We create one thread for each element in matrix sizexsize. Each element compare its matrix and save the results in a matrix. For that reason
each thread has an associated element in the matrix.
*/
// One thread per candidate window position (i, j): compares the m x m mask b
// against the window of the n x n image a anchored at (i, j) and stores the
// match count in temp[i*size + j].
// NOTE(review): size = n - m gives n-m positions per dimension; a full slide
// would be n-m+1 — this matches the allocation in sec(), so it appears
// intentional; confirm against the problem statement.
__global__ void mask(char* a, char* b, int* temp, int n, int m) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int size = n-m;
if((i<size) && (j<size)) {
temp[i*size+j]=count(n,m,&a[i*n+j],b);
}
}
int sec(int n,char *a,int m,char *b)
{
    // Host driver: slides the m x m mask b over the n x n image a on the GPU,
    // collects the per-position match counts, and returns the maximum.
    //
    // Fixes vs. original:
    //  * cudaFree was called as cudaFree((void**)ptr) — cudaFree takes the
    //    device pointer itself; the casts were type-confused (same value, so
    //    harmless, but wrong).
    //  * cudaMemset size was nBytes_temp*sizeof(char) — redundant multiply.
    //  * When the mask does not fit (n <= m), size*size is 0 and the original
    //    read temp[0] out of bounds; guard and return 0.
    //  * Dropped the unused locals i, j, value.
    int maximum = 0;
    int size = n - m;   // window positions per dimension (matches the kernel)
    if (size <= 0) return 0;
    int nbytes_a = sizeof(char)*n*n;
    int nbytes_b = sizeof(char)*m*m;
    int nBytes_temp = sizeof(int)*size*size;
    int* temp = (int*) malloc(nBytes_temp);
    int* temp_d;
    char* a_d;
    char* b_d;
    // 4x8 thread blocks; round the grid up so every position is covered.
    int bl_dim1 = 4;
    int bl_dim2 = 8;
    dim3 block(bl_dim1,bl_dim2);
    int gsx = size / bl_dim1;
    if(size%bl_dim1) gsx++;
    int gsy = size / bl_dim2;
    if(size%bl_dim2) gsy++;
    dim3 grid(gsx, gsy);
    // Reserve GPU memory
    cudaMalloc((void **) &temp_d, nBytes_temp);
    cudaMalloc((void**) &a_d, nbytes_a);
    cudaMalloc((void**) &b_d, nbytes_b);
    // Zero the result buffer, then transfer the inputs
    cudaMemset(temp_d, 0, nBytes_temp);
    cudaMemcpy(a_d, a, nbytes_a, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, nbytes_b, cudaMemcpyHostToDevice);
    // Launch the comparison kernel
    mask<<<grid, block>>>(a_d, b_d, temp_d, n,m );
    // Transfer the per-position counts back to the host
    cudaMemcpy(temp, temp_d, nBytes_temp, cudaMemcpyDeviceToHost);
    cudaFree(temp_d);
    cudaFree(a_d);
    cudaFree(b_d);
    // Sequential scan for the best match count.
    maximum = temp[0];
    for(int i=1; i<size*size;i++) {
        if(temp[i]>maximum)
            maximum=temp[i];
    }
    free(temp);
    return maximum;
}
|
#include "includes.h"
__global__ void analyze(const float *input, float *sum, int numElements) {
    // Atomically accumulate each input element into the matching slot of sum.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numElements) return;
    atomicAdd(&sum[idx], input[idx]);
}
#include <stdio.h>
#include <stdlib.h>
// Define maximum number of vertices in the graph
#define N 317080
#define EDGES 1049886
// Data structure to store graph
struct Graph {
// An array of pointers to Node to represent adjacency list
struct Node* head[N+1];
};
// A data structure to store adjacency list nodes of the graph
struct Node {
int dest;
struct Node* next;
};
// data structure to store graph edges
struct Edge {
int src, dest;
};
struct author{
int id;
int co_auth;
};
extern __managed__ struct Graph * graph ;
extern __managed__ struct Node* newNode ;
extern __managed__ struct author *auth_list;
extern __managed__ int *dist_auth;
// Function to create an adjacency list from specified edges
// Build an undirected adjacency list from the first n entries of edges[].
__host__ void createGraph(struct Graph* graph, struct Edge edges[], int n)
{
    unsigned i;
    // initialize head pointer for all vertices
    for (i = 0; i < N+1; i++){
        graph->head[i] = NULL;
    }
    // Fix: the original looped `i < N+1` (the VERTEX count) over the edge
    // array and ignored the n parameter entirely; honor n as the edge count.
    for (i = 0; i < (unsigned)n; i++)
    {
        // get source and destination vertex
        int src = edges[i].src;
        int dest = edges[i].dest;
        // allocate new node of the adjacency list from src to dest
        cudaMallocManaged(&newNode, sizeof(struct Node), (unsigned int)cudaMemAttachGlobal);
        cudaMemAdvise(newNode, sizeof(struct Node), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
        newNode->dest = dest;
        // prepend the new node to src's list
        newNode->next = graph->head[src];
        graph->head[src] = newNode;
        // allocate the mirror node from dest to src (undirected edge)
        cudaMallocManaged(&newNode, sizeof(struct Node), (unsigned int)cudaMemAttachGlobal);
        cudaMemAdvise(newNode, sizeof(struct Node), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
        newNode->dest = src;
        // prepend the new node to dest's list
        newNode->next = graph->head[dest];
        graph->head[dest] = newNode;
    }
}
// Function to print adjacency list representation of graph
// Grid-stride loop over all vertices: for each author, walk the adjacency
// list and record the author id and co-author count in auth_list.
__global__ void countAuth(struct Graph* graph,struct author *auth_list, int n)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int v = first; v < n + 1; v += step)
    {
        // Count the length of vertex v's adjacency list.
        int co_auth = 0;
        for (struct Node* ptr = graph->head[v]; ptr != NULL; ptr = ptr->next)
            co_auth++;
        auth_list[v].id = v;
        auth_list[v].co_auth = co_auth;
    }
}
// Histogram of co-author counts: one atomic increment per author, grid-stride
// over the n+1 entries of auth_list.
__global__ void distAuth(struct author *auth_list, int *dist_auth, int n)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int v = first; v < n + 1; v += step)
    {
        atomicAdd(&dist_auth[auth_list[v].co_auth], 1);
    }
}
long get_vert(char *str){
    // Extract the token that follows the SECOND space on a newline-terminated
    // header line (the vertex-count field) and return it as a number.
    char vert[20];
    int space_count = 0;
    int i = 0, j = 0;
    while (str[i] != '\n') {
        if (str[i] == ' ')
            space_count++;
        if (space_count == 2) {
            // Copies the leading space too; atoi skips leading whitespace.
            vert[j++] = str[i];
        } else if (space_count > 2) {
            break;
        }
        i++;
    }
    vert[j] = '\0';
    return atoi(vert);
}
int get_src(char *str){
    // Extract the first whitespace-delimited token of a newline-terminated
    // edge line (the source vertex id) and return it as an int.
    char buf[20];
    int i = 0;
    // Copy characters up to the first space or the end of the line.
    while (str[i] != '\n' && str[i] != ' ') {
        buf[i] = str[i];
        i++;
    }
    buf[i] = '\0';
    return atoi(buf);
}
int get_dst(char *str){
    // Extract the token that follows the FIRST space of a newline-terminated
    // edge line (the destination vertex id) and return it as an int.
    char buf[20];
    int space_count = 0;
    int i = 0, j = 0;
    while (str[i] != '\n') {
        if (str[i] == ' ')
            space_count++;
        if (space_count == 1) {
            // Copies the leading space too; atoi skips leading whitespace.
            buf[j++] = str[i];
        } else if (space_count > 1) {
            break;
        }
        i++;
    }
    buf[j] = '\0';
    return atoi(buf);
}
int comparator(const void *p, const void *q)
{
    // qsort comparator: orders authors by co_auth, DESCENDING.
    const struct author *lhs = (const struct author *)p;
    const struct author *rhs = (const struct author *)q;
    return rhs->co_auth - lhs->co_auth;
}
// Directed Graph Implementation in C
// Reads the DBLP co-author edge list, builds the adjacency list, and prints
// the most-connected authors plus the distribution of co-author counts.
int main(void)
{
    // (x, y) pair in the array represents an edge from x to y
    struct Edge *edges;
    edges = (struct Edge *) calloc (EDGES, sizeof(struct Edge));
    FILE *fp;
    char str[200];
    const char* file = "dblp-co-authors.txt";
    fp = fopen(file, "r");
    if (fp == NULL){
        printf("Could not open file %s",file);
        return 1;
    }
    int vert, i=0;
    // Skip four comment lines; the fifth line is the header with the counts.
    fgets(str, 200, fp);
    fgets(str, 200, fp);
    fgets(str, 200, fp);
    fgets(str, 200, fp);
    fgets(str, 200, fp);
    vert = get_vert(str);
    (void)vert;   // header count parsed but the compile-time N is used below
    long src, dst;
    // Fix: bound the loop by EDGES so a longer file cannot overrun edges[].
    while (i < EDGES && fgets(str, 200, fp) != NULL){
        src = get_src(str);
        dst = get_dst(str);
        edges[i].src = src;
        edges[i].dest = dst;
        i++;
    }
    fclose(fp);   // fix: the file handle was never closed
    printf("Edges copied....\n");
    cudaMallocManaged(&graph, sizeof(struct Graph), (unsigned int)cudaMemAttachGlobal);
    cudaMemAdvise(graph, sizeof(struct Graph), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
    // Fix: pass the number of edges actually read. The original computed
    // sizeof(edges)/sizeof(edges[0]) on a POINTER (always 1) and then passed
    // the vertex count N as the edge count anyway.
    createGraph(graph, edges, i);
    printf("Graph Created...\n");
    int graph_size = N + 1;
    int block_size = 64;
    int grid_size = (graph_size + block_size - 1)/block_size;
    // Set device that we will use for our cuda code
    cudaSetDevice(0);
    cudaMallocManaged(&auth_list, graph_size * sizeof(struct author), (unsigned int)cudaMemAttachGlobal);
    cudaMemAdvise(auth_list, graph_size * sizeof(struct author), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
    // Count co-authors per author on the GPU.
    countAuth<<<grid_size, block_size>>>(graph, auth_list, N);
    cudaDeviceSynchronize();
    // Sort authors by co-author count, descending, then print the leaders.
    qsort((void*)auth_list, graph_size, sizeof(struct author), comparator);
    int max = auth_list[0].co_auth;
    for(i=0;i<N+1;i++){
        if(auth_list[i].co_auth == max)
            printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth);
    }
    // Histogram of co-author counts (0..max).
    cudaMallocManaged(&dist_auth, (max+1) * sizeof(int), (unsigned int)cudaMemAttachGlobal);
    cudaMemAdvise(dist_auth, (max+1) * sizeof(int), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
    cudaMemset(dist_auth, 0, (max+1)*sizeof(int));
    distAuth<<<grid_size, block_size>>>(auth_list, dist_auth, N);
    cudaDeviceSynchronize();
    for(i=0;i<=max;i++){
        printf("Dist %d: %d\n", i, dist_auth[i]);
    }
    free(edges);  // fix: the edge buffer was leaked
    return 0;
}
|
#include "includes.h"
// Column-sum kernel: each block accumulates one column of the M-row matrix Md
// into Nd. Each thread sums a contiguous run of `pasos` elements of the
// column, partial sums are staged in shared memory, and thread 0 folds them.
// NOTE(review): assumes blockDim.x == DIMBLOCKX, M divisible by blockDim.x,
// and N divisible by gridDim.x — confirm at the launch site.
__global__ void SumaColMatrizKernel (int M, int N, float *Md, float *Nd){
// Pvalue holds this thread's running partial sum
__shared__ float Nds[DIMBLOCKX];
float Pvalue = 0;
int columna = blockIdx.y*(N/gridDim.x)+threadIdx.x;
int pasos = M/blockDim.x ;
// First element of this thread's run within the column (column-major layout).
int posIni = columna * M + threadIdx.x * pasos;
for (int k = 0; k < pasos; ++k) {
Pvalue = Pvalue + Md[posIni + k];
}
Nds[threadIdx.x] = Pvalue;
// All partial sums must be visible before thread 0 folds them.
__syncthreads();
if (threadIdx.x == 0 ){
// Sequential fold of the per-thread partials; only thread 0 writes out.
for (int i = 1; i < blockDim.x; ++i) {
Nds[0] = Nds[0]+Nds[i];
}
Nd[columna*gridDim.x+blockIdx.x] = Nds[0];
}
} |
2,670 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (launched <<<1,1>>>): mutates
// `comp` through a fixed sequence of transcendental operations and prints the
// final value. NOTE(review): the first comparison divides by +0.0f, so its
// right-hand side is an IEEE-754 infinity — this appears intentional for the
// generated fuzz input; do not "fix" without regenerating the test.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp > (-1.3943E-36f / +0.0f - (var_2 / var_3))) {
float tmp_1 = +1.5186E28f;
float tmp_2 = -1.1281E-1f;
comp += tmp_2 * tmp_1 / cosf(logf(-1.2048E7f));
comp += coshf((-1.0964E34f * -1.2267E-44f - var_4));
if (comp < (var_5 * var_6)) {
float tmp_3 = (+1.3174E28f - +0.0f);
float tmp_4 = +1.8966E-7f;
comp += tmp_4 * tmp_3 * -1.5344E-44f / (+1.0506E18f * (var_7 - (+1.6832E19f - +1.2449E-36f)));
}
// var_1 controls the iteration count of the generated loop.
for (int i=0; i < var_1; ++i) {
float tmp_5 = +1.7930E-43f + (-1.3392E35f / logf((+1.7318E0f * -1.4533E-41f)));
comp += tmp_5 - var_8 / fmodf(+1.3093E-43f, +1.7079E34f + (var_9 * -1.9509E35f - var_10));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
    // Allocate a 10-element float array and fill every slot with v.
    // The caller owns (and must free) the returned buffer.
    float *arr = (float*) malloc(sizeof(float) * 10);
    int i = 0;
    while (i < 10) {
        arr[i] = v;
        ++i;
    }
    return arr;
}
int main(int argc, char** argv) {
    /* Program variables */
    // Fix: the original read argv[1]..argv[11] unconditionally, which is
    // undefined behaviour when fewer than 11 arguments are supplied.
    if (argc < 12) {
        printf("expected 11 arguments, got %d\n", argc - 1);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    // Single-thread launch: the generated kernel is inherently sequential.
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
    cudaDeviceSynchronize();
    return 0;
}
|
#include <stdio.h>
#include <cuda_runtime.h>
//#include <cutil.h>
#define TILE_WIDTH 16
#define N 2048
void err_handling(cudaError_t *err, const char *str)
{
    // Print the supplied message and abort when the recorded CUDA status is
    // anything other than success; otherwise do nothing.
    if (*err == cudaSuccess)
        return;
    printf("%s\n", str);
    exit(EXIT_FAILURE);
}
__global__ void matMul(const float *A, const float *B, float *C, int m, int k, int n)
{
    // Tiled matrix multiply C(m x n) = A(m x k) * B(k x n), one TILE_WIDTH^2
    // output tile per block.
    //
    // Fixes vs. original:
    //  * B was indexed as B[(t*TILE_WIDTH + ty)*k + col] — stride k instead of
    //    n, which is only correct when n == k.
    //  * No handling of dimensions that are not multiples of TILE_WIDTH:
    //    loads/stores ran out of bounds. Tiles are now zero-padded at the
    //    edges and the final store is guarded.
    __shared__ float sh_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float sh_B[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x; int ty = threadIdx.y;
    int row = blockIdx.y*blockDim.y + ty;
    int col = blockIdx.x*blockDim.x + tx;
    float cVal = 0.0f;
    int numTiles = (k + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int t = 0; t < numTiles; ++t) {
        int aCol = t*TILE_WIDTH + tx;
        int bRow = t*TILE_WIDTH + ty;
        // Zero-pad loads that would fall outside the matrices.
        sh_A[ty][tx] = (row < m && aCol < k) ? A[row*k + aCol] : 0.0f;
        sh_B[ty][tx] = (bRow < k && col < n) ? B[bRow*n + col] : 0.0f;
        __syncthreads();   // tile fully loaded before use
        for (int i = 0; i < TILE_WIDTH; ++i) {
            cVal += sh_A[ty][i] * sh_B[i][tx];
        }
        __syncthreads();   // tile fully consumed before overwrite
    }
    if (row < m && col < n) {
        C[row*n + col] = cVal;
    }
}
int main(void)
{
    // Benchmarks the tiled matMul kernel on N x N random matrices and prints
    // the elapsed kernel time plus two spot-check result values.
    cudaError_t err = cudaSuccess;
    int m = N;
    int n = N;
    int k = N;
    float *A = (float*)malloc(m*k*sizeof(float));
    float *B = (float*)malloc(k*n*sizeof(float));
    float *C = (float*)malloc(m*n*sizeof(float));
    if (A == NULL || B == NULL || C == NULL) {
        printf("allocate host error!\n");
        return 1;
    }
    // Random inputs in [0, 1]; C's initial contents are overwritten by the copy-back.
    for (int i = 0; i < m*k; ++i) {
        A[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < k*n; ++i) {
        B[i] = rand()/(float)RAND_MAX;
    }
    for (int i = 0; i < m*n; ++i) {
        C[i] = rand()/(float)RAND_MAX;
    }
    float *dev_A = NULL;
    float *dev_B = NULL;
    float *dev_C = NULL;
    err = cudaMalloc((void**)&dev_A, m*k*sizeof(float));
    err_handling(&err, "allocate devecie error A!");
    err = cudaMalloc((void**)&dev_B, k*n*sizeof(float));
    err_handling(&err, "allocate devecie error B!");
    err = cudaMalloc((void**)&dev_C, m*n*sizeof(float));
    err_handling(&err, "allocate devecie error C!");
    err = cudaMemcpy(dev_A, A, m*k*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to A error!");
    err = cudaMemcpy(dev_B, B, k*n*sizeof(float), cudaMemcpyHostToDevice);
    err_handling(&err, "memcpy to B error!");
    dim3 dimGrid((m-1)/TILE_WIDTH+1, (n-1)/TILE_WIDTH+1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    // Time the kernel with events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, m, k, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(start);
    cudaEventSynchronize(stop);
    float time_elapsed = 0;
    cudaEventElapsedTime(&time_elapsed, start, stop);
    printf("%fms\n", time_elapsed);
    err = cudaMemcpy(C, dev_C, m*n*sizeof(float), cudaMemcpyDeviceToHost);
    err_handling(&err, "memcpy to host C error!");
    printf("%f %f\n", C[100*N+100], C[234*N+234]);
    // Fix: destroy the timing events and free the host matrices (all five
    // were leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(A);
    free(B);
    free(C);
    err = cudaFree(dev_A);
    err_handling(&err, "mem free A error!");
    err = cudaFree(dev_B);
    err_handling(&err, "mem free B error!");
    err = cudaFree(dev_C);
    err_handling(&err, "mem free C error!");
    err = cudaDeviceReset();
    err_handling(&err, "device reset error!");
    return 0;
}
|
/** Homework 3 question 1 code
*
* \file q1.cu
* \author Jose Carlos Martinez Garcia-Vaso <carlosgvaso@utexas.edu>
* \author Utkarsh Vardan <uvardan@utexas.edu>
*/
#include <cstdio> // standard I/O
#include <string> // strings
#include <fstream> // streams
#include <vector> // std::vector
#include <sstream> // std::stringstream
#include <cuda_runtime.h> // CUDA functions
// Globals
#define DEBUG 0 //! Enable debug messages (0: no log output, 1: non-verbose logs, 2: verbose logs, 3: all logs)
#define INPUT_FILE "inp.txt" //! Input filename
#define OUTPUT_FILE_Q1A "q1a.txt" //! Q1 a output filename
#define OUTPUT_FILE_Q1B "q1b.txt" //! Q1 b output filename
#define EXIT_OK 0 //! Exit code success
#define EXIT_FATAL 1 //! Exit code unrecoverable error
/** Read input from file
*
* This function assumes the file contains a single line, as per the format in
* the README.txt.
*
* \param filename Name of input file to read
* \return Vector containing the input array in the file
*/
std::vector<int> read_input (std::string filename) {
    // Parse one line of comma-separated integers from the named file and
    // return them as a vector. Exits with EXIT_FATAL if the file can't be opened.
    std::vector<int> values;
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        fprintf(stderr, "ERROR:read_input: Could not open file\n");
        exit(EXIT_FATAL);
    }
    if (fin.good()) {
        // The whole input lives on the first line (see README format).
        std::string line;
        std::getline(fin, line);
        std::stringstream ss(line);
        int val;
        while (ss >> val) {
            values.push_back(val);
            // Skip the separating comma, if any.
            if (ss.peek() == ',') ss.ignore();
        }
    }
    fin.close();
    return values;
}
/** Write formated output to file
*
* This function uses the output format described in the README.txt file. If the
* file already exists, it will be overwritten.
*
* \param filename Name of the output file
* \param v_out Vector to save to file
*/
void write_output (std::string filename, const std::vector<int> &v_out) {
    // Write the vector as comma-separated values on a single line (no trailing
    // newline, matching inp.txt), overwriting any existing file.
    std::ofstream fout(filename);
    for (size_t i = 0; i < v_out.size(); ++i) {
        if (i != 0) fout << ", ";   // separator before every entry but the first
        fout << v_out.at(i);
    }
    fout.close();
}
/** CUDA kernel for the Hillis-Steele parallel scan min
*
* \param d_out Pointer to output array in global memory
* \param d_in Pointer to input array in global memory
* \param n Size of the problem (input array size)
*/
// Hillis-Steele inclusive scan specialised to MIN, in place over d_in; each
// block's result (min over at least its own elements) is written to
// d_out[blockIdx.x] for a second-pass reduction by the caller.
// NOTE(review): every __syncthreads() below sits inside `if (gid < n)` — when
// n is not a multiple of blockDim.x, the tail block's excess threads skip the
// barrier, which is undefined behaviour per the CUDA model; confirm n is
// always launched as a full multiple or hoist the guard.
// NOTE(review): reads of d_in[gid - d] may cross block boundaries without a
// grid-wide sync; for MIN this still yields a valid lower-bound partial, and
// the caller's second pass restores correctness.
__global__ void parallelScanMinKernel(int *d_out, int *d_in, int n) {
// Initialize global and thread IDs, and other variables
int gid = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
int val = 0;
// Ensure we only access available array entries
if (gid < n) {
#if DEBUG >= 3
if (tid == 0) {
printf("\t\tIterations:\n\t\t\tBlock %d: d = %d: d_in = [ ",
blockIdx.x, 0);
for (int i=0; i<n; ++i) {
if (i == n-1) {
printf("%d ]\n", d_in[i]);
} else {
printf("%d, ", d_in[i]);
}
}
}
#endif
// Doubling-stride scan: read neighbor at distance d, then fold min in.
for (int d=1; d<n; d=d*2) {
if (gid >= d) {
val = d_in[gid - d];
}
__syncthreads();
if (gid >= d) {
d_in[gid] = d_in[gid] <= val ? d_in[gid] : val;
}
__syncthreads();
#if DEBUG >= 3
if (tid == 0) {
printf("\t\t\tBlock %d: d = %d: d_in = [ ", blockIdx.x, d);
for (int i=0; i<n; ++i) {
if (i == n-1) {
printf("%d ]\n", d_in[i]);
} else {
printf("%d, ", d_in[i]);
}
}
}
#endif
}
/* The result for a block is in the last thread entry for that block.
* If n is not a multiple of blockDim.x, the result is the entry of
* gid == n-1.
*/
if ((tid == blockDim.x-1 && gid != n-1) || gid == n-1) {
d_out[blockIdx.x] = d_in[gid];
#if DEBUG >= 2
printf("\t\tBlock %d min: d_out[%d] = %d\n",
blockIdx.x, blockIdx.x, d_out[blockIdx.x]);
#endif
}
}
}
/** CUDA kernel to compute array with the last digit of entries in input array
*
* Specifically, compute array d_out such that d_out[i] is the last digit of
* d_in[i] for all i.
*
* \param d_out Pointer to output array in global memory
* \param d_in Pointer to input array in global memory
* \param n Size of the problem (input array size)
*/
// One thread per element: d_out[i] = last decimal digit of d_in[i].
// NOTE(review): C++ % keeps the sign of the dividend, so a negative input
// yields a negative "digit" — confirm inputs are non-negative or take abs.
__global__ void lastDigitKernel(int *d_out, int *d_in, int n) {
// Initialize global ID
int gid = threadIdx.x + blockDim.x * blockIdx.x;
// Ensure we only access available array entries
if (gid < n) {
// Save last digit to output array
d_out[gid] = d_in[gid] % 10;
#if DEBUG >= 3
printf("\t\t\td_in[%d] = %d\td_out[%d] = %d\n",
gid, d_in[gid], gid, d_out[gid]);
#endif
}
}
/** Q1 a) Compute minA, the minimum value in the input array
*
* This function uses the Hillis-Steele version of parallel scan to find the
* minimum value in the input array. Then, it outputs the result to the
* OUTPUT_FILE_Q1A output file.
*
* This function will only work for problems of size (input array size)
* (cudaDeviceProp.maxThreadsPerBlock)^2. For example, if we have a
* cudaDeviceProp.maxThreadsPerBlock = 1024 (a normal value for current Nvidia
* GPUs), the max problem size is N = 1024^2 = 1,048,576. Since the professor
* said the max graded size should be 10^6, this restriction sufices.
*
* \param v_in Input array as a vector
* \param dev_props CUDA device properties
*/
// Host driver for Q1a: two-pass parallel-scan MIN over v_in, result written
// to OUTPUT_FILE_Q1A. Pass 1 produces one partial min per block in
// d_intermediate; pass 2 (single block) reduces those partials into d_out.
void q1a (const std::vector<int> &v_in, cudaDeviceProp *dev_props) {
#if DEBUG
printf("\tTransfering input array to GPU memory...\n");
#endif
// Declare GPU memory pointers
int *d_in, *d_intermediate, *d_out;
// Allocate GPU memory
int N = v_in.size(); // Problem size (input array size)
int d_in_size = N * sizeof(int); // Input array size in bytes
int d_out_size = sizeof(int); // Output array size in bytes
#if DEBUG
printf("\tN (input array size): %d\n", N);
#endif
// Two-pass scheme caps the problem size at maxThreadsPerBlock^2.
if (N > ((int)((*dev_props).maxThreadsPerBlock) * (int)((*dev_props).maxThreadsPerBlock))) {
fprintf(stderr, "ERROR:q1a: problem size (input array size) is too large\n");
exit(EXIT_FATAL);
}
cudaMalloc((void **) &d_in, d_in_size);
cudaMalloc((void **) &d_intermediate, d_in_size); // overallocated
cudaMalloc((void **) &d_out, d_out_size);
/* Transfer the input array to the GPU
* Since the elements of a vector are stored contiguously in memory, we can
* pass a pointer to the first element of the vector, and that will act as
* if we passed a C array.
*/
cudaMemcpy(d_in, &v_in[0], d_in_size, cudaMemcpyHostToDevice);
#if DEBUG
// Set up a timer to measure the elapsed time to find the min
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("\tFinding minimum entry in the array...\n");
#endif
// Calculate the number of blocks and threads to use
int threads_per_block = (int)((*dev_props).maxThreadsPerBlock); // Max number of threads per block
int blocks_per_grid = (N + (threads_per_block - 1)) / threads_per_block;
#if DEBUG
printf("\tThreads per block: %d\n", threads_per_block);
printf("\tBlocks per grid: %d\n", blocks_per_grid);
printf("\tRunning kernel...\n");
cudaEventRecord(start, 0);
#endif
// Launch the kernel to find min
parallelScanMinKernel<<<blocks_per_grid, threads_per_block>>>
(d_intermediate, d_in, N);
// Make sure all the blocks finish executing
// NOTE(review): the second synchronize below is redundant — one suffices.
cudaDeviceSynchronize();
cudaDeviceSynchronize();
// If there are more than one block, we need to repeat the process with their results
if (blocks_per_grid > 1) {
#if DEBUG >=2
// Copy array to host
int *a_out;
a_out = (int*) malloc(d_in_size);
cudaMemcpy(a_out, d_intermediate, d_in_size, cudaMemcpyDeviceToHost);
printf("\tBlock results: d_intermediate = [ ");
for (int i=0; i<blocks_per_grid; ++i) {
if (i == blocks_per_grid-1) {
printf("%d ]\n", a_out[i]);
} else {
printf("%d, ", a_out[i]);
}
}
free(a_out);
#endif
#if DEBUG >= 2
printf("\tThreads per block: %d\n", blocks_per_grid);
printf("\tBlocks per grid: %d\n", 1);
printf("\tRunning kernel...\n");
#endif
// Fill one block with the results from the other blocks
parallelScanMinKernel<<<1, blocks_per_grid>>>
(d_out, d_intermediate, blocks_per_grid);
}
#if DEBUG
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Calculate elapsed time, and print it
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\tAverage time elapsed: %f\n", elapsedTime);
#endif
// Copy back the min result from GPU
// (blocking cudaMemcpy on the default stream also orders after the kernel)
int a_out;
if (blocks_per_grid > 1) {
cudaMemcpy(&a_out, d_out, d_out_size, cudaMemcpyDeviceToHost);
} else {
// Single-block case: the answer never left d_intermediate.
cudaMemcpy(&a_out, d_intermediate, d_out_size, cudaMemcpyDeviceToHost);
}
#if DEBUG >= 2
printf("\ta_out: %d\n", a_out);
#endif
// Copy result to output vector
std::vector<int> v_out (&a_out, &a_out + 1);
#if DEBUG
printf("\tOutput = [ ");
for (int i=0; i<v_out.size(); ++i) {
if (i == v_out.size()-1) {
printf("%d ]\n", v_out[i]);
} else {
printf("%d, ", v_out[i]);
}
}
#endif
// Free GPU memory
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
// Write output to file
write_output(OUTPUT_FILE_Q1A, v_out);
}
/** Q1 b) Compute an array B such that B[i] is the last digit of A[i] for all i
 *
 * Copies the input vector to the GPU, runs lastDigitKernel element-wise over
 * it, copies the result back, and writes it to OUTPUT_FILE_Q1B.
 *
 * \param v_in Input array as a vector
 * \param dev_props CUDA device properties (used for maxThreadsPerBlock)
 */
void q1b (const std::vector<int> &v_in, cudaDeviceProp *dev_props) {
#if DEBUG
printf("\tTransfering input array to GPU memory...\n");
#endif
// Declare GPU memory pointers
int *d_in, *d_out;
// Problem size and buffer sizes in bytes
int N = v_in.size();
int d_in_size = N * sizeof(int);
int d_out_size = d_in_size;
#if DEBUG
printf("\tN (input array size): %d\n", N);
#endif
// NOTE: unlike q1a, this kernel is a purely element-wise map, so N is not
// limited by maxThreadsPerBlock and no size check is needed here.
cudaMalloc((void **) &d_in, d_in_size);
cudaMalloc((void **) &d_out, d_out_size);
/* Transfer the input array to the GPU
 * Since the elements of a vector are stored contiguously in memory, we can
 * pass a pointer to the first element of the vector, and that will act as
 * if we passed a C array.
 */
cudaMemcpy(d_in, &v_in[0], d_in_size, cudaMemcpyHostToDevice);
#if DEBUG
// Set up a timer to measure the elapsed time to find the min
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("\tFinding last digit for all entries in the array...\n");
#endif
// Calculate the number of blocks and threads to use (ceiling division)
int threads_per_block = (int)((*dev_props).maxThreadsPerBlock);
int blocks_per_grid = (N + (threads_per_block - 1)) / threads_per_block;
#if DEBUG
printf("\tThreads per block: %d\n", threads_per_block);
printf("\tBlocks per grid: %d\n", blocks_per_grid);
printf("\tRunning kernel...\n");
cudaEventRecord(start, 0);
#endif
#if DEBUG >= 3
printf("\t\tIterations:\n");
#endif
// Launch the kernel that computes the last digit of every element
lastDigitKernel<<<blocks_per_grid, threads_per_block>>>
(d_out, d_in, N);
// Catch launch-configuration errors, then wait for the kernel to finish.
// (The original called cudaDeviceSynchronize() twice; once is enough.)
cudaError_t kerr = cudaGetLastError();
if (kerr != cudaSuccess) {
fprintf(stderr, "ERROR:q1b: kernel launch failed: %s\n", cudaGetErrorString(kerr));
exit(EXIT_FATAL);
}
cudaDeviceSynchronize();
#if DEBUG
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Calculate elapsed time, and print it
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("\tAverage time elapsed: %f\n", elapsedTime);
#endif
// Copy back the result from GPU
int *a_out = (int*) malloc(d_out_size);
cudaMemcpy(a_out, d_out, d_out_size, cudaMemcpyDeviceToHost);
#if DEBUG >= 2
printf("\ta_out = [ ");
for (int i=0; i<N; ++i) {
if (i == N-1) {
printf("%d ]\n", a_out[i]);
} else {
printf("%d, ", a_out[i]);
}
}
#endif
// Copy resulting array to output vector
std::vector<int> v_out (a_out, a_out + N);
#if DEBUG
printf("\tOutput = [ ");
for (int i=0; i<v_out.size(); ++i) {
if (i == v_out.size()-1) {
printf("%d ]\n", v_out[i]);
} else {
printf("%d, ", v_out[i]);
}
}
#endif
// Free GPU memory
cudaFree(d_in);
cudaFree(d_out);
// Free host memory
free(a_out);
// Save output to file
write_output(OUTPUT_FILE_Q1B, v_out);
}
/** Main
 *
 * Set up CUDA device, read input file, and run Q1a and Q1b.
 *
 * \param argc Number of command-line arguments
 * \param argv Array of command-line arguments
 * \return Program return code (0 on success; exits with EXIT_FATAL on error)
 */
int main (int argc, char **argv) {
#if DEBUG
std::printf("Executing main...\n");
#endif
std::vector<int> v_in;
int device_count;
int dev = 0;
cudaDeviceProp dev_props;
#if DEBUG
printf("Detecting CUDA devices...\n");
#endif
// Check there are CUDA devices available
cudaGetDeviceCount(&device_count);
if (device_count == 0) {
fprintf(stderr, "ERROR:main: no CUDA devices found\n");
exit(EXIT_FATAL);
}
// Use device 0
cudaSetDevice(dev);
// cudaGetDeviceProperties returns cudaSuccess (== 0) when the query works
if (cudaGetDeviceProperties(&dev_props, dev) == 0) {
#if DEBUG
// NOTE(review): casting totalGlobalMem to int overflows on devices with
// >= 2 GiB of memory; debug output only, but worth fixing.
printf("Using device:\n"
"\tID: %d\n"
"\tName: %s\n"
"\tGlobal mem: %d B\n"
"\tMax threads per block: %d\n"
"\tCompute: v%d.%d\n"
"\tClock: %d kHz\n",
dev,
dev_props.name,
(int)dev_props.totalGlobalMem,
(int)dev_props.maxThreadsPerBlock,
(int)dev_props.major,
(int)dev_props.minor,
(int)dev_props.clockRate);
#endif
} else {
fprintf(stderr, "ERROR:main: could not find CUDA device information\n");
exit(EXIT_FATAL);
}
#if DEBUG
std::printf("Reading input array...\n");
#endif
// Read input array
v_in = read_input(INPUT_FILE);
#if DEBUG >= 2
printf("\tInput array = [ ");
for (int i=0; i<v_in.size(); ++i) {
if (i == v_in.size()-1) {
printf("%d ]\n", v_in[i]);
} else {
printf("%d, ", v_in[i]);
}
}
#endif
#if DEBUG
std::printf("Running Q1 a...\n");
#endif
// Problem q1 a
q1a(v_in, &dev_props);
/*
#if DEBUG
std::printf("Reseting device...\n");
#endif
cudaDeviceReset();
*/
#if DEBUG
std::printf("Running Q1 b...\n");
#endif
// Problem q1 b
q1b(v_in, &dev_props);
/*
#if DEBUG
std::printf("Reseting device...\n");
#endif
cudaDeviceReset();
*/
#if DEBUG
std::printf("Done\n");
#endif
return 0;
}
|
2,673 | //pass
//--blockDim=32 --gridDim=2
#include <cuda.h>
// Verification-tool test kernel (the "//pass" / --blockDim/--gridDim lines
// above are GPUVerify annotations, not comments for humans).
// Performs a tree-style halving reduction over A into its low half.
// NOTE(review): A[idx + d] is read by every thread before the `idx < d`
// guard, and there is no __syncthreads() between reading A[idx + d] and
// writing A[idx] — presumably the point of this test case is for the
// verifier to reason about that access pattern; confirm intent before
// reusing this kernel as real code.
__global__ void test_Prog(int *A, int N) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = blockDim.x * bid + tid;
for(int d = N/2; d > 0; d = d / 2)
{
int tmp = A[idx + d];
// Inner loop repeats the same masked write N times (stress pattern)
for (int i = 0; i < N; ++i)
{
int tmp2 = A[idx];
int t2 = tmp2;
int t32 = t2;
if (idx < d) {
A[idx] = tmp + t32;
}
}
}
}
2,674 | #include "includes.h"
#define BLKX 32
#define BLKY 32
cudaStream_t gstream;
/**
 * Zero-initialise the h and g grids and set a hot boundary region to 100.
 *
 * One thread per element; idX is the flattened (row * M + col) index.
 * Assumes h and g each hold nbLines * M doubles — TODO confirm with caller.
 *
 * @param nbLines number of grid rows
 * @param M       number of grid columns
 * @param h       output grid (device pointer, nbLines * M doubles)
 * @param g       output grid (device pointer, nbLines * M doubles)
 */
__global__ void initData(int nbLines, int M, double *h, double *g)
{
    long idX = threadIdx.x + blockIdx.x * blockDim.x;
    // Bounds guard: the last valid index is nbLines * M - 1.
    // BUGFIX: the original used `>`, which let idX == nbLines * M write one
    // element past the end of both arrays.
    if (idX >= nbLines * M)
        return;
    h[idX] = 0.0;
    g[idX] = 0.0;
    // Boundary condition: indices M+1 .. 2*M-2 (interior of the second
    // row) start hot at 100.0.
    if ( idX >= M + 1 && idX < 2*M - 1 ){
        h[idX] = 100.0;
        g[idX] = 100.0;
    }
}
2,675 | #include <memory.h>
#include <ctime>
#include <random>
#include <iomanip>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <numeric>
#include <sstream>
#include <fstream>
#include <cassert>
#include <climits>
#include <cstdlib>
#include <cstring>
#include <string>
#include <cstdio>
#include <vector>
#include <cmath>
#include <functional>
#include <queue>
#include <deque>
#include <stack>
#include <list>
#include <map>
#include <set>
#include <unordered_set>
#include <unordered_map>
#define REP(i,s,n) for(int (i)=s; (i)<(int)(n);(i)++)
#define RIT(it,c) for(__typeof(c.begin()) it = c.begin();it!=c.end();it++)
#define ALL(x) x.begin(), x.end()
#define SZ(x) (int)(x).size()
#define MSET(m,v) memset(m,v,sizeof(m))
using namespace std;
typedef long double ld;
typedef vector<int> vi;
typedef vector<long> vl;
typedef vector<bool> vb;
typedef vector<double> vd;
typedef pair<int,int> ii;
typedef pair<long, long> ll;
typedef unordered_set<int> ui;
/*
 * Simple linear regression trained with batch gradient descent.
 * Weights and bias start at 0.5; the error metric is mean squared error.
 */
class LinearReg{
    int N, K, N_test;  // #train rows, #features, #test rows
    vector<vd> train_X, test_X;
    vd train_Y, pred_Y, test_Y, weights;
    double bias;
public:
    LinearReg(vector<vd> train_x, vd train_y, vector<vd> test_x, vd test_y){
        train_X = train_x;
        train_Y = train_y;
        test_X = test_x;
        test_Y = test_y;
        // Make sure that all the dimensions are correct
        assert(!train_X.empty() && !train_X[0].empty());
        assert(!test_X.empty() && !test_X[0].empty());
        assert(train_X.size() == train_Y.size());
        assert(test_X.size() == test_Y.size());
        assert(train_X[0].size() == test_X[0].size());
        N = (int)train_X.size();
        K = (int)train_X[0].size();
        N_test = (int)test_X.size();
        weights = vd(K, 0.5);
        pred_Y.resize(N);
        bias = 0.5;
    }
    // Mean squared error over the training set.
    // pred_Y must be up to date (call computePred() first).
    double getError(){
        double sum = 0.;
        for(int i=0;i<N;++i) sum += pow(pred_Y[i] - train_Y[i], 2.);
        return sum/N;
    }
    // Mean squared error over the test set (predictions computed inline).
    double getTestError(){
        double sum = 0;
        for(int i=0;i<N_test;++i){
            double tmp = bias - test_Y[i];
            for(int j=0;j<K;++j) tmp += weights[j] * test_X[i][j];
            sum += pow(tmp, 2);
        }
        // BUGFIX: average over the number of TEST samples; the original
        // divided by N (the training-set size), skewing the reported error
        // whenever N != N_test.
        return sum/N_test;
    }
    // Recompute pred_Y = bias + train_X . weights for all training rows.
    void computePred(){
        for(int i=0;i<N;++i){
            pred_Y[i] = bias;
            for(int j=0;j<K;++j) pred_Y[i] += train_X[i][j] * weights[j];
        }
    }
    // One batch gradient-descent step using the current pred_Y residuals.
    void oneStepUpdate(double learning_rate){
        for(int i=0;i<N;++i){
            bias -= learning_rate * (pred_Y[i] - train_Y[i])/N;
            for(int j=0;j<K;++j) weights[j] -= learning_rate * (pred_Y[i] - train_Y[i]) * train_X[i][j]/N;
        }
        return ;
    }
    // Run N_step update/predict rounds; returns the final training error.
    double multipleSteps(int N_step, double learning_rate){
        computePred();
        for(int t=1; t<=N_step; ++t){
            oneStepUpdate(learning_rate);
            computePred();
        }
        return getError();
    }
    vd getWeights(){
        return weights;
    }
    double getBias(){
        return bias;
    }
};
/*
 * Harness that generates a synthetic 2-feature linear data set
 * y = w1*x1 + w2*x2 + b + noise, writes it to disk, and fits a LinearReg
 * model against it.
 */
class TestLinearReg{
int n_var;
double bias, Amplitude;  // Amplitude scales the uniform noise added to y
vd weights;
vector<vd> train_x, test_x;
vd train_y, test_y;
// Ground-truth linear generator: bias + sum_i weights[i] * x[i]
double linearFn(const vd &x){
double ans = bias;
for(int i=0;i<n_var;++i) ans += x[i]*weights[i];
return ans;
}
// Quadratic generator (expects 2 weights per variable).
// NOTE(review): currently unused — nothing stores 2*n_var weights and the
// constructor ignores its `Quad` flag; confirm whether this is dead code.
double quardFn(const vd &x){
assert(weights.size() == 2*x.size());
double ans = bias;
for(int i=0;i<n_var;++i){
ans += weights[2*i]*x[i] + weights[2*i+1]*x[i]*x[i];
}
return ans;
}
// Uniform pseudo-random double in [0, 1]
double geneRand(){
return double(rand())/RAND_MAX;
}
public:
// NOTE(review): the `Quad` parameter is accepted but never stored or used.
TestLinearReg(double w1, double w2, double b, bool Quad = false, double Amp = 0.2){
srand(1);  // fixed seed for reproducible data sets
weights = vd{w1, w2};
bias = b;
Amplitude = Amp;
n_var = 2;
}
// Generate N_train / N_test samples with inputs in [0, 1.7) plus noise
void generateDateSet(int N_train, int N_test){
for(int i=0;i<N_train;++i){
vd tmp_x = vd{1.7*geneRand(), 1.7*geneRand()};
train_x.push_back(tmp_x);
train_y.push_back(linearFn(tmp_x) + Amplitude * geneRand());
}
for(int i=0;i<N_test;++i){
vd tmp_x = vd{1.7*geneRand(), 1.7*geneRand()};
test_x.push_back(tmp_x);
test_y.push_back(linearFn(tmp_x) + Amplitude * geneRand());
}
}
// Write the training set to `filename` (redirects stdout via freopen;
// all later cout output goes to this file until redirected again).
void outputTrain(string filename){
std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0);
freopen(filename.c_str(), "w", stdout);
for(int i=0;i<(int)train_x.size();++i){
for(auto k:train_x[i]) cout<<k<<' ';
cout<<train_y[i]<<endl;
}
}
// Write the test set to `filename` (same freopen redirection caveat).
void outputTest(string filename){
std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0);
freopen(filename.c_str(), "w", stdout);
for(int i=0;i<(int)test_x.size();++i){
for(auto k:test_x[i]) cout<<k<<' ';
cout<<test_y[i]<<endl;
}
}
// Train for n_block rounds of n_step gradient steps each.
// NOTE(review): despite `steps` being tracked, ans[0] actually receives the
// return value of multipleSteps() (a training error), ans[1] gets test
// errors, and ans[2] is only pushed once — confirm which layout callers
// expect before changing.
vector<vd> testModel(double l_rate,int n_block,int n_step){
vector<vd> ans(3, vd());
LinearReg model(train_x, train_y, test_x, test_y);
double steps = 0.;
ans[0].push_back(steps);
model.computePred();
ans[1].push_back(model.getError());
ans[2].push_back(model.getTestError());
for(int i=0;i<n_block;++i){
steps += n_step;
ans[0].push_back(model.multipleSteps(n_step, l_rate));
ans[1].push_back(model.getTestError());
}
vd pred_wei = model.getWeights();
double pred_bias = model.getBias();
cerr<<"Here is what we obtain: y = x1*"<<pred_wei[0]<<" + x2*"<<pred_wei[1]<<" +"<<pred_bias<<endl;
return ans;
}
};
// Entry point: parse optional CLI overrides (w1 w2 b n_train n_test),
// generate a synthetic data set, train, time the run, and dump the cost
// curves to serial_rslt.txt. Progress goes to stderr because stdout is
// repeatedly redirected to files via freopen.
int main(int argc, char* argv[]){
std::ios_base::sync_with_stdio(false),cin.tie(0),cout.tie(0);
double w1 = 1.7, w2 = 0.8, b = 2.2;
int n_train = 7000, n_test = 3000;
// Positional overrides; any prefix of the argument list may be supplied
if(argc > 1) w1 = stod(argv[1]);
if(argc > 2) w2 = stod(argv[2]);
if(argc > 3) b = stod(argv[3]);
if(argc > 4) n_train = stoi(argv[4]);
if(argc > 5) n_test = stoi(argv[5]);
TestLinearReg testLR(w1, w2, b);
cerr<<"Going to fit this function: y = x1*"<<w1<<" + x2*"<<w2<<" +"<<b<<endl;
cerr<<"Generating "<<n_train<<" training examples and "<<n_test<<" testing examples"<<endl;
testLR.generateDateSet(n_train, n_test);
string trainfile = "train_data.txt", testfile = "test_data.txt", resultfile = "serial_rslt.txt";
// These redirect stdout via freopen; cout below writes to the last file
testLR.outputTrain(trainfile);
testLR.outputTest(testfile);
cerr<<"Data sets are stored in "<<trainfile<<" and "<<testfile<<endl;
cerr<<"Finish generating data"<<endl;
cerr<<"Testing the model"<<endl;
// CPU-clock timing of the whole training run
clock_t start_time = clock(), end_time;
auto res = testLR.testModel(0.05, 50, 100);
end_time = clock();
float comp_time = float(end_time - start_time)/CLOCKS_PER_SEC;
cerr<< setprecision(8);
cerr<<"=========================================Time Usage========================================="<<endl<<endl;
cerr<<comp_time<<endl<<endl;
cerr<<"============================================================================================"<<endl<<endl;
cerr<<"Finish train the model"<<endl;
// Final redirection: result vectors go to serial_rslt.txt
freopen(resultfile.c_str(), "w", stdout);
for(auto vec:res){
for(auto k:vec) cout<<k<<' ';
cout<<endl;
}
cerr<<"The cost function results are stored in "<<resultfile<<endl;
return 0;
}
|
2,676 | #include "Config.cuh.cu"
namespace RayTracing
{
// Reads the full scene configuration; field order below is the file format.
std::istream& operator>>(std::istream &istream, Config& config)
{
istream >> config.framesNum;
istream >> config.outputTemplate;
istream >> config.width >> config.height;
istream >> config.horizontalViewDegrees;
istream >> config.lookFrom >> config.lookAt;
istream >> config.A >> config.B >> config.C;
istream >> config.floorData;
istream >> config.lightSourcesNum;
for (int i = 0; i < config.lightSourcesNum; ++i)
istream >> config.lightSources[i];
istream >> config.recursionDepth;
// The file specifies per-axis samples; total per pixel is the square.
istream >> config.samplesPerPixel;
config.samplesPerPixel *= config.samplesPerPixel;
return istream;
}
// Reads trajectory parameters: base r/z/phi, amplitudes (rA/zA),
// frequencies (rOm/zOm/phiOm) and phases (rP/zP) — presumably a
// cylindrical-coordinate camera path; confirm against the renderer.
std::istream& operator>>(std::istream &istream, Trajectory& trajectory)
{
istream >> trajectory.r >> trajectory.z >> trajectory.phi;
istream >> trajectory.rA >> trajectory.zA;
istream >> trajectory.rOm >> trajectory.zOm >> trajectory.phiOm;
istream >> trajectory.rP >> trajectory.zP;
return istream;
}
// Reads one figure: placement, colour, size, material, edge-light count.
std::istream& operator>>(std::istream &istream, FigureData& figureData)
{
istream >> figureData.origin;
istream >> figureData.color;
istream >> figureData.radius;
istream >> figureData.reflectance >> figureData.transparency;
istream >> figureData.edgeLightsNum;
return istream;
}
// Reads the floor: A/B/C/D (presumably plane coefficients — confirm),
// texture path, colour and reflectance.
std::istream& operator>>(std::istream &istream, FloorData& floorData)
{
istream >> floorData.A >> floorData.B >> floorData.C >> floorData.D;
istream >> floorData.texturePath;
istream >> floorData.color;
istream >> floorData.reflectance;
return istream;
}
// Reads one light source: position, radius, colour.
std::istream& operator>>(std::istream &istream, LightSourceData& lightSourceData)
{
istream >> lightSourceData.origin >> lightSourceData.radius >> lightSourceData.color;
return istream;
}
} // namespace RayTracing
|
2,677 | #include "includes.h"
/**
 * Second forward pass of a small MLP: out[l] += layer1 · syn2[:, l], then a
 * logistic-style squash. One thread per output neuron (Y = 128 hidden
 * units, Z = 10 outputs). Assumes out[] is zero-initialised before launch —
 * TODO confirm with the caller.
 */
__global__ void forwardPass2(float* layer1, float* syn2, float* out)
{
    int l = blockDim.x*blockIdx.x + threadIdx.x;
    int Y = 128;
    int Z = 10;
    // Bounds guard (added): only Z outputs exist; any extra threads in the
    // launch must not write past the end of out[].
    if (l >= Z)
        return;
    #pragma unroll
    for (int j=0; j < Y; ++j)
        out[l] += layer1[j] * syn2[j*Z + l];
    // NOTE(review): this computes 1/(1+exp(x)) = sigmoid(-x); the usual
    // logistic activation is 1/(1+exp(-x)). Left as-is — confirm intent.
    out[l] = 1.0/(1.0 + exp(out[l]));
}
2,678 | #include "includes.h"
// Backward pass of a normalisation layer: scales the incoming gradient by
// 1/sqrt(var + eps) and adds the variance- and mean-gradient contributions
// for the element's channel. Grid may be 2-D in blocks; one thread/element.
__global__ void _norm_backward_kernel(float *x, float *mean, float *var, float *mean_diff, float *var_diff, int b, int c, int wxh, float *grad)
{
    int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (idx >= b * c * wxh)
        return;
    // Channel index for this element (layout: [batch][channel][w*h])
    int ch = (idx / wxh) % c;
    grad[idx] = grad[idx] * 1.0f / (sqrtf(var[ch] + 0.00001f)) + var_diff[ch] * 2.0f * (x[idx] - mean[ch]) / (wxh * b) + mean_diff[ch] / (wxh * b);
}
2,679 | /*
* Copyright 2019 Australian National University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either or express implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <cuda.h>
// this macro checks for errors in cuda calls
#define Err(ans) { gpucheck((ans), __FILE__, __LINE__); }
// Abort with a descriptive message when a CUDA runtime call has failed;
// no-op on cudaSuccess. Used via the Err(...) macro above.
inline void gpucheck(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPU Err: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(code);
}
// add one to an array of n integers - indexed from 0
// One thread per element (launched in main as <<<1,100>>>).
__global__ void addone(int *data, int n) {
    // BUGFIX: the original indexed with blockIdx.x alone (always 0 for a
    // single-block launch, so every thread raced on data[0]) and used an
    // off-by-one `idx <= n` guard that allowed a write one past the end.
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) data[idx] = data[idx] + 1;
}
// add 2 to elements of the array and then double it
// NOTE(review): despite the comment above, the body doubles the element and
// then subtracts 10 (matching the function name); it never adds 2 — confirm
// which behaviour is intended.
__global__ void doublesubten(int *data, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < n) {
data[idx] = 2*data[idx];
// NOTE(review): __syncthreads() inside a divergent `if (idx < n)` branch
// is undefined when n is not a multiple of blockDim.x; it is only safe
// here because main launches exactly n threads. Hoist it out of the if.
__syncthreads();
}
if (idx < n) {
data[idx] = data[idx] - 10;
}
}
// set to zero all the elements that are over 100
__global__ void zerooutoverhundred(int *data, int n) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        // BUGFIX: the original decremented a copy of the value down to zero
        // one step at a time to "count" it — O(val) wasted iterations, and
        // an infinite loop for negative values. A direct comparison is
        // equivalent for all non-negative inputs and safe for all inputs.
        if (data[idx] > 100) data[idx] = 0;
    }
}
// sum all the elements in the array form 0 to i-1 and place the result in data[i].
// WARNING(review): this is racy — each thread reads data[0..idx-1] from
// global memory while other threads concurrently overwrite those same slots
// with their own prefix sums, so results depend on scheduling. A correct
// fix needs an out-of-place output buffer (interface change) or a proper
// scan primitive (e.g. cub::DeviceScan); flagged rather than silently
// rewritten here.
__global__ void prefixsum(int *data, int n) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < n) {
int sum = 0;
for (int i = 0;i<idx;i++) sum += data[i];
data[idx] = sum;
}
}
// Driver: build 0..99 on the host, run the four demo kernels over it on the
// device, and print the final array.
int main(void) {
    int *data_h, *data_d;
    const int size = 100;                        // element count
    const size_t bytes = size * sizeof(int);     // BUGFIX: allocate bytes, not elements
    Err(cudaMallocHost(&data_h, bytes));
    for (int i = 0; i < size; i++) data_h[i] = i;
    Err(cudaMalloc(&data_d, bytes));             // BUGFIX: data_d was never allocated
    // BUGFIX: the copy to the GPU used direction cudaMemcpyDeviceToHost
    Err(cudaMemcpy(data_d, data_h, bytes, cudaMemcpyHostToDevice));
    addone<<<1,100>>>(data_d, size);
    doublesubten<<<1,100>>>(data_d, size);
    zerooutoverhundred<<<1,100>>>(data_d, size);
    prefixsum<<<1,100>>>(data_d, size);
    Err(cudaGetLastError());                     // catch launch errors
    Err(cudaMemcpy(data_h, data_d, bytes, cudaMemcpyDeviceToHost));
    // BUGFIX: the original printed the pointer data_d instead of elements
    for (int i = 0; i < size; i++) printf("%d, ", data_h[i]);
    Err(cudaFreeHost(data_h));                   // BUGFIX: pinned memory needs cudaFreeHost
    Err(cudaFree(data_d));
}
|
2,680 | //ֵ˲Ϊboxfilter
#define TILE_DIM 16
#define BLOCKSIZE 128
// Horizontal box filter of radius r over one image row per block, built on a
// Blelloch-style shared-memory prefix sum: each output is a difference of
// two prefix sums divided by the window size (2r+1).
// Launch expectations (from the code): gridDim.x >= height (one block per
// row), BLOCKSIZE threads per block, and dynamic shared memory `temp` big
// enough for num * 2 * BLOCKSIZE floats (the edge-padded row).
__global__ void d_boxfilter_x_global(float *src, float *dst, int width, int height, int r)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int offset = 1;
// Number of 2*BLOCKSIZE-element segments needed to cover the padded row
int num = (width + 2 * r + 2 * BLOCKSIZE - 1) / (2 * BLOCKSIZE);
int len = num * 2 * BLOCKSIZE;
int extra = len - r - width;
float scale = 1.0f / (float)((r << 1) + 1);
// sum[i] holds the total of all segments before segment i, bridging
// segment boundaries in the final difference below.
// NOTE(review): sum[0] is written by every thread (same value — a benign
// race), and the fixed capacity 35 caps num at 34 segments; confirm the
// caller guarantees width + 2r fits.
__shared__ float sum[35]; sum[0] = 0;
extern __shared__ float temp[];
if (bid < height)
{
// Left padding: replicate the first pixel of the row r times
for (int i = tid; i < r; i += BLOCKSIZE)
{
temp[i] = src[bid*width + 0];
}
// Copy the row itself into shared memory
for (int i = tid; i < width; i += BLOCKSIZE)
{
temp[r + i] = src[bid * width + i];
}
// Right padding: replicate the last pixel of the row
for (int i = tid; i < extra; i += BLOCKSIZE)
{
temp[r + width + i] = src[(bid + 1) * width - 1];
}
__syncthreads();
// Exclusive Blelloch scan of each 2*BLOCKSIZE segment, in place
for (int cnt = 0; cnt < num; ++cnt)
{
int bias = cnt * BLOCKSIZE * 2;
// Up-sweep (reduce) phase
for (int j = BLOCKSIZE; j > 0; j >>= 1)
{
if (tid < j)
{
int ai = bias + offset * (2 * tid + 1) - 1;
int bi = bias + offset * (2 * tid + 2) - 1;
temp[bi] += temp[ai];
}
offset *= 2;
__syncthreads();
}
// Record this segment's total, then clear the root for the down-sweep
if (tid == 0)
{
sum[cnt + 1] = temp[(cnt + 1) * BLOCKSIZE * 2 - 1] + sum[cnt];
temp[(cnt + 1) * BLOCKSIZE * 2 - 1] = 0;
}
__syncthreads();
// Down-sweep phase: convert the reduction tree to an exclusive scan
for (int j = 1; j < (BLOCKSIZE * 2); j *= 2)
{
offset >>= 1;
if (tid < j)
{
int ai = bias + offset * (2 * tid + 1) - 1;
int bi = bias + offset * (2 * tid + 2) - 1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
__syncthreads();
}
}
// Box sum = prefix(i + 2r + 1) - prefix(i); temp[] is only a prefix
// within its own segment, so sum[] supplies the preceding segments.
for (int i = tid; i < width; i += BLOCKSIZE)
{
float sum_box = temp[i + 2 * r + 1] + sum[(i + 2 * r + 1) / (BLOCKSIZE * 2)] - temp[i] - sum[i / (BLOCKSIZE * 2)];
dst[bid * width + i] = sum_box * scale;
}
}
}
|
2,681 | #include "includes.h"
// No-op kernel; body is intentionally empty.
__global__ void k_dummy_test()
{
}
2,682 | #include <iostream>
using namespace std;
#define MATRIX_SIZE 4
#define CUDAMALLOC_ERROR(_err) \
do { \
if (_err != cudaSuccess) { \
printf("%s failed in file %s at line #%d\n", cudaGetErrorString(_err),__FILE__,__LINE__); \
exit(EXIT_FAILURE); \
} \
} while(0)
// Set every element of a MATRIX_SIZE x MATRIX_SIZE matrix to `value`.
void fillMatrix(float *matrix, float value)
{
    const int total = MATRIX_SIZE * MATRIX_SIZE;
    for (int idx = 0; idx < total; ++idx)
        matrix[idx] = value;
}
// Matrix-vector product: O[row] = sum_i M[row][i] * V[i] for an n x n
// matrix. One thread per output row (threads laid out along y).
__global__
void matrixVecKernel(float *M, float *V, float *O, size_t n)
{
    unsigned int Row = blockDim.y * blockIdx.y + threadIdx.y;
    // BUGFIX: the accumulator was declared `int`, truncating every float
    // product as it was added; accumulate in float to match the data.
    float result = 0.0f;
    if (Row < n) {
        for (int i = 0; i < n; ++i) {
            result += M[Row * n + i] * V[i];
        }
        O[Row] = result;
    }
}
// Element-wise matrix addition: C = A + B for an n x n matrix.
// One thread per element on a 2-D grid.
__global__
void matrixAddKernel(float *A, float *B, float *C, size_t n)
{
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= n || col >= n)
        return;
    // Flattened 1D coordinate of (row, col)
    int coord = row * n + col;
    C[coord] = A[coord] + B[coord];
}
// Demo: compute C = A + B for a MATRIX_SIZE x MATRIX_SIZE matrix on the GPU.
// Note: only the cudaMalloc calls are error-checked.
void addMatrices()
{
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, 0);
// For this machine, sqrt(1024) = 32. Therefore, we have a 32-square matrix per block.
dim3 dimBlock(sqrt(dev_prop.maxThreadsPerBlock),sqrt(dev_prop.maxThreadsPerBlock),1);
// A single block suffices: MATRIX_SIZE (4) fits inside one block
dim3 dimGrid(1,1,1);
int matrixSize = MATRIX_SIZE * MATRIX_SIZE;
int matrixByteSize = matrixSize * sizeof (float);
// Host buffers
float *h_A;
float *h_B;
float *h_C;
h_A = (float *) malloc (matrixByteSize);
h_B = (float *) malloc (matrixByteSize);
h_C = (float *) malloc(matrixByteSize);
fillMatrix(h_A, 1);
fillMatrix(h_B, 2);
fillMatrix(h_C, 0);
// Device buffers
float *d_A;
cudaError_t err = cudaMalloc((void**)&d_A, matrixByteSize);
CUDAMALLOC_ERROR(err);
float *d_B;
err = cudaMalloc((void**)&d_B, matrixByteSize);
CUDAMALLOC_ERROR(err);
float *d_C;
err = cudaMalloc((void**)&d_C, matrixByteSize);
CUDAMALLOC_ERROR(err);
cudaMemcpy(d_A, h_A, matrixByteSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, matrixByteSize, cudaMemcpyHostToDevice);
matrixAddKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, MATRIX_SIZE);
// Blocking copy; also synchronizes with the kernel launch above
cudaMemcpy(h_C, d_C, matrixByteSize, cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
free(h_A); free(h_B); free(h_C);
}
// Demo: compute O = M * V for a MATRIX_SIZE x MATRIX_SIZE matrix on the GPU.
// Note: only the cudaMalloc calls are error-checked.
void matrixVec() {
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, 0);
// One thread per output row, laid out along the y dimension
dim3 dimBlock(1,dev_prop.maxThreadsPerBlock,1);
dim3 dimGrid(1, ceil(MATRIX_SIZE/(float) dimBlock.y),1);
size_t matrixSize = sizeof (float) * MATRIX_SIZE * MATRIX_SIZE;
size_t vectorSize = sizeof (float) * MATRIX_SIZE;
float *matrix = (float *) malloc(matrixSize);
float *vector = (float *) malloc(vectorSize);
float *output = (float *) malloc(vectorSize);
// Data initialization: matrix of 2s, vector = (1, 2, ..., MATRIX_SIZE)
fillMatrix(matrix, 2);
for (int i = 0; i < MATRIX_SIZE; ++i) {
vector[i] = i + 1;
}
float *d_matrix, *d_vector, *d_output;
cudaError_t err = cudaMalloc((void **) &d_matrix, matrixSize);
CUDAMALLOC_ERROR(err);
err = cudaMalloc((void **) &d_vector, vectorSize);
CUDAMALLOC_ERROR(err);
err = cudaMalloc((void **) &d_output, vectorSize);
CUDAMALLOC_ERROR(err);
cudaMemcpy(d_matrix, matrix, matrixSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_vector, vector, vectorSize, cudaMemcpyHostToDevice);
matrixVecKernel<<<dimGrid, dimBlock>>>(d_matrix, d_vector, d_output, MATRIX_SIZE);
// Blocking copy; also synchronizes with the kernel launch above
cudaMemcpy(output, d_output, vectorSize, cudaMemcpyDeviceToHost);
cudaFree(d_matrix);
cudaFree(d_vector);
cudaFree(d_output);
free(matrix); free(vector); free(output);
}
// Run both demos: matrix addition first, then matrix-vector multiplication.
int main() {
    addMatrices();
    matrixVec();
    return 0;
}
|
2,683 | #include <cstdio>
#include <iostream>
// Query and print basic properties of CUDA device 0.
int main() {
    int n_devices = 0;
    cudaGetDeviceCount(&n_devices);
    std::cout << "Device count: " << n_devices << std::endl;
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    std::cout << "Max threads per block: " << props.maxThreadsPerBlock << std::endl;
    std::cout << "Multiprocessor count: " << props.multiProcessorCount << std::endl;
    std::cout << "Device clock rate: " << props.clockRate << std::endl;
    return 0;
}
|
2,684 |
// #include "linalg.cu"
/*!
 * Compute the initial labels for a gene pair in an expression matrix. Samples
 * with missing values and samples that are outside the expression thresholds are
 * labeled as such, all other samples are labeled as cluster 0. The number of
 * clean samples is returned.
 *
 * @param x First gene's expression values, one per sample
 * @param y Second gene's expression values, one per sample
 * @param sampleSize Number of samples (length of x, y, and labels)
 * @param minExpression Samples with either value below this are labeled -6
 * @param maxExpression Samples with either value above this are labeled -6
 * @param labels Output: per-sample label (-9 missing, -6 out of range, 0 clean)
 * @return Number of samples labeled 0 (clean)
 */
__device__
int fetchPair(
const float *x,
const float *y,
int sampleSize,
float minExpression,
float maxExpression,
char *labels)
{
// label the pairwise samples
int N = 0;
for ( int i = 0; i < sampleSize; ++i )
{
// label samples with missing values
if ( isnan(x[i]) || isnan(y[i]) )
{
labels[i] = -9;
}
// label samples which are below the minimum expression threshold
else if ( x[i] < minExpression || y[i] < minExpression )
{
labels[i] = -6;
}
// label samples which are above the maximum expression threshold
else if ( x[i] > maxExpression || y[i] > maxExpression )
{
labels[i] = -6;
}
// label any remaining samples as cluster 0
else
{
N++;
labels[i] = 0;
}
}
// return number of clean samples
return N;
}
|
2,685 | #include <iostream>
#include <stdlib.h>
#include <time.h>
#include <float.h>
#include <cuda.h>
#include <curand_kernel.h>
#define N 500000000 //Numero de valores de entrada
#define M 8 //Tamaño del histograma
#define REPETICONES 10000 //Repeticon de pruevas para calculo de media, max y min
#define SCALA 50 //Datos calculados en cada hilo
__device__ int vector_V[N]; //Vector de datos de entrada
__device__ int vector_H[M]; //Vector del histograma
/**
* Funcion para la comprovacion de errores cuda
*/
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
 * Kernel: fill vector_V with N pseudo-random values in [0, M).
 * Each thread initialises its own curand state (seeded with `random`) and
 * writes SCALA consecutive elements; the last thread of the last block also
 * covers the N % SCALA remainder.
 */
__global__ void inicializa_v(int random, curandState *states, int threadsPerBlock, int blocksPerGrid){
int iteraciones= SCALA;
// Last thread overall takes the remainder elements as extra iterations
if(blocksPerGrid-1 == blockIdx.x && threadIdx.x == threadsPerBlock -1){
iteraciones = iteraciones + (N % SCALA);
}
unsigned id_x = blockIdx.x*blockDim.x + threadIdx.x;
curandState *state = states + id_x;
curand_init(random, id_x, 0, state);
for(int i = 0; i < iteraciones; i++){
if(id_x*SCALA+i < N){
vector_V[id_x*SCALA+i] = (int)((curand_uniform(state)*1000)) % M;
}
}
}
/**
 * Kernel: zero the histogram bins. Launched with exactly M threads
 * (<<<1, M>>>), one thread per bin.
 */
__global__ void inicializa_h(){
unsigned id_x = blockIdx.x*blockDim.x + threadIdx.x;
vector_H[id_x] = 0;
}
/**
 * Histogram kernel: each thread accumulates its SCALA input elements into a
 * private per-thread histogram, then merges it into the global histogram
 * with one atomicAdd per bin — reducing contention versus one atomic per
 * element.
 */
__global__ void histograma(int threadsPerBlock, int blocksPerGrid){
// Private per-thread bin counts
int vector[M];
for(int i =0; i < M;i++){
vector[i] =0;
}
int iteraciones= SCALA;
// Last thread overall also processes the N % SCALA remainder
if(blocksPerGrid-1 == blockIdx.x && threadIdx.x == threadsPerBlock -1){
iteraciones = iteraciones + (N % SCALA);
}
unsigned id_x = blockIdx.x*blockDim.x + threadIdx.x;
for(int i = 0; i < iteraciones; i++){
if(id_x*SCALA+i < N){
int mod = vector_V[id_x*SCALA+i]%M;
vector[mod]++;
}
}
// Merge the private histogram into the global one
for(int i =0; i < M;i++){
int a =vector[i];
atomicAdd(&vector_H[i],a);
}
}
/**
 * Driver: repeat the fill / zero / histogram pipeline REPETICONES times,
 * timing each round with CUDA events, then print mean/max/min times.
 */
int main(){
srand(time(NULL));
static curandState *states = NULL;
//int h_v_d[N];
int h_v_h[M];
int threadsPerBlock = 1024;
// Ceiling division: one thread per SCALA input elements
int blocksPerGrid =((N/SCALA) + threadsPerBlock - 1) / threadsPerBlock;
float t_duration[REPETICONES];
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for(int j = 0; j< REPETICONES; j++){
CUDA_CHECK_RETURN(cudaEventRecord(start, 0));
// NOTE(review): the curand-state buffer is cudaMalloc'd and cudaFree'd
// inside the timed loop on every repetition, so the reported times
// include allocation cost; hoist it out if only kernel time is wanted.
CUDA_CHECK_RETURN(cudaMalloc((void **)&states, sizeof(curandState) * threadsPerBlock * blocksPerGrid));
inicializa_v<<<blocksPerGrid, threadsPerBlock>>>(rand(),states, threadsPerBlock,blocksPerGrid);
CUDA_CHECK_RETURN(cudaGetLastError());
inicializa_h<<<1,M>>>();
CUDA_CHECK_RETURN(cudaGetLastError());
histograma<<<blocksPerGrid,threadsPerBlock>>>(threadsPerBlock,blocksPerGrid);
CUDA_CHECK_RETURN(cudaGetLastError());
//CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(h_v_d, vector_V, N*sizeof(int)));
CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(h_v_h, vector_H, M*sizeof(int)));
// Print the bins and their total (the total should equal N)
int acumula =0;
for(int i = 0; i<M; i++){
std::cout<<h_v_h[i]<<" ";
acumula += h_v_h[i];
}
std::cout<<"\n-------------------------"<<acumula<<"-----------------------------------\n";
/*
for(int i = 0; i<10; i++){
for(int j = 0; j<10; j++){
std::cout<<h_v_d[10*i+j]<<" ";
};
std::cout<<"\n";
}
*/
CUDA_CHECK_RETURN(cudaFree(states));
CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
CUDA_CHECK_RETURN(cudaEventElapsedTime(&t_duration[j],start,stop));
}
// Aggregate timing statistics over all repetitions
float t_max =0, t_min= FLT_MAX, media=0;
for(int i = 0; i< REPETICONES; i++){
media +=t_duration[i];
if(t_duration[i] > t_max){
t_max =t_duration[i];
}
if(t_duration[i]< t_min){
t_min= t_duration[i];
}
}
std::cout<< "Se han realizado "<<REPETICONES<<" repeticones\n";
std::cout<<"Obteniendo de media: "<<media/REPETICONES<<"ms \n";
std::cout<<"Y de máximo: "<<t_max<<"ms y mínimo: "<<t_min<<"ms\n";
return 0;
}
/**
 * Check the return value of the CUDA runtime API call and exit
 * the application if the call has failed.
 * Invoked via the CUDA_CHECK_RETURN macro, which supplies file, line and
 * the stringified statement.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) {
// Fast path: nothing to report on success
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (EXIT_FAILURE);
}
|
2,686 | #include "includes.h"
// Copy the new velocity field over the old one, one thread per cell.
// NOTE(review): there is no bounds guard, so the launch grid must cover the
// field exactly (simWidth columns by gridDim.y*blockDim.y rows) — confirm
// with the launch site before reuse.
__global__ void updateVel(float2 *__restrict__ oldVel, float2 *__restrict__ newVel, unsigned int simWidth)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
oldVel[y*simWidth+x] = newVel[y*simWidth+x];
}
extern "C"
{
// Masked element-wise accumulate: c[i] += a[i] wherever b[i] > 0.
__global__ void A_emult_Bg0(const int n, const double *a, const double *b, double *c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;
    if (b[idx] > 0.0)
    {
        c[idx] += a[idx];
    }
    else
    {
        // Kept from the original: adding 0.0 is not a pure no-op in IEEE-754
        // (it normalises -0.0 to +0.0), so the branch is preserved verbatim.
        c[idx] += 0.0;
    }
}
}
2,688 | #include "includes.h"
// Build the carry and outindex arrays for ByteMaskedArray getitem.
// prefixed_mask is an inclusive prefix sum of the nonzero mask entries, so
// prefixed_mask[i] - 1 is the compacted position of element i when it is
// kept; masked-out elements get outindex -1 and no carry entry.
// One thread per element; grid may be up to 3-D in blocks.
__global__ void awkward_ByteMaskedArray_getitem_nextcarry_outindex_kernel(int64_t* prefixed_mask, int64_t* to_carry, int64_t* outindex, int8_t* mask, int64_t length) {
int64_t block_id =
blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int64_t thread_id = block_id * blockDim.x + threadIdx.x;
if(thread_id < length) {
if (mask[thread_id] != 0) {
to_carry[prefixed_mask[thread_id] - 1] = thread_id;
outindex[thread_id] = prefixed_mask[thread_id] - 1;
} else {
outindex[thread_id] = -1;
}
}
}
2,689 | #include "Vector3fDev.cuh";
|
2,690 | // nvcc EthanPixels.cu -o temp -lm
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
// size of vector
#define M 4 // Number of frames
#define N 10 // Number of pixels per frame
#define BLOCK 128 // Size of blocks, best if it is a power of 2.
// Globals
int *BlockOfFrames_CPU, *BlockOfFrames_GPU;
float *MeanFrame_CPU, *MeanFrame_GPU;
float *BlockOfLogNormalFrames_GPU;
float *MeanLogNormalFrame_CPU, *MeanLogNormalFrame_GPU;
float *StdvLogNormalFrame_CPU, *StdvLogNormalFrame_GPU;
dim3 dimBlock, dimGrid;
// Allocate host and device buffers: the M raw frames (N pixels each), the
// device-only log-normal frames, and the three N-pixel result frames.
// NOTE(review): none of the malloc/cudaMalloc return values are checked.
void AllocateMemory()
{
// This are the set of frames that will be used to generate the log normal frame
// and the standard deviation frame
BlockOfFrames_CPU = (int *)malloc(N*M*sizeof(int));
cudaMalloc((void**)&BlockOfFrames_GPU,N*M*sizeof(int));
cudaMalloc((void**)&BlockOfLogNormalFrames_GPU,N*M*sizeof(float));
// Will hold the log normal frame and the standard deviation of the frames minus the log normal
MeanFrame_CPU = (float *)malloc(N*sizeof(float));
MeanLogNormalFrame_CPU = (float *)malloc(N*sizeof(float));
StdvLogNormalFrame_CPU = (float *)malloc(N*sizeof(float));
cudaMalloc((void**)&MeanFrame_GPU,N*sizeof(float));
cudaMalloc((void**)&MeanLogNormalFrame_GPU,N*sizeof(float));
cudaMalloc((void**)&StdvLogNormalFrame_GPU,N*sizeof(float));
}
/*
 * However you get your 300,000-by-80 pixels loaded in here, CUDA will do the
 * rest. This loads the big vector frame-major: all pixels of frame 0, then
 * frame 1, and so on. It may be faster to load pixel-major (the 80 values of
 * pixel 0 first, then pixel 1, ...) — test and see.
 * Below we just load small synthetic values to check that everything works.
 * M is the number of frames and N is the number of pixels per frame.
 */
// Fill the frame block with test values (pixel j of frame i = 5*i*j) and
// seed the result frames with -1.0 — presumably a sentinel so unwritten
// pixels are easy to spot; confirm downstream use.
void loadPixels()
{
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
BlockOfFrames_CPU[j +i*N] = i*j*5;
}
}
for(int j = 0; j < N; j++)
{
MeanFrame_CPU[j] = -1.0;
MeanLogNormalFrame_CPU[j] = -1.0;
StdvLogNormalFrame_CPU[j] = -1.0;
}
}
// Configure a 1-D launch: BLOCK threads per block and enough blocks to
// cover the N pixels of one frame (ceiling division).
void SetUpCudaDevices()
{
    dimBlock = dim3(BLOCK, 1, 1);
    dimGrid = dim3(((N-1)/BLOCK)+1, 1, 1);
}
// Stage the raw frames onto the device. Async on the default stream; the
// kernels launched later on the same stream are ordered after it, and
// main() also synchronizes before the first launch.
void copyFramessUp()
{
cudaMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, N*M*sizeof(int), cudaMemcpyHostToDevice);
}
// Per-pixel mean across all frames: meanFrame[p] = avg_i allFrames[p, i].
// Expects a 1-D launch with at least pixelsPerFrame threads.
__global__ void creatingMeanPixelFrame(float *meanFrame, int *allFrames, int pixelsPerFrame, int frames)
{
	int p = blockIdx.x*blockDim.x + threadIdx.x;
	if(p >= pixelsPerFrame) return;
	float acc = 0.0;
	for(int f = 0; f < frames; f++)
	{
		acc += allFrames[p + f*pixelsPerFrame];
	}
	meanFrame[p] = acc/(float)frames;
}
// For every pixel of every frame, store log(|frame - meanFrame|), clamping
// a zero difference to 1e-6 so the logarithm stays finite.
// Expects a 1-D launch with at least pixelsPerFrame threads.
// Improvements: hoist the loop-invariant mean load, work in a register
// instead of re-reading/re-writing global memory between every step, and
// use the float intrinsics (fabsf/logf) with float literals.
__global__ void creatingLogNormalFrames(float *meanFrame, int *allFrames, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
	int pixel = threadIdx.x + blockIdx.x*blockDim.x;
	if(pixel < pixelsPerFrame)
	{
		float mean = meanFrame[pixel]; // invariant across frames
		for(int i = 0; i < frames; i++)
		{
			int idx = pixel + pixelsPerFrame*i;
			float v = fabsf((float)allFrames[idx] - mean);
			if(v == 0.0f) v = 0.000001f; // avoid log(0)
			allFramesLogNormal[idx] = logf(v);
		}
	}
}
// Per-pixel mean of the log-normal frames.
// Expects a 1-D launch with at least pixelsPerFrame threads.
__global__ void creatingMeanLogNormalFrame(float *meanlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
	int p = blockIdx.x*blockDim.x + threadIdx.x;
	if(p >= pixelsPerFrame) return;
	float acc = 0.0;
	for(int f = 0; f < frames; f++)
	{
		acc += allFramesLogNormal[p + f*pixelsPerFrame];
	}
	meanlogNormalFrame[p] = acc/(float)frames;
}
// Per-pixel sample standard deviation of the log-normal frames:
//   stdv[p] = sqrt( sum_i (x_i - mean)^2 / (frames-1) )   (Bessel divisor)
// Fix: the original computed sqrtf((sum*sum)/(frames-1)), squaring the
// already-squared deviation sum — that is not the standard deviation.
// Expects a 1-D launch with at least pixelsPerFrame threads; frames must
// be >= 2 for the (frames-1) divisor to be meaningful.
__global__ void creatingStdvLogNormalFrame(float *stdvLogNormalFrame, float *meanLogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
	int pixel = threadIdx.x + blockIdx.x*blockDim.x;
	if(pixel < pixelsPerFrame)
	{
		float mean = meanLogNormalFrame[pixel]; // invariant across frames
		float sum = 0.0f;
		for(int i = 0; i < frames; i++)
		{
			float diff = allFramesLogNormal[pixel + pixelsPerFrame*i] - mean;
			sum += diff*diff;
		}
		stdvLogNormalFrame[pixel] = sqrtf(sum/(float)(frames-1));
	}
}
// Pull the three N-pixel result frames back to the host. Async on the
// default stream; main() calls cudaDeviceSynchronize() before reading them.
void copyFramesDown()
{
cudaMemcpyAsync(MeanFrame_CPU, MeanFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpyAsync(MeanLogNormalFrame_CPU, MeanLogNormalFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpyAsync(StdvLogNormalFrame_CPU, StdvLogNormalFrame_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);
}
// Print the three result frames side by side, one pixel per line.
void stats()
{
	for(int p = 0; p < N; p++)
	{
		printf("MeanFrame_CPU[%d] = %f MeanLogNormalFrame_CPU[%d] = %f StdvLogNormalFrame_CPU[%d] = %f\n", p, MeanFrame_CPU[p], p, MeanLogNormalFrame_CPU[p], p, StdvLogNormalFrame_CPU[p]);
	}
}
// Release every buffer allocated in AllocateMemory, grouped by purpose.
void cleanUp()
{
	// input frames and their log-normal transform
	free(BlockOfFrames_CPU);
	cudaFree(BlockOfFrames_GPU);
	cudaFree(BlockOfLogNormalFrames_GPU);
	// statistic frames (host + device pairs)
	free(MeanFrame_CPU);
	cudaFree(MeanFrame_GPU);
	free(MeanLogNormalFrame_CPU);
	cudaFree(MeanLogNormalFrame_GPU);
	free(StdvLogNormalFrame_CPU);
	cudaFree(StdvLogNormalFrame_GPU);
}
// Abort if any previous CUDA call or kernel launch recorded an error.
// `message` tags which pipeline stage failed in the printout.
void errorCheck(const char *message)
{
	cudaError_t error = cudaGetLastError();
	if(error == cudaSuccess) return;
	printf("\n CUDA ERROR: %s = %s\n", message, cudaGetErrorString(error));
	exit(0);
}
// Pipeline driver: allocate, configure the launch, generate test frames,
// upload, run the four kernels back-to-back on the default stream (so they
// execute in order), download, print, release.
int main()
{
AllocateMemory();
SetUpCudaDevices();
loadPixels();
copyFramessUp();
errorCheck("copyFramessUp");
cudaDeviceSynchronize(); // make sure the upload landed before computing
creatingMeanPixelFrame<<<dimGrid,dimBlock>>>(MeanFrame_GPU, BlockOfFrames_GPU, N, M);
errorCheck("creatingMeanPixelFrame");
creatingLogNormalFrames<<<dimGrid,dimBlock>>>(MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingLogNormalFrames");
creatingMeanLogNormalFrame<<<dimGrid,dimBlock>>>(MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingMeanLogNormalFrame");
creatingStdvLogNormalFrame<<<dimGrid,dimBlock>>>(StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, N, M);
errorCheck("creatingStdvLogNormalFrame");
copyFramesDown();
errorCheck("copyFramesDown");
cudaDeviceSynchronize(); // wait for the async downloads before printing
stats();
cleanUp();
printf("\n DONE \n");
}
|
2,691 | #include "includes.h"
using namespace std;
// Fill a and b with per-thread test values: a[i] = 2*threadIdx.x and
// b[i] = threadIdx.x for every global index i < n.
// NOTE(review): the stored values use threadIdx.x rather than the global
// index i, so the pattern repeats every block — confirm that is intended.
__global__ void graphGenerate (float *a, float *b, int n){
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= n) return;
	a[i] = threadIdx.x * 2;
	b[i] = threadIdx.x;
}
2,692 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
void secuential(const int a[] ,const int b[], int c[], const int sqrt_dim);
// Computes C = B + A^T + A*B^T over sqrt_dim x sqrt_dim row-major matrices.
// Two regimes, selected by the total thread count:
//  * thread_number >= sqrt_dim^2: one thread per output element (ROW, COL);
//  * otherwise each thread sweeps a tile of roughly sqrt_dim/thread_number
//    extra rows/columns from its own (ROW, COL), and the thread whose flat
//    index equals thread_number-1 finishes everything to the matrix end.
// NOTE(review): in the one-element-per-thread branch the three writes to
// c[] sit outside the ROW/COL bounds check — out-of-range threads would
// write out of bounds; confirm the launch never over-provisions there.
__global__ void multiply( const int a[] ,const int b[], int c[] , const int sqrt_dim, const int thread_number)
{
unsigned int ROW = blockIdx.y*blockDim.y+threadIdx.y; // global row of this thread's element
unsigned int COL = blockIdx.x*blockDim.x+threadIdx.x; // global column of this thread's element
float tmpSum = 0; // accumulator for the A*B^T dot product
if(thread_number >= sqrt_dim * sqrt_dim){
if (ROW < sqrt_dim && COL < sqrt_dim) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < sqrt_dim; i++) {
tmpSum += a[ROW *sqrt_dim + i] * b[i * sqrt_dim + COL];
}
}
c[ROW * sqrt_dim + COL]= b[ROW * sqrt_dim + COL]; //c= b
c[ROW * sqrt_dim + COL]+= a[COL + ROW * sqrt_dim]; //c+= a^t
c[ROW * sqrt_dim + COL]+= tmpSum;
}
else{
unsigned int index=ROW * sqrt_dim + COL;
unsigned int dim=sqrt_dim*sqrt_dim;
if(index!=(thread_number-1)){//if not last thread deal with size_array/thread_nb array entries
//for(int j=index*(int)(dim/thread_number); j< index*(int)(dim/thread_number)+(int)(dim/thread_number); j++){
for(unsigned int r=ROW; r<= ROW + (int)(sqrt_dim/thread_number); r++){
for(unsigned int cl=COL; cl <= COL +(int)(sqrt_dim/thread_number); cl++){
tmpSum=0;
if (r < sqrt_dim && cl < sqrt_dim) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < sqrt_dim; i++) {
tmpSum += a[r *sqrt_dim + i] * b[i * sqrt_dim + cl];
}
}
c[r * sqrt_dim + cl]= b[r * sqrt_dim + cl]; //c= b
c[r * sqrt_dim + cl]+= a[COL + r * sqrt_dim]; //c+= a^t  NOTE(review): indexes with COL, not cl — verify
c[r * sqrt_dim + cl]+= tmpSum;
}
}
//}
}
else{ //if last thread deal with all remaining array entries
for(unsigned int r=ROW; r<sqrt_dim; r++){
for(unsigned int cl=COL; cl<sqrt_dim; cl++){
tmpSum=0;
if (r < sqrt_dim && cl < sqrt_dim) {
// each thread computes one element of the block sub-matrix
for (int i = 0; i < sqrt_dim; i++) {
tmpSum += a[r *sqrt_dim + i] * b[i * sqrt_dim + cl];
}
}
c[r * sqrt_dim + cl]= b[r * sqrt_dim + cl]; //c= b
c[r * sqrt_dim + cl]+= a[COL + r * sqrt_dim]; //c+= a^t  NOTE(review): indexes with COL, not cl — verify
c[r * sqrt_dim + cl]+= tmpSum;
}
}
}
}
}
// Driver: builds two all-ones matrices, computes C = B + A^T + A*B^T on
// the GPU and on the CPU, times both, prints C, and compares the results.
// Usage: prog [sqrt_dim] [N]   (N*N = requested thread count)
// Fixes vs. the original:
//  * the kernel was launched as <<<threadsPerBlock, blocksPerGrid>>> —
//    the grid and block arguments were swapped (grid comes first);
//  * for N*N > 512 the block was set to 512x512 threads, which exceeds
//    the per-block thread limit and makes the launch fail; use a 16x16
//    block and scale the grid to cover N x N threads instead.
int main(int argc, char *argv[]){
	clock_t time_begin; // coarse wall-clock style timing via clock()
	// device and host buffers
	int *d_array1 = 0,*d_array2 = 0,*d_array3 = 0;
	int *h_array1 = 0,*h_array2 = 0,*h_array3 = 0;
	int *h_array_sec= 0;
	int size_array=16; // total elements; must be a perfect square (L*L)
	int N=3;
	if(argc == 3){
		size_array=atoi(argv[1]) * atoi(argv[1]) ;
		N=atoi(argv[2]);
	}
	// host allocations (inputs, GPU result, CPU reference result)
	h_array1 = (int*)malloc( size_array * sizeof(int));
	h_array_sec= (int*)malloc( size_array * sizeof(int));
	h_array2 = (int*)malloc( size_array * sizeof(int));
	h_array3 = (int*)malloc( size_array * sizeof(int));
	for(int i=0; i<size_array; i++){
		h_array1[i]=1;
	}
	for(int i=0; i<size_array; i++){
		h_array2[i]=1;
	}
	// device allocations and uploads
	cudaMalloc(&d_array1,size_array * sizeof(int));
	cudaMalloc(&d_array2,size_array * sizeof(int));
	cudaMalloc(&d_array3,size_array * sizeof(int));
	cudaMemcpy(d_array1, h_array1, sizeof(int)*size_array, cudaMemcpyHostToDevice);
	cudaMemcpy(d_array2, h_array2, sizeof(int)*size_array, cudaMemcpyHostToDevice);
	int thread_number= N*N;
	printf("%i threads, %ix%i matrix\n", thread_number, (int)sqrt((float)size_array), (int)sqrt((float)size_array));
	time_begin=clock();
	dim3 threadsPerBlock(N, N);
	dim3 blocksPerGrid(1, 1);
	if (N*N > 512){
		// 512x512 threads per block is far beyond the hardware limit;
		// use a modest block and grow the grid to cover N x N threads.
		threadsPerBlock.x = 16;
		threadsPerBlock.y = 16;
		blocksPerGrid.x = (unsigned int)ceil(double(N)/double(threadsPerBlock.x));
		blocksPerGrid.y = (unsigned int)ceil(double(N)/double(threadsPerBlock.y));
	}
	// grid first, block second
	multiply<<<blocksPerGrid, threadsPerBlock>>>(d_array1, d_array2 , d_array3, (int)sqrt((float)size_array), thread_number);
	cudaDeviceSynchronize();
	// download and inspect the result on the host
	cudaMemcpy(h_array3, d_array3, sizeof(int)*size_array, cudaMemcpyDeviceToHost);
	printf("GPU time, %i threads: %f seconds\n", thread_number,(((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
	printf("Array C=B + AB^t + A^t :\n");
	for(int i=0; i<size_array; i++){
		printf("%i\t", h_array3[i]);
		if((i+1)%(int)(sqrt((float)size_array))==0)
			printf("\n");
	}
	printf("\n");
	time_begin=clock();
	secuential(h_array1, h_array2, h_array_sec, (int)sqrt((float)size_array));
	printf("CPU time: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
	// compare GPU result against the CPU reference
	for(int i=0; i<size_array; i++){
		if(h_array_sec[i] != h_array3[i]){
			printf("GPU and CPU have different results at position %i\n", i);
			break;
		}
	}
	free(h_array3); free(h_array2); free(h_array1); free(h_array_sec);
	cudaFree(d_array3);cudaFree(d_array2);cudaFree(d_array1);
}
// CPU reference: C = B + A^T + A*B^T for sqrt_dim x sqrt_dim matrices
// stored row-major in flat arrays of sqrt_dim*sqrt_dim ints.
void secuential(const int a[] ,const int b[], int c[], const int sqrt_dim){
	const int dim = sqrt_dim * sqrt_dim;
	for(int i = 0; i < dim; i++){
		const int col = i % sqrt_dim;              // column of entry i
		const int row = (i - col) / sqrt_dim;      // row of entry i
		int acc = b[i];                            // c  = b
		acc += a[row + col * sqrt_dim];            // c += a^t
		for(int j = 0; j < sqrt_dim; j++){         // c += a*b^t (row of a . row of b)
			acc += a[j + row * sqrt_dim] * b[j + col * sqrt_dim];
		}
		c[i] = acc;
	}
}
|
2,693 | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <tiffio.h>
#include <stdint.h>
// 5x5 box blur over an interleaved RGB8 image; one thread per pixel.
// d_in / d_out each hold width*height*3 bytes. Window taps that fall
// outside the image are skipped; `taps` counts the live samples so the
// average stays correct at the borders.
__global__ void blur(uint8_t *d_out, uint8_t *d_in, int width, int height){
	int id = (blockIdx.x*blockDim.x)+threadIdx.x;
	if(id >= width*height) return;
	int px = id % width;            // x coordinate of this pixel
	int py = (id - px) / width;     // y coordinate of this pixel
	const int radius = 2;           // 2 -> 5x5 window
	float r = 0, g = 0, b = 0;
	int taps = 0;
	for(int dx = -radius; dx <= radius; ++dx){
		for(int dy = -radius; dy <= radius; ++dy){
			if((px+dx) >= 0 && (px+dx) < width && (py+dy) >= 0 && (py+dy) < height){
				int s = (id + dx + (dy*width))*3;
				r += d_in[s];
				g += d_in[s+1];
				b += d_in[s+2];
				taps++;
			}
		}
	}
	d_out[id*3]   = r/taps;
	d_out[id*3+1] = g/taps;
	d_out[id*3+2] = b/taps;
}
// Read an 8-bit interleaved RGB TIFF (argv[1]), run the 5x5 GPU box blur,
// and write the result to argv[2] as one deflate-compressed strip.
// NOTE(review): the launch <<<size/width, width>>> puts one thread per
// pixel with `width` threads per block, so this requires width <= 1024
// (per-block thread limit) — confirm for the intended inputs.
int main(int argc, char **argv){
uint32_t width, height;
TIFF *iimage;
uint16_t bits_per_sample, photometric;
uint16_t planar_config;
uint16_t samples_per_pixel;
int size;
assert(argc == 3);
// open the input and validate that it is 8-bit interleaved RGB
iimage = TIFFOpen(argv[1], "r");
assert(iimage);
assert(TIFFGetField(iimage, TIFFTAG_IMAGEWIDTH, &width));
assert(width > 0);
assert(TIFFGetField(iimage, TIFFTAG_IMAGELENGTH, &height));
assert(height > 0);
assert(TIFFGetField(iimage, TIFFTAG_BITSPERSAMPLE, &bits_per_sample) != 0);
assert(bits_per_sample == 8);
assert(TIFFGetField(iimage, TIFFTAG_PHOTOMETRIC, &photometric));
assert(photometric == PHOTOMETRIC_RGB);
assert(TIFFGetField(iimage, TIFFTAG_PLANARCONFIG, &planar_config) != 0);
assert(TIFFGetField(iimage, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel));
assert(samples_per_pixel == 3);
size = width * height * samples_per_pixel * sizeof(char);
printf("size is %d\n",size);
printf("spp is %d\n",samples_per_pixel);
// decode all strips back-to-back into one contiguous buffer
char *idata = (char *) malloc(size);
assert(idata != NULL);
char *curr = idata;
int count = TIFFNumberOfStrips(iimage);
size_t in;
for (int i = 0; i < count; ++i) {
in = TIFFReadEncodedStrip(iimage, i, curr, -1);
// assert(in != -1);
// printf("%li\n", in);
curr += in;
}
TIFFClose(iimage);
char *odata = (char *) malloc(size);
// upload the image, blur on the device, time the kernel with events
uint8_t* d_in;
cudaMalloc((void**) &d_in, size);
cudaMemcpy(d_in, idata, size, cudaMemcpyHostToDevice);
uint8_t* d_out;
cudaMalloc((void**) &d_out, size);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
blur<<<size/width, width>>>(d_out, d_in, width, height);
cudaEventRecord(stop);
cudaMemcpy(odata, d_out, size, cudaMemcpyDeviceToHost); // also syncs with the kernel
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel time is %fms\n", milliseconds);
assert(odata != NULL);
// write the blurred image with the input's metadata, one big strip
TIFF *oimage = TIFFOpen(argv[2], "w");
assert(oimage);
assert(TIFFSetField(oimage, TIFFTAG_IMAGEWIDTH, width));
assert(TIFFSetField(oimage, TIFFTAG_IMAGELENGTH, height));
assert(TIFFSetField(oimage, TIFFTAG_BITSPERSAMPLE, bits_per_sample));
assert(TIFFSetField(oimage, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE));
assert(TIFFSetField(oimage, TIFFTAG_PHOTOMETRIC, photometric));
assert(TIFFSetField(oimage, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel));
assert(TIFFSetField(oimage, TIFFTAG_PLANARCONFIG, planar_config));
assert(TIFFSetField(oimage, TIFFTAG_ROWSPERSTRIP, height));
size_t on = size;
assert(TIFFWriteEncodedStrip(oimage, 0, odata, on) == on);
TIFFClose(oimage);
free(idata);
free(odata);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
2,694 |
#define bottom_data(n,c,h,w) bottom_data[(n)*H*W*C+(c)*H*W+(h)*W+(w)]
#define top_data(n,c,h,w) top_data[(n)*OC*OH*OW+(c)*OH*OW+(h)*OW+(w)]
#define kernel(n,c,h,w) kernel[(n)*C*FW*FH+(c)*FW*FH+(h)*FW+(w)]
// Pooling kernel: each thread produces a 4x4 tile of output pixels for one
// channel (init_c) across the whole batch. Despite the accumulator name
// `sum`, the inner loop applies fmax, so this is max pooling with an
// implicit floor of 0.0f (correct only for non-negative inputs — TODO
// confirm). Window geometry: stride (SH,SW), pad (PH,PW), filter (FH,FW),
// clamped to the input bounds via hstart/hend, wstart/wend.
// Expected launch (see main): block (4,4,4), grid z covering OC/blockDim.z.
__global__ void DPUPooling(
int N, int C, int H, int W,float *bottom_data,
int N1,int OC, int OH, int OW, float *top_data,
int SH, int SW, int PH, int PW, int FH, int FW)
{
int init_pw = (blockIdx.x * blockDim.x + threadIdx.x) * 4;
int init_ph = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
int init_c = (blockIdx.z * blockDim.z + threadIdx.z)* 1 ;
#pragma SIMD (n)
#pragma unroll
for(int n = 0; n < N; ++n){
#pragma unroll
for (int c = 0; c < 1; ++c) {
#pragma unroll
for (int ph = 0; ph < 4; ++ph) {
#pragma unroll
for (int pw = 0; pw < 4; ++pw) {
if((ph+init_ph) < OH && (pw+init_pw) < OW && (init_c+c) < OC){
// top-left corner of the pooling window in input coordinates
int hs = (ph+init_ph) * SH -PH;
int ws = (pw+init_pw) * SW - PW;
// clip the window to the input frame
int hend = min(hs + FH, H);
int wend = min(ws + FW, W);
int hstart = max(hs, 0);
int wstart = max(ws, 0);
float sum = 0.0f;
// take the max over the clipped window (see header note)
for (int h = hstart-hs; h < hend-hs; ++h) {
for (int w = wstart-ws; w < wend-ws; ++w) {
sum = fmax(bottom_data(n,init_c+c,h+hs,w+ws) , sum);
}
}
// store the pooled value
top_data(n,init_c+c,ph+init_ph,pw+init_pw) = sum;
}
}
}
}
}
}
// Launch DPUPooling: 32-image batch, 256 channels, 27x27 input, 3x3
// window, stride 2 -> 13x13 output.
// Fix: the original launched the kernel with uninitialized device
// pointers (no cudaMalloc), which is an illegal-address fault; allocate
// and zero the buffers, synchronize, and free them.
int main(){
	int N = 32 ;   //input & output nums
	int C = 256;   //input_channel
	int H= 27;     //input_height
	int W= 27;     //input width
	int OC = 256;  //output_channel
	int OH = 13;   //OH = (H + PH * 2 - FH)/SH + 1
	int OW = 13;   //OW = (W + PW * 2 - FW)/SW + 1
	int SH = 2;
	int SW = 2;
	int PH = 0;
	int PW = 0;
	int FH = 3;
	int FW = 3;
	float *device_bottom_data;
	float *device_top_data;
	// allocate the input/output tensors on the device (zeroed input)
	size_t inBytes  = (size_t)N * C * H * W * sizeof(float);
	size_t outBytes = (size_t)N * OC * OH * OW * sizeof(float);
	cudaMalloc((void**)&device_bottom_data, inBytes);
	cudaMalloc((void**)&device_top_data, outBytes);
	cudaMemset(device_bottom_data, 0, inBytes);
	dim3 grid(1,1,64); //width,height,(channel*num): 64*blockDim.z = 256 channels
	dim3 block(4,4,4); //width,height,channel
	DPUPooling<<<grid,block>>>(N, C, H, W,device_bottom_data, N,OC,OH,OW,device_top_data,SH, SW, PH, PW, FH, FW);
	cudaDeviceSynchronize(); // wait for the kernel before tearing down
	cudaFree(device_bottom_data);
	cudaFree(device_top_data);
	return 0;
}
|
2,695 | #include <iostream>
using namespace std;
// Write the constant 69 through the supplied device pointer.
__global__ void doSomething(int* outdata) {
	outdata[0] = 69;
}
// Round-trip demo: print 5, have the device overwrite an int with 69,
// copy it back and print 69.
int main(int argn, char** args) {
int a = 5;
cout << a << endl;
int* kOut = 0;
cudaMalloc((void**) &kOut, sizeof(int));
// kOut started as null, so it is still null if the allocation failed
// NOTE(review): checking cudaMalloc's return code would be more robust
if (kOut == 0) {
cerr << "crap, malloc failed" << endl;
return 1;
}
doSomething<<<1,1>>>(kOut);
// blocking memcpy on the default stream also waits for the kernel
cudaMemcpy(&a, kOut, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(kOut);
cout << a << endl;
return 0;
}
2,696 | /**
CUDAで学ぶアルゴリズムとデータ構造
ステップバイステップでN−クイーン問題を最適化
一般社団法人 共同通信社 情報技術局 鈴木 維一郎(suzuki.iichiro@kyodonews.jp)
コンパイルと実行
$ nvcc -O3 CUDA**_N-Queen.cu && ./a.out (-c|-r|-g)
-c:cpu
-r cpu再帰
-g GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define THREAD_NUM 96
#define MAX 27
// Globals
long Total=0 ; //GPU: total solution count
long Unique=0; //GPU: unique solution count
int COUNT=0; //running solution counter
int aBoard[MAX]; //board: aBoard[row] = column of its queen, -1 = empty
int down[2*MAX-1]; //column-occupied flags
// CUDA declarations
// Fix: the forward declaration was missing the d_down parameter, so it
// declared a kernel overload that is never defined; match the definition.
__global__ void nqueen_cuda(int *d_aBoard,int *d_down,int *d_results,int *d_count,int COUNT,int row,int size);
void solve_nqueen_cuda(int si,int steps);
bool InitCUDA();
// CPU declarations
void print(int size);
void NQueen(int row,int size);
void NQueenR(int row,int size);
//
// Single-thread device port of the non-recursive column-flag search
// (launched <<<1,1>>> by solve_nqueen_cuda). Only the column constraint
// (d_down) is enforced in this step — no diagonal flags. Each complete
// placement is packed into one int (decimal digit j = column of row j)
// and appended to d_results; the final solution count is returned through
// d_count[0]. COUNT and row arrive as the starting counter/row (both 0).
__global__
void nqueen_cuda(int *d_aBoard,int *d_down,int *d_results,int *d_count,int COUNT,int row,int size){
bool matched;
while(row>=0){
matched=false;
for(int col=d_aBoard[row]+1;col<size;col++){
if(d_down[col]==0){ //column not under attack
if(d_aBoard[row]!=-1){ //a queen already sits in this row
d_down[d_aBoard[row]]=0;//free its column first
}
d_aBoard[row]=col; //place the queen
d_down[col]=1; //mark the column occupied
matched=true;
break;
}
}
if(matched){
row++;
if(row==size){
//do not printf inside the kernel: pack the board into an int
//and return it to the host; e.g. board 0,1,1,3 encodes as 3110
int sum=0;
for(int j=0;j<size;j++){
sum+=d_aBoard[j]*pow(10,j);
}
d_results[COUNT++]=sum;
row--;
}
}else{ //no free column in this row: backtrack
if(d_aBoard[row]!=-1){
int col=d_aBoard[row]; /* the column to release */
d_down[col]=0; //free the column flag
d_aBoard[row]=-1; //mark the row empty again
}
row--;
}
}
d_count[0]=COUNT;//return the solution count
}
//
// Host driver: allocates pinned host + device buffers, runs the
// single-thread search kernel, and prints every packed solution.
// Fixes vs. the original:
//  * h_down was uploaded to the device uninitialized — the kernel treats
//    d_down[col]==0 as "column free", so the flags must start at 0;
//  * sizeof(int)*2*MAX-1 parses as (sizeof(int)*2*MAX)-1 bytes; the
//    intended size is sizeof(int)*(2*MAX-1).
void solve_nqueen_cuda(int si,int steps){
	//pinned host buffers
	int *h_aBoard;
	int *h_down;
	int *h_results;
	int *h_count;
	cudaMallocHost((void**)&h_aBoard,sizeof(int)*MAX);
	cudaMallocHost((void**)&h_down,sizeof(int)*(2*MAX-1));
	cudaMallocHost((void**)&h_results,sizeof(int)*steps);
	cudaMallocHost((void**)&h_count,sizeof(int));
	//device buffers
	int *d_aBoard;
	int *d_down;
	int *d_results;
	int *d_count;
	cudaMalloc((void**)&d_aBoard,sizeof(int)*MAX);
	cudaMalloc((void**)&d_down,sizeof(int)*(2*MAX-1));
	cudaMalloc((void**)&d_results,sizeof(int)*steps);
	cudaMalloc((void**)&d_count,sizeof(int));
	//board starts empty
	for(int i=0;i<si;i++){
		h_aBoard[i]=-1;
	}
	//all columns start free (this init was missing: the flags went up
	//to the device uninitialized)
	for(int i=0;i<2*MAX-1;i++){
		h_down[i]=0;
	}
	//solution counter starts at zero
	h_count[0]=0;
	//host to device
	cudaMemcpy(d_aBoard,h_aBoard,
			sizeof(int)*MAX,cudaMemcpyHostToDevice);
	cudaMemcpy(d_down,h_down,
			sizeof(int)*(2*MAX-1),cudaMemcpyHostToDevice);
	cudaMemcpy(d_results,h_results,
			sizeof(int)*steps,cudaMemcpyHostToDevice);
	cudaMemcpy(d_count,h_count,
			sizeof(int),cudaMemcpyHostToDevice);
	//single-thread launch: the search itself is sequential in this step
	nqueen_cuda<<<1,1>>>(d_aBoard,d_down,d_results,d_count,0,0,si);
	//device to host
	cudaMemcpy(h_results,d_results,
			sizeof(int)*steps,cudaMemcpyDeviceToHost);
	cudaMemcpy(h_count,d_count,
			sizeof(int),cudaMemcpyDeviceToHost);
	//print each solution in its packed integer encoding
	for(int i=0;i<h_count[0];i++){
		printf("%d:%08d\n",i+1,h_results[i]);
	}
	//release
	cudaFreeHost(h_aBoard);
	cudaFreeHost(h_down);
	cudaFreeHost(h_results);
	cudaFreeHost(h_count);
	cudaFree(d_aBoard);
	cudaFree(d_down);
	cudaFree(d_results);
	cudaFree(d_count);
}
//
/** CUDA 初期化 **/
/** CUDA init: select the first device with compute capability >= 1.x. */
bool InitCUDA(){
	int count = 0;
	cudaGetDeviceCount(&count);
	if(count==0){
		fprintf(stderr,"There is no device.\n");
		return false;
	}
	int dev;
	for(dev=0;dev<count;dev++){
		cudaDeviceProp prop;
		if(cudaGetDeviceProperties(&prop,dev)==cudaSuccess && prop.major>=1){
			break;
		}
	}
	if(dev==count){
		fprintf(stderr,"There is no device supporting CUDA 1.x.\n");
		return false;
	}
	cudaSetDevice(dev);
	return true;
}
//出力用のメソッド
// Print one solution: running count, then the column of each row's queen.
void print(int size){
	printf("%d: ",++COUNT);
	for(int row=0;row<size;row++){
		printf("%d ",aBoard[row]);
	}
	printf("\n");
}
//CPU 非再帰 ロジックメソッド
// CPU, non-recursive backtracking with a column-occupied flag array.
// Only the column constraint is checked (down[]); diagonals are not
// tracked in this step. aBoard[row] == -1 means the row is empty,
// otherwise it holds the column of the queen already placed there.
// Every complete placement is printed via print().
void NQueen(int row,int size){
bool matched;
while(row>=0){
matched=false;
for(int col=aBoard[row]+1;col<size;col++){
if(down[col]==0){ //column not under attack
if(aBoard[row]!=-1){ //a queen already sits in this row
down[aBoard[row]]=0;//free its column first
}
aBoard[row]=col; //place the queen
down[col]=1; //mark the column occupied
matched=true;
break;
}
}
if(matched){
row++;
if(row==size){
print(size);
row--;
}
}else{ //no free column in this row: backtrack
if(aBoard[row]!=-1){
int col=aBoard[row]; /* the column to release */
down[col]=0; //free the column flag
aBoard[row]=-1; //mark the row empty again
}
row--;
}
}
}
//CPUR 再帰 ロジックメソッド
// CPU recursive solver: try each remaining column in this row, recursing
// when its flag is free. Only columns are constrained (no diagonals).
void NQueenR(int row,int size){
	if(row==size){
		print(size);
		return;
	}
	for(int col=aBoard[row]+1;col<size;col++){
		aBoard[row]=col; //tentatively place the queen
		if(down[col]==0){
			down[col]=1;
			NQueenR(row+1,size);
			down[col]=0;
		}
		aBoard[row]=-1; //undo the placement
	}
}
//メインメソッド
// Entry point: selects CPU iterative (-c), CPU recursive (-r) or GPU (-g)
// mode from argv[1]; any other character after '-' falls back to the
// recursive CPU solver. NOTE(review): size is fixed at 5 here while the
// usage text mentions 8 queens — confirm which was intended.
int main(int argc,char** argv) {
	int size=5;
	bool cpu=false,cpur=false,gpu=false;
	int argstart=1,steps=24576;
	// parse the optional mode flag
	if(argc>=2&&argv[1][0]=='-'){
		char mode=argv[1][1];
		if(mode=='c'||mode=='C'){cpu=true;}
		else if(mode=='r'||mode=='R'){cpur=true;}
		else if(mode=='g'||mode=='G'){gpu=true;}
		else{cpur=true;}
		argstart=2;
	}
	if(argc<argstart){
		printf("Usage: %s [-c|-g|-r|-s]\n",argv[0]);
		printf(" -c: CPU only\n");
		printf(" -r: CPUR only\n");
		printf(" -g: GPU only\n");
		printf("Default CPUR to 8 queen\n");
	}
	// start with every row empty
	for(int i=0;i<size;i++){ aBoard[i]=-1; }
	if(cpu){
		printf("\n\n2.CPU 非再帰 配置フラグ(制約テスト高速化)\n");
		NQueen(0,size);
	}
	if(cpur){
		printf("\n\n2.CPU 再帰 配置フラグ(制約テスト高速化)\n");
		NQueenR(0,size);
	}
	if(gpu){
		printf("\n\n2.GPU 非再帰 配置フラグ(制約テスト高速化)\n");
		if(!InitCUDA()){return 0;}
		solve_nqueen_cuda(size,steps);
	}
	return 0;
}
|
2,697 | // Ryan Jacoby
// Compiled on GNU/Linux with nvcc 10.2.89
// Test time with: nvprof --unified-memory-profiling off ./test
// Ran on RTX 2080 in 1.5752ms
#include<iostream>
__global__ void add(int, float *, float *);
// Unified-memory vector-add demo: fills x=1, y=2, runs add on the GPU,
// then verifies y == 3 everywhere and prints the max error.
int main() {
	const int N = 1<<20; // 1M elements
	float *x, *y;
	cudaMallocManaged(&x, N*sizeof(float));
	cudaMallocManaged(&y, N*sizeof(float));
	// managed memory is visible to both sides; initialize on the host
	for(int i = 0; i < N; i++) {
		x[i] = 1.0f;
		y[i] = 2.0f;
	}
	const int blockSize = 1024;
	const int blocks = (N + blockSize - 1) / blockSize; // ceil-div
	add<<<blocks, blockSize>>>(N, x, y);
	cudaDeviceSynchronize(); // wait before touching y on the host
	float maxError = 0.0f;
	for (int i = 0; i < N; i++)
		maxError = fmax(maxError, fabs(y[i]-3.0f));
	std::cout << "Max error: " << maxError << std::endl;
	cudaFree(x);
	cudaFree(y);
	return 0;
}
// Grid-stride element-wise add: y[i] += x[i] for all i < n.
__global__
void add(int n, float *x, float *y) {
	int start = blockIdx.x * blockDim.x + threadIdx.x;
	int step = blockDim.x * gridDim.x;
	for(int i = start; i < n; i += step)
		y[i] += x[i];
}
2,698 | #include "includes.h"
// For each of b batches: dist holds an (m, n) distance matrix; for every
// row j find its k smallest entries by partial selection sort, writing
// their positions to idx (m, k) and values to val (m, k). dist is
// reordered in place (the first k entries of each row end up sorted).
// One block per batch; threads stride over the m rows.
// Fix: idx/val are laid out with a row stride of k (their per-batch
// offset is m*k), but the original wrote them at j*n+s, which is out of
// bounds / misaligned whenever n != k; the correct index is j*k+s.
__global__ void selection_sort_gpu(int b, int n, int m, int k, float *dist, int *idx, float *val) {
	int batch_index = blockIdx.x;
	dist+=m*n*batch_index;
	idx+=m*k*batch_index;
	val+=m*k*batch_index;
	int index = threadIdx.x;
	int stride = blockDim.x;
	float *p_dist;
	for (int j=index;j<m;j+=stride) {
		p_dist = dist+j*n;
		// partial selection sort: settle positions 0..k-1 of this row
		for (int s=0;s<k;++s) {
			int min=s;
			// find the minimum of the unsorted tail
			for (int t=s+1;t<n;++t) {
				if (p_dist[t]<p_dist[min]) {
					min = t;
				}
			}
			// record the winner (row stride is k, not n)
			idx[j*k+s] = min;
			val[j*k+s] = p_dist[min];
			// swap so the winner is fixed at position s
			float tmp = p_dist[min];
			p_dist[min] = p_dist[s];
			p_dist[s] = tmp;
		}
	}
}
2,699 | #include "matrix.cuh"
// Allocate a matrix_list_t holding `num` matrix pointers out of `buffer`.
__device__ matrix_list_t* device_matrix_list_constructor(buffer_t* buffer, unsigned int num)
{
	matrix_list_t* list = (matrix_list_t*)buffer_malloc(buffer, sizeof(matrix_list_t));
	list->num = num;
	list->matrix_list = (matrix_t**)buffer_malloc(buffer, num * sizeof(matrix_t*));
	return list;
}
// Element-wise list addition: result[i] = m1[i] + m2[i].
// Assumes m1->num == m2->num (assert elided in device code).
__device__ matrix_list_t* device_matrix_list_add(buffer_t* buffer, matrix_list_t* m1, matrix_list_t* m2)
{
	matrix_list_t* result = device_matrix_list_constructor(buffer, m1->num);
	for(int i = 0; i < m1->num; i++)
	{
		result->matrix_list[i] = device_matrix_add(buffer, m1->matrix_list[i], m2->matrix_list[i]);
	}
	return result;
}
// Element-wise list subtraction: result[i] = m1[i] - m2[i].
// Assumes m1->num == m2->num (assert elided in device code).
__device__ matrix_list_t* device_matrix_list_subtract(buffer_t* buffer, matrix_list_t* m1, matrix_list_t* m2)
{
	matrix_list_t* result = device_matrix_list_constructor(buffer, m1->num);
	for(int i = 0; i < m1->num; i++)
	{
		result->matrix_list[i] = device_matrix_subtract(buffer, m1->matrix_list[i], m2->matrix_list[i]);
	}
	return result;
}
// Scale every matrix in the list: result[i] = m1[i] * scalar.
__device__ matrix_list_t* device_matrix_list_scalar_multiply(buffer_t* buffer, matrix_list_t* m1, float scalar)
{
	matrix_list_t* result = device_matrix_list_constructor(buffer, m1->num);
	for(int i = 0; i < m1->num; i++)
	{
		result->matrix_list[i] = device_matrix_scalar_multiply(buffer, m1->matrix_list[i], scalar);
	}
	return result;
}
// Intentionally a no-op: list storage comes from the bump-style buffer_t
// allocator (see buffer_malloc above), which is presumably reclaimed
// wholesale elsewhere — TODO confirm there is no per-list free path.
__device__ void device_free_matrix_list(matrix_list_t* m)
{
}
2,700 | #include <stdio.h>
// Each block's single thread announces its block index via device printf.
__global__ void hello() {
printf("Hello world! I\'m a thread in block %d\n", blockIdx.x);
}
// Launch 16 single-thread blocks, then synchronize so the device-side
// printf output is flushed to stdout before the program exits.
int main(int argc, char** argv) {
	hello<<<16, 1>>>();
	cudaDeviceSynchronize(); // flush device printf buffers
	return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.