serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
2,001
|
#include<stdio.h>
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, *d_n).
// Expects a 1D grid of 1D blocks, one thread per element; surplus threads
// fall through to the else-branch and do nothing.
// d_n is a device pointer holding the element count.
// Note: the per-thread printf calls are for demonstration only and dominate
// the runtime for large n.
__global__ void parallel_vector_add(int *d_a, int *d_b, int *d_c, int *d_n){
    int i = (blockIdx.x*blockDim.x)+threadIdx.x ;
    printf("I am thread #%d\n", i) ;
    if(i < *d_n){
        // Fixed typo in the emitted message (previously "T am ...").
        printf("I am about to compute c[%d].\n", i) ;
        d_c[i] = d_a[i] + d_b[i] ;
    }
    else{
        printf("I am doing nothing.\n") ;
    }
}
// Reads n from stdin, builds a[i]=i, b[i]=n-i, adds them on the GPU and
// prints the result (every element equals n) plus the kernel time.
int main(){
    // Read and validate the vector length (previously unchecked scanf).
    int n ;
    if(scanf("%d", &n) != 1 || n <= 0){
        fprintf(stderr, "expected a positive vector length on stdin\n");
        return 1;
    }
    // Heap-allocate host buffers: the original runtime-sized VLAs
    // (int h_a[n]) can overflow the stack for large n.
    int *h_a = (int *)malloc(n*sizeof(int));
    int *h_b = (int *)malloc(n*sizeof(int));
    int *h_c = (int *)malloc(n*sizeof(int));
    for(int i=0; i<n; i++){
        h_a[i] = i ;
        h_b[i] = n-i ;
    }
    // Allocate device buffers.
    int *d_a ;
    int *d_b ;
    int *d_c ;
    int *d_n ;
    cudaMalloc((void **) &d_a, n*sizeof(int));
    cudaMalloc((void **) &d_b, n*sizeof(int));
    cudaMalloc((void **) &d_c, n*sizeof(int));
    cudaMalloc((void **) &d_n, sizeof(int));
    // Events for kernel timing.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy inputs host -> device (pass the pointers directly; the old code
    // passed &h_a, which only worked because the addresses coincide for
    // arrays).
    cudaMemcpy(d_a, h_a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    // One thread per element, 1024 threads per block (ceil division).
    int numBlock = n/1024 ;
    if(n%1024){
        numBlock++ ;
    }
    cudaEventRecord(start);
    parallel_vector_add<<<numBlock, 1024>>>(d_a, d_b, d_c, d_n) ;
    /*
    parallel_vector_add<<<n, 1>>>(d_a, d_b, d_c, d_n) ;
    Alternative: 1 thread per block.
    Measured for n = 100000: 1024 threads/block took about 3021 ms, while
    1 thread/block took about 416 ms (the per-thread printf dominates).
    */
    cudaEventRecord(stop);
    // Blocking copy back also guarantees the kernel has finished before we
    // read h_c on the host.
    cudaMemcpy(h_c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_n);           // was leaked before
    cudaEventDestroy(start); // events were leaked before
    cudaEventDestroy(stop);
    for(int i = 0; i<n; i++){
        printf("%d ", h_c[i]);
    }
    printf("\ntime used = %f\n", milliseconds);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
2,002
|
/*
Non-separable 2D, 3D and 4D Filtering with CUDA
Copyright (C) <2013> Anders Eklund, andek034@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef HELP_FUNCTIONS_CU_
#define HELP_FUNCTIONS_CU_
// Linear index into a row-major 2D array of width DATA_W.
__device__ int Get_2D_Index(int x, int y, int DATA_W)
{
    return y * DATA_W + x;
}
// Linear index into a row-major 3D volume (width DATA_W, height DATA_H).
__device__ int Get_3D_Index(int x, int y, int z, int DATA_W, int DATA_H)
{
    int slice = DATA_W * DATA_H;
    return z * slice + y * DATA_W + x;
}
// Linear index into a row-major 4D array (t outermost, x innermost);
// DATA_D is the depth of one 3D volume.
__device__ int Get_4D_Index(int x, int y, int z, int t, int DATA_W, int DATA_H, int DATA_D)
{
    int slice  = DATA_W * DATA_H;
    int volume = slice * DATA_D;
    return t * volume + z * slice + y * DATA_W + x;
}
#endif
|
2,003
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <algorithm>
#include <cstdio>
#include "a.cuh"
// Fill a vector of 2 << offset (i.e. 2^(offset+1)) random ints and print
// their sum computed on the device with thrust::reduce.
// Fix: rand() is a host function, so the values are now generated into a
// host_vector and copied to the device in a single bulk transfer. The old
// code ran std::generate directly on device_vector iterators, which
// dereferences device memory one element at a time from the host.
void test(int offset) {
    printf("start\n");
    thrust::host_vector<int> h(2 << offset);
    std::generate(h.begin(), h.end(), rand);
    thrust::device_vector<int> X = h;   // one bulk H2D copy
    int sum = thrust::reduce(X.begin(), X.end());
    printf("sum=%d\n", sum);
    printf("end\n");
}
|
2,004
|
/*
#include "KernelDependencies.h"
// TODO: test collecting cells first and intersecting later. reduces registers,
// but without mem coalescing it is actually 1/3 original performance (300fps for 1 triangle)
// Kernel dependencies
#include "KernelDependencies.h"
// Global constants
static __device__ const float EPSILON = 2e-6f;
static __device__ const float MAX_VALUE = 1e20f;
// Grid data
texture<int2, 1> texCellPointers;
texture<int, 1> texCellTriangleIds;
// Geometry data
texture<float4, 1> texVertices;
texture<float4, 1> texNormals;
// 3-component texture fetch
template<class Type>
static inline __device__ float3 tex1Dfetch3( texture<Type, 1, cudaReadModeElementType> texRef, int idx )
{
float4 texel = tex1Dfetch( texRef, idx );
return make_float3( texel.x, texel.y, texel.z );
}
// Color conversion functions
static inline __device__ uchar4 make_color( unsigned char r, unsigned char g, unsigned char b )
{
return make_uchar4( r, g, b, 255 );
}
static inline __device__ uchar4 make_color( unsigned char value )
{
return make_uchar4( value, value, value, 255 );
}
static inline __device__ uchar4 make_color( float3 color, float alpha )
{
return make_uchar4( color.x * 255.0f, color.y * 255.0f, color.z * 255.0f, alpha * 255.0f );
}
// Clip ray against bounding box
static inline __device__ bool rayHitsBoundingBox( float3 boxMin, float3 boxMax, Ray& ray )
{
float3 t1 = ( boxMin - ray.orig ) * ray.invDir;
float3 t2 = ( boxMax - ray.orig ) * ray.invDir;
float3 minT1T2 = min( t1, t2 );
float3 maxT1T2 = max( t1, t2 );
ray.tnear = max( max( minT1T2.x, minT1T2.y ), minT1T2.z );
ray.tfar = min( min( maxT1T2.x, maxT1T2.y ), maxT1T2.z );
return ray.tnear <= ray.tfar;
}
// Compute intersection between triangle and current ray
// Moller-Trumbore algorithm
static inline __device__ void hitMT( int triangleId, const Ray& ray, Hit& hit )
{
float3 v0 = tex1Dfetch3( texVertices, triangleId );
float3 v1 = tex1Dfetch3( texVertices, triangleId + 1 );
float3 v2 = tex1Dfetch3( texVertices, triangleId + 2 );
float3 e1 = v1 - v0;
float3 e2 = v2 - v0;
float3 tvec = ray.orig - v0;
float3 p = cross(ray.dir, e2);
float3 q = cross(tvec, e1);
float invdet = 1.0f / dot(p, e1);
float u = dot(p, tvec) * invdet;
float v = dot(q, ray.dir) * invdet;
// Update hit
bool isHit = (u >= 0.0f) && (v >= 0.0f) && (u + v <= 1.0f);
float t = dot(q, e2) * invdet;
// Update hit
isHit &= (t > 0.0f) && (t < ray.tfar + EPSILON) && (t < hit.dist);
if( isHit )
{
hit.id = triangleId;
hit.u = u;
hit.v = v;
hit.dist = t;
}
}
// Compute interpolated shading normal
static inline __device__ float3 computeShadingNormal( const Hit& hit )
{
// Get triangle normals
float3 n0 = tex1Dfetch3( texNormals, hit.id );
float3 n1 = tex1Dfetch3( texNormals, hit.id + 1 );
float3 n2 = tex1Dfetch3( texNormals, hit.id + 2 );
return normalize( n0 * ( 1.0f - ( hit.u + hit.v ) ) + // v0 coord
n1 * hit.u + // v1 coord
n2 * hit.v ); // v2 coord
}
static inline __device__ uchar4 shade( const Ray& ray, const Hit& hit )
{
// Need normalized ray direction
float3 rayDirNormalized = normalize( ray.dir );
// Compute interpolated shading normal
float3 sampleNormal = computeShadingNormal( hit );
// Hard-coded material information
const float3 ambient = make_float3( 0.1f, 0.1f, 0.1f );
const float3 diffuse = make_float3( 0.0f, 0.0f, 1.0f );
// Headlight illumination
float nDotD = -dot( sampleNormal, rayDirNormalized );
float3 sampleColor = ( ambient + ( diffuse - ambient ) * nDotD ) * diffuse;
return make_color( sampleColor, 1.0f );
}
static inline __device__ int2 getCell( const Grid& grid, float3 coords )
{
int linearCoord = coords.x + coords.y*grid.gridSize.x + coords.z*grid.gridSize.x*grid.gridSize.y;
return tex1Dfetch( texCellPointers, linearCoord );
}
static inline __device__ bool isEmpty( int2 cell )
{
return cell.y == 0;
}
static inline __device__ int getTriangleStart( int2 cell )
{
return cell.x;
}
static inline __device__ int getTriangleCount( int2 cell )
{
return cell.y;
}
// Kernel for entire ray tracing pipeline
// Each thread traces a single ray
// Each thread computes its own ray direction
__global__ void rayTrace( Camera camera, Grid grid )
{
//////////////////////////////////////////////////////////////////////////
// Step 1: compute my screen coordinates
//////////////////////////////////////////////////////////////////////////
unsigned int screenX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int screenY = blockIdx.y * blockDim.y + threadIdx.y;
float uStep = (float)screenX * camera.invScreenWidth;
float vStep = (float)screenY * camera.invScreenHeight;
unsigned int pixelAddress = screenY*camera.screenWidth + screenX;
//////////////////////////////////////////////////////////////////////////
// Step 2.1: minimal ray attributes for box clipping
//////////////////////////////////////////////////////////////////////////
Ray ray;
ray.orig = camera.position;
ray.dir = camera.baseDir + camera.nearU*uStep + camera.nearV*vStep;
ray.invDir = rcp( ray.dir );
// If don't hit bbox in local space, no need to trace underlying triangles
if( !rayHitsBoundingBox( grid.boxMin, grid.boxMax, ray ) )
{
// Background color
camera.frameBuffer[pixelAddress] = make_color( 255 );
return;
}
//////////////////////////////////////////////////////////////////////////
// Step 2.2: remaining ray attributes
//////////////////////////////////////////////////////////////////////////
float3 dirSignBits;
dirSignBits.x = ( ray.dir.x < 0.0f )? 1.0f : 0.0f;
dirSignBits.y = ( ray.dir.y < 0.0f )? 1.0f : 0.0f;
dirSignBits.z = ( ray.dir.z < 0.0f )? 1.0f : 0.0f;
float3 notDirSignBits = not( dirSignBits );
//////////////////////////////////////////////////////////////////////////
// Initial setup
//////////////////////////////////////////////////////////////////////////
// 1. Find initial cell where ray begins
// Since ray was already clipped against bbox (grid),
// ray tnear gives us the starting t (thus the start point as well)
float3 startPoint = ray.orig + ray.dir * ray.tnear;
// Floor is needed when working with float values (equivalent to truncating to int)
float3 cellCoords = floor( clamp( grid.worldToVoxel( startPoint ),
make_float3( 0.0f ), grid.gridSize - make_float3( 1.0f ) ) );
// 2. Compute stepX, stepY, stepZ
float3 cellStep = -dirSignBits + notDirSignBits;
// 3 Compute out of grid limits
float3 outLimit = -dirSignBits + grid.gridSize * notDirSignBits;
// 4. Compute tDeltaX, tDeltaY, tDeltaZ
float3 tDelta = abs( grid.cellSize * ray.invDir );
// 5. Compute tNextX, tNextY, tNextZ
float3 tMax = ( grid.voxelToWorld( cellCoords + notDirSignBits ) - ray.orig ) * ray.invDir;
//////////////////////////////////////////////////////////////////////////
// Trace ray through grid
//////////////////////////////////////////////////////////////////////////
// Find first non-empty cell
int2 cell = getCell( grid, cellCoords );
// Store hit information
Hit hit;
hit.dist = MAX_VALUE;
// Minimum tMax in all 3 dimensions, used for logical comparison to determine next cell
float minTmax;
// Stores 1 for next cell dimension and 0 for the others, used to select next cell
float3 comp;
// While inside grid
do
{
// Early traversal pre-computation
// Already begin computing next cell to be visited before testing current one
// To go to next cell, need to decide which dimension is next
// comp stores 1 for next dimension and 0 for others
minTmax = min( min( tMax.x, tMax.y ), tMax.z );
comp = step( tMax - make_float3( minTmax ), make_float3( EPSILON ) );
// Step ray according to comp
cellCoords += cellStep * comp;
tMax += tDelta * comp;
// If cell contains triangles, test intersection
if( !isEmpty( cell ) )
{
// We send the smallest tMax as the maximum valid distance
// This avoids false intersections outside current cell
ray.tfar = minTmax;
// Iterate through triangles in given cell and compute nearest intersection, if any
int i = getTriangleStart( cell );
int end = getTriangleStart( cell ) + getTriangleCount( cell );
while( i < end )
{
// Get triangle id
int triangleId = tex1Dfetch( texCellTriangleIds, i );
// Check for intersection
hitMT( triangleId, ray, hit );
// Go to next triangle
++i;
}
// If found hit
if( hit.dist < MAX_VALUE )
{
camera.frameBuffer[pixelAddress] = shade( ray, hit );
return;
}
}
// Get next cell
// The above code could all go here, but it is faster to do it early
cell = getCell( grid, cellCoords );
} while( cellCoords.x != outLimit.x && cellCoords.y != outLimit.y && cellCoords.z != outLimit.z );
// Background color
camera.frameBuffer[pixelAddress] = make_color( 255 );
}
//////////////////////////////////////////////////////////////////////
// TODO: need methods from "RayTracing_kernel.cu"
// TODO: all goes well until traceValidRays kernel -> requires 40+ registers!!!
__global__ void initRays( Camera camera, float4* rayOrigins, float4* rayDirs, float4* rayInvDirs )
{
//////////////////////////////////////////////////////////////////////////
// Step 1: compute my screen coordinates
//////////////////////////////////////////////////////////////////////////
unsigned int screenX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int screenY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int pixelAddress = screenY*camera.screenWidth + screenX;
//////////////////////////////////////////////////////////////////////////
// Step 2: compute ray directions
//////////////////////////////////////////////////////////////////////////
float uStep = (float)screenX * camera.invScreenWidth;
float vStep = (float)screenY * camera.invScreenHeight;
float3 rayDir = camera.baseDir + camera.nearU*uStep + camera.nearV*vStep;
//////////////////////////////////////////////////////////////////////////
// Step 3: store values for next kernel
//////////////////////////////////////////////////////////////////////////
rayOrigins[pixelAddress] = make_float4( camera.position );
rayDirs[pixelAddress] = make_float4( rayDir );
rayInvDirs[pixelAddress] = make_float4( rcp( rayDir ) );
}
__global__ void hitSceneBox( Camera camera, Grid grid,
float4* rayOrigins, float4* rayInvDirs, float* tnears )
{
//////////////////////////////////////////////////////////////////////////
// Step 1: compute my screen coordinates
//////////////////////////////////////////////////////////////////////////
unsigned int screenX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int screenY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int pixelAddress = screenY*camera.screenWidth + screenX;
//////////////////////////////////////////////////////////////////////////
// Step 2: minimal ray attributes for box clipping
//////////////////////////////////////////////////////////////////////////
float3 rayOrig = make_float3( rayOrigins[pixelAddress] );
float3 rayInvDir = make_float3( rayInvDirs[pixelAddress] );
//////////////////////////////////////////////////////////////////////////
// Step 3: clip ray against grid bounding box
//////////////////////////////////////////////////////////////////////////
float3 t1 = ( grid.boxMin - rayOrig ) * rayInvDir;
float3 t2 = ( grid.boxMax - rayOrig ) * rayInvDir;
float3 minT1T2 = min( t1, t2 );
float3 maxT1T2 = max( t1, t2 );
float tnear = max( max( minT1T2.x, minT1T2.y ), minT1T2.z );
float tfar = min( min( maxT1T2.x, maxT1T2.y ), maxT1T2.z );
bool hit = tnear <= tfar;
//////////////////////////////////////////////////////////////////////////
// Step 4: store values for next kernel
//////////////////////////////////////////////////////////////////////////
tnears[pixelAddress] = ( hit )? tnear : MAX_VALUE;
}
__global__ void traceValidRays( Camera camera, Grid grid,
float4* rayOrigins, float4* rayDirs, float4* rayInvDirs, float* tnears, Hit* hits )
{
//////////////////////////////////////////////////////////////////////////
// Step 1: compute my screen coordinates
//////////////////////////////////////////////////////////////////////////
unsigned int screenX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int screenY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int pixelAddress = screenY*camera.screenWidth + screenX;
//////////////////////////////////////////////////////////////////////////
// Step 2: abort invalid rays
//////////////////////////////////////////////////////////////////////////
Ray ray;
ray.tnear = tnears[pixelAddress];
if( ray.tnear == MAX_VALUE )
{
// Background color
camera.frameBuffer[pixelAddress] = make_color( 255 );
return;
}
//////////////////////////////////////////////////////////////////////////
// Step 3: remaining ray attributes
//////////////////////////////////////////////////////////////////////////
ray.orig = make_float3( rayOrigins[pixelAddress] );
ray.dir = make_float3( rayDirs[pixelAddress] );
ray.invDir = make_float3( rayInvDirs[pixelAddress] );
float3 dirSignBits;
dirSignBits.x = ( ray.dir.x < 0.0f )? 1.0f : 0.0f;
dirSignBits.y = ( ray.dir.y < 0.0f )? 1.0f : 0.0f;
dirSignBits.z = ( ray.dir.z < 0.0f )? 1.0f : 0.0f;
float3 notDirSignBits = not( dirSignBits );
//////////////////////////////////////////////////////////////////////////
// Initial setup
//////////////////////////////////////////////////////////////////////////
// 1. Find initial cell where ray begins
// Since ray was already clipped against bbox (grid),
// ray tnear gives us the starting t (thus the start point as well)
float3 startPoint = ray.orig + ray.dir * ray.tnear;
// Floor is needed when working with float values (equivalent to truncating to int)
float3 cellCoords = floor( clamp( grid.worldToVoxel( startPoint ),
make_float3( 0.0f ), grid.cellTotal - make_float3( 1.0f ) ) );
// 2. Compute stepX, stepY, stepZ
float3 cellStep = -dirSignBits + notDirSignBits;
// 3 Compute out of grid limits
float3 outLimit = -dirSignBits + grid.cellTotal * notDirSignBits;
// 4. Compute tDeltaX, tDeltaY, tDeltaZ
float3 tDelta = abs( grid.cellSize * ray.invDir );
// 5. Compute tNextX, tNextY, tNextZ
float3 tMax = ( grid.voxelToWorld( cellCoords + notDirSignBits ) - ray.orig ) * ray.invDir;
//////////////////////////////////////////////////////////////////////////
// Trace ray through grid
//////////////////////////////////////////////////////////////////////////
// Find first non-empty cell
int2 cell = getCell( grid, cellCoords );
// Store hit information
Hit hit;
hit.dist = MAX_VALUE;
// Minimum tMax in all 3 dimensions, used for logical comparison to determine next cell
float minTmax;
// Stores 1 for next cell dimension and 0 for the others, used to select next cell
float3 comp;
// While inside grid
do
{
// Early traversal pre-computation
// Already begin computing next cell to be visited before testing current one
// To go to next cell, need to decide which dimension is next
// comp stores 1 for next dimension and 0 for others
minTmax = min( min( tMax.x, tMax.y ), tMax.z );
comp = step( tMax - make_float3( minTmax ), make_float3( EPSILON ) );
// Step ray according to comp
cellCoords += cellStep * comp;
tMax += tDelta * comp;
// If cell contains triangles, test intersection
if( !isEmpty( cell ) )
{
// We send the smallest tMax as the maximum valid distance
// This avoids false intersections outside current cell
ray.tfar = minTmax;
// Iterate through triangles in given cell and compute nearest intersection, if any
int i = getTriangleStart( cell );
int end = getTriangleStart( cell ) + getTriangleCount( cell );
while( i < end )
{
// Get triangle id
int triangleId = tex1Dfetch( texCellTriangleIds, i );
// Check for intersection
hitMT( triangleId, ray, hit );
// Go to next triangle
++i;
}
// If found hit
if( hit.dist < MAX_VALUE )
break;
}
// Get next cell
// The above code could all go here, but it is faster to do it early
cell = getCell( grid, cellCoords );
} while( cellCoords.x != outLimit.x && cellCoords.y != outLimit.y && cellCoords.z != outLimit.z );
//////////////////////////////////////////////////////////////////////////
// Step 4: store values for next kernel
//////////////////////////////////////////////////////////////////////////
hits[pixelAddress] = hit;
}
__global__ void shadeHits( Camera camera, float4* rayDirs, Hit* hits )
{
//////////////////////////////////////////////////////////////////////////
// Step 1: compute my screen coordinates
//////////////////////////////////////////////////////////////////////////
unsigned int screenX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int screenY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int pixelAddress = screenY*camera.screenWidth + screenX;
//////////////////////////////////////////////////////////////////////////
// Step 2: abort rays that hit nothing
//////////////////////////////////////////////////////////////////////////
Hit hit = hits[pixelAddress];
if( hit.dist == MAX_VALUE )
{
camera.frameBuffer[pixelAddress] = make_color( 255 );
return;
}
//////////////////////////////////////////////////////////////////////////
// Step 3: shade intersected rays
//////////////////////////////////////////////////////////////////////////
Ray ray;
ray.dir = make_float3( rayDirs[pixelAddress] );
camera.frameBuffer[pixelAddress] = shade( ray, hit );
}
*/
|
2,005
|
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <limits>
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
// One thread per element: c[offset+i] = a[offset+i] + b[offset+i].
// N is the number of elements this launch covers; offset shifts the window
// into the buffers (used by the streamed, chunked variant).
__global__ void vectorAddGPU(float *a, float *b, float *c, int N, int offset)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return;                    // guard the grid tail
    const int j = offset + tid;
    c[j] = a[j] + b[j];
}
// Verify c[i] == a[i] + b[i] element-wise; returns 1 on success, 0 on the
// first mismatch.
// Fix: the data are floats, so the tolerance is now
// std::numeric_limits<float>::epsilon(). The original compared against the
// double epsilon (~2.2e-16), which is tighter than float rounding can
// guarantee in general (it only passed because a single float add is
// bit-identical on host and device).
int check(float *a, float *b, float *c, int size) {
    const float eps = std::numeric_limits<float>::epsilon();
    for (int i = 0; i < size; ++i) {
        if (std::fabs(c[i] - (a[i] + b[i])) > eps) {
            return 0;
        }
    }
    return 1;
}
// Single-stream GPU vector add over `size` floats with event timing and a
// CPU verification pass. The timed region covers H2D copies, the kernel,
// and the D2H copy.
void sample_vec_add(int size = 1048576)
{
    int n = size;
    // Fix: buffers hold floats; the original used n*sizeof(int), which is
    // only correct where sizeof(int) == sizeof(float).
    size_t nBytes = n*sizeof(float);
    float *a, *b; // host inputs
    float *c;     // host result
    a = (float *)malloc(nBytes);
    b = (float *)malloc(nBytes);
    c = (float *)malloc(nBytes);
    float *a_d,*b_d,*c_d;
    dim3 block(256);
    dim3 grid((unsigned int)ceil(n/(float)block.x));  // ceil-divide n over blocks
    for(int i=0;i<n;i++)
    {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }
    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d,nBytes);
    cudaMalloc((void **)&b_d,nBytes);
    cudaMalloc((void **)&c_d,nBytes);
    printf("Copying to device..\n");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(a_d,a,nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d,b,nBytes, cudaMemcpyHostToDevice);
    printf("Doing GPU Vector add\n");
    vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n, 0);
    // Blocking copy back; also serves as the sync point for the kernel.
    cudaMemcpy(c,c_d,nBytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("GPU time: %f ms\n", milliseconds);
    cudaDeviceSynchronize();
    int res = check(a, b, c, n);
    if (res) {
        std::cout << "Correct result" << std::endl;
    } else {
        std::cout << "Not correct result" << std::endl;
    }
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);  // events were leaked before
    cudaEventDestroy(stop);
    free(a);                  // host buffers were leaked before
    free(b);
    free(c);
}
// Chunked vector add using n_streams CUDA streams so H2D copies, kernels
// and D2H copies from different chunks can overlap. Host buffers are
// pinned (cudaHostAlloc) so cudaMemcpyAsync is truly asynchronous.
// Fixes: the tail (n % n_streams elements) was silently dropped; c was
// pointlessly copied H2D and a/b pointlessly copied D2H; streams and
// events were leaked.
void streams_vec_add(int n_streams, int size = 1048576)
{
    int n = size;
    float *a, *b; // host data
    float *c;     // results
    cudaHostAlloc( (void**) &a, n * sizeof(float) ,cudaHostAllocDefault );
    cudaHostAlloc( (void**) &b, n * sizeof(float) ,cudaHostAllocDefault );
    cudaHostAlloc( (void**) &c, n * sizeof(float) ,cudaHostAllocDefault );
    float *a_d,*b_d,*c_d;
    for(int i=0;i<n;i++)
    {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }
    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d,n*sizeof(float));
    cudaMalloc((void **)&b_d,n*sizeof(float));
    cudaMalloc((void **)&c_d,n*sizeof(float));
    printf("Copying to device..\n");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    printf("Doing GPU-stream Vector add\n");
    const int NbStreams = n_streams;
    const int StreamSize = n / NbStreams;
    cudaStream_t Stream[NbStreams];
    for ( int i = 0; i < NbStreams; i++ )
        cudaStreamCreate(&Stream[i]);
    for ( int i = 0; i < NbStreams; i++ )
    {
        int Offset = i * StreamSize;
        // The last stream also takes the remainder so no elements are
        // dropped when n is not divisible by NbStreams.
        int Chunk = (i == NbStreams - 1) ? (n - Offset) : StreamSize;
        // Only the inputs go up, only the result comes back.
        cudaMemcpyAsync(&a_d[Offset], &a[Offset], Chunk * sizeof(float), cudaMemcpyHostToDevice, Stream[ i ]);
        cudaMemcpyAsync(&b_d[Offset], &b[Offset], Chunk * sizeof(float), cudaMemcpyHostToDevice, Stream[ i ]);
        dim3 block(1024);
        dim3 grid((Chunk - 1)/1024 + 1);   // ceil-divide the chunk
        vectorAddGPU<<<grid, block, 0, Stream[i]>>>(a_d, b_d, c_d, Chunk, Offset);
        cudaMemcpyAsync(&c[Offset], &c_d[Offset], Chunk * sizeof(float), cudaMemcpyDeviceToHost, Stream[ i ]);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "STREAMS NUMBERS: " << NbStreams << std::endl;
    printf("GPU-stream time: %f ms\n", milliseconds);
    cudaDeviceSynchronize();
    int res = check(a, b, c, n);
    if (res) {
        std::cout << "Correct result" << std::endl;
    } else {
        std::cout << "Not correct result" << std::endl;
    }
    for ( int i = 0; i < NbStreams; i++ )
        cudaStreamDestroy(Stream[i]);   // streams were leaked before
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
}
// argv[1]: vector size (default 1M); argv[2]: stream count (default 8).
// Fix: the original dereferenced argv[1] unconditionally and crashed when
// run without arguments.
int main(int argc, char **argv)
{
    int size = (argc > 1) ? atoi(argv[1]) : 1048576;
    sample_vec_add(size);
    sample_vec_add(size);   // second run: warm driver/context for stabler timing
    int n_streams = (argc == 3) ? atoi(argv[2]) : 8;
    std::cout << "=================================================" << std::endl;
    std::cout << "STREAMS NUMBERS: " << n_streams << std::endl;
    streams_vec_add(n_streams, size);
    return 0;
}
|
2,006
|
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <chrono>
// Warm-up kernel: same branch-divergent arithmetic as mathKernel1, launched
// once so the timed runs below exclude one-time startup overhead.
__global__ void warmingup(float *c) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    float a = 0.0f;
    float b = 0.0f;
    if (tid%2==0) {
        a = 100.0f;
    } else {
        b = 200.0f;
    }
    c[tid] = a + b;
}
// Thread-level divergent branch: even tids take the if-path, odd tids the
// else-path, so every warp executes both sides under masking.
__global__ void mathKernel1(float *c) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float a, b;
a=b=0.0f;
// condition alternates per thread, i.e. diverges within each warp
if (tid%2==0){
a=100.0f;
} else {
b=200.0f;
}
// exactly one of a/b is nonzero, so the result is 100 or 200
c[tid] = a+b;
}
// NOTE(review): this body is byte-identical to mathKernel1 and warmingup.
// In the usual divergence benchmark this variant branches on a warp-aligned
// condition (e.g. (tid/warpSize)%2) to avoid intra-warp divergence --
// presumably that was the intent here; confirm before comparing timings.
__global__ void mathKernel2(float *c) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
float a, b;
a=b=0.0f;
if (tid%2==0){
a=100.0f;
} else {
b=200.0f;
}
c[tid] = a+b;
}
// Same computation as mathKernel1, with the branch condition hoisted into a
// bool before the if (predication-friendly form).
// Fix: the original wrote `tid&2==0`, which parses as `tid & (2==0)` ==
// `tid & 0` == 0 (== binds tighter than &), so the predicate was always
// false and every thread wrote 200. Now even tids get 100, odd tids 200,
// matching the other kernels.
__global__ void mathKernel3(float *c) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    float a, b;
    a=b=0.0f;
    bool ipred = ((tid & 1) == 0);   // true for even tids
    if (ipred){
        a=100.0f;
    } else {
        b=200.0f;
    }
    c[tid] = a+b;
}
// Times the warm-up and three divergence kernels with chrono wall clocks.
// argv[1] overrides the block size, argv[2] the data size.
int main(int argc, char **argv) {
    // setup device
    int dev=0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    std::cout << argv[0] << " using device " << dev << ": " << deviceProp.name << std::endl;
    // set up data size
    int size = 64;
    int blocksize = 64;
    if(argc>1) blocksize = atoi(argv[1]);
    if(argc>2) size = atoi(argv[2]);   // fix: previously overwrote blocksize again
    std::cout << "Data size " << size << std::endl;
    // set up execution configuration (ceil-divide size over blocks)
    dim3 block (blocksize, 1);
    dim3 grid ((size+block.x-1)/block.x, 1);
    std::cout << "Execution configure (block " << block.x << " grid " << grid.x << ")" << std::endl;
    // allocate gpu memory
    float *d_C;
    size_t nBytes = size*sizeof(float);
    cudaMalloc((float**)&d_C, nBytes);
    // run a warmup kernel to remove one-time startup overhead from timings
    cudaDeviceSynchronize();
    auto iStart = std::chrono::system_clock::now();
    warmingup<<<grid, block>>> (d_C);
    cudaDeviceSynchronize();
    auto iElaps = std::chrono::system_clock::now() - iStart;
    auto nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count();
    std::cout << "warmup <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl;
    // run kernel 1
    iStart = std::chrono::system_clock::now();
    mathKernel1<<<grid, block>>> (d_C);
    cudaDeviceSynchronize();
    iElaps = std::chrono::system_clock::now() - iStart;
    nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count();
    std::cout << "kernel1 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl;
    // run kernel 2
    iStart = std::chrono::system_clock::now();
    mathKernel2<<<grid, block>>> (d_C);
    cudaDeviceSynchronize();
    iElaps = std::chrono::system_clock::now() - iStart;
    nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count();
    std::cout << "kernel2 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl;
    // run kernel 3
    iStart = std::chrono::system_clock::now();
    mathKernel3<<<grid, block>>> (d_C);
    cudaDeviceSynchronize();
    iElaps = std::chrono::system_clock::now() - iStart;
    nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(iElaps).count();
    std::cout << "kernel3 <<< " << grid.x << " " << block.x << " >>> elapsed " << nsec << std::endl;
    // free gpu memory and reset device
    cudaFree(d_C);
    cudaDeviceReset();
    return EXIT_SUCCESS;
}
|
2,007
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include "assert.h"
using namespace std;
// Read a file of comma-separated integers (possibly spread over multiple
// lines) and return them in order of appearance. Returns an empty vector
// when the file cannot be opened.
// Note: std::stoi throws on malformed tokens; input is assumed well-formed.
// Fix: removed the `index` counter, which was incremented but never used.
std::vector<int> readFile(std::string filename)
{
    std::ifstream infile(filename);
    std::vector<int> vnum;
    std::string line;
    while (std::getline(infile, line))
    {
        std::stringstream ss(line);
        std::string sint;
        while (std::getline(ss, sint, ','))
        {
            vnum.push_back(std::stoi(sint));
        }
    }
    return vnum;
}
// Write `limit` ints from `data` to `filename` as ", "-separated values on
// one line. Prints an error message (no throw) if the file cannot be opened.
void writeFile(const char* filename, int* data, int limit)
{
    std::ofstream myfile(filename);
    if (!myfile.is_open())
    {
        printf("Unable to open/write to file %s", filename);
        return;
    }
    for (int i = 0; i < limit; i++)
    {
        if (i > 0)
            myfile << ", ";    // separator before every element but the first
        myfile << data[i];
    }
    myfile.close();
}
// Histogram of the hundreds bucket (value/100) of each input element into
// gpu_out using global-memory atomics. Each thread takes one contiguous
// chunk of the input.
// NOTE(review): gpu_out must be zeroed before launch -- the visible host
// code never memsets gpu_out_2a; confirm upstream.
__global__ void q2a_global_counter(int* gpu_out, int* gpu_in, int n)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Chunk size is slightly over-provisioned (+1); trailing threads simply
    // get an empty [start, stop) range.
    int numbersPerBlockThread = (n / (blockDim.x * gridDim.x)) + 1;
    int start = idx * numbersPerBlockThread;
    int stop = min(n, (start + numbersPerBlockThread));
    for(int i=start; i<stop; i++)
    {
        int hundreds_value = gpu_in[i] / 100;
        atomicAdd(&(gpu_out[hundreds_value]), 1);
    }
    // Fix: removed the trailing __syncthreads() -- a barrier at the very end
    // of a kernel with no subsequent reads synchronizes nothing.
}
// Same histogram as q2a, but accumulated in dynamic shared memory and
// flushed to gpu_out once per block. Launch with 10*sizeof(int) dynamic
// shared bytes (matches the host launch in this file).
// Fixes vs. the original:
//  * `shared_out[10] = {0}` wrote a single 0 one slot past the array
//    instead of zeroing it -- all 10 counters are now zeroed cooperatively;
//  * missing __syncthreads() between initialization and the atomicAdds;
//  * the stride `(blockIdx.x+1)*blockDim.x` was block-dependent, so blocks
//    skipped or double-counted elements -- now a true grid stride;
//  * only global thread 0 flushed shared counters, losing every other
//    block's counts -- now thread 0 of EACH block flushes;
//  * __syncthreads() inside the divergent flush loop (undefined behavior)
//    removed.
__global__ void q2b_shared_mem_counter(int* gpu_out, int* gpu_in, int n)
{
    extern __shared__ int shared_out[];
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    // Cooperatively zero the 10 shared counters.
    for (int i = threadIdx.x; i < 10; i += blockDim.x)
        shared_out[i] = 0;
    __syncthreads();
    // Grid-stride loop over the input.
    for (int i = idx; i < n; i += stride)
    {
        int hundreds_val = gpu_in[i] / 100;
        atomicAdd(&shared_out[hundreds_val], 1);
    }
    __syncthreads();
    // One thread per block merges its block's counters into the result.
    if (threadIdx.x == 0)
    {
        for (int i = 0; i < 10; i++)
            atomicAdd(&(gpu_out[i]), shared_out[i]);
    }
}
// Blelloch-style (work-efficient) parallel prefix sum of gpu_in[0..n) into
// gpu_out, computed in dynamic shared memory padded up to n2 = next power
// of two >= n. Intended for a single block: the tree passes are driven by
// threadIdx.x only, and __syncthreads() cannot coordinate across blocks.
// The example below shows the inclusive scan the kernel aims to produce.
__global__ void q2c_prll_prfx_sum(int* gpu_out, int* gpu_in, int n)
{
// gpu_in = [510,1095,1051,1035,1063,1012,1067,1053,1053,1061]
// gpu_out =[510, 1605, 2656, 3691, 4754, 5766, 6833, 7886, 8939, 10000]
extern __shared__ int shared_mem[];
int tidx = threadIdx.x;
int idx = tidx + blockIdx.x * blockDim.x;;
int offset = 1;
int n2 = (int)pow(2.0, ceil(log((double)n)/log(2.0))); // next power of 2
// Thread 0 serially stages the input into shared memory, zero-padding the
// tail up to n2 (zeros don't change the prefix sums).
if(tidx == 0)
{
for(int i=0;i<n2;i++)
{
if(i<n)
shared_mem[i] = gpu_in[i];
else
shared_mem[i] = 0; // extend non-power-of-2 entries to 0.
}
}
__syncthreads();
// UPWARD FIRST PASS SUM (up-sweep/reduce: builds partial sums up the
// binary tree; after it, shared_mem[n2-1] holds the total)
for(int depth_idx = n2 >> 1; depth_idx > 0; depth_idx >>= 1)
{
__syncthreads();
if(tidx < depth_idx)
{
int ai = offset*(2*tidx+1)-1;
int bi = offset*(2*tidx+2)-1;
shared_mem[bi] += shared_mem[ai];
}
offset <<= 1;
}
__syncthreads();
// UPWARD FIRST PASS ENDS AND 2nd DOWNWARD PASS BEGINS (down-sweep: clears
// the root and propagates partial sums back down, leaving an EXCLUSIVE
// scan in shared_mem)
if(tidx == 0)
{
shared_mem[n2-1] = 0;
}
for(int depth_idx=1; depth_idx<n2; depth_idx<<=1)
{
offset >>= 1;
__syncthreads();
if(tidx < depth_idx)
{
int ai = offset*(2*tidx+1)-1;
int bi = offset*(2*tidx+2)-1;
if(ai < 0 || bi < 0)
continue;
int temp = shared_mem[ai];
shared_mem[ai] = shared_mem[bi];
shared_mem[bi] += temp;
}
}
__syncthreads();
// Shift the exclusive scan left by one element to obtain the inclusive
// scan expected by the example above.
// NOTE(review): when n == n2 (n a power of two), idx == n-1 reads
// shared_mem[n2], one slot past the staged region -- the last output
// element would be garbage unless the shared allocation is oversized.
// Confirm the launch's shared-memory size and intended n range.
if(idx < n)
{
gpu_out[idx] = shared_mem[idx+1];
}
}
// Driver: loads integers from inp.txt, runs three GPU histogram/scan kernels
// (global-atomic counter, shared-memory counter, parallel prefix sum), times
// each with CUDA events, and writes the results to q2a/q2b/q2c.txt.
// Fixes vs. original: the host staging buffer `numbers` and the two timing
// events were leaked; they are now released before exit.
int main(int argc, char **argv)
{
    // --- device discovery / capability report ---
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == 0)
    {
        printf("Using device %d: %s\nglobal mem: %dB; compute v%d.%d; clock: %d kHz\n",
               dev, devProps.name, (int)devProps.totalGlobalMem,
               (int)devProps.major, (int)devProps.minor,
               (int)devProps.clockRate);
        printf("sharedMemPerBlock: %zu sharedMemPerMultiprocessor: %zu\n", devProps.sharedMemPerBlock, devProps.sharedMemPerMultiprocessor);
        printf("regsPerMultiprocessor: %d\n", devProps.regsPerMultiprocessor);
    }
    // --- load and stage input on the host ---
    vector<int> vnum = readFile("inp.txt");
    // vector<int> vnum = readFile("inp1mil.txt");
    const int IN_SIZE = vnum.size();
    const int IN_BYTES = IN_SIZE * sizeof(int);
    const int OUT_SIZE = 10; //this is specific to the output range.
    const int OUT_BYTES = OUT_SIZE * sizeof(int);
    int* numbers;
    numbers = (int *)malloc(IN_BYTES);
    for(int i=0; i < vnum.size(); i++)
        numbers[i] = vnum[i];
    // int MAX_THREADS_PER_BLOCK = 512;
    int threads = 32;
    int blocks = 8;
    printf("Input size: %d blocks: %d threads: %d\n\n", IN_SIZE, blocks, threads);
    int *gpu_in;
    int *gpu_out_2a;
    int *gpu_out_2b;
    int *gpu_out_2c;
    int cpu_out_2a[OUT_SIZE] = {0};
    int cpu_out_2b[OUT_SIZE] = {0};
    int cpu_out_2c[OUT_SIZE] = {0};
    cudaError_t ret;
    float elapsedTime_2a;
    float elapsedTime_2b;
    float elapsedTime_2c;
    cudaEvent_t start, stop;  // reused for all three timings
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    ret = cudaMalloc((void **) &gpu_in, IN_BYTES);
    printf("gpu_in Malloc %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    ret = cudaMalloc((void **) &gpu_out_2a, OUT_BYTES);
    printf("gpu_out_2a Malloc %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    ret = cudaMalloc((void **) &gpu_out_2b, OUT_BYTES);
    printf("gpu_out_2b Malloc %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    ret = cudaMalloc((void **) &gpu_out_2c, OUT_BYTES);
    printf("gpu_out_2c Malloc %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    ret = cudaMemcpy((void *)gpu_in, (void *)numbers, IN_BYTES , cudaMemcpyHostToDevice);
    printf("gpu_in Memcpy %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    // see https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#execution-configuration
    // for <<<Dg, Db, Ns, S>>> parameter explanation.
    // 2a: global-memory atomic counters
    cudaEventRecord(start, 0);
    q2a_global_counter<<<blocks, threads>>>(gpu_out_2a, gpu_in, IN_SIZE);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime_2a, start, stop);
    // 2b: per-block shared-memory counters (3rd launch arg = dynamic smem)
    cudaEventRecord(start, 0);
    q2b_shared_mem_counter<<<blocks, threads, OUT_SIZE*sizeof(int)>>>(gpu_out_2b, gpu_in, IN_SIZE);
    // ret = cudaPeekAtLastError();
    // printf("cudaPeekAtLastError %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime_2b, start, stop);
    // 2c: prefix sum over the 2a histogram; shared memory padded to the
    // next power of two so the scan tree is complete
    cudaEventRecord(start, 0);
    int n2 = (int)pow(2.0, ceil(log((double)OUT_SIZE)/log(2.0))); // next power of 2
    q2c_prll_prfx_sum<<<blocks, threads, (n2)*sizeof(int)>>>(gpu_out_2c, gpu_out_2a, OUT_SIZE);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime_2c, start, stop);
    ret = cudaMemcpy(cpu_out_2a, gpu_out_2a, OUT_BYTES, cudaMemcpyDeviceToHost);
    printf("cpu_out_2a Memcpy %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    ret = cudaMemcpy(cpu_out_2b, gpu_out_2b, OUT_BYTES, cudaMemcpyDeviceToHost);
    printf("cpu_out_2b Memcpy %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    ret = cudaMemcpy(cpu_out_2c, gpu_out_2c, OUT_BYTES, cudaMemcpyDeviceToHost);
    printf("cpu_out_2c Memcpy %s\n", ret == cudaSuccess ? "Success!": cudaGetErrorString(ret));
    // correct output:
    // 2a & b): 510, 1095, 1051, 1035, 1063, 1012, 1067, 1053, 1053, 1061
    // 2c) 510, 1605, 2656, 3691, 4754, 5766, 6833, 7886, 8939, 10000
    printf("\n\n2a: %f\n", elapsedTime_2a);
    for(int i=0;i<OUT_SIZE;i++)
    {
        printf("%d=%d ", i, cpu_out_2a[i]);
    }
    writeFile("q2a.txt", cpu_out_2a, OUT_SIZE);
    printf("\n\n2b: %f\n", elapsedTime_2b);
    for(int i=0;i<OUT_SIZE;i++)
    {
        printf("%d=%d ", i, cpu_out_2b[i]);
    }
    writeFile("q2b.txt", cpu_out_2b, OUT_SIZE);
    printf("\n\n2c: %f\n", elapsedTime_2c);
    for(int i=0;i<OUT_SIZE;i++)
    {
        printf("%d=%d ", i, cpu_out_2c[i]);
    }
    writeFile("q2c.txt", cpu_out_2c, OUT_SIZE);
    printf("\n\n");
    cudaFree(gpu_in);
    cudaFree(gpu_out_2a);
    cudaFree(gpu_out_2b);
    cudaFree(gpu_out_2c);
    // release timing events and the host staging buffer (leaked before)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(numbers);
    return 0;
}
|
2,008
|
#include "cuda_runtime.h"
#include <iostream>
// No-op kernel: the body is intentionally empty so the timing loop in main()
// below measures only kernel-launch overhead.
__global__ void empty() {}
// Measures average kernel-launch latency by launching an empty kernel
// `repeat` times between two CUDA events.
// Fixes vs. original: kernels were launched on the default stream while the
// events were recorded on `stream`, so the events did not explicitly bracket
// the launches; the stream and events were also never destroyed.
int main() {
    int device_id = 0;
    cudaSetDevice(device_id);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    int repeat = 1000;
    cudaEventRecord(start, stream);
    for (int i = 0; i < repeat; ++i) {
        // launch on the same stream the events are recorded on
        empty<<<1, 1, 0, stream>>>();
    }
    cudaEventRecord(end, stream);
    cudaEventSynchronize(end);
    float elapse_time = 0.0f;
    cudaEventElapsedTime(&elapse_time, start, end);
    std::cout << "launch latency: " << elapse_time / (1.0f * repeat) << " ms."
              << std::endl;
    // release timing resources
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaStreamDestroy(stream);
    return 0;
}
|
2,009
|
// Fractional part of x: x minus floor(x), in [0, 1) for finite inputs.
__device__ float fracf(float x) {
    float whole = floorf(x);
    return x - whole;
}
// Shader-style hash PRNG: deterministic pseudo-random value in [0, 1)
// derived from the pair (s, t) and the two seed words rSeed[0..1]
// (the classic frac(sin(dot(...)) * 43758.5453) construction).
__device__ float random (float s, float t, float *rSeed) {
    return fracf(sinf(s*12.98123198*rSeed[0] + t*78.231233*rSeed[1])*43758.5453123);
}
// Fitness of a triangle genome (p1, p2, p3) against nObs obstacle points
// stored in obs as interleaved (x, y) pairs. Base fitness is
// |cross(p2-p1, p3-p1)| = twice the triangle area; it is halved once per
// obstacle that lies strictly inside the triangle (same-sign edge cross
// products, either winding), and zeroed if any vertex leaves [-1, 1]^2.
__device__ float fitness(
    float p1x, float p1y, float p2x, float p2y, float p3x, float p3y,
    float *obs, int nObs) {
    float ox, oy, ux, uy, vx, vy, wx, wy, pux, puy, pvx, pvy, pwx, pwy, cross1, cross2, cross3, fit;
    // twice the unsigned triangle area
    ux = p2x-p1x; uy = p2y-p1y;
    vx = p3x-p1x; vy = p3y-p1y;
    fit = ux*vy - uy*vx;
    if(fit<0) fit = -fit;
    for (int i=0; i<nObs; i++) {
        ox = obs[i*2+0];
        oy = obs[i*2+1];
        // edge vectors (u, v, w) and vertex-to-obstacle vectors (pu, pv, pw)
        ux = p2x-p1x; uy=p2y-p1y;
        pux = ox-p1x; puy=oy-p1y;
        vx = p3x-p2x; vy=p3y-p2y;
        pvx = ox-p2x; pvy=oy-p2y;
        wx = p1x-p3x; wy=p1y-p3y;
        pwx = ox-p3x; pwy=oy-p3y;
        cross1 = ux*puy-uy*pux;
        cross2 = vx*pvy-vy*pvx;
        cross3 = wx*pwy-wy*pwx;
        // obstacle inside a counter-clockwise triangle -> halve fitness
        if( cross1 > 0 && cross2 > 0 && cross3 > 0){ fit /= 2.0; }
        // same test for clockwise winding
        if( cross1 < 0 && cross2 < 0 && cross3 < 0){ fit /= 2.0; }
    }
    // any vertex outside the unit square invalidates the genome
    if(p1x > 1 || p1x < -1) { fit = 0; }
    if(p1y > 1 || p1y < -1) { fit = 0; }
    if(p2x > 1 || p2x < -1) { fit = 0; }
    if(p2y > 1 || p2y < -1) { fit = 0; }
    if(p3x > 1 || p3x < -1) { fit = 0; }
    if(p3y > 1 || p3y < -1) { fit = 0; }
    return fit;
}
// One thread per genome: evaluates fitness of genome `idx`.
// metaData[0] = obstacle count, metaData[1] = population size (see the
// sibling kernels, which read metaData[1] as nGene).
__global__ void computeFitness(float *obs, float *gene, float *fit, int* metaData)
{
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    // Guard must be >=: with the original `>` test, idx == metaData[1] read
    // gene[6n .. 6n+5] and wrote fit[n], both one past the end.
    if(idx >= metaData[1]) return;
    fit[idx] = fitness(
        gene[idx*6], gene[idx*6+1], gene[idx*6+2], gene[idx*6+3], gene[idx*6+4], gene[idx*6+5],
        obs, metaData[0]);
}
// Pairs genome idx with its mirror (nGene-1-idx) and swaps the pair so the
// fitter genome ends up in the first half of the population.
__global__ void rearrangePopulation(float *gene, float *fit, int* metaData)
{
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    int nGene = metaData[1];
    int nHalf = nGene / 2;
    // Guard must be >=: with `>`, thread nHalf paired with element nHalf-1,
    // which thread nHalf-1 was concurrently swapping (data race).
    if(idx >= nHalf) return;
    int j = nGene - 1 - idx;
    if (fit[idx] < fit[j]) {
        // swap the 6 genome components
        for(int k=0; k<6; k++) {
            float t = gene[idx*6+k];
            gene[idx*6+k] = gene[j*6+k];
            gene[j*6+k] = t;
        }
        // Swap the fitness exactly once, OUTSIDE the loop. The original
        // swapped it on every k iteration: 6 swaps (an even count) left
        // fit[] unswapped and inconsistent with the reordered genes.
        float t = fit[idx];
        fit[idx] = fit[j];
        fit[j] = t;
    }
}
// Breeds children into the second half of the population. Thread t handles
// the parent pair (idx, idx+1) with idx = 2*t; each of the 6 child
// components is an independent random blend of the two parents' components.
// NOTE(review): idx2 = idx + int(nHalf*1.5) can exceed nGene-1 for the
// largest idx values, so random() may be seeded from out-of-range gene[]
// reads - confirm the intended population layout/sizing with the host code.
__global__ void crossOver(float *gene, float *rSeed, int* metaData)
{
    const int idx = 2 * (threadIdx.x + blockDim.x*blockIdx.x);
    int nGene = metaData[1];
    int nHalf = nGene / 2;
    if(idx> nHalf) return;
    int childStart = int(nGene / 2);
    int j = idx + 1;              // second parent
    int mutRegion = int(nHalf*1.5);
    int idx2 = idx + mutRegion;   // genome used only as a random-seed source
    // per-component blend weights in [0, 1)
    float w[6] = {
        random(gene[idx2*6], gene[j*6+1], rSeed),
        random(gene[idx2*6+1], gene[j*6], rSeed),
        random(gene[j*6], gene[idx2*6+1], rSeed),
        random(gene[idx2*6+1], gene[j*6+1], rSeed),
        random(gene[idx2*6], gene[j*6], rSeed),
        random(gene[j*6+1], gene[idx2*6+1], rSeed)
    };
    int childIdx = childStart + int(idx/2);
    // child = (1-w)*parentA + w*parentB, component-wise
    for(int i=0; i<6; i++) {
        gene[childIdx*6 + i] = (1.0-w[i])*gene[idx*6+i] + w[i]*gene[j*6+i];
    }
}
// Mutates the fourth quarter of the population: slot (mutStart + idx)
// becomes a randomly scaled copy of genome idx from the first half, with
// one scale factor (in [0.5, 1.5)) shared per vertex.
__global__ void mutate(float *gene, float *rSeed, int* metaData)
{
    const int idx = (threadIdx.x + blockDim.x*blockIdx.x);
    int nGene = metaData[1];
    int nQuater = int(nGene / 4);
    // Guard must be >=: with the original `>` test, idx == nQuater wrote to
    // index nGene/2 + 2*nQuater, which equals nGene (one genome past the
    // population) whenever nGene is divisible by 4.
    if(idx >= nQuater) return;
    int mutStart = int(nGene / 2) + nQuater;
    int i = mutStart + idx;   // genome slot being overwritten
    // scale factors seeded from the slot's current contents
    float mut1 = 1.0 + (random(gene[i*6], gene[i*6], rSeed) - 0.5);
    float mut2 = 1.0 + (random(gene[i*6+1], gene[i*6], rSeed) - 0.5);
    float mut3 = 1.0 + (random(gene[i*6], gene[i*6+1], rSeed) - 0.5);
    gene[i*6+0] = mut1 * gene[idx*6+0];
    gene[i*6+1] = mut1 * gene[idx*6+1];
    gene[i*6+2] = mut2 * gene[idx*6+2];
    gene[i*6+3] = mut2 * gene[idx*6+3];
    gene[i*6+4] = mut3 * gene[idx*6+4];
    gene[i*6+5] = mut3 * gene[idx*6+5];
}
// Swaps each first-half genome idx with a rotated partner in the second
// half, shuffling the population between generations.
__global__ void shuffleGene(float *gene, float *fit, float *rSeed, int* metaData) {
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    int nGene = metaData[1];
    int nHalf = nGene / 2;
    // Guard must be >=: with `>`, idx == nHalf mapped to the same partner j
    // as idx == 0 ((nHalf + Offset) % nHalf == Offset), so two threads
    // swapped with the same element concurrently (data race).
    if(idx >= nHalf) return;
    int Offset = int(nHalf/5.3);
    int j = nHalf + (idx + Offset)%nHalf;
    // swap the 6 genome components
    for(int k=0; k<6; k++) {
        float t = gene[idx*6+k];
        gene[idx*6+k] = gene[j*6+k];
        gene[j*6+k] = t;
    }
    // Swap the fitness exactly once, OUTSIDE the loop. The original swapped
    // it on every k iteration: 6 swaps (an even count) left fit[] unswapped
    // while the genes were exchanged, desynchronizing fit[] from gene[].
    float t = fit[idx];
    fit[idx] = fit[j];
    fit[j] = t;
}
|
2,010
|
// Matrix addition, CPU version
// gcc matrix_cpu.c -o matrix_cpu -std=c99
#include <stdio.h>
#include <math.h>
// Queries device 0 and prints its name, memory clock, memory bus width, and
// the derived theoretical peak memory bandwidth (DDR: 2 transfers/clock).
void printDeviceProperties(){
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    const double peakBandwidthGBs =
        2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6;
    printf("  Device name: %s\n", prop.name);
    printf("  Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
    printf("  Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
    printf("  Peak Memory Bandwidth (GB/s): %f\n\n", peakBandwidthGBs);
}
// Element-wise c = a + b over an N x N row-major matrix, one thread per
// element on a 2D grid. A bounds guard is added so the kernel is also safe
// for launches whose grid does not divide N exactly (no-op for the exact
// grids main() uses).
__global__
void add_matrix(float *a, float *b, float *c, int N) {
    int indexX = blockIdx.x * blockDim.x + threadIdx.x;
    int indexY = blockIdx.y * blockDim.y + threadIdx.y;
    if (indexX >= N || indexY >= N)
        return;  // ragged-edge threads do nothing
    int index = indexY * N + indexX;
    c[index] = a[index] + b[index];
}
// https://www.youtube.com/watch?v=fu0gbHnRGYk
// Zeroes an N x N row-major matrix, one thread per element on a 2D grid.
// A bounds guard is added so the kernel is also safe for launches whose
// grid does not divide N exactly.
__global__
void clear_my_bitch_out(float *c, int N) {
    int indexX = blockIdx.x * blockDim.x + threadIdx.x;
    int indexY = blockIdx.y * blockDim.y + threadIdx.y;
    if (indexX >= N || indexY >= N)
        return;  // ragged-edge threads do nothing
    int index = indexY * N + indexX;
    c[index] = 0;
}
// Benchmarks GPU matrix addition for N = 32..1024 (powers of two), printing
// the result matrix and elapsed time for each size.
// Fixes vs. original: the three `new float[N*N]` host arrays were leaked on
// EVERY loop iteration; deprecated cudaThreadSynchronize() replaced with
// cudaDeviceSynchronize(); timing events are destroyed each iteration.
int main() {
    printDeviceProperties();
    for (unsigned int i = 5; i < 11; i++) {
        const int N = pow(2, i);
        const int blockSize = pow(2, 4);   // 16x16 threads per block
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        float* a = new float[N*N];
        float* b = new float[N*N];
        float* c = new float[N*N];
        float* ad;
        float* bd;
        float* cd;
        const int size = N * N * sizeof(float);
        cudaMalloc((void**)&ad, size);
        cudaMalloc((void**)&bd, size);
        cudaMalloc((void**)&cd, size);
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                a[i+j*N] = 10 + i;
                b[i+j*N] = (float)j / N;
            }
        }
        cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice);
        cudaMemcpy(bd, b, size, cudaMemcpyHostToDevice);
        dim3 dimBlock(blockSize, blockSize);
        dim3 dimGrid(N/blockSize, N/blockSize);   // exact: N is a multiple of 16
        cudaEventRecord(start);
        add_matrix<<<dimGrid, dimBlock>>>(ad, bd, cd, N);
        cudaEventRecord(stop);
        cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
        cudaMemcpy(c, cd, size, cudaMemcpyDeviceToHost);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                printf("%0.2f ", c[i+j*N]);
            }
            printf("\n");
        }
        printf(
            "GPU execution took %f milliseconds for N=%d, Blocks=%dx%d, Grid=%dx%d.\n",
            milliseconds, N, dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y);
        // Try to clean up everything on the GPU, and do it twice!
        clear_my_bitch_out<<<dimGrid, dimBlock>>>(cd, N);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        delete[] a;   // these were leaked every iteration in the original
        delete[] b;
        delete[] c;
        cudaDeviceReset();   // releases ad/bd/cd
    }
}
|
2,011
|
/*
* Module : Twine
* Copyright : [2016..2017] Trevor L. McDonell
* License : BSD3
*
* Maintainer : Trevor L. McDonell <tmcdonell@cse.unsw.edu.au>
* Stability : experimental
* Portability : non-portable (GHC extensions)
*
* Convert between Accelerate's Struct-of-Array representation of complex
* numbers and the Array-of-Struct representation necessary for CUBLAS.
*
*/
#include <cuda.h>
#include <cuComplex.h>
#ifdef __cplusplus
extern "C" {
#endif
// Packs separate real/imag arrays (Accelerate's SoA layout) into
// cuDoubleComplex values (the AoS layout CUBLAS expects).
// Grid-stride loop: any launch configuration covers all `size` elements.
__global__ void interleave
(
    cuDoubleComplex * __restrict__ cplx,
    const double * __restrict__ real,
    const double * __restrict__ imag,
    const int size
)
{
    const int stride = blockDim.x * gridDim.x;
    for (int ix = blockDim.x * blockIdx.x + threadIdx.x; ix < size; ix += stride) {
        cplx[ix] = make_cuDoubleComplex(real[ix], imag[ix]);
    }
}
// Unpacks cuDoubleComplex values (AoS) back into separate real/imag arrays
// (SoA). Grid-stride loop; any launch configuration covers all elements.
__global__ void deinterleave
(
    double * __restrict__ real,
    double * __restrict__ imag,
    const cuDoubleComplex * __restrict__ cplx,
    const int size
)
{
    const int stride = blockDim.x * gridDim.x;
    for (int ix = blockDim.x * blockIdx.x + threadIdx.x; ix < size; ix += stride) {
        const cuDoubleComplex value = cplx[ix];
        real[ix] = cuCreal(value);
        imag[ix] = cuCimag(value);
    }
}
#ifdef __cplusplus
}
#endif
|
2,012
|
#include <iostream>
#include "../include/gdeque.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
// Single-thread kernel exercising gpu_stl::deque. Writes a trace of
// (empty, size[, front, back]) values into `output` for host inspection;
// 10086 is written as a marker between the three phases.
__global__ void test(float *output){
    gpu_stl::deque<float> deque;
    int idx = 0;
    // phase 1: freshly constructed deque (expected: empty, size 0)
    output[idx++] = deque.empty();
    output[idx++] = deque.size();
    output[idx++] = 10086;
    // phase 2: odd i pushed to the front, even i to the back
    for(int i=1;i<=15;++i){
        if(i%2) deque.push_front(i*1.7);
        else deque.push_back(i*1.7);
        output[idx++] = deque.empty();
        output[idx++] = deque.size();
    }
    output[idx++] = 10086;
    // phase 3: drain from the front, logging state before each pop
    while(!deque.empty()){
        output[idx++] = deque.empty();
        output[idx++] = deque.size();
        output[idx++] = deque.front();
        output[idx++] = deque.back();
        deque.pop_front();
    }
}
int main(){
    // 120 device floats, zero-initialised; large enough for the trace the
    // test kernel writes.
    def_dvec(float) dev_out(120, 0);
    test<<<1, 1>>>(to_ptr(dev_out));
    // Iterating a thrust::device_vector from the host copies each element
    // back (and synchronises with the kernel).
    for(auto k:dev_out) cout<<k<<' ';
    cout<<endl;
    return 0;
}
|
2,013
|
#include "includes.h"
// No-op kernel: body is intentionally empty.
__global__ void doNothing() {}
|
2,014
|
#include <stdio.h>
#include <cuda.h>
// size of array
#define N 4096
//vector addition kernel
// Element-wise c = a + b over the N-element vectors, one thread per element.
// The guard protects against launches with more than N total threads.
__global__ void vectorAddKernel(int *a, int *b, int *c)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    c[gid] = a[gid] + b[gid];
}
// Adds two N-element vectors on the GPU, times the kernel with events, and
// prints every result.
// Fixes vs. original: cudaFree() was called on the HOST stack arrays
// (a_h/b_h/c_h) instead of the device pointers - invalid frees plus a leak
// of a_d/b_d/c_d; the H2D copy of the uninitialized output array c_h was
// removed (c_d is write-only for the kernel).
int main(void)
{
    // NOTE(review): hard-coded to GPU index 3 - confirm this is intentional
    // for the target machine (fails where fewer devices exist).
    cudaSetDevice(3);
    // grid and block sizes: 8 * 512 threads == N elements exactly
    dim3 grid(8,1,1);
    dim3 block(512,1,1);
    // host arrays
    int a_h[N];
    int b_h[N];
    int c_h[N];
    // device memory pointers
    int *a_d;
    int *b_d;
    int *c_d;
    // load arrays with some numbers
    for(int i=0; i<N; i++)
    {
        a_h[i] = i;
        b_h[i] = i*1;
    }
    //allocate device memory
    cudaMalloc((void**)&a_d, N*sizeof(int));
    cudaMalloc((void**)&b_d, N*sizeof(int));
    cudaMalloc((void**)&c_d, N*sizeof(int));
    //copy the input arrays to the device
    cudaMemcpy(a_d, a_h, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, N*sizeof(int), cudaMemcpyHostToDevice);
    //CUDA events to measure time
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    //start timer
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //launch kernel
    vectorAddKernel<<<grid,block>>>(a_d, b_d, c_d);
    //stop timer
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    //copy the result to host (blocking copy also syncs with the kernel)
    cudaMemcpy(c_h, c_d, N * sizeof(int), cudaMemcpyDeviceToHost);
    //print the results
    for(int i =0; i < N; i++)
    {
        printf("%i+%i = %i\n", a_h[i], b_h[i], c_h[i]);
    }
    //print out execution time
    printf("Time to calculate results: %f ms.\n", elapsedTime);
    //clean up: free the DEVICE allocations
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
2,015
|
#include "includes.h"
// No-op kernel: body is intentionally empty.
__global__ void kernel ( void ) {
}
|
2,016
|
#include<iostream>
#define SECTION_SIZE 32
using namespace std;
// Per-block (section) inclusive prefix sum of X into Y; the total of each
// section (its last shared-memory element) is written to S[blockIdx.x].
// Requires blockDim.x == SECTION_SIZE.
// Fixes vs. original: (1) the load past InputSize was unguarded, scanning
// garbage into short tail sections; (2) the loop bound `stride <=
// threadIdx.x` was thread-dependent, so threads left the loop at different
// iterations and __syncthreads() executed in divergent control flow
// (undefined behavior), and XY[] was read and written in the same step
// without a barrier (race). The rewrite uses a uniform loop bound with a
// double-buffered read (classic Kogge-Stone step).
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
    __shared__ float XY[SECTION_SIZE];
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // pad out-of-range lanes with 0 so they do not pollute the scan
    XY[threadIdx.x] = (i < InputSize) ? X[i] : 0.0f;
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        __syncthreads();                       // everyone finished the previous step
        float addend = (threadIdx.x >= stride) ? XY[threadIdx.x - stride] : 0.0f;
        __syncthreads();                       // everyone has read before anyone writes
        XY[threadIdx.x] += addend;
    }
    __syncthreads();
    if(i<InputSize) Y[i] = XY[threadIdx.x];
    if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // section total
}
// Adds S[blockIdx.x] (the scanned total of the preceding sections) to every
// element of section blockIdx.x + 1 of Y; section 0 needs no correction.
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
    int i = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
    if (i >= InputSize)
        return;
    Y[i] += S[blockIdx.x];
}
// Scans n ones on the GPU (per-section scan, scan of section totals, then
// per-section correction) and prints the result, which should be 1..n.
// Fixes vs. original: (1) `ceil(n/BLOCK_SIZE)` truncated in INTEGER
// arithmetic before ceil() ran, undersizing the grid whenever n is not a
// multiple of BLOCK_SIZE; (2) the second scan was launched over `n`
// elements with GRID_SIZE blocks instead of scanning the GRID_SIZE section
// totals in one block; (3) S and S1 were leaked.
int main(){
    int n;
    cin >> n;
    int size = n*sizeof(float);
    float *X_h = (float *) malloc( size ); // allocate CPU memory
    float *Y_h = (float *) malloc( size );
    for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
    float *X, *Y, *S, *S1;
    cudaMalloc(&X, size); // allocate GPU memory
    cudaMalloc(&Y, size);
    cudaMemcpy(X, X_h, size, cudaMemcpyHostToDevice);
    int BLOCK_SIZE = SECTION_SIZE;
    int GRID_SIZE = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;  // integer ceil-division
    int size_s = GRID_SIZE*sizeof(float);
    cudaMalloc(&S, size_s);
    cudaMalloc(&S1, size_s);
    // per-section inclusive scan; S receives each section's total
    Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
    // scan the GRID_SIZE section totals in-place with a single block
    // (assumes GRID_SIZE <= SECTION_SIZE)
    Prefix_sum_oneblock_kernel<<<1,BLOCK_SIZE>>> (S, S, GRID_SIZE, S1);
    Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
    cudaMemcpy(Y_h, Y, size, cudaMemcpyDeviceToHost);
    for(int i=0; i<n; i++){
        cout<<i<<" "<<Y_h[i]<<endl;
    }
    // free device (S/S1 were leaked before) and host memory
    cudaFree(X); cudaFree(Y); cudaFree(S); cudaFree(S1);
    free(X_h); free(Y_h);
}
|
2,017
|
#include <stdio.h>
#include <cuda.h>
const int MAX_THREAD_NUMBER = 1000000;
__device__ long long counterArray[MAX_THREAD_NUMBER] = {0};
// Per-thread profiling hook: increments this thread's slot in the global
// counterArray (block id linearized over the 3D grid, times block volume,
// plus the linearized 3D thread index). `bambooIndex` is currently unused
// but kept for ABI compatibility with the instrumented callers.
extern "C" __device__ void bambooProfile(long bambooIndex)
{
    int blockId = blockIdx.x
                  + blockIdx.y * gridDim.x
                  + gridDim.x * gridDim.y * blockIdx.z;
    long long index = blockId * (blockDim.x * blockDim.y * blockDim.z)
                      + (threadIdx.z * (blockDim.x * blockDim.y))
                      + (threadIdx.y * blockDim.x)
                      + threadIdx.x;
    // Launches with more than MAX_THREAD_NUMBER threads would write past the
    // end of counterArray; drop those samples instead of corrupting memory.
    if (index < MAX_THREAD_NUMBER)
        counterArray[index]++;
}
|
2,018
|
/*
Implementing parallell plus reduce in CUDA.
*/
#include <stdio.h>
#define NUM_THREADS 16
#define NUM_BLOCKS 8
// Sums array[0..size-1] sequentially; used as the reference result for the
// GPU reduction.
unsigned int serial_reduce(unsigned int* array, const unsigned int size){
    unsigned int total = 0;
    for(unsigned int idx = 0; idx < size; ++idx){
        total += array[idx];
    }
    return total;
}
// Block-level tree reduction in dynamic shared memory; each block writes its
// partial sum to d_out[blockIdx.x]. Requires blockDim.x to be a power of two
// and blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
__global__ void reduce(unsigned int* d_in, unsigned int* d_out){
    unsigned int local_idx = threadIdx.x;
    unsigned int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
    const unsigned int num_threads = blockDim.x;
    extern __shared__ unsigned int shared_array [];
    shared_array[local_idx] = d_in[global_idx];
    __syncthreads();
    for(unsigned int i = 1; i < num_threads; i *= 2){
        // The original condition `local_idx % 2 * i == 0` parses as
        // `(local_idx % 2) * i == 0` (% and * share precedence, left to
        // right), so EVERY even thread fired at every stride - wrong sums
        // and out-of-bounds shared-memory reads. The stride test needs
        // explicit parentheses:
        if(local_idx % (2 * i) == 0){
            shared_array[local_idx] += shared_array[local_idx + i];
        }
        __syncthreads();
    }
    // only one thread needs to publish the block result (the original had
    // every thread store the same value - benign but wasteful)
    if (local_idx == 0)
        d_out[blockIdx.x] = shared_array[0];
}
int main(){
    // input 0..127 split over 8 blocks of 16 threads
    const unsigned int NUM_ELEMENTS = NUM_THREADS * NUM_BLOCKS;
    const unsigned int IN_BYTES = NUM_ELEMENTS * sizeof(int);
    const unsigned int OUT_BYTES = NUM_BLOCKS * sizeof(int);
    unsigned int h_in [NUM_ELEMENTS];
    for(int i = 0; i < NUM_ELEMENTS; i++){
        h_in[i] = i;
    }
    unsigned int h_out [NUM_BLOCKS];
    unsigned int* d_in;
    unsigned int* d_out;
    cudaMalloc((void **) &d_in, IN_BYTES);
    cudaMalloc((void **) &d_out, OUT_BYTES);
    cudaMemcpy(d_in, h_in, IN_BYTES, cudaMemcpyHostToDevice);
    // third launch argument = dynamic shared-memory bytes for shared_array
    reduce<<<NUM_BLOCKS, NUM_THREADS, NUM_THREADS * sizeof(int)>>>(d_in, d_out);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(h_out, d_out, OUT_BYTES, cudaMemcpyDeviceToHost);
    printf("True: %d \n", serial_reduce(h_in, NUM_ELEMENTS));
    // Doing a final serial reduce since output is of size NUM_BLOCKS
    printf("Output: %d", serial_reduce(h_out, NUM_BLOCKS));
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
2,019
|
// Corresponding header file: /include/mirror_ops.h
#include <cuda_runtime.h>
#include <stdio.h>
/* Mirror operations */
/* Mirrors an image: vertical == true flips left/right (columns reversed),
   vertical == false flips top/bottom (rows reversed). One thread per pixel
   on a 2D grid; out-of-image threads exit early.
   Fixes vs. original: the mirrored index used (numRows - thread_y) /
   (numCols - thread_x), which maps row/col 0 to index numRows/numCols -
   one past the last row/column - an out-of-bounds write. The correct
   mirror is (dim - 1 - index). */
__global__
void mirror(const uchar4* const inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if ( col >= numCols || row >= numRows )
    {
        return;
    }
    int mirroredRow;
    int mirroredCol;
    if(!vertical)
    {
        // horizontal mirror: reverse the row order
        mirroredRow = numRows - 1 - row;
        mirroredCol = col;
    }
    else
    {
        // vertical mirror: reverse the column order
        mirroredRow = row;
        mirroredCol = numCols - 1 - col;
    }
    int myId = row * numCols + col;                       // source pixel
    int myId_new = mirroredRow * numCols + mirroredCol;   // destination pixel
    outputChannel[myId_new] = inputChannel[myId];
}
// Mirrors the device image and returns a freshly malloc'd HOST copy of the
// result (caller owns and must free() it).
// NOTE: this function frees BOTH the output buffer and the caller's
// d_inputImageRGBA - the input pointer is invalid after this call.
uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical)
{
    //Set reasonable block size (i.e., number of threads per block)
    const dim3 blockSize(4,4,1);
    //Calculate Grid SIze
    // +1 block per dimension covers the remainder when the image size is not
    // a multiple of the block size (the kernel bounds-checks).
    int a=numCols/blockSize.x, b=numRows/blockSize.y;
    const dim3 gridSize(a+1,b+1,1);
    const size_t numPixels = numRows * numCols;
    uchar4 *d_outputImageRGBA;
    cudaMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels);
    //Call mirror kernel.
    mirror<<<gridSize, blockSize>>>(d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical);
    cudaDeviceSynchronize();
    //Initialize memory on host for output uchar4*
    uchar4* h_out;
    h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels);
    //copy output from device to host
    cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
    //cleanup memory on device
    cudaFree(d_inputImageRGBA);
    cudaFree(d_outputImageRGBA);
    //return h_out
    return h_out;
}
|
2,020
|
// Element-wise out = in1 + in2 over len elements, one thread per element,
// guarded for the ragged tail.
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
// Adds two 2^28-element vectors by pipelining 16K-element segments through
// four CUDA streams (H2D copy, kernel, D2H copy per segment).
// Fixes vs. original: the cleanup loop ran k < 3, leaking stream 3's device
// buffers, and the streams themselves were never destroyed. The mid-loop
// host synchronizations of streams 0-2 were both incomplete and unnecessary
// (each D2H copy is issued on the same stream as its kernel, so stream
// ordering already guarantees correctness) and are removed.
int main(int argc, char **argv) {
    int inputLength = 1<<28;
    float *hostInput1;
    float *hostInput2;
    float *hostOutput;
    // pinned host memory so cudaMemcpyAsync can actually overlap with compute
    cudaMallocHost(&hostInput1, inputLength*sizeof(float));
    cudaMallocHost(&hostInput2, inputLength*sizeof(float));
    cudaMallocHost(&hostOutput, inputLength*sizeof(float));
    for(int i=0;i<inputLength;i++) {
        hostInput1[i] = i%1024;
        hostInput2[i] = i%1024;
    }
    cudaStream_t stream[4];
    float *d_A[4], *d_B[4], *d_C[4];
    int i, k, Seglen = 16384;
    int Gridlen = (Seglen - 1) / 256 + 1;
    for (i = 0; i < 4; i++) {
        cudaStreamCreateWithFlags(&stream[i],cudaStreamNonBlocking);
        cudaMalloc((void **)&d_A[i], Seglen * sizeof(float));
        cudaMalloc((void **)&d_B[i], Seglen * sizeof(float));
        cudaMalloc((void **)&d_C[i], Seglen * sizeof(float));
    }
    for (i = 0; i < inputLength; i += Seglen * 4) {
        for (k = 0; k < 4; k++) {
            cudaMemcpyAsync(d_A[k], hostInput1 + i + k * Seglen,
                            Seglen * sizeof(float), cudaMemcpyHostToDevice,
                            stream[k]);
            cudaMemcpyAsync(d_B[k], hostInput2 + i + k * Seglen,
                            Seglen * sizeof(float), cudaMemcpyHostToDevice,
                            stream[k]);
            vecAdd<<<Gridlen, 256, 0, stream[k]>>>(d_A[k], d_B[k], d_C[k],
                                                   Seglen);
        }
        // D2H copies are stream-ordered after their kernels; no host sync needed
        for (k = 0; k < 4; k++) {
            cudaMemcpyAsync(hostOutput + i + k * Seglen, d_C[k],
                            Seglen * sizeof(float), cudaMemcpyDeviceToHost,
                            stream[k]);
        }
    }
    cudaDeviceSynchronize();
    cudaFreeHost(hostInput1);
    cudaFreeHost(hostInput2);
    cudaFreeHost(hostOutput);
    // free ALL four streams' buffers and destroy the streams
    for (k = 0; k < 4; k++) {
        cudaFree(d_A[k]);
        cudaFree(d_B[k]);
        cudaFree(d_C[k]);
        cudaStreamDestroy(stream[k]);
    }
    return 0;
}
|
2,021
|
//
// Created by heidies on 7/8/18.
//
#include <cuda_runtime.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess){ \
printf("Error: %s %d, ", __FILE__, __LINE__); \
printf("code: %d, reason %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// Sequentially reduces data[0..size-1] by pairwise folding (destroys the
// contents of `data`). Returns the sum; 0 for size == 0.
// Fixes vs. original: whenever an intermediate size was odd, the orphan
// last element was silently dropped; size == 0 recursed forever.
int recursiveReduce(int *data, const int size){
    if(size <= 0) return 0;        // guard: original recursed infinitely on 0
    if(size == 1) return data[0];
    int const stride = size / 2;
    for(int i = 0; i < stride; i++){
        data[i] += data[i + stride];
    }
    if(size % 2)                    // odd size: data[size-1] has no partner
        data[0] += data[size - 1];
    return recursiveReduce(data, stride);
}
// Neighbored-pairs in-place reduction (identical code to reduceNeighbored
// below); launched once from main() purely to warm the device up before the
// timed runs. Each block folds its tile of g_idata and writes the partial
// sum to g_odata[blockIdx.x]. Destroys g_idata.
__global__ void warmingUp(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // this block's tile of the input
    int *idata = g_idata + blockIdx.x * blockDim.x;
    if(idx >= n) return ;
    for(int stride = 1; stride < blockDim.x; stride <<= 1){
        // only threads at multiples of 2*stride fold (divergent within warps)
        if(tid % (2 * stride) == 0)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Baseline neighbored-pairs reduction: at stride s, threads whose tid is a
// multiple of 2s add their right neighbor. Partial sum per block goes to
// g_odata[blockIdx.x]. Destroys g_idata. The modulo test makes warps
// heavily divergent - the later variants exist to fix that.
__global__ void reduceNeighbored(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // this block's tile of the input
    int *idata = g_idata + blockIdx.x * blockDim.x;
    if(idx >= n) return ;
    for(int stride = 1; stride < blockDim.x; stride <<= 1){
        if(tid % (2 * stride) == 0)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Neighbored-pairs reduction with remapped indices: thread tid handles the
// pair whose LEFT element is index = 2*stride*tid, so active threads stay
// contiguous (far less warp divergence than the modulo test above).
// Fixes vs. original: the guard tested `index < blockDim.x / 2` and then
// added at idata[tid] instead of idata[index] - skipping half the pairs and
// folding the wrong elements, i.e. wrong sums.
__global__ void reduceNeighboredLess(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int *idata = g_idata + blockIdx.x * blockDim.x;
    if(idx >= n) return;
    for(int stride = 1; stride < blockDim.x; stride <<= 1){
        int index = 2 * stride * tid;   // left element of this thread's pair
        if(index < blockDim.x)
            idata[index] += idata[index + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Interleaved-pairs reduction: the stride halves each step and the active
// threads are always the contiguous front half, giving coalesced accesses
// and minimal divergence. Partial sum per block in g_odata[blockIdx.x];
// destroys g_idata. Assumes blockDim.x is a power of two.
__global__ void reduceInterleaved(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // this block's tile of the input
    int *idata = g_idata + blockIdx.x * blockDim.x;
    if(idx >= n) return;
    for(int stride = blockDim.x / 2; stride > 0; stride >>= 1){
        if(tid < stride)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Reduction with a 2x pre-fold: each block first adds its neighboring tile
// into its own, then does an interleaved tree reduction. Expects the caller
// to launch half as many blocks (gridDim.x = n / (2 * blockDim.x)).
// NOTE(review): the pre-fold guard requires BOTH tiles fully in range; tail
// elements are dropped when n is not a multiple of 2*blockDim.x - confirm
// callers only use exact multiples.
__global__ void reduceUnrolling2(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (2 * blockIdx.x) * blockDim.x + threadIdx.x;
    int *idata = g_idata + (2 * blockIdx.x) * blockDim.x;
    // fold the second tile into the first
    if(idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
    __syncthreads();
    // interleaved tree reduction of the folded tile
    for(int stride = blockDim.x / 2; stride > 0; stride >>= 1){
        if(tid < stride)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Reduction with a 4x pre-fold: each block folds 4 consecutive tiles into
// the first, then tree-reduces. Expects gridDim.x = n / (4 * blockDim.x).
// NOTE(review): the guard requires all 4 tiles in range, so a ragged tail
// is dropped - confirm callers use exact multiples.
__global__ void reduceUnrolling4(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (4 * blockIdx.x) * blockDim.x + threadIdx.x;
    int *idata = g_idata + (4 * blockIdx.x) * blockDim.x;
    // fold tiles 1..3 into tile 0
    if(idx + 3 * blockDim.x < n){
        g_idata[idx] += g_idata[idx + blockDim.x];
        g_idata[idx] += g_idata[idx + 2 * blockDim.x];
        g_idata[idx] += g_idata[idx + 3 * blockDim.x];
    }
    __syncthreads();
    // interleaved tree reduction of the folded tile
    for(int stride = blockDim.x / 2; stride > 0; stride >>= 1){
        if(tid < stride)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Reduction with an 8x pre-fold: each block folds 8 consecutive tiles into
// the first, then tree-reduces. Expects gridDim.x = n / (8 * blockDim.x).
// NOTE(review): the guard requires all 8 tiles in range, so a ragged tail
// is dropped - confirm callers use exact multiples.
__global__ void reduceUnrolling8(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (8 * blockIdx.x) * blockDim.x + threadIdx.x;
    int *idata = g_idata + (8 * blockIdx.x) * blockDim.x;
    // fold tiles 1..7 into tile 0
    if(idx + 7 * blockDim.x < n){
        g_idata[idx] += g_idata[idx + blockDim.x];
        g_idata[idx] += g_idata[idx + 2 * blockDim.x];
        g_idata[idx] += g_idata[idx + 3 * blockDim.x];
        g_idata[idx] += g_idata[idx + 4 * blockDim.x];
        g_idata[idx] += g_idata[idx + 5 * blockDim.x];
        g_idata[idx] += g_idata[idx + 6 * blockDim.x];
        g_idata[idx] += g_idata[idx + 7 * blockDim.x];
    }
    __syncthreads();
    // interleaved tree reduction of the folded tile
    for(int stride = blockDim.x / 2; stride > 0; stride >>= 1){
        if(tid < stride)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// 8x pre-fold + tree reduction down to 64 elements, then a volatile
// warp-synchronous tail for the last 6 steps (no __syncthreads needed
// within one warp). Expects gridDim.x = n / (8 * blockDim.x) and
// blockDim.x >= 64.
// NOTE(review): the volatile tail relies on implicit warp-lockstep
// execution, a pre-Volta assumption; on Volta+ this idiom needs
// __syncwarp() between steps - confirm the target architecture.
__global__ void reduceUnrollWarps8(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (8 * blockIdx.x) * blockDim.x + threadIdx.x;
    int *idata = g_idata + (8 * blockIdx.x) * blockDim.x;
    // fold tiles 1..7 into tile 0
    if(idx + 7 * blockDim.x < n){
        g_idata[idx] += g_idata[idx + blockDim.x];
        g_idata[idx] += g_idata[idx + 2 * blockDim.x];
        g_idata[idx] += g_idata[idx + 3 * blockDim.x];
        g_idata[idx] += g_idata[idx + 4 * blockDim.x];
        g_idata[idx] += g_idata[idx + 5 * blockDim.x];
        g_idata[idx] += g_idata[idx + 6 * blockDim.x];
        g_idata[idx] += g_idata[idx + 7 * blockDim.x];
    }
    __syncthreads();
    // tree reduction stops at 64 remaining elements
    for(int stride = blockDim.x / 2; stride > 32; stride >>= 1){
        if(tid < stride)
            idata[tid] += idata[tid + stride];
        __syncthreads();
    }
    // warp-synchronous tail: volatile forces each store to memory
    if(tid < 32){
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32];
        vmem[tid] += vmem[tid + 16];
        vmem[tid] += vmem[tid + 8];
        vmem[tid] += vmem[tid + 4];
        vmem[tid] += vmem[tid + 2];
        vmem[tid] += vmem[tid + 1];
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// 8x pre-fold, then a COMPLETELY unrolled tree reduction (512/256/128/64
// steps guarded by blockDim.x), finishing with the volatile warp tail.
// Expects gridDim.x = n / (8 * blockDim.x) and blockDim.x >= 64.
// Fixes vs. original: the 64-element step wrote idata[idx] (the GLOBAL
// thread index) instead of idata[tid] (block-local), corrupting other
// blocks' tiles and breaking this block's partial sum.
__global__ void reduceCompleteUnrollWarps8(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (8 * blockIdx.x) * blockDim.x + threadIdx.x;
    int *idata = g_idata + (8 * blockIdx.x) * blockDim.x;
    // fold tiles 1..7 into tile 0
    if(idx + 7 * blockDim.x < n){
        g_idata[idx] += g_idata[idx + blockDim.x];
        g_idata[idx] += g_idata[idx + 2 * blockDim.x];
        g_idata[idx] += g_idata[idx + 3 * blockDim.x];
        g_idata[idx] += g_idata[idx + 4 * blockDim.x];
        g_idata[idx] += g_idata[idx + 5 * blockDim.x];
        g_idata[idx] += g_idata[idx + 6 * blockDim.x];
        g_idata[idx] += g_idata[idx + 7 * blockDim.x];
    }
    __syncthreads();
    // fully unrolled tree reduction
    if(blockDim.x >= 1024 && tid < 512)
        idata[tid] += idata[tid + 512];
    __syncthreads();
    if(blockDim.x >= 512 && tid < 256)
        idata[tid] += idata[tid + 256];
    __syncthreads();
    if(blockDim.x >= 256 && tid < 128)
        idata[tid] += idata[tid + 128];
    __syncthreads();
    if(blockDim.x >= 128 && tid < 64)
        idata[tid] += idata[tid + 64];   // was idata[idx] - out-of-tile write
    __syncthreads();
    // warp-synchronous tail
    if(tid < 32){
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32];
        vmem[tid] += vmem[tid + 16];
        vmem[tid] += vmem[tid + 8];
        vmem[tid] += vmem[tid + 4];
        vmem[tid] += vmem[tid + 2];
        vmem[tid] += vmem[tid + 1];
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Template variant of reduceCompleteUnrollWarps8: the block size is a
// compile-time constant, so the unrolled-step guards are resolved at
// compile time and dead branches are removed entirely.
// Fixes vs. original: same defect as the non-template version - the
// 64-element step wrote idata[idx] (global index) instead of idata[tid].
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(int *g_idata, int *g_odata, unsigned int n){
    unsigned int tid = threadIdx.x;
    unsigned int idx = (8 * blockIdx.x) * blockDim.x + threadIdx.x;
    int *idata = g_idata + (8 * blockIdx.x) * blockDim.x;
    // fold tiles 1..7 into tile 0
    if(idx + 7 * blockDim.x < n){
        g_idata[idx] += g_idata[idx + blockDim.x];
        g_idata[idx] += g_idata[idx + 2 * blockDim.x];
        g_idata[idx] += g_idata[idx + 3 * blockDim.x];
        g_idata[idx] += g_idata[idx + 4 * blockDim.x];
        g_idata[idx] += g_idata[idx + 5 * blockDim.x];
        g_idata[idx] += g_idata[idx + 6 * blockDim.x];
        g_idata[idx] += g_idata[idx + 7 * blockDim.x];
    }
    __syncthreads();
    // compile-time-resolved unrolled tree reduction
    if(iBlockSize >= 1024 && tid < 512)
        idata[tid] += idata[tid + 512];
    __syncthreads();
    if(iBlockSize >= 512 && tid < 256)
        idata[tid] += idata[tid + 256];
    __syncthreads();
    if(iBlockSize >= 256 && tid < 128)
        idata[tid] += idata[tid + 128];
    __syncthreads();
    if(iBlockSize >= 128 && tid < 64)
        idata[tid] += idata[tid + 64];   // was idata[idx] - out-of-tile write
    __syncthreads();
    // warp-synchronous tail
    if(tid < 32){
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32];
        vmem[tid] += vmem[tid + 16];
        vmem[tid] += vmem[tid + 8];
        vmem[tid] += vmem[tid + 4];
        vmem[tid] += vmem[tid + 2];
        vmem[tid] += vmem[tid + 1];
    }
    if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Wall-clock timestamp. Despite the name, this returns MILLISECONDS
// (seconds * 1e3), matching the "ms" labels printed in main().
double seconds(){
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6) * 1e+3;
}
int main(int argc, char **argv){
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    cout << "Starting reduction at " << argv[0] << " ";
    cout << "device " << dev << ": " << deviceProp.name << " ";
    cudaSetDevice(dev);
    bool bResult = false;
    // 2^24 elements: a power of two, so every kernel's pairwise folding is exact
    int size = 1 << 24;
    cout << " with array size " << size << " ";
    // block size defaults to 512, overridable via argv[1]
    int blocksize = 512;
    if(argc > 1)
        blocksize = atoi(argv[1]);
    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);
    cout << "grid " << grid.x << " block " << block.x << endl;
    size_t nBytes = size * sizeof(int);
    int *h_idata = (int *)malloc(nBytes);
    int *h_odata = (int *)malloc(grid.x * sizeof(int));
    int *tmp = (int *)malloc(nBytes);
    // small random values (0..255) keep the int sum far from overflow
    for(int i = 0; i < size; ++ i){
        h_idata[i] = (int)(rand() & 0xFF);
    }
    // the CPU reference reduction destroys its input, so work on a copy
    memcpy(tmp, h_idata, nBytes);
    double iStart, iElaps;
    int gpu_sum = 0;
    int *d_idata = NULL;
    cudaMalloc((void **)&d_idata, nBytes);
    int *d_odata = NULL;
    cudaMalloc((void **)&d_odata, grid.x * sizeof(int));
    // CPU reference result and timing
    iStart = seconds();
    int cpu_sum = recursiveReduce(tmp, size);
    iElaps = seconds() - iStart;
    cout << "cpu reduce elapsed " << iElaps << " ms cpu_sum: " << cpu_sum << endl;
    // each kernel destroys d_idata, so it is re-uploaded before every run
    cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    iStart = seconds();
    warmingUp<<<grid, block>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();
    iElaps = seconds() - iStart;
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    // final host-side sum of the per-block partials
    gpu_sum = 0;
    for(int i = 0; i < grid.x; ++ i){
        gpu_sum += h_odata[i];
    }
    cout << "gpu warmingUp elapsed " << iElaps << " ms gpu_sum: " << gpu_sum << " <<<grid " << grid.x << " block " << block.x << ">>>" << endl;
    cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    iStart = seconds();
    reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();
    iElaps = seconds() - iStart;
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    gpu_sum = 0;
    for(int i = 0; i < grid.x; ++ i){
        gpu_sum += h_odata[i];
    }
    cout << "gpu Neighbored elapsed " << iElaps << " ms gpu_sum: " << gpu_sum << " <<<grid " << grid.x << " block " << block.x << ">>>" << endl;
    cudaMemcpy(d_idata, h_idata, nBytes, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    iStart = seconds();
    // 8x-unrolled kernel processes 8 tiles per block, hence grid.x / 8
    reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
    cudaDeviceSynchronize();
    iElaps = seconds() - iStart;
    cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
    gpu_sum = 0;
    // only grid.x / 8 partials were produced by this launch
    for(int i = 0; i < grid.x / 8; ++ i){
        gpu_sum += h_odata[i];
    }
    cout << "gpu nroll elapsed " << iElaps << " ms gpu_sum: " << gpu_sum << " <<<grid " << grid.x / 8 << " block " << block.x << ">>>" << endl;
    free(h_idata);
    free(h_odata);
    free(tmp);
    cudaFree(d_idata);
    cudaFree(d_odata);
    cudaDeviceReset();
    // only the LAST kernel's sum is checked; a message prints on mismatch
    bResult = (gpu_sum == cpu_sum);
    if(!bResult) cout << "Test failed!" << endl;
    return EXIT_SUCCESS;
}
|
2,022
|
#include <stdio.h>
#define CUCHK(call) { \
cudaError_t err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
fflush(stderr); \
exit(EXIT_FAILURE); \
} }
__global__ void vec_add(float *a, float *b, int n)
{
    // Element-wise in-place addition: a[i] += b[i] for every i < n.
    // Grid-stride loop, so any launch configuration covers the whole array.
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        a[idx] += b[idx];
    }
}
int main(int argc, char *argv[])
{
    // Allocate two 64M-element vectors in unified (managed) memory.
    int n = 64*1024*1024;
    float *a, *b;
    CUCHK(cudaMallocManaged(&a, n*sizeof(float)));
    CUCHK(cudaMallocManaged(&b, n*sizeof(float)));
    // Initialize on the host (managed memory is host-accessible here).
    for (int i = 0; i < n; i++) {
        a[i] = 1;
        b[i] = 2;
    }
    // Alternate GPU adds and CPU increments to exercise managed-memory
    // migration in both directions.
    for (int iter = 0; iter < 2; iter++) {
        vec_add<<<n/128, 128>>>(a, b, n);
        CUCHK(cudaGetLastError());      // catch launch-configuration errors (previously unchecked)
        CUCHK(cudaDeviceSynchronize()); // wait for the kernel; surfaces async errors
        for (int i = 0; i < n; i++) {
            a[i] += 1;
        }
    }
    CUCHK(cudaFree(a));
    CUCHK(cudaFree(b));
    return 0;
}
|
2,023
|
#include "includes.h"
__global__ void Dx_Forward_Kernel(float* output, const float* input, const int width, const int height, const int nChannels)
{
    // Forward difference along x: out(x,y,c) = in(x+1,y,c) - in(x,y,c).
    // The last column has no forward neighbour and is written as zero.
    const int px = threadIdx.x + blockIdx.x * blockDim.x;
    const int py = threadIdx.y + blockIdx.y * blockDim.y;
    if (px >= width || py >= height)
        return;
    const int pixel = py * width + px;
    const bool lastColumn = (px == width - 1);
    for (int c = 0; c < nChannels; c++)
    {
        output[pixel * nChannels + c] =
            lastColumn ? 0
                       : input[(pixel + 1) * nChannels + c] - input[pixel * nChannels + c];
    }
}
|
2,024
|
#include <cuda.h>
__global__ void deviceburst(float *x, float *initsums, int n, int k, float *bigmaxs, int *startend) {
    // Each thread scans its own partition of x for the window of length >= k
    // with the largest mean ("burst"). Per-thread results: bigmaxs[me] holds
    // the best mean, startend[2*me .. 2*me+1] the first/last index of that
    // window. initsums[me] must hold the sum of the thread's initial
    // k-element window starting at me * partition.
    int partition = (n - k + 1) / (blockDim.x * gridDim.x) + 1;
    int me = blockIdx.x * blockDim.x + threadIdx.x;
    int left = me * partition;
    int left_limit = left + partition;
    int length = k;
    float sum = initsums[me];
    float mean = sum / length;
    // Seed the per-thread best with the initial window.
    startend[me * 2] = left;
    startend[me * 2 + 1] = left + length - 1;
    bigmaxs[me] = mean;
    while (left + length < n && left < left_limit) {
        float next = x[left + length];
        if (next > mean) {
            // Taking `next` raises the mean: either slide the window right
            // (drop a smaller left element) or grow it by one.
            if (next > x[left]) {
                sum = sum + next - x[left];
                left += 1;
            } else {
                sum = sum + next;
                length += 1;
            }
        } else {
            // Shrink back to a k-length window past the current stretch.
            // NOTE(review): this loop subtracts x[left] repeatedly without
            // advancing the index (presumably x[left + i] was intended), and
            // x[left + length] is added before the window settles — verify
            // against a CPU reference before trusting the results.
            for (int i = 0; i <= length - k + 1; i++) {
                sum = sum - x[left];
            }
            left += length - k + 1;
            length = k;
            sum = sum + x[left + length];
        }
        mean = sum / length;
        // Record a new per-thread best when the mean improves.
        if (mean > bigmaxs[me]) {
            startend[me * 2] = left;
            startend[me * 2 + 1] = left + length - 1;
            bigmaxs[me] = mean;
        }
    }
}
float arraysum(float *x, int n, int start, int end) {
    // Sum of x[i] for i in [start, min(n, end)); empty ranges yield 0.
    float total = 0;
    const int stop = (end < n) ? end : n;
    for (int i = start; i < stop; i++) {
        total += x[i];
    }
    return total;
}
int arraymaxidx(float *x, int n) {
    // Index of the largest element of x[0..n); first occurrence wins ties.
    int best = 0;
    for (int i = 1; i < n; i++) {
        if (x[i] > x[best]) {
            best = i;
        }
    }
    return best;
}
void maxburst(float *x, int n, int k, int *startend, float *bigmax) {
    // Finds the length->=k subarray ("burst") of x with the largest mean.
    // Each GPU thread scans its own partition; the host then reduces the
    // per-thread maxima. Outputs: bigmax[0] = best mean, startend = {first, last}.
    int gridDimX = 128;
    int blockDimX = 256;
    int threads_count = gridDimX * blockDimX;
    // Copy the input series to the device.
    float *device_x;
    cudaMalloc((void **)&device_x, sizeof(float) * n);
    cudaMemcpy(device_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);
    // Per-thread outputs: best mean and its [start, end] indices.
    float *device_bigmaxs;
    cudaMalloc((void **)&device_bigmaxs, sizeof(float) * threads_count);
    int *device_startends;
    cudaMalloc((void **)&device_startends, sizeof(int) * threads_count * 2);
    // Precompute on the host the sum of each thread's initial k-length window.
    float *initsums = (float *)malloc(sizeof(float) * threads_count);
    int partition = (n - k + 1) / threads_count + 1;
    for (int i = 0; i < threads_count; i++) {
        // Thread i's window starts at i*partition and spans k elements.
        // (The original passed `k` as the end bound, so every thread whose
        // partition starts at or past index k got a zero initial sum.)
        initsums[i] = arraysum(x, n, i * partition, i * partition + k);
    }
    float *device_initsums;
    cudaMalloc((void **)&device_initsums, sizeof(float) * threads_count);
    cudaMemcpy(device_initsums, initsums, sizeof(float) * threads_count, cudaMemcpyHostToDevice);
    free(initsums);
    dim3 dimGrid(gridDimX, 1);
    dim3 dimBlock(blockDimX, 1, 1);
    deviceburst<<<dimGrid, dimBlock>>>(device_x, device_initsums, n, k, device_bigmaxs, device_startends);
    cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
    cudaFree(device_x);
    cudaFree(device_initsums);  // previously leaked
    // Bring per-thread results back and reduce on the host.
    float *bigmaxs = (float *)malloc(sizeof(float) * threads_count);
    cudaMemcpy(bigmaxs, device_bigmaxs, sizeof(float) * threads_count, cudaMemcpyDeviceToHost);
    cudaFree(device_bigmaxs);
    int *startends = (int *)malloc(sizeof(int) * threads_count * 2);
    cudaMemcpy(startends, device_startends, sizeof(int) * threads_count * 2, cudaMemcpyDeviceToHost);
    cudaFree(device_startends);
    int maxidx = arraymaxidx(bigmaxs, threads_count);
    bigmax[0] = bigmaxs[maxidx];
    startend[0] = startends[maxidx * 2];
    startend[1] = startends[maxidx * 2 + 1];
    // Release host scratch buffers (previously leaked).
    free(bigmaxs);
    free(startends);
}
// -------
// Testing
//
// CSIF
// clear && /usr/local/cuda-8.0/bin/nvcc -Wno-deprecated-gpu-targets -g -G Skip.cu && a.out
#include <stdio.h>
#include <sys/time.h>
int main() {
    // Benchmark maxburst on 50000 random floats in [0, 100).
    int n = 50000;
    int k = 20000;
    float *x = (float *)malloc(sizeof(float) * n);
    srand(0);  // fixed seed => reproducible input
    for (int i = 0; i < n; i++) {
        x[i] = (float)rand() / (float)(RAND_MAX / 100.0);
    }
    int startend[] = {0, 0};
    float bigmax = 0;
    // Wall-clock the whole maxburst call (alloc + copies + kernel + reduce).
    struct timeval start;
    gettimeofday(&start, NULL);
    maxburst(x, n, k, startend, &bigmax);
    struct timeval end;
    gettimeofday(&end, NULL);
    float duration = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
    printf("%f (from %d to %d) (%fms)\n", bigmax, startend[0], startend[1], duration);
    free(x);  // previously leaked
    return 0;
}
|
2,025
|
#include <cuda_runtime.h>
#include <cstdio>
__global__ void my_kernel() {
    // Every thread prints a greeting tagged with its index within the block.
    const int tid = threadIdx.x;
    printf("Hello CUDA %d.\n", tid);
}
int main() {
    // Launch a single block of 8 threads; each prints one greeting.
    my_kernel<<<1, 8>>>();
    // Block until the kernel finishes so its device-side printf output is flushed.
    cudaDeviceSynchronize();
}
|
2,026
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <device_launch_parameters.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <ctime>
#include <string>
#include <fstream>
#include <math.h>
using namespace std;
#define rawR 7
#define rawC 840
#define rawL (rawR*rawC)
#define LENGTH 840
#define BATCH 1
#define LENGTHPAD 1024
#define NRANK 2
static __global__ void cufftComplexScale(cufftComplex *idata, cufftComplex *odata, const int size, float scale)
{
    // Multiply each complex element by `scale` (used here to normalize an
    // inverse FFT, which cuFFT leaves unscaled). One thread per element.
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= size)
        return;
    odata[tid].x = idata[tid].x * scale;
    odata[tid].y = idata[tid].y * scale;
}
int main()
{
    // 2D C2C FFT of a rawR x rawC speed field, followed by an inverse FFT and
    // renormalization (cuFFT's inverse transform is unscaled), timed on the GPU.
    int n[NRANK] = {rawR, rawC};
    float speed2d[rawR][rawC];
    // Read the raw 2D speed data from disk; bail out if the file is missing
    // (the original printed an error but kept going with uninitialized data).
    ifstream rawData("../data/speedData.txt");
    if (!(rawData.is_open())){
        cout << "failed to read data." << endl;
        return 1;
    }
    for (int row=0; row<rawR; row++){
        for (int col=0; col<rawC; col++){
            rawData >> speed2d[row][col];
        }
    }
    rawData.close();
    // Pack the real samples into complex numbers (imaginary part = 0).
    cufftComplex *CompData2d=(cufftComplex*)malloc(rawC*rawR*sizeof(cufftComplex));
    for (int i=0; i<rawR; i++){
        for (int j=0; j<rawC; j++){
            CompData2d[i*rawC+j].x = speed2d[i][j];
            CompData2d[i*rawC+j].y = 0;
        }
    }
    cufftComplex *d_fftData; // device data pointer
    cudaMalloc((void**)&d_fftData,rawC*rawR*sizeof(cufftComplex));
    cudaMemcpy(d_fftData,CompData2d,rawC*rawR*sizeof(cufftComplex),cudaMemcpyHostToDevice);
    // CUDA events to time the GPU work.
    cudaEvent_t start1, stop1;
    cudaEventCreate(&start1);
    cudaEventCreate(&stop1);
    cufftHandle plan;
    cufftPlanMany(&plan, NRANK, n,
                  NULL, 1, 0,
                  NULL, 1, 0,
                  CUFFT_C2C, BATCH);
    // Forward transform (in place).
    cufftExecC2C(plan,(cufftComplex*)d_fftData,(cufftComplex*)d_fftData,CUFFT_FORWARD);
    cudaEventRecord(start1);
    // Inverse transform, then scale by 1/(rows*cols) to recover the input.
    cufftExecC2C(plan, (cufftComplex*)d_fftData, (cufftComplex*)d_fftData, CUFFT_INVERSE);
    dim3 dimBlock(1024);
    dim3 dimGrid(6); // 6 * 1024 >= rawR * rawC (5880) threads
    cufftComplexScale <<<dimGrid, dimBlock>>>((cufftComplex*)d_fftData,(cufftComplex*)d_fftData,rawC*rawR,1.0f / (rawC*rawR));
    cudaEventRecord(stop1);
    cudaEventSynchronize(stop1);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start1, stop1);
    cout << "GPU FFT time used: "<< milliseconds << " ms\n";
    cudaDeviceSynchronize();
    cudaMemcpy(CompData2d,d_fftData,rawC*rawR*sizeof(cufftComplex)*BATCH,cudaMemcpyDeviceToHost);
    // Store the (real, imaginary) result pairs as text.
    ofstream myFile;
    myFile.open("../data/2d_batch_inverse.txt");
    for (int i=0; i<rawR; i++){
        for (int j=0; j<rawC; j++){
            myFile << CompData2d[i*rawC+j].x <<','<< CompData2d[i*rawC+j].y << endl;
        }
    }
    myFile.close(); // previously never closed explicitly
    // Release timing events, the plan, and all buffers.
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    cufftDestroy(plan);
    free(CompData2d);
    cudaFree(d_fftData);
    return 0;
}
|
2,027
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define TW 16
__global__ void matrix_sum(int *C, int *A, int *B, int rows, int cols, int dim) {
    // Element-wise matrix addition C = A + B over a rows x cols matrix
    // stored row-major; one thread per element.
    const int c = blockDim.x * blockIdx.x + threadIdx.x;  // column
    const int r = blockDim.y * blockIdx.y + threadIdx.y;  // row
    const int idx = r * cols + c;
    // The grid may overshoot the matrix; drop out-of-range threads.
    if (r < rows && c < cols && idx < dim) {
        C[idx] = A[idx] + B[idx];
    }
}
int main()
{
    // Read rows/cols from stdin, fill A and B with A[i][j] = B[i][j] = i + j,
    // add them on the GPU, and print the sum of all result elements.
    int *A, *B, *C;
    int i, j;
    int *d_a, *d_b, *d_c;
    int rows, cols;
    // Validate stdin (the original ignored scanf's return value).
    if (scanf("%d", &rows) != 1 || scanf("%d", &cols) != 1) {
        fprintf(stderr, "expected two integers (rows cols) on stdin\n");
        return 1;
    }
    // Calculation of dimensions
    int dim = rows * cols;
    int size = dim * sizeof(int);
    // Host buffers.
    A = (int *)malloc(size);
    B = (int *)malloc(size);
    C = (int *)malloc(size);
    if (A == NULL || B == NULL || C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Device buffers.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Initialize arrays: A[i][j] = B[i][j] = i + j.
    for(i = 0; i < rows; i++){
        for(j = 0; j < cols; j++){
            A[i*cols+j] = B[i*cols+j] = i+j;
        }
    }
    cudaMemcpy(d_a, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, B, size, cudaMemcpyHostToDevice);
    // One TWxTW thread tile per block; enough blocks to cover the matrix.
    dim3 dimGrid(ceil((float)cols / TW), ceil((float)rows / TW), 1);
    dim3 dimBlock(TW, TW, 1);
    matrix_sum<<<dimGrid, dimBlock>>>(d_c, d_a, d_b, rows, cols, dim);
    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy(C, d_c, size, cudaMemcpyDeviceToHost);
    long long int somador=0;
    // Reduce the result on the host.
    for(i = 0; i < rows; i++){
        for(j = 0; j < cols; j++){
            somador+=C[i*cols+j];
        }
    }
    printf("%lli\n", somador);
    // Free memory
    free(A);
    free(B);
    free(C);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return (0);
}
|
2,028
|
//
/*// Created by sergio on 13/02/19.
//
#include <iostream>
#include <stdint.h> // Para medir el clock
#include <cstdlib> // std
#include <iomanip> // Formateo de datos
#include <string>
#define CWIDTHLEFT 40
#define CWIDTHRIGHT 30
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
int main(){
int devCount = 1;
if (devCount == 0)
{
std::cout << "No se detecto el modulo cuda cargado";
exit (-1);
}
for (int i = 0; i < devCount; i++)
{
cudaDeviceProp devProp;
cudaGetDeviceProperties (&devProp, i);
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << std::setfill ('*') << " Info de la placa cuda "
<< i << " ";
std::cout.unsetf (std::ios::right);
std::cout.setf (std::ios::left);
std::cout << std::setw (CWIDTHRIGHT - 2) << std::setfill ('*') << "(todo en bytes) ";
std::cout << std::endl;
std::cout.unsetf (std::ios::left);
std::cout.setf (std::ios::left);
std::cout << std::setw (CWIDTHLEFT) << std::setfill (' ') << "Nombre:";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.name << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Total Memoria Global:";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.totalGlobalMem << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Memoria shared por bloque (SMM): ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.sharedMemPerBlock << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Registros por bloque: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.regsPerBlock << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Wrap size: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.warpSize << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Max threads por bloque: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.maxThreadsPerBlock << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Max threads por SMM: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.maxThreadsPerMultiProcessor
<< std::endl;
std::cout.unsetf (std::ios::right);
//Dimeciones maximas por bloque
for (int j = 0; j < 3; ++j)
{
// c++11
std::string sstr = "Dimecion maxima " + std::to_string (j) + " por bloque:";
std::cout << std::setw (CWIDTHLEFT) << sstr;
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.maxThreadsDim[j] << std::endl;
std::cout.unsetf (std::ios::right);
}
//Dimeciones maximas por grid
for (int j = 0; j < 3; ++j)
{
// c++11
std::string sstr = "Dimecion maxima " + std::to_string (j) + " por grid:";
std::cout << std::setw (CWIDTHLEFT) << sstr;
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.maxGridSize[j] << std::endl;
std::cout.unsetf (std::ios::right);
}
std::cout << std::setw (CWIDTHLEFT) << "Clock rate: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.clockRate << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Total Memoria constante: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.totalConstMem << std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Soporta copia y ejecucion concurrente: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << (devProp.deviceOverlap ? "Sep" : "No")
<< std::endl;
std::cout.unsetf (std::ios::right);
std::cout << std::setw (CWIDTHLEFT) << "Multiprocesadores: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << devProp.multiProcessorCount << std::endl;
std::cout.unsetf (std::ios::right);
// Calculo de cuda cores
int mp = devProp.multiProcessorCount;
int cores;
switch (devProp.major)
{
case 2: // Fermi
if (devProp.minor == 1)
cores = mp * 48;
else
cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1)
cores = mp * 128;
else if (devProp.minor == 0)
cores = mp * 64;
else
std::cout << " desconocido ";
break;
default:
std::cout << " desconocido ";
break;
}
std::cout << std::setw (CWIDTHLEFT) << "Cuda cores: ";
std::cout.setf (std::ios::right);
std::cout << std::setw (CWIDTHRIGHT) << cores << std::endl;
std::cout.unsetf (std::ios::right);
}
}
*/
|
2,029
|
// RUN: %clangxx -ccc-print-phases --sysroot=%S/Inputs/SYCL -target x86_64-unknown-linux-gnu -fsycl -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 --cuda-gpu-arch=sm_80 -c %s 2>&1 | FileCheck %s --check-prefix=DEFAULT-PHASES
// Test the correct placement of the offloading actions for compiling CUDA sources (*.cu) in SYCL.
// DEFAULT-PHASES: +- 0: input, "{{.*}}", cuda, (device-sycl, sm_80)
// DEFAULT-PHASES: +- 1: preprocessor, {0}, cuda-cpp-output, (device-sycl, sm_80)
// DEFAULT-PHASES: +- 2: compiler, {1}, ir, (device-sycl, sm_80)
// DEFAULT-PHASES: +- 3: offload, "device-sycl (nvptx64-nvidia-cuda:sm_80)" {2}, ir
// DEFAULT-PHASES: | +- 4: input, "{{.*}}", cuda, (device-cuda, sm_80)
// DEFAULT-PHASES: | +- 5: preprocessor, {4}, cuda-cpp-output, (device-cuda, sm_80)
// DEFAULT-PHASES: | +- 6: compiler, {5}, ir, (device-cuda, sm_80)
// DEFAULT-PHASES: |- 7: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {6}, ir
// DEFAULT-PHASES: +- 8: linker, {3, 7}, ir, (device-sycl, sm_80)
// DEFAULT-PHASES: +- 9: offload, "device-sycl (nvptx64-nvidia-cuda:sm_80)" {8}, ir
// DEFAULT-PHASES: | +- 10: input, "{{.*}}", cuda, (host-cuda-sycl)
// DEFAULT-PHASES: | +- 11: append-footer, {10}, cuda, (host-cuda-sycl)
// DEFAULT-PHASES: | +- 12: preprocessor, {11}, cuda-cpp-output, (host-cuda-sycl)
// DEFAULT-PHASES: | +- 13: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {12}, "device-sycl (nvptx64-nvidia-cuda:sm_80)" {2}, cuda-cpp-output
// DEFAULT-PHASES: | +- 14: compiler, {13}, ir, (host-cuda-sycl)
// DEFAULT-PHASES: | | +- 15: backend, {6}, assembler, (device-cuda, sm_80)
// DEFAULT-PHASES: | | +- 16: assembler, {15}, object, (device-cuda, sm_80)
// DEFAULT-PHASES: | | +- 17: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {16}, object
// DEFAULT-PHASES: | | |- 18: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {15}, assembler
// DEFAULT-PHASES: | |- 19: linker, {17, 18}, cuda-fatbin, (device-cuda)
// DEFAULT-PHASES: | +- 20: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {14}, "device-cuda (nvptx64-nvidia-cuda)" {19}, ir
// DEFAULT-PHASES: | +- 21: backend, {20}, assembler, (host-cuda-sycl)
// DEFAULT-PHASES: |- 22: assembler, {21}, object, (host-cuda-sycl)
// DEFAULT-PHASES: 23: clang-offload-bundler, {9, 22}, object, (host-cuda-sycl)
// RUN: %clangxx -ccc-print-phases --sysroot=%S/Inputs/SYCL --cuda-path=%S/Inputs/CUDA_111/usr/local/cuda -fsycl-libspirv-path=%S/Inputs/SYCL/lib/nvidiacl -target x86_64-unknown-linux-gnu -fsycl -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 --cuda-gpu-arch=sm_80 %s 2>&1 | FileCheck %s --check-prefix=DEFAULT-PHASES2
// DEFAULT-PHASES2: +- 0: input, "{{.*}}", cuda, (host-cuda-sycl)
// DEFAULT-PHASES2: +- 1: append-footer, {0}, cuda, (host-cuda-sycl)
// DEFAULT-PHASES2: +- 2: preprocessor, {1}, cuda-cpp-output, (host-cuda-sycl)
// DEFAULT-PHASES2: | +- 3: input, "{{.*}}", cuda, (device-sycl, sm_80)
// DEFAULT-PHASES2: | +- 4: preprocessor, {3}, cuda-cpp-output, (device-sycl, sm_80)
// DEFAULT-PHASES2: |- 5: compiler, {4}, ir, (device-sycl, sm_80)
// DEFAULT-PHASES2: +- 6: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {2}, "device-sycl (nvptx64-nvidia-cuda:sm_80)" {5}, cuda-cpp-output
// DEFAULT-PHASES2: +- 7: compiler, {6}, ir, (host-cuda-sycl)
// DEFAULT-PHASES2: | +- 8: input, "{{.*}}", cuda, (device-cuda, sm_80)
// DEFAULT-PHASES2: | +- 9: preprocessor, {8}, cuda-cpp-output, (device-cuda, sm_80)
// DEFAULT-PHASES2: | +- 10: compiler, {9}, ir, (device-cuda, sm_80)
// DEFAULT-PHASES2: | +- 11: backend, {10}, assembler, (device-cuda, sm_80)
// DEFAULT-PHASES2: | +- 12: assembler, {11}, object, (device-cuda, sm_80)
// DEFAULT-PHASES2: | +- 13: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {12}, object
// DEFAULT-PHASES2: | |- 14: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {11}, assembler
// DEFAULT-PHASES2: |- 15: linker, {13, 14}, cuda-fatbin, (device-cuda)
// DEFAULT-PHASES2: +- 16: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {7}, "device-cuda (nvptx64-nvidia-cuda)" {15}, ir
// DEFAULT-PHASES2: +- 17: backend, {16}, assembler, (host-cuda-sycl)
// DEFAULT-PHASES2: +- 18: assembler, {17}, object, (host-cuda-sycl)
// DEFAULT-PHASES2: +- 19: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {18}, object
// DEFAULT-PHASES2: +- 20: linker, {19}, image, (host-cuda-sycl)
// DEFAULT-PHASES2: | |- 21: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {10}, ir
// DEFAULT-PHASES2: | +- 22: linker, {5, 21}, ir, (device-sycl, sm_80)
// DEFAULT-PHASES2: {{.*}}: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {20}, "device-sycl (nvptx64-nvidia-cuda:sm_80)" {{{.*}}}, image
|
2,030
|
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Tomás Oliveira e Silva, November 2017
//
// ACA 2017/2018
//
// modify_sector CUDA kernel (each thread deals with one sector)
//
extern "C" __global__
void modify_sector_cuda_kernel(unsigned int * __restrict__ sector_data,unsigned int * __restrict__ sector_number,unsigned int n_sectors,unsigned int sector_size)
{
    // Each thread XOR-scrambles one sector of sector_data using a linear
    // congruential pseudo-random sequence seeded from that sector's number.
    unsigned int x,y,idx,i,a,c,n_words;
    unsigned int *lo,*hi;
    // Bounds of the whole buffer, used to validate each word address below.
    lo = sector_data;
    hi = sector_data + n_sectors * sector_size / 4;
    //
    // compute the thread number
    //
    x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x;
    y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y;
    idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x;
    if(idx >= n_sectors)
        return; // safety precaution
    //
    // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4)
    //
    n_words = sector_size / 4u;
    //
    // adjust pointers (N.B. the memory layout may not be optimal)
    //
    sector_data += n_words * idx;
    sector_number += idx;
    //
    // initialize the linear congruential pseudo-random number generator
    // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c)
    //
    i = sector_number[0]; // get the sector number
    a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2); // a must be a multiple of 4 plus 1
    c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3); // c must be odd
    x = 0xACA02017u; // initial state
    //
    // modify the sector data
    //
    for(i = 0u;i < n_words;i++)
    {
        unsigned int *addr;
        x = a * x + c; // update the pseudo-random generator state
        // FIX: the address-of expression had been corrupted by an HTML-entity
        // mangle ("&sect" rendered as the section sign), which does not compile.
        addr = &sector_data[i];
        if(addr >= lo && addr < hi)
            *addr ^= x; // modify the sector data
    }
}
|
2,031
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void imageblur( int* inputImage, int* outputImage, int filterSize, double* filter, int imageRow, int imageCol){
    // One thread per output pixel: convolve the image with a filterSize x
    // filterSize weight kernel, treating out-of-bounds pixels as 0 (zero padding).
    int pixelx = blockIdx.x * blockDim.x + threadIdx.x;
    int pixely = blockIdx.y * blockDim.y + threadIdx.y;
    double blur_value = 0.0;
    // The launch grid may overshoot the image; drop threads outside it.
    if (pixelx >= imageCol || pixely >= imageRow) {
        return;
    }
    //multiply with blur kernel
    for (int finalx = 0; finalx < filterSize; finalx++) {
        for (int finaly = 0; finaly < filterSize; finaly++) {
            // Neighbourhood coordinates centered on (pixelx, pixely).
            int imagex = pixelx + finalx - filterSize / 2 ;
            int imagey = pixely + finaly - filterSize / 2;
            int imagePixel;
            if(imagex < 0 || imagex >= imageCol || imagey < 0 || imagey >= imageRow){
                imagePixel = 0;
            } else {
                imagePixel = inputImage[imagey*imageCol+imagex];
            }
            blur_value += (filter[finaly*filterSize+finalx] * imagePixel);
        }
    }
    // NOTE(review): 15.0 matches the weight sum of the 3x3 filter built in
    // main (1+2+1+2+3+2+1+2+1); it is not derived from `filter`, so any other
    // filter will be mis-normalized — confirm before reusing this kernel.
    outputImage[pixely*imageCol+pixelx] = (int)(blur_value/15.0);
}
int main(int argc, char const *argv[]) {
    // Blur a random 3x3 image with a fixed 3x3 weighted filter on the GPU,
    // then print both the input and the blurred result.
    int imagex = 3, imagey = 3;
    int numberOfPixels = imagex*imagey*sizeof(int);
    int *d_image = 0; int *d_blurImage = 0; double *d_filter = 0; //device
    int *h_image = 0; int *h_blurImage = 0; double *h_filter = 0; //host
    // Allocate host/device buffers for the image, result, and filter.
    h_image = (int*)malloc(numberOfPixels);
    cudaMalloc((void**)&d_image, numberOfPixels);
    h_blurImage = (int*)malloc(numberOfPixels);
    cudaMalloc((void**)&d_blurImage, numberOfPixels);
    h_filter = (double*)malloc(9*sizeof(double));
    cudaMalloc((void**)&d_filter, 9*sizeof(double));
    // Also check the filter buffers (the original skipped them but
    // dereferenced h_filter immediately below).
    if(h_image == 0 || d_image == 0 || h_blurImage == 0 || d_blurImage == 0
       || h_filter == 0 || d_filter == 0){
        printf("Could not allocate memory");
        return 1;
    }
    // 3x3 weighted-average filter (weights sum to 15, matching the kernel's divisor).
    h_filter[0] = 1.0; h_filter[1] = 2.0; h_filter[2] = 1.0;
    h_filter[3] = 2.0; h_filter[4] = 3.0; h_filter[5] = 2.0;
    h_filter[6] = 1.0; h_filter[7] = 2.0; h_filter[8] = 1.0;
    // Random 8-bit pixel values.
    srand(time(NULL));
    for(int i = 0; i < (imagex*imagey); i++){
        h_image[i] = (rand() % 256);
    }
    //Copy host memory to device
    cudaMemcpy( d_image, h_image, numberOfPixels, cudaMemcpyHostToDevice);
    cudaMemcpy( d_filter, h_filter, 9*sizeof(double), cudaMemcpyHostToDevice);
    // 4x4 thread tiles; +1 block per axis covers the remainder.
    const dim3 blockSize(4,4,1);
    const dim3 gridSize(imagex/blockSize.x+1,imagey/blockSize.y+1,1);
    imageblur<<<gridSize, blockSize>>>(d_image, d_blurImage, 3, d_filter, imagey, imagex);
    // Blocking copy back also waits for the kernel to finish.
    cudaMemcpy(h_blurImage, d_blurImage, numberOfPixels, cudaMemcpyDeviceToHost);
    printf("Image : \n");
    for(int i = 0; i < imagex; i++){
        for(int j = 0; j < imagey; j++){
            printf("%d ", h_image[i*imagex + j]);
        }
        printf("\n");
    }
    printf("Blur Image: \n");
    for(int i = 0; i < imagex; i++){
        for(int j = 0; j < imagey; j++){
            printf("%d ", h_blurImage[i*imagex + j]);
        }
        printf("\n");
    }
    //Clean Memory
    free(h_image); free(h_blurImage); free(h_filter);
    cudaFree(d_image); cudaFree(d_blurImage); cudaFree(d_filter);
    return 0;
}
|
2,032
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <algorithm>
#include <unistd.h>
// Fully parenthesized so the macros are safe when called with expression
// arguments (e.g. min(a + 1, b)); the original expansion bound neighbouring
// operators into the comparison. Note: arguments are still evaluated twice,
// so avoid side effects in a/b.
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))
// Max threadsize is 1024 32*32
typedef unsigned char ubyte;
void printWorld(ubyte *world, uint size);
void zeroWorld(ubyte *world, uint size);
void copy(ubyte *pattern, int patternsize, ubyte *world, uint size);
int coords(int x, int y, int size);
__global__ void game_of_life_turn(ubyte *world, ubyte *buffer_world, short size)
{
    // One Game of Life generation for a single cell of a size x size world
    // with wrap-around (toroidal) edges; the new state goes to buffer_world.
    // We need to find the x,y of the cell we are looking at
    // Because this is a 1d array we have to do some maths.
    uint x = threadIdx.x + (blockDim.x * blockIdx.x);
    uint y = threadIdx.y + (blockDim.y * blockIdx.y);
    // Find the y rows, wrapping around the top/bottom edge
    uint y_up = (y + 1) % size;
    uint y_down = (y + size - 1) % size;
    // Find the y offsets (row start indices in the 1D array)
    uint y_offset = y * size;
    uint y_up_offset = y_up * size;
    uint y_down_offset = y_down * size;
    //printf("(%d,%d) (%d %d) (%d %d) (%d, %d) (%d, %d)\n", x, y, threadIdx.x, threadIdx.y, blockDim.x, blockDim.y, blockIdx.x, blockIdx.y, gridDim.x, gridDim.y);
    // Wrap left/right neighbours; the +size keeps the unsigned math positive.
    uint x_left = (x - 1 + size) % size;
    uint x_right = (x + 1) % size;
    uint offset = x + y_offset;
    // Count live cells among the 8 neighbours (each cell is 0 or 1).
    uint aliveCells = world[x_left + y_up_offset] +
                      world[x + y_up_offset] +
                      world[x_right + y_up_offset] +
                      world[x_left + y_offset] +
                      world[x_right + y_offset] +
                      world[x_left + y_down_offset] +
                      world[x + y_down_offset] +
                      world[x_right + y_down_offset];
    //Any live cell with two or three live neighbours survives.
    //Any dead cell with three live neighbours becomes a live cell.
    //All other live cells die in the next generation. Similarly, all other dead cells stay dead.
    buffer_world[offset] = aliveCells == 3 || (aliveCells == 2 && world[offset]) ? 1 : 0;
}
int main()
{
    // Run 10000 generations of Conway's Game of Life on a 256x256 toroidal
    // world seeded with a glider, and report millions of cell updates/second.
    // To keep the math easy the size of the world must be a square of a square, i.e. X^2^2
    // This is so we can easily divide up the world into square blocks for processing
    // To make it even easier size should be a power of 2, i.e. 2^X
    uint size = 256;
    int turns = 10000;
    uint ncells = size * size;
    // With the max number of threads being 1024
    // The number of threads here will describe the number of blocks
    uint threadsCount = min(ncells, 1024);
    uint threadDimSize = sqrt(threadsCount);
    // Threads create a block of sqrt(threadCount)^2
    dim3 threadsPerBlock(threadDimSize, threadDimSize);
    // Now we need to find the number of blocks this is the size/ThreadDimSize
    uint blockDimSize = size / threadDimSize;
    dim3 numBlocks(blockDimSize, blockDimSize);
    // Lets make sure our math is correct
    // The number of cells is a multiple of threadcount
    assert(ncells % threadsCount == 0);
    // the number of blocks * num of threads = size
    assert(blockDimSize * threadDimSize == size);
    //printf("Size %d, ncells %d, Threads: %d, ThreadDimSize %d, BlockDimSize %d\n", size, ncells, threadsCount, threadDimSize, blockDimSize);
    // We make a 1d array of bytes, where each byte is a cell, to describe the world
    ubyte *host_world;
    int worldSize = sizeof(ubyte) * ncells;
    host_world = (ubyte *)malloc(worldSize);
    // We setup the world by first zeroing it out, then copying a pattern (this is the glider)
    zeroWorld(host_world, size);
    ubyte pattern[5][5] = {
        {0, 0, 0, 1, 0},
        {0, 1, 0, 1, 0},
        {0, 0, 1, 1, 0},
        {0, 0, 0, 0, 0},
        {0, 0, 0, 0, 0},
    };
    copy((ubyte *)pattern, 5, host_world, size);
    // printWorld(host_world, size);
    // Set up the Device Memory by create the world and a buffer
    // Then by Mallocing on the device, then copying the world over to the device
    ubyte *device_world, *device_buffer_world;
    cudaMalloc((void **)&device_world, worldSize);
    cudaMalloc((void **)&device_buffer_world, worldSize);
    cudaMemcpy(device_world, host_world, worldSize, cudaMemcpyHostToDevice);
    // Time some stuff
    struct timeval t0, t1;
    gettimeofday(&t0, NULL);
    // Run the world: each generation writes into the spare buffer, then the
    // two device pointers are ping-pong swapped — no extra copies needed.
    int turn;
    for (turn = 0; turn < turns; turn++)
    {
        game_of_life_turn<<<numBlocks, threadsPerBlock>>>(device_world, device_buffer_world, size);
        std::swap(device_world, device_buffer_world);
    }
    // Finish timing
    gettimeofday(&t1, NULL);
    // Copy the value of the world back to host memory
    cudaMemcpy(host_world, device_world, worldSize, cudaMemcpyDeviceToHost);
    // system("clear");
    // printWorld(host_world, size);
    // How many seconds it took to execute
    float seconds = t1.tv_sec - t0.tv_sec + 1E-6 * (t1.tv_usec - t0.tv_usec);
    // How many total calculations
    float MMcellCalculations = (1.0 * turns * ncells) / 1000000;
    // Millions of Calculations per second
    float MMcellsCalculatedperSecond = MMcellCalculations / seconds;
    printf("CUDA: %d, %f MMCps in %f\n", size, MMcellsCalculatedperSecond, seconds);
    // Free all the Device and host memory
    cudaFree(device_world);
    cudaFree(device_buffer_world);
    free(host_world);
    return 0;
}
void copy(ubyte *pattern, int patternsize, ubyte *world, uint size)
{
    // Stamp a patternsize x patternsize pattern into the top-left corner of
    // the world (both stored row-major as 1D arrays).
    // Loop counters are int: the originals were ubyte, which wraps at 255 and
    // would loop forever for patternsize > 255.
    int x, y;
    for (y = 0; y < patternsize; y++)
    {
        for (x = 0; x < patternsize; x++)
        {
            world[x + (size * y)] = pattern[x + (y * patternsize)];
        }
    }
}
void zeroWorld(ubyte *world, uint size)
{
    // Clear every cell of the size x size world (row-major 1D layout).
    for (int row = 0; row < size; ++row)
    {
        for (int col = 0; col < size; ++col)
        {
            world[col + (row * size)] = 0;
        }
    }
}
void printWorld(ubyte *world, uint size)
{
    // Dump the size x size world as rows of 0/1 digits, framed by separators.
    printf(" ------ \n");
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            printf("%d", world[col + (row * size)]);
        }
        printf("\n");
    }
    printf(" ------ \n\n");
}
|
2,033
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef PRINT_SUFFIX
#define PRINT_SUFFIX "<find_cudadevices>"
#endif
#define MY_CUDA_VER (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__)
inline void check(cudaError_t result)
{
    // If a CUDA runtime call failed, report the error (tagged with
    // PRINT_SUFFIX), reset the device, and terminate the process.
    if (result)
    {
        fprintf(stderr, PRINT_SUFFIX "%s (%s)", cudaGetErrorName(result), cudaGetErrorString(result));
        cudaDeviceReset();
        // Make sure we call CUDA Device Reset before exiting
        // NOTE(review): exits with status 0 even on error — presumably the
        // consumer parses the tagged output rather than the exit code; confirm.
        exit(0);
    }
}
inline void print_value(size_t value)
{
    // Widen to unsigned long long because '%zu' may be unavailable.
    printf("%llu", static_cast<unsigned long long>(value));
}
inline void print_value(bool value)
{
    // Emit a lowercase boolean literal.
    if (value)
        printf("true");
    else
        printf("false");
}
inline void print_value(int value)
{
    // Print a signed integer property value.
    printf("%d", value);
}
// Print a fixed-size array as "(v0, v1, ..., vN)", delegating each element
// to the matching print_value overload. len is deduced and always >= 1 for
// valid C++ arrays.
template <typename T, size_t len>
inline void print_value(const T (&value)[len])
{
    printf("(");
    for (size_t i = 0; i < len - 1; i++)
    {
        print_value(value[i]);
        printf(", ");
    }
    print_value(value[len - 1]);
    printf(")");
}
inline void print_value(unsigned int value)
{
    // Print an unsigned integer property value.
    printf("%u", value);
}
inline void print_value(const void *value)
{
    // Treat the pointer as a NUL-terminated C string and print it quoted
    // (used for fields like deviceProp.name cast via PRINT_STR_PROPERTY).
    printf("\"%s\"", (const char *)value);
}
// Print a char array as a quoted lowercase hex dump, two digits per byte.
template <size_t len>
inline void print_value(const char (&value)[len])
{
    printf("\"");
    for (size_t i = 0; i < len; i++)
        printf("%02hhx", value[i]);
    printf("\"");
}
template <>
inline void print_value<16>(const char (&value)[16])
{
    // Specialized for 16-byte UUIDs: quoted 8-4-4-4-12 hex grouping.
    printf("\"%02hhx%02hhx%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx-%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx\"",
           value[0], value[1], value[2], value[3],
           value[4], value[5], value[6], value[7],
           value[8], value[9], value[10], value[11],
           value[12], value[13], value[14], value[15]);
}
#if MY_CUDA_VER >= 1000
inline void print_value(const cudaUUID_t &value)
{
    // cudaUUID_t (guarded for CUDA 10+) delegates to the 16-byte UUID printer.
    print_value(value.bytes);
}
#endif
// Print one "name = value" line, prefixed with PRINT_SUFFIX so a consumer
// can filter these lines out of mixed output; the value is rendered by the
// matching print_value overload.
template <typename T>
inline void print_property(const char *name, const T &value)
{
    printf(PRINT_SUFFIX " %s = ", name);
    print_value(value);
    printf("\n");
}
// Dump every cudaDeviceProp field of device `id`, one property per line.
// Fields added in later CUDA releases are guarded by MY_CUDA_VER so the
// file still compiles against older toolkits.
inline void print_device(int id)
{
    cudaDeviceProp deviceProp;
    check(cudaGetDeviceProperties(&deviceProp, id));
// Helper macros: print a field by name, optionally coerced to bool or to a
// C string for readability.
#define PRINT_PROPERTY(name) print_property(#name, deviceProp.name)
#define PRINT_BOOL_PROPERTY(name) print_property(#name, static_cast<bool>(deviceProp.name))
#define PRINT_STR_PROPERTY(name) print_property(#name, static_cast<const void *>(deviceProp.name))
    // fields available since CUDA 8.0
    PRINT_STR_PROPERTY(name);
    PRINT_PROPERTY(totalGlobalMem);
    PRINT_PROPERTY(sharedMemPerBlock);
    PRINT_PROPERTY(regsPerBlock);
    PRINT_PROPERTY(warpSize);
    PRINT_PROPERTY(memPitch);
    PRINT_PROPERTY(maxThreadsPerBlock);
    PRINT_PROPERTY(maxThreadsDim);
    PRINT_PROPERTY(maxGridSize);
    PRINT_PROPERTY(clockRate);
    PRINT_PROPERTY(totalConstMem);
    PRINT_PROPERTY(major);
    PRINT_PROPERTY(minor);
    PRINT_PROPERTY(textureAlignment);
    PRINT_PROPERTY(texturePitchAlignment);
    PRINT_BOOL_PROPERTY(deviceOverlap);
    PRINT_PROPERTY(multiProcessorCount);
    PRINT_BOOL_PROPERTY(kernelExecTimeoutEnabled);
    PRINT_BOOL_PROPERTY(integrated);
    PRINT_BOOL_PROPERTY(canMapHostMemory);
    PRINT_PROPERTY(computeMode);
    PRINT_PROPERTY(maxTexture1D);
    PRINT_PROPERTY(maxTexture1DMipmap);
    PRINT_PROPERTY(maxTexture1DLinear);
    PRINT_PROPERTY(maxTexture2D);
    PRINT_PROPERTY(maxTexture2DMipmap);
    PRINT_PROPERTY(maxTexture2DLinear);
    PRINT_PROPERTY(maxTexture2DGather);
    PRINT_PROPERTY(maxTexture3D);
    PRINT_PROPERTY(maxTexture3DAlt);
    PRINT_PROPERTY(maxTextureCubemap);
    PRINT_PROPERTY(maxTexture1DLayered);
    PRINT_PROPERTY(maxTexture2DLayered);
    PRINT_PROPERTY(maxTextureCubemapLayered);
    PRINT_PROPERTY(maxSurface1D);
    PRINT_PROPERTY(maxSurface2D);
    PRINT_PROPERTY(maxSurface3D);
    PRINT_PROPERTY(maxSurface1DLayered);
    PRINT_PROPERTY(maxSurface2DLayered);
    PRINT_PROPERTY(maxSurfaceCubemap);
    PRINT_PROPERTY(maxSurfaceCubemapLayered);
    PRINT_PROPERTY(surfaceAlignment);
    PRINT_BOOL_PROPERTY(concurrentKernels);
    PRINT_BOOL_PROPERTY(ECCEnabled);
    PRINT_PROPERTY(pciBusID);
    PRINT_PROPERTY(pciDeviceID);
    PRINT_PROPERTY(pciDomainID);
    PRINT_BOOL_PROPERTY(tccDriver);
    PRINT_PROPERTY(asyncEngineCount);
    PRINT_BOOL_PROPERTY(unifiedAddressing);
    PRINT_PROPERTY(memoryClockRate);
    PRINT_PROPERTY(memoryBusWidth);
    PRINT_PROPERTY(l2CacheSize);
    PRINT_PROPERTY(maxThreadsPerMultiProcessor);
    PRINT_BOOL_PROPERTY(streamPrioritiesSupported);
    PRINT_BOOL_PROPERTY(globalL1CacheSupported);
    PRINT_BOOL_PROPERTY(localL1CacheSupported);
    PRINT_PROPERTY(sharedMemPerMultiprocessor);
    PRINT_PROPERTY(regsPerMultiprocessor);
    PRINT_BOOL_PROPERTY(isMultiGpuBoard);
    PRINT_PROPERTY(multiGpuBoardGroupID);
    PRINT_PROPERTY(singleToDoublePrecisionPerfRatio);
    PRINT_BOOL_PROPERTY(pageableMemoryAccess);
    PRINT_BOOL_PROPERTY(concurrentManagedAccess);
    PRINT_BOOL_PROPERTY(managedMemory);
#if MY_CUDA_VER >= 900
    // Added in cuda 9.0
    PRINT_BOOL_PROPERTY(computePreemptionSupported);
    PRINT_BOOL_PROPERTY(canUseHostPointerForRegisteredMem);
    PRINT_BOOL_PROPERTY(cooperativeLaunch);
    PRINT_BOOL_PROPERTY(cooperativeMultiDeviceLaunch);
    PRINT_PROPERTY(sharedMemPerBlockOptin);
#endif
#if MY_CUDA_VER >= 902
    // Added in cuda 9.2
    PRINT_BOOL_PROPERTY(pageableMemoryAccessUsesHostPageTables);
    PRINT_BOOL_PROPERTY(directManagedMemAccessFromHost);
#endif
#if MY_CUDA_VER >= 1000
    // Added in cuda 10.0
    PRINT_PROPERTY(uuid);
    PRINT_PROPERTY(luid);
    PRINT_PROPERTY(luidDeviceNodeMask);
#endif
}
// Entry point: enumerate all CUDA devices and dump every property of each.
int main(int argc, char *argv[])
{
    printf("\n");
    fprintf(stderr, "\n");
    int deviceCount = 0;
    check(cudaGetDeviceCount(&deviceCount));
    for (int dev = 0; dev < deviceCount; dev++)
    {
        printf(PRINT_SUFFIX "DEVICE #%d\n", dev);
        print_device(dev);
    }
    return 0;
}
|
2,034
|
#include <vector>
#include <stdint.h>
#include <stddef.h>
// Stub: NV12 -> RGB conversion is not implemented yet.
// Fix: the original body was empty, so control flowed off the end of a
// non-void function — undefined behavior. Return false explicitly to mean
// "conversion not performed" until an implementation lands.
__device__ bool NV12ToRGB(uint8_t * pData, int Height, int Width, int bitdepth, void * pOut)
{
    (void)pData; (void)Height; (void)Width; (void)bitdepth; (void)pOut;
    return false;
}
|
2,035
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define I(ix,iz) (ix)+nx*(iz)
# define PI 3.141592653589793
// Staggered-grid velocity update: advance Ux and Uz one time step from the
// stress fields. One thread per grid point; only interior points
// (1 .. nx-2, 1 .. nz-2) are updated, boundaries are left untouched.
__global__ void propagator_U(float *Ux, float *Uz, float *Txx, float *Txz, float *Tzz, float *P, int nx, int nz, float dt, float dh)
{
    const int ix = blockDim.x * blockIdx.x + threadIdx.x;
    const int iz = blockDim.y * blockIdx.y + threadIdx.y;
    // guard clause: skip boundary rows/columns and out-of-range threads
    if (ix <= 0 || ix >= (nx-1) || iz <= 0 || iz >= (nz-1))
        return;
    const int c = I(ix,iz);
    const float scale = (1/P[c])*(dt/dh);   // dt/(rho*dh)
    Ux[c] = Ux[c] + scale*(Txx[c]-Txx[I(ix-1,iz)]+Txz[c]-Txz[I(ix,iz-1)]);
    Uz[c] = Uz[c] + scale*(Txz[I(ix+1,iz)]-Txz[c]+Tzz[I(ix,iz+1)]-Tzz[c]);
}
// Staggered-grid stress update: advance Txx, Tzz, Txz one time step from
// the velocity fields, and inject the source wavelet into Tzz at the
// source cell (sx-1, sz-1). One thread per grid point.
// Fixes vs. original: the lambda term of the Tzz update indexed density as
// P[I(iz,iz)] (typo for P[I(ix,iz)]), and its Ux forward difference used
// Ux[I(ix+2,iz)] where the one-cell stencil — cf. the symmetric Txx line —
// requires Ux[I(ix+1,iz)].
__global__ void propagator_T(float *Ux, float *Uz, float *Txx, float *Txz, float *Tzz, float *Vp, float *Vs,float *P, float *source, int nx, int nz, float dt, float dh, int sx, int sz, int it)
{
    int ix = threadIdx.x+blockDim.x*blockIdx.x;
    int iz = threadIdx.y+blockDim.y*blockIdx.y;
    if(ix < nx && iz<nz){
        if(ix > 0 && ix < (nx-1) && iz > 0 && iz < (nz-1))
        {
            // Txx: (rho*Vp^2) * dUx/dx + lambda * dUz/dz
            Txx[I(ix,iz)]=Txx[I(ix,iz)] + ((pow(Vp[I(ix,iz)],2)*P[I(ix,iz)])*(dt/dh)*(Ux[I(ix+1,iz)]-Ux[I(ix,iz)])) + ((P[I(ix,iz)]*(pow(Vp[I(ix,iz)],2)-2*pow(Vs[I(ix,iz)],2)))*(dt/dh)*(Uz[I(ix,iz)]-Uz[I(ix,iz-1)]));
            // Tzz: (rho*Vp^2) * dUz/dz + lambda * dUx/dx
            Tzz[I(ix,iz)]=Tzz[I(ix,iz)] + ((pow(Vp[I(ix,iz)],2)*P[I(ix,iz)])*(dt/dh)*(Uz[I(ix,iz)]-Uz[I(ix,iz-1)])) + ((P[I(ix,iz)]*(pow(Vp[I(ix,iz)],2)-2*pow(Vs[I(ix,iz)],2)))*(dt/dh)*(Ux[I(ix+1,iz)]-Ux[I(ix,iz)]));
            // Txz: (rho*Vs^2) * (dUx/dz + dUz/dx)
            Txz[I(ix,iz)]=Txz[I(ix,iz)] + (pow(Vs[I(ix,iz)],2)*P[I(ix,iz)])*(dt/dh)*(Ux[I(ix,iz+1)]-Ux[I(ix,iz)]+Uz[I(ix,iz)]-Uz[I(ix-1,iz)]);
        }
        // source injection (normal stress) at the 1-based (sx, sz) cell
        if (ix == (sx-1) && iz == (sz-1))
        {
            //Txx[I(ix,iz)] += source[it];
            Tzz[I(ix,iz)] += source[it];
        }
    }
}
// Host driver for 2-D elastic finite-difference wave propagation:
// reads density/velocity models from binary files, builds a Ricker-style
// source wavelet, time-steps the staggered-grid kernels on the GPU, and
// writes the Ux wavefield for every time step to field.bin.
int main()
{
    // grid dimensions, spatial step and time step
    int nx = 100;
    int nz = 100;
    float dh = 20;
    float dt = 0.002;
    float tend=1;
    int nt = ceil(tend/dt);          // number of time steps
    int sx = 50, sz = 50 ;           // source cell (1-based indices)
    float f = 4;                     // source peak frequency
    float *P_h = (float*)calloc(nx*nz,sizeof(float));
    float *Vs_h = (float*)calloc(nx*nz,sizeof(float));
    float *Vp_h = (float*)calloc(nx*nz,sizeof(float));
    float *source_h = (float*)calloc(nt,sizeof(float));
    float *U =(float*)calloc(nx*nz*nt ,sizeof(float));
    // NOTE(review): fread's size and count arguments are swapped relative
    // to the usual fread(ptr, size, count, stream) convention; the same
    // byte count is read either way, but the return value differs —
    // confirm intent. No fopen/fread error checking is performed.
    FILE *ro = fopen("density.bin","rb");
    fread(P_h,nx*nz,sizeof(float),ro);
    fclose(ro);
    FILE *v_p = fopen("VelocityP.bin","rb");
    fread(Vp_h,nx*nz,sizeof(float),v_p);
    fclose(v_p);
    FILE *v_s = fopen("VelocityS.bin","rb");
    fread(Vs_h,nx*nz,sizeof(float),v_s);
    fclose(v_s);
    /******* SOURCE *****/
    // Ricker-style wavelet of peak frequency f, delayed by 1/f seconds.
    int it = 0;
    float t = 0;
    for (it=0;it<nt;it++)
    {
        source_h[it] = -(1.0-2.0*pow(PI*f*(t-(1.0/f)),2))*exp(-pow(PI*f*(t-(1.0/f)),2));
        t += dt;
    }
    /******* CUDA *******/
    // Device buffers: velocities, stresses, model parameters, wavelet.
    float *Ux, *Uz, *Txx, *Txz, *Tzz, *P, *Vs, *Vp;
    float *source;
    cudaMalloc((void **) &Ux, nx*nz*sizeof(float));
    cudaMalloc((void **) &Uz, nx*nz*sizeof(float));
    cudaMalloc((void **) &Txx, nx*nz*sizeof(float));
    cudaMalloc((void **) &Txz, nx*nz*sizeof(float));
    cudaMalloc((void **) &Tzz, nx*nz*sizeof(float));
    cudaMalloc((void **) &P, nx*nz*sizeof(float));
    cudaMalloc((void **) &Vp, nx*nz*sizeof(float));
    cudaMalloc((void **) &Vs, nx*nz*sizeof(float));
    cudaMalloc((void **) &source, nt*sizeof(float));
    cudaMemcpy(P, P_h, nx*nz*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Vs, Vs_h, nx*nz*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Vp, Vp_h, nx*nz*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(source, source_h, nt*sizeof(float), cudaMemcpyHostToDevice);
    // zero-initialize the wavefields
    cudaMemset(Ux, 0, nx*nz*sizeof(float));
    cudaMemset(Uz, 0, nx*nz*sizeof(float));
    cudaMemset(Txx, 0, nx*nz*sizeof(float));
    cudaMemset(Txz, 0, nx*nz*sizeof(float));
    cudaMemset(Tzz, 0, nx*nz*sizeof(float));
    // 32x32 thread blocks covering the nx x nz grid (ceil division)
    dim3 Grid(((nx-1)/32)+1,((nz-1)/32)+1);
    dim3 Block(32,32);
    for(it=0;it<nt;it++)
    {
        propagator_U <<<Grid,Block>>>(Ux, Uz, Txx, Txz, Tzz, P, nx, nz, dt, dh);
        propagator_T <<<Grid,Block>>>(Ux, Uz, Txx, Txz, Tzz, Vp, Vs, P, source, nx, nz, dt, dh, sx, sz, it);
        // blocking copy: also synchronizes with the kernels each step
        cudaMemcpy(U+(nx*nz*it), Ux , nx*nz*sizeof(float), cudaMemcpyDeviceToHost);
    }
    FILE *field_f;
    field_f=fopen("field.bin", "wb");
    fwrite(U,sizeof(float),nx*nz*nt, field_f);
    fclose(field_f);
    FILE *source_f;
    source_f=fopen("source.bin", "wb");
    fwrite(source_h,sizeof(float),nt, source_f);
    fclose(source_f);
    cudaFree(Ux);
    cudaFree(Uz);
    cudaFree(Txx);
    cudaFree(Txz);
    cudaFree(Tzz);
    cudaFree(P);
    cudaFree(Vs);
    cudaFree(Vp);
    cudaFree(source);
    free(P_h);
    free(Vs_h);
    free(Vp_h);
    free(source_h);
    free(U);
    return 0;
}
|
2,036
|
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <math.h>
#include <fstream>
#include <sstream>
#include <cstdio>
#include <ctime>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
void sequentialKMeans(int N, int K, int EPS, float*devData);
#define N 80000
#define K 200
#define EPS 0.000001
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { printf("Error at %s:%d\n",__FILE__,__LINE__);return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) {printf("Error at %s:%d\n",__FILE__,__LINE__); return EXIT_FAILURE;}} while(0)
using namespace std;
// Fill vectorsDev (device memory) with 2*N uniform random floats in (0,1]
// using a fixed cuRAND seed, so runs are reproducible. Returns 0 on
// success; the CURAND_CALL macro returns early on failure.
int init(float *vectorsDev){
    curandGenerator_t rng;
    CURAND_CALL(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT));
    CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rng, 234ULL));
    CURAND_CALL(curandGenerateUniform(rng, vectorsDev, 2*N));
    CURAND_CALL(curandDestroyGenerator(rng));
    return 0;
}
// Sequential (host-side) Lloyd's k-means over N random 2-D points drawn on
// the GPU: assign each point to the nearest of K centroids, recompute the
// centroids, repeat until no centroid moves more than EPS.
// Fix: the original calloc'd a 2-float `arraySum` buffer inside the update
// loop on every iteration and never freed it — a memory leak that grew
// with the iteration count. Plain locals are sufficient.
int main(int argc, char* argv[]){
    std::clock_t start;
    double duration;
    int iter = 0;
    float *devData, *hostData, *mean;
    bool stopCriterion = false;
    short *clusters;
    float oldMeanX, oldMeanY;
    hostData = (float*) malloc(2*N*sizeof(float));
    mean = (float*) malloc(2*K*sizeof(float));
    clusters = (short*) malloc(N*sizeof(short));
    CUDA_CALL(cudaMalloc((float **)&devData, 2*N*sizeof(float)));
    init(devData);
    CUDA_CHECK_RETURN(cudaMemcpy(hostData, devData, 2*N*sizeof(float), cudaMemcpyDeviceToHost));
    cudaDeviceSynchronize();
    // the first K points serve as the initial centroids
    memcpy(mean, hostData, 2*K*sizeof(float));
    start = std::clock();
    while (!stopCriterion) {
        stopCriterion = true;
        /*
         * assignment step: nearest centroid by squared Euclidean distance
         * (3.0 is a safe upper bound: points lie in [0,1]^2, so the squared
         * distance never exceeds 2)
         */
        for (int v = 0; v < N; v++) {
            float minDistance = 3.0;
            short minIndex = -1;
            float distance = 0;
            for (int c = 0; c < K; c++) {
                distance = pow((hostData[2*v] - mean[2*c]),2)+pow((hostData[2*v+1] - mean[2*c+1]),2);
                if (distance < minDistance) {
                    minIndex = c;
                    minDistance = distance;
                }
            }
            clusters[v] = minIndex;
        }
        /*
         * update step: recompute each centroid as the mean of its members
         */
        for (int i = 0; i < K; i++) {
            int numComponents = 0;
            float sumX = 0.0f;
            float sumY = 0.0f;
            for (int j = 0; j < N; j++) {
                if (clusters[j] == i) {
                    numComponents++;
                    sumX += hostData[2*j];
                    sumY += hostData[2*j+1];
                }
            }
            oldMeanX = mean[2*i];
            mean[2*i] = sumX / numComponents;
            oldMeanY = mean[2*i+1];
            mean[2*i+1] = sumY / numComponents;
            // keep iterating while any centroid moved more than EPS
            if(abs(mean[2*i] - oldMeanX) > EPS || abs(mean[2*i+1] - oldMeanY) > EPS){
                stopCriterion = false;
            }
        }
        iter++;
    }
    printf("Numero di iterazioni: %d\r\n", iter);
    duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
    std::cout<<"durata: "<< duration <<'\n';
    free(hostData);
    free(mean);
    free(clusters);
    CUDA_CHECK_RETURN(cudaFree(devData));
    return EXIT_SUCCESS;
}
/**
 * Verify the result of a CUDA runtime API call: if it failed, print the
 * failing statement, the error string, the numeric code, and the call
 * site, then terminate the process.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement << " returned " << cudaGetErrorString(err)
                  << "(" << err << ") at " << file << ":" << line << std::endl;
        exit (1);
    }
}
|
2,037
|
#include<stdio.h>
#include<stdlib.h>
// Single-thread scalar addition on the device: *c = *a + *b.
__global__ void mykernel(int* a,int* b,int* c){
    int sum = *a + *b;
    *c = sum;
}
// Host driver: copy two scalars to the GPU, add them with a 1x1 kernel,
// and print the result.
// Fix: the original never freed d_a/d_b/d_c (device memory leak).
int main(){
    int a=1;
    int b=9;
    int c;
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc((void**)&d_a,sizeof(int));
    cudaMalloc((void**)&d_b,sizeof(int));
    cudaMalloc((void**)&d_c,sizeof(int));
    cudaMemcpy(d_a,&a,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,&b,sizeof(int),cudaMemcpyHostToDevice);
    mykernel<<<1,1>>>(d_a,d_b,d_c);
    // blocking copy: also synchronizes with the kernel
    cudaMemcpy(&c,d_c,sizeof(int),cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    printf("the summation is %d",c);
    return 0;
}
/*
#include<stdio.h>
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
int main() {
cuda_hello<<<1,1>>>();
printf("hello world from host");
return 0;
}*/
|
2,038
|
// One-sided Jacobi SVD sweep over per-row (tid_y) matrices held in shared
// memory, followed by copying the 81-element V factor back to global
// memory. Operates on integer data.
// NOTE(review): several things look unfinished and should be confirmed:
//  - the m and n parameters are immediately overwritten (n = 10, m = 3);
//  - `iterations` is declared but no outer sweep loop uses it;
//  - s_S, s_V and d are read without ever being initialized in this kernel;
//  - the rotation math (c, s from integer division) appears transliterated
//    from a floating-point Jacobi rotation and will truncate;
//  - acc1/acc2 are written but never read afterwards in this view.
__global__ void JacobiSVD(int* S, int* V, int m, int n)
{
    const int iterations = 30;
    int tid_x = threadIdx.x;
    int bsz_x = blockDim.x;
    int tid_y = threadIdx.y;
    int gid_y = blockIdx.y * blockDim.y + tid_y;
    // scratch accumulators: first/second halves of one 512-int buffer
    __shared__ int acc[512];
    int* acc1 = acc;
    int* acc2 = acc + 256;
    // per-tid_y 9x9 matrices (81 ints each) and diagonal estimates
    __shared__ int s_S[16*81];
    __shared__ int s_V[16*81];
    __shared__ int d[16*9];
    n = 10, m = 3;
    // sweep over all column pairs (i, j), rotating to zero their coupling
    for (int i = 0; i < n-1; i++) {
        for (int j = i+1; j < n; j++) {
            int* Si = s_S + tid_y*81 + i*m;
            int* Sj = s_S + tid_y*81 + j*m;
            // p = dot(Si, Sj)
            int p = (int)0;
            for (int k = 0; k < m; k++)
                p += Si[k]*Sj[k];
            int y = d[tid_y*9 + i] - d[tid_y*9 + j];
            int r = p*2;
            int r2 = r*2;
            // rotation coefficients (integer arithmetic — see NOTE above)
            int c, s;
            if (y >= 0) {
                c = (r + y) / r2;
                s = r2*c;
            }
            else {
                s = (r - y) / r2;
                c = r2*s;
            }
            // apply the rotation to columns i and j, one element per thread
            if (tid_x < m) {
                int t0 = c*Si[tid_x] + s*Sj[tid_x];
                int t1 = c*Sj[tid_x] - s*Si[tid_x];
                Si[tid_x] = t0;
                Sj[tid_x] = t1;
                acc1[tid_y*16 + tid_x] = t0*t0;
                acc2[tid_y*16 + tid_x] = t1*t1;
            }
        }
        __syncthreads();
    }
    __syncthreads();
    // write the 81-element V block back to global memory, bsz_x at a time
    for (int i = 0; i <= 4; i++)
        V[gid_y * 81 + tid_x+i*bsz_x] = s_V[tid_y * 81 + tid_x+i*bsz_x];
    if (tid_x == 0)
        V[gid_y * 81 + 80] = s_V[tid_y * 81 + 80];
    __syncthreads();
}
|
2,039
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
#include <algorithm>
#define Color uchar4
// Accumulate a weighted color sample into a float accumulator:
// returns a + coef * b, per channel.
float4 Sum(float4 a, Color b, float coef)
{
    float4 result;
    result.x = a.x + (float)b.x * coef;
    result.y = a.y + (float)b.y * coef;
    result.z = a.z + (float)b.z * coef;
    result.w = a.w + (float)b.w * coef;
    return result;
}
// Divide each accumulated channel by the kernel weight sum and truncate
// back to an unsigned byte.
Color ToColor(float4 a, float normalizeValue)
{
    Color c;
    c.x = (unsigned char)(a.x / normalizeValue);
    c.y = (unsigned char)(a.y / normalizeValue);
    c.z = (unsigned char)(a.z / normalizeValue);
    c.w = (unsigned char)(a.w / normalizeValue);
    return c;
}
// Clamp (x, y) into the w x h image bounds and return the row-major
// linear index of the clamped pixel.
int GetCoordinate(int x, int y, int w, int h)
{
    int cx = x;
    int cy = y;
    if (cx < 0) cx = 0; else if (cx > w - 1) cx = w - 1;
    if (cy < 0) cy = 0; else if (cy > h - 1) cy = h - 1;
    return cy * w + cx;
}
// One pass of a 1-D Gaussian blur over a w x h image, horizontal when isX
// is true, vertical otherwise; edges are handled by clamping coordinates.
// Fix: the original read neighbour pixels from `out` while overwriting it,
// so pixels processed later blended already-blurred values instead of the
// source image; we now snapshot the input first. Two unused locals (x, y)
// were also removed.
void Kernel(Color* out, int w, int h, int radius, bool isX)
{
    float PI = 3.14159265359;
    Color c;
    float4 newColor;
    // avoid division by zero in the Gaussian when radius == 0
    float r = radius != 0
        ? radius
        : 1;
    float sum = 0;
    float coef = 0;
    int i, j, k;
    // snapshot of the source image, read-only during this pass
    Color* src = (Color*)malloc(sizeof(Color) * w * h);
    for (i = 0; i < w * h; i++)
        src[i] = out[i];
    for (i = 0; i < h; i++)
    {
        for (j = 0; j < w; j++)
        {
            newColor = { 0,0,0,0 };
            sum = 0;
            for (k = -radius; k <= radius; k++)
            {
                c = isX
                    ? src[GetCoordinate(j + k, i, w, h)]
                    : src[GetCoordinate(j, i + k, w, h)];
                coef = exp(-(float)(k * k) / (2 * r * r)) / (r * sqrt(2 * PI));
                newColor = Sum(newColor,
                    c,
                    coef);
                sum += coef;
            }
            out[i * w + j] = ToColor(newColor, sum);
        }
    }
    free(src);
}
// Apply a separable Gaussian blur in place: vertical pass first, then
// horizontal.
void GetFilteredImage(Color* result, int width, int height, int radius)
{
    // vertical (Y) pass
    Kernel(result, width, height, radius, false);
    // horizontal (X) pass
    Kernel(result, width, height, radius, true);
}
// Read a binary image (int width, int height, then width*height uchar4
// pixels), blur it on the CPU, report the elapsed time, and write the
// result in the same binary layout.
int main()
{
    std::string inFile;
    std::string outFile;
    int radius;
    std::cin >> inFile >> outFile >> radius;
    // NOTE(review): integer division by 10000.0 truncates `radius` to 0
    // for any input below 10000 — this looks like leftover input scaling;
    // confirm the intended units of the radius input.
    radius /= 10000.0;
    // no fopen error checking: a missing file crashes on fread
    auto* file = fopen(inFile.c_str(), "rb");
    int width, height;
    fread(&width, sizeof(int), 1, file);
    fread(&height, sizeof(int), 1, file);
    auto hostMap = (Color*)malloc(sizeof(Color) * width * height);
    fread(hostMap, sizeof(Color), width * height, file);
    fclose(file);
    // time only the filtering itself
    clock_t begin = clock();
    GetFilteredImage(hostMap, width, height, radius);
    clock_t end = clock();
    std::cout << double(end - begin) / CLOCKS_PER_SEC << std::endl;
    file = fopen(outFile.c_str(), "wb");
    fwrite(&width, sizeof(int), 1, file);
    fwrite(&height, sizeof(int), 1, file);
    fwrite(hostMap, sizeof(Color), width * height, file);
    fclose(file);
    free(hostMap);
}
|
2,040
|
#include <thrust/sort.h>
// Value record carried alongside its integer key by thrust::sort_by_key
// in main() below.
struct t
{
    int j,k,l;
};
// Sort N key/value pairs by key with host-side thrust::sort_by_key, then
// print the value records in key order followed by the loop counter.
int main()
{
    const int N = 6;
    int i;
    int keys[N] = { 1, 4, 2, 8, 5, 7};
    struct t values[N]= { {3,4,5},{5,6,7},{8,9,10},{11,12,13},{14,15,16},{17,18,19}};
    thrust::sort_by_key(keys, keys + N, values );
    for (i = 0; i < N; ++i)
        printf("%d %d %d\n",values[i].j,values[i].k,values[i].l);
    printf("i=%d\n",i);
}
|
2,041
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cstring>
#include <string>
using namespace std;
#define NUM_DATA 512
// c = a + b element-wise; one thread per element. Indexing uses only
// threadIdx.x, so a single-block launch is assumed.
__global__ void vecAdd(int *a,int *b,int *c)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Host driver: fill two vectors with random digits, add them on the GPU,
// verify element-wise against the host, and report the outcome.
// Fix: the original called memset(a, ...) three times, leaving b and c
// uninitialized instead of zeroed.
int main()
{
    int *a,*b,*c;
    int *d_a,*d_b,*d_c;
    int memSize = sizeof(int)*NUM_DATA;
    cout << "elements : " << NUM_DATA <<"\n";
    a = new int[NUM_DATA]; memset(a,0,memSize);
    b = new int[NUM_DATA]; memset(b,0,memSize);
    c = new int[NUM_DATA]; memset(c,0,memSize);
    for(int i = 0 ; i < NUM_DATA; i++)
    {
        a[i] = rand() % 10;
        b[i] = rand() % 10;
    }
    cudaMalloc(&d_a,memSize);
    cudaMalloc(&d_b,memSize);
    cudaMalloc(&d_c,memSize);
    cudaMemcpy(d_a,a,memSize,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,memSize,cudaMemcpyHostToDevice);
    // single block of NUM_DATA threads (vecAdd indexes by threadIdx.x only)
    vecAdd<<<1,NUM_DATA>>>(d_a,d_b,d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(c,d_c,memSize,cudaMemcpyDeviceToHost);
    // verify every element against the host-side sum
    bool result = true;
    for(int i = 0 ; i < NUM_DATA; i++)
    {
        if(a[i] + b[i] != c[i]){
            cout << "Gpu has error in vecAdd\n";
            result = false;
        }
    }
    if(result)
        cout << "GPU WORKS WELL \n";
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] a; delete[] b; delete[] c;
    return 0;
}
|
2,042
|
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// Abort (with the CUDA error code as exit status) if a runtime call fails.
// Fix: the original format string "Error: %s:%d: " had no conversion for
// the cudaGetErrorString() argument, so the error text was silently
// dropped; it also lacked a newline.
#define CHECK(cmnd) { \
    cudaError_t ierr = cmnd; \
    if (ierr != cudaSuccess) { \
        printf("Error: %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(ierr)); \
        exit(ierr); \
    } \
}
// Fill arr[0..n) with pseudo-random floats in [0, 25.5] (a random byte
// divided by 10). Seeds rand() from the wall clock, so values differ run
// to run.
void initData(float * arr, const int n) {
    time_t now;
    srand((unsigned int) time(&now));
    for (int i = 0; i < n; ++i) {
        arr[i] = (float)(rand() & 0xFF) / 10.0;
    }
}
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double cpuSecond() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    double micros  = (double)now.tv_usec * 1.0e-6;
    return seconds + micros;
}
// Reference implementation: c[k] = a[k] + b[k] for k in [0, n).
void Add_on_host(const float * a, const float * b, float * c, const int n) {
    int k = 0;
    while (k < n) {
        c[k] = a[k] + b[k];
        ++k;
    }
}
// Device vector addition: c[k] = a[k] + b[k]; threads past n do nothing.
__global__ void Add_on_device(const float * a, const float * b, float * c, const int n) {
    size_t k = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= n) return;
    c[k] = a[k] + b[k];
}
// Compare two float arrays element-wise; print the first mismatch whose
// absolute difference exceeds epsilon, or a success message if none.
void check_result(const float *a, const float *b, const int n){
    const double epsilon = 1.0e-8;
    bool match = 1;
    for (int k = 0; k < n; k++){
        double diff = abs(a[k] - b[k]);
        if (diff > epsilon){
            match = 0;
            printf("Error: check_result: diff=%16.12f at k=%d\n", diff, k);
            break;
        }
    }
    if (match) printf("Success: all elements match better than epsilon=%16.12f\n", epsilon);
}
// Host driver: benchmark vector addition on host vs. device and report a
// timing breakdown (H2D copy, kernel, D2H copy).
// Fixes vs. original: (1) two memset() calls zeroed the freshly randomized
// inputs, so both sides were adding all-zero vectors; (2) dev_prop.name
// was printed without ever calling cudaGetDeviceProperties(), reading
// uninitialized memory. Leftover commented-out code was removed.
int main(int argc, char ** argv) {
    printf("Info: Starting %s ... \n", argv[0]);
    // problem sizes and kernel configs
    const int n_elem = 1 << 24;
    const size_t n_byte = n_elem * sizeof(float);
    const int tpb_x = 128;
    dim3 tpb(tpb_x, 1, 1);
    dim3 nblocks((n_elem + tpb_x - 1) / tpb_x, 1, 1);  // ceil-div grid
    // timing accumulators
    double t0, dt_host, dt_gpu, dt_h2d, dt_kern, dt_d2h;
    // addition on host (reference result)
    t0 = cpuSecond();
    float *h_a, *h_b, *h_ref;
    h_a = (float *)malloc(n_byte);
    h_b = (float *)malloc(n_byte);
    h_ref = (float *)malloc(n_byte); // reference result from host
    initData(h_a, n_elem);
    initData(h_b, n_elem);
    Add_on_host(h_a, h_b, h_ref, n_elem);
    dt_host = cpuSecond() - t0;
    // device addition
    const int dev = 0;
    cudaDeviceProp dev_prop;
    CHECK(cudaSetDevice(dev));
    CHECK(cudaGetDeviceProperties(&dev_prop, dev));
    printf("Info: device #%d is: %s\n", dev, dev_prop.name);
    t0 = cpuSecond();
    float *d_a, *d_b, *d_c;
    CHECK(cudaMalloc((float **)&d_a, n_byte));
    CHECK(cudaMalloc((float **)&d_b, n_byte));
    CHECK(cudaMalloc((float **)&d_c, n_byte));
    CHECK(cudaMemcpy(d_a, h_a, n_byte, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_b, h_b, n_byte, cudaMemcpyHostToDevice));
    dt_h2d = cpuSecond() - t0;
    // Kernel launch
    t0 = cpuSecond();
    Add_on_device<<<nblocks, tpb>>>(d_a, d_b, d_c, n_elem);
    CHECK(cudaDeviceSynchronize());
    dt_kern = cpuSecond() - t0;
    float * h_res;
    h_res = (float *)malloc(n_byte);
    t0 = cpuSecond();
    CHECK(cudaMemcpy(h_res, d_c, n_byte, cudaMemcpyDeviceToHost));
    dt_d2h = cpuSecond() - t0;
    check_result(h_ref, h_res, n_elem);
    dt_gpu = dt_h2d + dt_kern + dt_d2h;
    printf("\n%s\n", "Timing results ...");
    printf("dt_host: %12.8f (sec)\n", dt_host);
    printf("dt_h2d: %12.8f (sec)\n", dt_h2d);
    printf("dt_kern: %12.8f (sec)\n", dt_kern);
    printf("dt_d2h: %12.8f (sec)\n", dt_d2h);
    printf("dt_gpu: %12.8f (sec)\n", dt_gpu);
    printf("dt_host / dt_gpu = %6.2f \n", dt_host / dt_gpu);
    printf("\n");
    // Free up the memory on host and device
    free(h_a); free(h_b); free(h_ref); free(h_res);
    CHECK(cudaFree(d_a)); CHECK(cudaFree(d_b)); CHECK(cudaFree(d_c));
    return 0;
}
|
2,043
|
#include "includes.h"
// Element-wise cube: d_out[i] = d_in[i]^3. Indexing uses only threadIdx.x,
// so a single-block launch is assumed.
__global__ void cube(float * d_out, float * d_in){
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v * v;
}
|
2,044
|
#include <stdio.h>
#define T 16 // As Threads
#define array_size 64
// Even indices get C[i] = A[i] + B[i]; odd indices get C[i] = A[i] - B[i].
// (Name kept for interface compatibility; no multiply or reverse occurs.)
__global__ void vecMultiplyReverse(int *A, int *B, int *C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = (i % 2 == 0) ? (A[i] + B[i]) : (A[i] - B[i]);
}
// Host driver: build two T-element vectors, combine them on the GPU
// (add at even indices, subtract at odd), and print inputs and result.
// Fixes vs. original: (1) the grid was array_size/T - 1 = 3 blocks of T
// threads, so 48 threads wrote the 16-element device arrays out of bounds;
// one block of T threads covers exactly the data. (2) The uninitialized
// host array c was copied to the device for no reason — removed.
int main (int argc, char *argv[])
{
    int i;
    int size = T*sizeof(int);
    int a[T],b[T],c[T], *devA,*devB,*devC;
    for (i=0; i< T; i++)
    {
        a[i] = i + 2;
        b[i] = i + 1;
    }
    cudaMalloc( (void**)&devA,size);
    cudaMalloc( (void**)&devB,size);
    cudaMalloc( (void**)&devC,size);
    cudaMemcpy( devA, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy( devB, b, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(T);
    dim3 dimGrid(1);
    vecMultiplyReverse<<<dimGrid,dimBlock>>>(devA,devB,devC);
    printf("Before A: \n");
    for (i=0; i< T; i++)
    {
        printf("%d ", a[i]);
    }
    printf("\n");
    printf("Before B: \n");
    for (i=0; i< T; i++)
    {
        printf("%d ", b[i]);
    }
    printf("\n");
    cudaMemcpy(a, devA, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, devB, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(c, devC, size, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    printf("After\n");
    for (i=0; i < T; i++)
    {
        printf("%d ",c[i]);
    }
    printf("\n");
    return 0;
}
|
2,045
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <math_constants.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <cstdlib>
#define BINNUM 9
#define CELLSIZE 6
#define CELLBLOCKSIZE 3
#define STEPSIZE 3
#define K 5
#define KERNEL_WIDTH 31
// Per-pixel gradient magnitude (outputGr) and orientation bin (outputTag,
// values 1..BINNUM) for a width x height image; one thread per pixel.
// Central differences in the interior, one-sided differences on the edge
// rows/columns, and the cross-derivative is zeroed on the perpendicular
// border.
__global__ void computeGradient(float* outputGr,int* outputTag,float* input,unsigned int width,unsigned int height)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    float gradientX, gradientY;
    float angle;
    int nAngle;
    int indTag;
    if(tx < width && ty < height)
    {
        if(tx == 0) //image boundary: forward difference
            gradientX = input[tx+1 + ty*width] - input[tx + ty*width];
        else if(tx == width-1) // backward difference
            gradientX = input[tx + ty*width] - input[tx-1 + ty*width];
        else if(ty == 0 || ty == height-1)
            gradientX = 0;
        else // interior: central difference
            gradientX = input[tx+1 + ty*width] - input[tx-1 + ty*width];
        if(ty == 0) //image boundary: forward difference
            gradientY = input[tx + (ty+1)*width] - input[tx + (ty)*width];
        else if (ty == height-1) // backward difference
            gradientY = input[tx + (ty)*width] - input[tx + (ty-1)*width];
        else if (tx ==0 || tx == width-1)
            gradientY = 0;
        else // interior: central difference
            gradientY = input[tx + (ty+1)*width] - input[tx + (ty-1)*width];
        outputGr[tx + ty*width] = sqrt(gradientX*gradientX + gradientY*gradientY);
        //outputGr[tx + ty*width] = gradientX;
        // avoid division by zero in the orientation computation
        if(gradientX == 0)
            gradientX = 1e-5;
        //angle = ((atan(gradientY/gradientX)));
        // orientation in degrees, shifted from (-90, 90) into (0, 180)
        angle = ((atan(gradientY/gradientX)+(CUDART_PI_F/2))*180)/CUDART_PI_F;
        nAngle = 180/BINNUM;  // degrees per bin
        indTag = ceil(angle/nAngle);
        // clamp the bin index into 1..BINNUM
        if(indTag == 0)
            indTag=1;
        else if(indTag==10)
            indTag=9;
        outputTag[tx + ty*width] = indTag;
    }
    //outputX[tx + ty*width] = gradientX;
    //outputY[tx + ty*width] = gradientY;
}
// Compute one L2-normalized HOG block descriptor per (tx, ty) step
// position: CELLBLOCKSIZE x CELLBLOCKSIZE cells of CELLSIZE x CELLSIZE
// pixels with BINNUM orientation bins each. Also outputs the descriptor's
// centre pixel position, its offset to the image centre, and a weight
// equal to the fraction of non-empty bins. One thread per descriptor;
// outputs are stored column-major, indexed by (ty + tx*yStepNum).
__global__ void computeBinHOG(float* outputHOGFeature,float* outputWeight,int *outputPosition,int* outputOffset,float* Gr,int* Tag,unsigned int xStepNum,unsigned int yStepNum,
        unsigned int width,unsigned int height)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    int centerX = floor(float(width)/2.0+0.5) - 1;
    int centerY = floor(float(height)/2.0+0.5) - 1;
    if(tx < xStepNum && ty < yStepNum)
    {
        //pixel position of center
        int x = tx*STEPSIZE+1.5*CELLSIZE-1;
        int y = ty*STEPSIZE+1.5*CELLSIZE-1;
        int leftupperX = x - 1.5*CELLSIZE +1 ;
        int leftupperY = y - 1.5*CELLSIZE +1 ;
        //__shared__ float tmp[CELLSIZE][CELLSIZE];
        // per-cell orientation histograms, accumulated in registers/local
        float tmp[CELLBLOCKSIZE*CELLBLOCKSIZE][BINNUM] = {0.0f};
        int inc = 0;
        // accumulate gradient magnitude into the bin given by Tag for
        // every pixel of every cell in the block
        for(int i = 0; i < CELLBLOCKSIZE;i++)
        {
            for(int j=0; j<CELLBLOCKSIZE;j++)
            {
                int indX = leftupperX + j*CELLSIZE;
                int indY = leftupperY + i*CELLSIZE;
                for(int p=0;p<CELLSIZE;p++)
                {
                    for(int q=0;q<CELLSIZE;q++)
                    {
                        //int currendindX = indX+p;
                        //int currendindY = indY+q;
                        int binind = Tag[indX+q + width*(indY+p)]-1;
                        float tmpdebug = Gr[indX+q + width*(indY+p)];
                        tmp[inc][binind] +=tmpdebug;
                        //tmp[inc][binind] += Gr[indX+q + width*(indY+p)];
                    }
                }
                inc++;
            }
        }
        // L2 norm over all bins, and count of non-empty bins for the weight
        float norm = 0.0f;
        int nonempty = 0;
        for(int p=0;p<CELLBLOCKSIZE*CELLBLOCKSIZE;p++)
        {
            for(int q=0;q<BINNUM;q++)
            {
                norm+= (tmp[p][q]*tmp[p][q]);
                if(tmp[p][q]>0)
                    nonempty ++;
            }
        }
        norm = sqrt(norm);
        norm +=1e-5;  // guard against division by zero
        //if(norm <= 0.0f)
        //	norm = 1e-5;
        //float acc = 0.0;
        // write the normalized descriptor (column-major descriptor index)
        for(int p=0;p<CELLBLOCKSIZE*CELLBLOCKSIZE;p++)
        {
            for(int q=0;q<BINNUM;q++)
            {
                //outputHOGFeature[ (tx+ty*xStepNum)*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM +(q+p*BINNUM)] = tmp[p][q]/norm;
                outputHOGFeature[ (ty+tx*yStepNum)*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM +(q+p*BINNUM)] = tmp[p][q]/norm;
                //acc +=tmp[p][q]/norm;
            }
        }
        /*outputPosition[(tx + ty*xStepNum)*2] = x;
        outputPosition[(tx + ty*xStepNum)*2 + 1] = y;
        outputOffset[(tx + ty*xStepNum)*2] = centerX - x;
        outputOffset[(tx + ty*xStepNum)*2 + 1] = centerY - y;
        outputWeight[tx + ty*xStepNum] = float(nonempty)/81.0f;*/
        outputPosition[(ty+tx*yStepNum)*2] = x;
        outputPosition[(ty+tx*yStepNum)*2 + 1] = y;
        outputOffset[(ty+tx*yStepNum)*2] = centerX - x;
        outputOffset[(ty+tx*yStepNum)*2 + 1] = centerY - y;
        outputWeight[(ty+tx*yStepNum)] = float(nonempty)/81.0f;
        //outputWeight[(ty+tx*yStepNum)] = float(acc)/1.0f;
    }
}
// Target-image variant of computeBinHOG: computes the same L2-normalized
// HOG block descriptor and its centre position per (tx, ty) step position,
// but no offsets or weights. One thread per descriptor; outputs stored
// column-major, indexed by (ty + tx*yStepNum).
__global__ void computeBinHOGTar(float* outputHOGFeature,int *outputPosition,float* Gr,int* Tag,unsigned int xStepNum,unsigned int yStepNum,
        unsigned int width,unsigned int height)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    if(tx < xStepNum && ty < yStepNum)
    {
        //pixel position of center
        int x = tx*STEPSIZE+1.5*CELLSIZE-1;
        int y = ty*STEPSIZE+1.5*CELLSIZE-1;
        int leftupperX = x - 1.5*CELLSIZE +1 ;
        int leftupperY = y - 1.5*CELLSIZE +1 ;
        //__shared__ float tmp[CELLSIZE][CELLSIZE];
        // per-cell orientation histograms
        float tmp[CELLBLOCKSIZE*CELLBLOCKSIZE][BINNUM] = {0.0f};
        int inc = 0;
        // accumulate gradient magnitude into the orientation bin for every
        // pixel of every cell in the block
        for(int i = 0; i < CELLBLOCKSIZE;i++)
        {
            for(int j=0; j<CELLBLOCKSIZE;j++)
            {
                int indX = leftupperX + j*CELLSIZE;
                int indY = leftupperY + i*CELLSIZE;
                for(int p=0;p<CELLSIZE;p++)
                {
                    for(int q=0;q<CELLSIZE;q++)
                    {
                        //int currendindX = indX+p;
                        //int currendindY = indY+q;
                        int binind = Tag[indX+q + width*(indY+p)]-1;
                        tmp[inc][binind] += Gr[indX+q + width*(indY+p)];
                    }
                }
                inc++;
            }
        }
        // L2 norm over all bins (nonempty is counted but unused here)
        float norm = 0.0f;
        int nonempty = 0;
        for(int p=0;p<CELLBLOCKSIZE*CELLBLOCKSIZE;p++)
        {
            for(int q=0;q<BINNUM;q++)
            {
                norm+= (tmp[p][q]*tmp[p][q]);
                if(tmp[p][q]>0)
                    nonempty ++;
            }
        }
        norm = sqrt(norm);
        norm +=1e-5;  // guard against division by zero
        //if(norm <= 0.0f)
        //	norm = 1e-5;
        for(int p=0;p<CELLBLOCKSIZE*CELLBLOCKSIZE;p++)
        {
            for(int q=0;q<BINNUM;q++)
            {
                //outputHOGFeature[ (tx+ty*xStepNum)*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM +(q+p*BINNUM)] = tmp[p][q]/norm;
                outputHOGFeature[ (ty+tx*yStepNum)*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM +(q+p*BINNUM)] = tmp[p][q]/norm;
            }
        }
        //outputPosition[(tx + ty*xStepNum)*2] = x;
        //outputPosition[(tx + ty*xStepNum)*2 + 1] = y;
        outputPosition[(ty+tx*yStepNum)*2] = x;
        outputPosition[(ty+tx*yStepNum)*2 + 1] = y;
    }
}
// Chi-square-style distance between every pair of HOG descriptors:
// output[tx + ty*width] = 0.5 * sum_i (h_tx[i] - t_ty[i])^2 /
// (h_tx[i] + t_ty[i] + 1e-5), where h is descriptor tx of `hog` and t is
// descriptor ty of `hogTar`. One thread per (tx, ty) pair; descriptors are
// CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM floats each.
__global__ void computeDistance(float* output,float* hog,float* hogTar,unsigned int width,unsigned int height)
{
    /*__shared__ float tmp[2][81];
    unsigned int bx = blockIdx.x * 1;
    unsigned int by = blockIdx.y * 1;
    unsigned int tx = threadIdx.x * 1;
    if(tx < 81)
    {
        tmp[0][tx] = hog[tx + bx*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM];
        tmp[1][tx] = hogTar[tx + by*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM];
    }
    __syncthreads();
    float perDistance=0.0;
    float tmp1 = 0.0;
    float tmp2 = 0.0;
    for(int i=0;i<81;i++)
    {
        tmp1 = tmp[0][i];
        tmp2 = tmp[1][i];
        perDistance += ((tmp1-tmp2)*(tmp1-tmp2))/(tmp1 + tmp2 + 1e-5);
    }
    output[bx + by*width] = 0.5 * perDistance;*/
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    if(tx < width && ty < height)
    {
        float perDistance = 0.0;
        //float tmp[CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM]= {0.0f};
        //float tmpTar[CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM]= {0.0f};
        float tmp = 0.0;
        float tmpTar = 0.0;
        //int i=0;
        // accumulate the per-bin chi-square terms over the descriptor
        for(int i =0;i<CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM;i++)
        {
            tmp = hog[i + tx*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM];
            tmpTar = hogTar[i + ty*CELLBLOCKSIZE*CELLBLOCKSIZE*BINNUM];
            //float tmp = hog[i*(width) + tx];
            //float tmpTar = hogTar[i*height + ty];
            perDistance = perDistance + ((tmp-tmpTar)*(tmp-tmpTar))/(tmp + tmpTar + 1e-5);
            //perDistance=1;
        }
        output[tx + ty*width] = 0.5 * perDistance;
        //output[ty + tx*height] = 0.5 * perDistance;
    }
}
// Generalized-Hough-style voting: for each (feature tx, sample ty) pair
// whose HOG distance is below a fixed threshold (2), cast a vote weighted
// by 1/(distance + 1e-5 + 1) * weight[tx] at the predicted object centre
// (sample position + feature offset) in the vote matrix.
// NOTE(review): the `+=` on OutputVoteMatrix is not atomic — threads whose
// votes land on the same cell can race and lose votes; confirm whether
// atomicAdd is needed here.
__global__ void vote(float *OutputVoteMatrix,float *HOGDistance, float *weight,int *offset,int *samplepixel,
        unsigned int width,unsigned int height,unsigned int voteWidth,unsigned int voteHeight)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    if(tx < width && ty < height)
    {
        //if (HOGDistance[tx + ty*width] <= threshold[ty])
        if (HOGDistance[tx + ty*width] <= 2)//
        {
            // sample position and candidate offset are stored as (x, y) pairs
            int x = samplepixel[ty*2];
            int y = samplepixel[ty*2+1];
            int offsetx = offset[tx*2];
            int offsety = offset[tx*2+1];
            int centerx = x + offsetx;
            int centery = y + offsety;
            // only vote for centres inside the vote matrix
            if(centerx >= 0 && centery >= 0 && centerx < voteWidth && centery< voteHeight)
            {
                OutputVoteMatrix[centerx + centery*voteWidth] += 1/(HOGDistance[tx + ty*width]+1e-5 + 1) * weight[tx];
            }
        }
    }
}
// Build a normalized 2-D Gaussian kernel (sigma = 5, centred at (15, 15))
// in shared memory and write it to `output` (KERNEL_WIDTH x KERNEL_WIDTH,
// row-major). Assumes a single block of exactly KERNEL_WIDTH x
// KERNEL_WIDTH threads; after the barrier every thread redundantly sums
// all weights to obtain the normalization factor.
__global__ void genGaussianFilter(float* output)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    __shared__ float gaussianKernel[KERNEL_WIDTH][KERNEL_WIDTH];
    float sigma = 5.0;
    float gaussianSum = 0.0;
    // 2.71828 approximates e. The unsigned wraparound in (tx-15) for
    // tx < 15 is harmless: the product is taken mod 2^32, which yields
    // exactly (15-tx)^2.
    gaussianKernel[ty][tx] = powf(2.71828,-((tx-15)*(tx-15)/(2*sigma*sigma) + (ty-15)*(ty-15)/(2*sigma*sigma)));
    __syncthreads();
    for(int i=0;i<KERNEL_WIDTH;i++)
        for(int j=0;j<KERNEL_WIDTH;j++)
        {
            gaussianSum += gaussianKernel[i][j];
        }
    output[ty*KERNEL_WIDTH + tx] = gaussianKernel[ty][tx]/gaussianSum;
}
// Convolve `input` (width x height, row-major) with the KERNEL_WIDTH x
// KERNEL_WIDTH kernel centred on each pixel; one thread per output pixel.
// Fixes vs. original: the output store sat inside the row loop (it wrote
// the pixel KERNEL_WIDTH times; the final value was right but the extra
// stores were wasted work), and the bounds test used `ind > 0`, wrongly
// excluding linear index 0.
// NOTE(review): the 1-D bounds check lets kernel taps wrap across row
// boundaries at the left/right image edges — confirm whether 2-D clamping
// is intended.
__global__ void gaussianFilter(float* output,float* input, float* gaussianKernel,unsigned int width,unsigned int height)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    if(tx < width && ty < height)
    {
        float accum = 0.0;
        float offset = 15;  // kernel half-width (KERNEL_WIDTH / 2)
        for (int i = 0; i < KERNEL_WIDTH; ++i)
        {
            for (int j = 0; j < KERNEL_WIDTH; ++j)
            {
                int ind = tx+j-offset + (ty+i-offset) * width;
                if(ind >= 0 && ind < width*height)
                    accum += gaussianKernel[j + KERNEL_WIDTH*i] * input[ind];
            }
        }
        output[tx + ty*width] = accum;
    }
}
// Mark strict local maxima of `input` that exceed 0.8 * maximal, using the
// 8-neighbourhood; interior pixels only (the one-pixel border is skipped).
// For a maximum at (tx, ty) the output cell receives (tx+1) + ty*width.
// NOTE(review): the stored value looks like a 1-based linear index of the
// peak — confirm the +1 offset is what the consumer expects.
__global__ void detectMaximal(float* output,float* input,unsigned int width,unsigned int height,float maximal)
{
    unsigned int tx = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y*blockDim.y + threadIdx.y;
    if(tx < width-1 && ty < height-1 && tx >0 && ty>0)
    {
        // strictly greater than all 8 neighbours and above the 0.8*max bar
        if (input[tx + ty*width] > 0.8*maximal && input[tx + ty*width] > input[tx-1 + (ty-1)*width] &&
                input[tx + ty*width] > input[tx + (ty-1)*width] && input[tx + ty*width] > input[tx+1 + (ty-1)*width] &&
                input[tx + ty*width] > input[tx-1 + (ty)*width] && input[tx + ty*width] > input[tx+1 + (ty)*width] &&
                input[tx + ty*width] > input[tx-1 + (ty+1)*width] && input[tx + ty*width] > input[tx + (ty+1)*width] &&
                input[tx + ty*width] > input[tx+1 + (ty+1)*width])
        {
            output[tx + ty*width] = (tx+1) + (ty)*width;
        }
    }
}
// Block-wide tree reduction computing the MINIMUM of blockDim.x inputs
// (despite the name) into shared memory, then storing it to output[0].
// NOTE(review): every thread stores output[0] and there is no final
// __syncthreads() after the last combine step, so a thread may write a
// stale partial value; also all blocks target the same output[0] —
// confirm this is only ever launched with a single block.
__global__ void computeThreshold(float* output,float* input)
{
    unsigned int tx = threadIdx.x;
    unsigned int bx = blockIdx.x;
    __shared__ float partialMin[500];
    partialMin[tx] = input[tx + bx*blockDim.x];
    unsigned int stride = 1;
    // pairwise min with doubling stride; barrier before each combine
    for (stride = 1; stride < blockDim.x; stride *= 2)
    {
        __syncthreads();
        if (tx % (2 * stride) == 0 && tx + stride < blockDim.x)
            partialMin[tx] = partialMin[tx] < partialMin[tx + stride] ? partialMin[tx]:partialMin[tx + stride];
    }
    /*unsigned int stridenew = stride/2;
    float kmax = partialMin[0];
    for(int i=0;i<blockDim.x;i+=stridenew)
    {
        if(kmax < partialMin[i])
            kmax = partialMin[i];
    }*/
    output[0] = partialMin[0];
}
// Helper: record `stop` on the default stream, wait for it, and return the
// milliseconds elapsed since `start` (all phases are timed cumulatively from
// `start`, exactly as the original's 15 separate stop events did).
static float hogElapsedMs(cudaEvent_t start, cudaEvent_t stop)
{
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float ms = 0.0f;
	cudaEventElapsedTime(&ms, start, stop);
	return ms;
}
// HOG-based template detection: computes HOG descriptors for a reference and a
// target image, matches them, accumulates Hough-style votes for object centres
// in the target, Gaussian-smooths the vote map and returns the local maxima.
// Returns a heap-allocated array: ret[0] = detection count, ret[1..] = encoded
// positions (caller owns the memory).
extern "C" float* computeHOG(float *img,float* imgTar,unsigned int width,unsigned int height,unsigned int widthTar,unsigned int heightTar)
{
	// Timing: one start event and a single reusable stop event.
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// Number of sliding-window steps in each direction for both images.
	int xStepNum = floor((width-CELLSIZE*CELLBLOCKSIZE)/float(STEPSIZE));
	int yStepNum = floor((height-CELLSIZE*CELLBLOCKSIZE)/float(STEPSIZE));
	int xStepNumTar = floor((widthTar-CELLSIZE*CELLBLOCKSIZE)/float(STEPSIZE));
	int yStepNumTar = floor((heightTar-CELLSIZE*CELLBLOCKSIZE)/float(STEPSIZE));
	// Device buffers (all freed at the end — the original leaked every one).
	float *Gr;
	cudaMalloc((void**)&Gr , width*height*sizeof(float));
	int *Tag;
	cudaMalloc((void**)&Tag , width*height*sizeof(int));
	float *HOGFeature;
	cudaMalloc((void**)&HOGFeature , xStepNum*yStepNum*BINNUM*CELLBLOCKSIZE*CELLBLOCKSIZE*sizeof(float));
	cudaMemset(HOGFeature,0,xStepNum*yStepNum*BINNUM*CELLBLOCKSIZE*CELLBLOCKSIZE*sizeof(float));
	int *centerPosition;
	// FIX: original line contained mojibake "¢erPosition" for "&centerPosition".
	cudaMalloc((void**)&centerPosition , 2*xStepNum*yStepNum*sizeof(int));
	int *offset;
	cudaMalloc((void**)&offset , 2*xStepNum*yStepNum*sizeof(int));
	float *weight;
	cudaMalloc((void**)&weight , xStepNum*yStepNum*sizeof(float));
	float *GrTar;
	cudaMalloc((void**)&GrTar , widthTar*heightTar*sizeof(float));
	int *TagTar;
	cudaMalloc((void**)&TagTar , widthTar*heightTar*sizeof(int));
	int *samplepixel;
	cudaMalloc((void**)&samplepixel , 2*xStepNum*yStepNum*sizeof(int));
	float *HOGFeatureTar;
	cudaMalloc((void**)&HOGFeatureTar , xStepNumTar*yStepNumTar*BINNUM*CELLBLOCKSIZE*CELLBLOCKSIZE*sizeof(float));
	cudaMemset(HOGFeatureTar,0,xStepNumTar*yStepNumTar*BINNUM*CELLBLOCKSIZE*CELLBLOCKSIZE*sizeof(float));
	float *HOGDistance;
	cudaMalloc((void**)&HOGDistance , xStepNumTar*yStepNumTar*xStepNum*yStepNum*sizeof(float));
	cudaMemset(HOGDistance,0,xStepNumTar*yStepNumTar*xStepNum*yStepNum*sizeof(float));
	float *votematrix;
	cudaMalloc((void**)&votematrix , widthTar*heightTar* sizeof(float));
	cudaMemset(votematrix,0,widthTar*heightTar*sizeof(float));
	float *gaussianKernel_dev;
	cudaMalloc((void**)&gaussianKernel_dev , KERNEL_WIDTH*KERNEL_WIDTH* sizeof(float));
	cudaMemset(gaussianKernel_dev,0,KERNEL_WIDTH*KERNEL_WIDTH*sizeof(float));
	float *votematrix_smoothed;
	cudaMalloc((void**)&votematrix_smoothed , widthTar*heightTar* sizeof(float));
	cudaMemset(votematrix_smoothed,0,widthTar*heightTar*sizeof(float));
	float *vote_maxposition;
	cudaMalloc((void**)&vote_maxposition , widthTar*heightTar* sizeof(float));
	cudaMemset(vote_maxposition,0,widthTar*heightTar*sizeof(float));
	printf("Initialization :%f ms\n", hogElapsedMs(start, stop));
	// Gradient magnitude/orientation for the reference image.
	dim3 dimBlock(16,16,1);
	dim3 dimGrid(width/dimBlock.x + 1, height/dimBlock.y + 1,1);
	computeGradient<<<dimGrid,dimBlock>>>(Gr,Tag,img,width,height);
	printf("Gradient Computation for Ref Image :%f ms\n", hogElapsedMs(start, stop));
	// HOG descriptors for every window of the reference image.
	dimBlock = dim3(16,16,1);
	dimGrid = dim3(xStepNum/dimBlock.x + 1, yStepNum/dimBlock.y + 1,1);
	computeBinHOG<<<dimGrid,dimBlock>>>(HOGFeature,weight,centerPosition,offset,Gr,Tag,xStepNum,yStepNum,width,height);
	printf("HOG Computation for Ref Image :%f ms\n", hogElapsedMs(start, stop));
	// Gradient magnitude/orientation for the target image.
	dimBlock = dim3(16,16,1);
	dimGrid = dim3(widthTar/dimBlock.x + 1, heightTar/dimBlock.y + 1,1);
	computeGradient<<<dimGrid,dimBlock>>>(GrTar,TagTar,imgTar,widthTar,heightTar);
	printf("Gradient Computation for Target Image :%f ms\n", hogElapsedMs(start, stop));
	// HOG descriptors for every window of the target image.
	dimBlock = dim3(16,16,1);
	dimGrid = dim3(xStepNumTar/dimBlock.x + 1, yStepNumTar/dimBlock.y + 1,1);
	computeBinHOGTar<<<dimGrid,dimBlock>>>(HOGFeatureTar,samplepixel,GrTar,TagTar,xStepNumTar,yStepNumTar,widthTar,heightTar);
	printf("HOG Computation for Target Image :%f ms\n", hogElapsedMs(start, stop));
	// All-pairs distance matrix between reference and target descriptors.
	dimBlock = dim3(16,16,1);
	dimGrid = dim3((xStepNum*yStepNum)/dimBlock.x + 1, (xStepNumTar*yStepNumTar)/dimBlock.y + 1,1);
	computeDistance<<<dimGrid,dimBlock>>>(HOGDistance,HOGFeature,HOGFeatureTar,xStepNum*yStepNum,xStepNumTar*yStepNumTar);
	printf("Distance Matrix Computation:%f ms\n", hogElapsedMs(start, stop));
	// Hough-style voting for candidate object centres in the target image.
	dimBlock = dim3(16,16,1);
	dimGrid = dim3((xStepNum*yStepNum)/dimBlock.x + 1, (xStepNumTar*yStepNumTar)/dimBlock.y + 1,1);
	vote<<<dimGrid,dimBlock>>>(votematrix,HOGDistance,weight,offset,samplepixel,xStepNum*yStepNum,xStepNumTar*yStepNumTar,widthTar,heightTar);
	printf("Threshold and Voting:%f ms\n", hogElapsedMs(start, stop));
	// Build a normalized Gaussian kernel on the device, then blur the vote map.
	dimBlock = dim3(KERNEL_WIDTH,KERNEL_WIDTH,1);
	dimGrid = dim3(1,1,1);
	genGaussianFilter<<<dimGrid,dimBlock>>>(gaussianKernel_dev);
	printf("Gaussian Kernel Generation:%f ms\n", hogElapsedMs(start, stop));
	dimBlock = dim3(16,16,1);
	dimGrid = dim3(widthTar/dimBlock.x + 1, heightTar/dimBlock.y + 1,1);
	gaussianFilter<<<dimGrid,dimBlock>>>(votematrix_smoothed,votematrix, gaussianKernel_dev,widthTar,heightTar);
	printf("Gaussian Blur:%f ms\n", hogElapsedMs(start, stop));
	// Global maximum of the smoothed vote map (sort ascending, take the last).
	float *votematrix_smoothed_sorted = new float[widthTar*heightTar];
	cudaMemcpy(votematrix_smoothed_sorted,votematrix_smoothed, widthTar*heightTar*sizeof(float), cudaMemcpyDeviceToHost);
	thrust::sort(votematrix_smoothed_sorted, votematrix_smoothed_sorted +widthTar*heightTar);
	float maxvalue = votematrix_smoothed_sorted[widthTar*heightTar-1];
	printf("Maximal Voting Value Computation:%f ms\n", hogElapsedMs(start, stop));
	// Local maxima above 0.8*max become encoded detection positions.
	dimBlock = dim3(16,16,1);
	dimGrid = dim3(widthTar/dimBlock.x + 1, heightTar/dimBlock.y + 1,1);
	detectMaximal<<<dimGrid,dimBlock>>>(vote_maxposition,votematrix_smoothed,widthTar,heightTar,maxvalue);
	printf("Local Maximal Detection:%f ms\n", hogElapsedMs(start, stop));
	// Sort the position map; nonzero entries (real detections) end up at the tail.
	float *vote_maxposition_sorted = new float[widthTar*heightTar];
	cudaMemcpy(vote_maxposition_sorted,vote_maxposition, widthTar*heightTar*sizeof(float), cudaMemcpyDeviceToHost);
	thrust::sort(vote_maxposition_sorted, vote_maxposition_sorted +widthTar*heightTar);
	int centernum = 0;
	for(int i=widthTar*heightTar-1;i>=0;i--)
	{
		if(vote_maxposition_sorted[i] > 0)
			centernum++;
		else
			break;
	}
	printf("Transfering Detected Logo Position to HOST:%f ms\n", hogElapsedMs(start, stop));
	// Result layout: ret[0] = number of detections, ret[1..] = encoded positions.
	float *ret = new float[centernum+1];
	ret[0] = float(centernum);
	cudaMemcpy(ret+1,vote_maxposition_sorted + widthTar*heightTar - centernum, centernum*sizeof(float), cudaMemcpyHostToHost);
	// FIX: release every device buffer, the host scratch arrays and the events
	// (the original returned without freeing any of them).
	delete[] votematrix_smoothed_sorted;
	delete[] vote_maxposition_sorted;
	cudaFree(Gr);
	cudaFree(Tag);
	cudaFree(HOGFeature);
	cudaFree(centerPosition);
	cudaFree(offset);
	cudaFree(weight);
	cudaFree(GrTar);
	cudaFree(TagTar);
	cudaFree(samplepixel);
	cudaFree(HOGFeatureTar);
	cudaFree(HOGDistance);
	cudaFree(votematrix);
	cudaFree(gaussianKernel_dev);
	cudaFree(votematrix_smoothed);
	cudaFree(vote_maxposition);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ret;
}
|
2,046
|
#include "includes.h"
__global__ void sga_up_forward (const int n, const float *filters, const int height, const int width, const int depth, const int wsize, float *top_data){
// Semi-global aggregation, upward sweep: one thread per (image-row-stack,
// column) pair, iterating rows bottom-to-top and updating top_data IN PLACE.
// Each cell at (row, d) is replaced by a weighted mix of itself, the two rows
// below it at the same d, and the row below at d-1 / d+1, using the 5 filter
// planes stored at stride `step` in `filters`.
// NOTE(review): reads of top_data[location + width] pick up values already
// rewritten in the previous (row+1) iteration — the recurrence appears
// intentional (SGA-style aggregation); do not reorder the row loop.
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
// One 2D plane of the cost volume.
int step = height * width;
// int wsize=radius+1;
// base: start of this thread's column within the (depth, height, width) volume.
int base = index / width * step * depth + index % width; //up->down
// fbase: start of this thread's column within the (wsize, height, width) filters.
int fbase = index / width * step * wsize + index % width;
for (int row = height - 1; row >= 0; row--)
{
int shift = fbase + row * width;
for (int d = 0; d < depth; d++)
{
float temp = 0;
int location = base + d * step + row * width;
// Self term.
temp += top_data[location] * filters[shift];
// Row below, same disparity (falls back to self at the bottom edge).
if (row + 1 < height)
temp += top_data[location + width] * filters[shift + step];
else
temp += top_data[location] * filters[shift + step];
// Two rows below, same disparity (falls back to self near the edge).
if (row + 2 < height)
temp +=
top_data[location + 2 * width] * filters[shift + 2 * step];
else
temp += top_data[location] * filters[shift + 2 * step];
// Row below at d-1 (falls back to self at the disparity edge).
if (row + 1 < height && d - 1 >= 0)
temp +=
top_data[location + width - step] * filters[shift + 3 * step];
else
temp += top_data[location] * filters[shift + 3 * step];
// Row below at d+1 (falls back to self at the disparity edge).
if (row + 1 < height && d + 1 < depth)
temp +=
top_data[location + width + step] * filters[shift + 4 * step];
else
temp += top_data[location] * filters[shift + 4 * step];
top_data[location] = temp;
}
}
}
|
2,047
|
#include <stdio.h>
#include <stdlib.h>
// Write an array_length x array_length diagonal matrix into Dmatrix:
// matrix[i] on the diagonal, zero elsewhere.
void create_diagonal_matrix(float *Dmatrix, float matrix[3], int array_length)
{
	for(int row=0; row<array_length; row++)
	{
		for(int col=0; col<array_length; col++)
		{
			Dmatrix[col*array_length + row] = (row == col) ? matrix[row] : 0.0f;
		}
	}
}
int main()
{
	// Build a 3x3 diagonal matrix from {1,2,3} and print it row by row.
	float matrix[3] = {1,2,3};
	int array_length = sizeof(matrix)/sizeof(matrix[0]);
	float *Dmatrix;
	// FIX: the original allocated array_length*array_length BYTES (9 bytes for
	// 9 floats), causing a heap overflow; multiply by sizeof(float).
	Dmatrix = (float *)malloc(array_length*array_length*sizeof(float));
	if (Dmatrix == NULL)
	{
		printf("allocation failed\n");
		return 1;
	}
	create_diagonal_matrix(Dmatrix, matrix, array_length);
	for(int i=0; i<(array_length*array_length); i++)
	{
		printf("%f \t", Dmatrix[i]);
	}
	printf("\n");
	free(Dmatrix); // FIX: the original leaked the buffer
	return 0;
}
|
2,048
|
#include "includes.h"
__global__ void init_one_vec(float* d_one_vec, size_t length)
{
	// Fill d_one_vec[0..length) with 1.0f; one thread per element, with a
	// bounds guard for the tail block.
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < length)
		d_one_vec[idx] = 1.f;
}
|
2,049
|
#include "includes.h"
//Macros
#define min(a, b) ( (a)<(b)? (a): (b) )
#define max(a, b) ( (a)>(b)? (a): (b) )
//Constants
#define MAX_VECTOR_COUNT 5
//Vector structure
typedef struct {
float e[3];
}Vec3f;
//Global array
Vec3f vecArray[MAX_VECTOR_COUNT];
Vec3f newvecArray[MAX_VECTOR_COUNT];
//forward declarations
__global__ void reduce(Vec3f *input, Vec3f *output){
extern __shared__ Vec3f sdata[];
// each thread loadsome element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
sdata[tid] = input[i];
__syncthreads();
//perform reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2) {
//int s = 2;
if(tid % (2*s) == 0){
sdata[tid].e[0] += sdata[tid + s].e[0]; //summing
sdata[tid].e[1] += sdata[tid + s].e[1];
sdata[tid].e[2] += sdata[tid + s].e[2];
/*
sdata[tid].e[0] = min( sdata[tid].e[0], sdata[tid + s].e[0] ); //min
sdata[tid].e[1] = min( sdata[tid].e[1], sdata[tid + s].e[1] );
sdata[tid].e[2] = min( sdata[tid].e[2], sdata[tid + s].e[2] );
sdata[tid].e[0] = max( sdata[tid].e[0], sdata[tid + s].e[0] ); //max
sdata[tid].e[1] = max( sdata[tid].e[1], sdata[tid + s].e[1] );
sdata[tid].e[2] = max( sdata[tid].e[2], sdata[tid + s].e[2] );
*/
}
__syncthreads();
}
// write result for this block to global mem
if(tid == 0) output[blockIdx.x] = sdata[0];
}
|
2,050
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#define NUM_ELEMENTS 7
#define MAX_ELEMENTS_BLOCK 2048
struct Point
{
unsigned int X;
unsigned int Y;
unsigned int leftID; // counter-clockwise neighbor
unsigned int rightID; // clockwise neighbor
};
extern __shared__ Point hullData[];
cudaError_t convexHull(Point* h_data, int numPoints);
Point* h_data;
Point* d_data;
// Merge step of the divide-and-conquer convex hull (shared-memory variant).
// Walks currA along the left sub-hull and currB along the right sub-hull until
// neither endpoint can advance, i.e. (currA, currB) is an accepted bridge
// segment between the two sub-hulls.  Even-indexed threads walk via left/right
// neighbours, odd threads walk the mirrored directions, so an even/odd thread
// pair finds the two opposite bridges.  Operates on the block-shared
// `hullData` doubly-linked list; currA/currB are updated in place.
__device__ void findHull(int &currA, int &currB)
{
	int result;
	//int startIndex;
	// Remember the starting nodes so a full loop around a sub-hull terminates.
	int currAorig = currA;
	int currBorig = currB;
	Point c;
	bool isEven = (threadIdx.x % 2) == 0;
	// c = candidate point to test against the current (currA, currB) segment.
	if (isEven)
	{
		c = hullData[hullData[currA].leftID];
	}
	else
	{
		c = hullData[hullData[currA].rightID];
	}
	bool hullFound = false;
	while (!hullFound)
	{
		// 2D cross product of (currA->currB) x (currA->c); its sign tells
		// which side of the segment c lies on.
		// NOTE(review): X/Y are unsigned, so the subtractions wrap; the int
		// result is still correct as long as the true product fits in 32 bits.
		result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
		/*if (i == 1 && (idx == 49 || idx == 48))
			printf("idx: %d a: %d b: %d\n", idx, currA, currB);*/
		if (isEven)
		{
			// Advance currA while its neighbour is on the wrong side.
			if (result >= 0 && hullData[currA].leftID != currAorig)
			{
				currA = hullData[currA].leftID;
				c = hullData[hullData[currA].leftID];
			}
			else
			{
				// currA settled; try advancing currB instead.
				c = hullData[hullData[currB].rightID];
				//result = ((b.X - a.X)*(c.Y - a.Y) - (b.Y - a.Y)*(c.X - a.X));
				result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
				if (result >= 0 && hullData[currB].rightID != currBorig)
				{
					currB = hullData[currB].rightID;
					c = hullData[hullData[currA].leftID];
				}
				else
				{
					// Neither endpoint can move: bridge found.
					hullFound = true;
				}
			}
		}
		else
		{
			// Mirror of the even case, walking the opposite directions.
			if (result <= 0 && hullData[currA].rightID != currAorig)
			{
				currA = hullData[currA].rightID;
				c = hullData[hullData[currA].rightID];
			}
			else
			{
				c = hullData[hullData[currB].leftID];
				result = ((hullData[currB].X - hullData[currA].X)*(c.Y - hullData[currA].Y) - (hullData[currB].Y - hullData[currA].Y)*(c.X - hullData[currA].X));
				if (result <= 0 && hullData[currB].leftID != currBorig)
				{
					currB = hullData[currB].leftID;
					c = hullData[hullData[currA].rightID];
				}
				else
				{
					hullFound = true;
				}
			}
		}
	}
}
// Global-memory overload of findHull: identical bridge-walking logic to the
// shared-memory variant above, but operates on the `data` array in global
// memory (used by divideAndConquerBlocks to merge hulls that span blocks).
__device__ void findHull(int &currA, int &currB, Point* data)
{
	int result;
	//int startIndex;
	// Starting nodes, used as loop-termination sentinels.
	int currAorig = currA;
	int currBorig = currB;
	Point c;
	bool isEven = (threadIdx.x % 2) == 0;
	// c = candidate point to test against the current (currA, currB) segment.
	if (isEven)
	{
		c = data[data[currA].leftID];
	}
	else
	{
		c = data[data[currA].rightID];
	}
	//if (threadIdx.x == 0)
	//	printf("thread: %d\n currA: %d ( %d, %d ) currB: %d ( %d, %d ) c: ( %d, %d )\n", threadIdx.x, currA, data[currA].X, data[currA].Y, currB, data[currB].X, data[currB].Y, c.X, c.Y);
	/*if (threadIdx.x == 0 && blockIdx.x == 0)
	{
	int j = 0;
	int stop = j;
	int count = 0;
	//printf("%d\n", h_data[0].rightID);
	for (int i=0; i<50; i++)
	{
	if (j == stop && i != 0)
	break;
	printf("i: %d ( %d, %d )\nr: %d l: %d\n\n", i, data[j].X, data[j].Y, data[j].rightID, data[j].leftID);
	j = data[j].rightID;
	count++;
	}
	printf("\nCount: %d\n", count);
	}*/
	bool hullFound = false;
	while (!hullFound)
	{
		// Sign of the 2D cross product (currA->currB) x (currA->c).
		result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
		/*if (i == 1 && (idx == 49 || idx == 48))
			printf("idx: %d a: %d b: %d\n", idx, currA, currB);*/
		if (isEven)
		{
			// Advance currA while its left neighbour is on the wrong side.
			if (result >= 0 && data[currA].leftID != currAorig)
			{
				currA = data[currA].leftID;
				c = data[data[currA].leftID];
			}
			else
			{
				// currA settled; try advancing currB.
				c = data[data[currB].rightID];
				//result = ((b.X - a.X)*(c.Y - a.Y) - (b.Y - a.Y)*(c.X - a.X));
				result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
				if (result >= 0 && data[currB].rightID != currBorig)
				{
					currB = data[currB].rightID;
					c = data[data[currA].leftID];
				}
				else
				{
					// Neither endpoint can move: bridge found.
					hullFound = true;
				}
			}
		}
		else
		{
			// Mirror of the even case, walking the opposite directions.
			if (result <= 0 && data[currA].rightID != currAorig)
			{
				currA = data[currA].rightID;
				c = data[data[currA].rightID];
			}
			else
			{
				c = data[data[currB].leftID];
				result = ((data[currB].X - data[currA].X)*(c.Y - data[currA].Y) - (data[currB].Y - data[currA].Y)*(c.X - data[currA].X));
				if (result <= 0 && data[currB].leftID != currBorig)
				{
					currB = data[currB].leftID;
					c = data[data[currA].rightID];
				}
				else
				{
					hullFound = true;
				}
			}
		}
	}
}
// Per-block convex hull by divide and conquer.
// Each block copies its slice of `data` (numElementsPBlock = 2*blockDim.x
// points, assumed pre-sorted) into the shared `hullData` list, links adjacent
// point pairs into trivial 2-point hulls, then repeatedly merges neighbouring
// hulls of size 2*i via findHull.  Even/odd thread pairs find the two bridges
// of each merge.  Finally the left/right indices are rebased from block-local
// to global positions and the slice is written back to `data`.
__global__ void divideAndConquer(Point* data, int numElements)
{
	int idx = threadIdx.x;
	int bidx = blockIdx.x;
	// Each thread stages two points, so a block owns 2*blockDim.x elements.
	int numElementsPBlock = blockDim.x * 2;
	int numThreads = blockDim.x;
	//int numBlocks = gridDim.x;
	bool isEven = (idx % 2) == 0;
	/*
	if (idx == 0)
	{
	printf("%d\n", idx);
	printf("%d\n", bidx);
	printf("%d\n", numElementsPBlock);
	printf("%d\n", numElements);
	printf("%d\n", numThreads);
	printf("%d\n", numBlocks);
	}*/
	// Stage this block's slice into shared memory (guard the tail slice).
	hullData[idx] = data[idx + (numElementsPBlock * bidx)];
	if ((idx + (numElementsPBlock * bidx)) + numThreads < numElements)
		hullData[idx + numThreads] = data[(idx + (numElementsPBlock * bidx)) + numThreads];
	/*hullData[idx + (2 * blockDim.x)] = data[idx + (2 * blockDim.x)];
	hullData[idx + (3 * blockDim.x)] = data[idx + (3 * blockDim.x)];*/
	__syncthreads();
	// Link consecutive point pairs into 2-point hulls (a lone last point
	// links to itself).
	if ((idx << 1) + 1 < numElementsPBlock)
	{
		hullData[(idx << 1)].leftID = (idx << 1) + 1;
		hullData[(idx << 1)].rightID = (idx << 1) + 1;
		hullData[(idx << 1) + 1].leftID = (idx << 1);
		hullData[(idx << 1) + 1].rightID = (idx << 1);
	}
	else
	{
		hullData[(idx << 1)].leftID = (idx << 1);
		hullData[(idx << 1)].rightID = (idx << 1);
	}
	//printf("thread: %d\n (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n", idx, hullData[(idx << 1)].X, hullData[(idx << 1)].Y, hullData[(idx << 1)].rightID, hullData[(idx << 1)].leftID, hullData[(idx << 1) + 1].X, hullData[(idx << 1) + 1].Y, hullData[(idx << 1) + 1].rightID, hullData[(idx << 1) + 1].leftID);
	/*int currA = startIndex + 1;
	int currB = startIndex + 2;
	Point c = hullData[hullData[currA].leftID];
	if (!isEven)
	{
		c = hullData[hullData[currA].rightID];
	}*/
	//int startIndex;
	int currA;
	int currB;
	__syncthreads();
	// Merge phase: at pass i, an even/odd thread pair merges the two hulls
	// straddling the boundary at index + 2*i.
	for (int i = 1; i < ((numElementsPBlock + 1) / 2); i *= 2)
	{
		// Start of the 4*i-wide merge region handled by this thread pair.
		int index = 4 * i * (idx / 2);
		/*if (idx == 0)
			printf("-------------------- i = %d --------------------\n", i);
		__syncthreads();
		if (i == 2 && (idx == 49 || idx == 48))
			printf("thread: %d\n %d: (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n %d: (%d, %d)\n neighborRight: %d\n neighborLeft: %d\n", idx, (idx << 1), hullData[(idx << 1)].X, hullData[(idx << 1)].Y, hullData[(idx << 1)].rightID, hullData[(idx << 1)].leftID, ((idx << 1) + 1), hullData[(idx << 1) + 1].X, hullData[(idx << 1) + 1].Y, hullData[(idx << 1) + 1].rightID, hullData[(idx << 1) + 1].leftID);*/
		if (index + (i << 1) < numElementsPBlock)
		{
			// Seed the walk at the two points flanking the merge boundary.
			currA = index + (i << 1) - 1;
			currB = index + (i << 1);
			findHull(currA, currB);
		}
		__syncthreads();
		// Splice the found bridge into the linked list; even threads link one
		// side, odd threads the other.
		if (index + (i << 1) < numElementsPBlock)
		{
			if (isEven)
			{
				hullData[currA].rightID = currB;
				hullData[currB].leftID = currA;
			}
			else
			{
				hullData[currA].leftID = currB;
				hullData[currB].rightID = currA;
			}
		}
		//__syncthreads();
		//	if (isEven)
		//	{
		//		int j = 0;
		//		int count = 0;
		//		//printf("%d\n", h_data[0].rightID);
		//		for (int i=0; i<numElements; i++)
		//		{
		//			if (j != 0 || i == 0)
		//				printf("id: %d  %d, %d\n", idx, hullData[j].X, hullData[j].Y);
		//			else
		//				break;
		//			j = hullData[j].rightID;
		//			count++;
		//			//system("PAUSE");
		//
		//		}
		//		__syncthreads();
		//		printf("\nCount: %d\n", count);
		//	}
	}
	__syncthreads();
	// Rebase block-local neighbour indices to global indices before write-back.
	hullData[idx].rightID = (hullData[idx].rightID + (numElementsPBlock * blockIdx.x));
	hullData[idx].leftID = (hullData[idx].leftID + (numElementsPBlock * blockIdx.x));
	if (idx + numThreads < numElementsPBlock)
	{
		hullData[idx + numThreads].rightID = (hullData[idx + numThreads].rightID + (numElementsPBlock * blockIdx.x));
		hullData[idx + numThreads].leftID = (hullData[idx + numThreads].leftID + (numElementsPBlock * blockIdx.x));
	}
	__syncthreads();
	// Write the merged hull slice back to global memory (guard the tail).
	data[idx + (numElementsPBlock * bidx)] = hullData[idx];
	if ((idx + (numElementsPBlock * bidx)) + numThreads < numElements)
		data[(idx + (numElementsPBlock * bidx)) + numThreads] = hullData[idx + numThreads];
	__syncthreads();
}
// Cross-block merge pass: after divideAndConquer has produced one hull per
// MAX_ELEMENTS_BLOCK-sized slice, each even/odd thread pair merges two
// adjacent slice-hulls that are (MAX_ELEMENTS_BLOCK * iteration) apart,
// operating directly on global memory via the 3-argument findHull.
// `iteration` doubles each call, halving the number of remaining hulls.
__global__ void divideAndConquerBlocks(Point* data, int numElements, int iteration)
{
	int idx = threadIdx.x;
	//int bidx = blockIdx.x;
	bool isEven = (idx % 2) == 0;
	int currA;
	int currB;
	//currA = ((((MAX_ELEMENTS_BLOCK * 2) * ((idx / 2) + 1)) + (MAX_ELEMENTS_BLOCK * (MAX_ELEMENTS_BLOCK / 4) * bidx)) * iteration) - 1;
	//currB = ((((MAX_ELEMENTS_BLOCK * 2) * ((idx / 2) + 1)) + (MAX_ELEMENTS_BLOCK * (MAX_ELEMENTS_BLOCK / 4) * bidx)) * iteration);
	// Seed the walk at the two points flanking the boundary between the two
	// slice-hulls this thread pair is merging.
	currA = (((MAX_ELEMENTS_BLOCK * 2 * (idx / 2)) + MAX_ELEMENTS_BLOCK) * iteration) - 1;
	currB = (((MAX_ELEMENTS_BLOCK * 2 * (idx / 2)) + MAX_ELEMENTS_BLOCK) * iteration);
	//printf("Id: %d Before FindHull--- currA: %d currB: %d\n", idx, currA, currB);
	findHull(currA, currB, data);
	//printf("Id: %d After FindHull--- currA: %d currB: %d\n", idx, currA, currB);
	__syncthreads();
	// Splice the bridge: even threads link one side, odd threads the other.
	if (isEven)
	{
		data[currA].rightID = currB;
		data[currB].leftID = currA;
	}
	else
	{
		data[currA].leftID = currB;
		data[currB].rightID = currA;
	}
	__syncthreads();
}
int main(int argc, char** argv)
{
	// Open the point file: argv[1] if given, otherwise a default fixture.
	FILE* input;
	if (argc > 1)
	{
		input = fopen(argv[1], "r");
	}
	else
	{
		input = fopen("sorted_8192.txt", "r");
	}
	// FIX: fail cleanly instead of crashing inside fscanf on a missing file.
	if (input == NULL)
	{
		fprintf(stderr, "could not open input file\n");
		return 1;
	}
	// The file appears to begin with two header integers; the second read is
	// the point count (the first is intentionally overwritten) — the original
	// did the same double read.
	int numPoints;
	fscanf(input, "%d", &numPoints);
	fscanf(input, "%d", &numPoints);
	h_data = (Point*)malloc(sizeof(Point) * numPoints);
	if (h_data == NULL)
	{
		fprintf(stderr, "out of memory\n");
		fclose(input);
		return 1;
	}
	// FIX: Point::X/Y are unsigned int, so scan with %u (was %d).
	for (int i = 0; i < numPoints; i++){
		fscanf(input, "%u %u", &h_data[i].X, &h_data[i].Y);
	}
	fclose(input); // FIX: the original leaked the FILE handle
	cudaError_t cudaStatus = convexHull(h_data, numPoints);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "addWithCuda failed!");
		free(h_data);
		return 1;
	}
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		free(h_data);
		return 1;
	}
	free(h_data);
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t convexHull(Point* h_data, int numPoints)
{
	// Host driver: uploads the sorted points, runs the per-block hull kernel,
	// then repeatedly merges block hulls until one hull remains, downloads the
	// result and walks/prints the final linked list.
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
	}
	cudaStatus = cudaMalloc((void**)&d_data, numPoints * sizeof(Point));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(d_data, h_data, numPoints * sizeof(Point), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
	}
	//printf("\n\nNum Threads to be launched: %d\n\n", numThreads);
	// Phase 1 config: each block handles up to MAX_ELEMENTS_BLOCK points with
	// one thread per point pair (ceil division for the block count).
	int numBlocks = 1;
	if ((numPoints % MAX_ELEMENTS_BLOCK) == 0)
		numBlocks = (numPoints / MAX_ELEMENTS_BLOCK);
	else
		numBlocks = ((numPoints / MAX_ELEMENTS_BLOCK) + 1);
	int numThreads = 1;
	if (numBlocks > 1)
		numThreads = (MAX_ELEMENTS_BLOCK / 2);
	else
		numThreads = ((numPoints + 1) / 2);
	printf("\n----------Starting first DnC---------\nnumBlocks: %d numThreads: %d\n\n", numBlocks, numThreads);
	// Dynamic shared memory holds this block's slice of points.
	divideAndConquer<<<numBlocks, numThreads, sizeof(Point) * (numThreads * 2)>>>(d_data, numPoints);
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "divideAndConquer launch failed: %s\n", cudaGetErrorString(cudaStatus));
	}
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
	}
	// Phase 2: merge block hulls pairwise; `i` doubles the hull spacing each
	// round, and j tracks how much the single-block thread count shrinks.
	int j = 1;
	for (int i = 1; i < numBlocks; i *= 2)
	{
		int newNumBlocks = (numBlocks / i) / (MAX_ELEMENTS_BLOCK / 4);
		if (newNumBlocks == 0)
			newNumBlocks++;
		int newNumThreads = 1;
		int num = 1024;
		if (newNumBlocks > 1)
			newNumThreads = (MAX_ELEMENTS_BLOCK / 2);
		else
		{
			// Cap at 1024 threads (max threads per block) when many block
			// hulls remain; otherwise use two threads per remaining merge.
			if (numBlocks > 1024)
			{
				newNumThreads = (num / j);
			}
			else
			{
				newNumThreads = (numBlocks / j);
			}
			j *= 2;
		}
		printf("\n----------Starting second DnC---------\nnewNumBlocks: %d newNumThreads: %d\n\n", newNumBlocks, newNumThreads);
		divideAndConquerBlocks<<<newNumBlocks, newNumThreads>>>(d_data, numPoints, i);
		cudaStatus = cudaGetLastError();
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "divideAndConquer launch failed: %s\n", cudaGetErrorString(cudaStatus));
		}
		// cudaDeviceSynchronize waits for the kernel to finish, and returns
		// any errors encountered during the launch.
		cudaStatus = cudaDeviceSynchronize();
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
		}
	}
	cudaStatus = cudaMemcpy(h_data, d_data, numPoints * sizeof(Point), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
	}
	cudaFree(d_data);
	// Walk the final hull starting at index 0, following rightID links until
	// the walk returns to 0; prints each vertex and the total count.
	j = 0;
	int count = 0;
	//printf("%d\n", h_data[0].rightID);
	for (int i=0; i<numPoints; i++)
	{
		if (j == 0 && i != 0)
			break;
		printf("j: %d ( %d, %d )\nr: %d l: %d\n\n", j, h_data[j].X, h_data[j].Y, h_data[j].rightID, h_data[j].leftID);
		j = h_data[j].rightID;
		count++;
		//system("PAUSE");
	}
	printf("\nCount: %d\n", count);
	/*for (int i = 0; i < numPoints; i++)
	{
		printf("%d, %d\n", h_data[i].X, h_data[i].Y);
		system("PAUSE");
	}*/
	return cudaStatus;
}
|
2,051
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated single-thread float-expression stress kernel: chains
// transcendental operations over the scalar arguments (loop trip counts come
// from var_1/var_2) and prints the final accumulator.  Evaluation order is
// load-bearing for the printed value — do not restructure the arithmetic.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
   // Each assignment below intentionally overwrites or accumulates into comp.
   comp = -0.0f * asinf(-1.4250E-37f);
   float tmp_1 = +1.5977E25f;
   comp = tmp_1 - var_3 + logf(-1.5157E8f + +1.3702E-42f);
   for (int i=0; i < var_1; ++i) {
     comp = (-1.8297E-37f * -1.8644E-41f - sinf(fmodf((+1.0674E-36f + var_4 / -1.5221E-11f * -0.0f / (+1.5517E-3f / var_5)), fmodf(-1.9105E-35f, (var_6 / var_7 / -1.8187E34f * var_8 * var_9 / var_10)))));
     float tmp_2 = +1.3993E-13f;
     comp = tmp_2 / (-0.0f * logf((-0.0f - (+1.1006E35f / var_11))));
     comp += var_12 - var_13 * (var_14 * (+1.4838E22f + var_15));
   }
   for (int i=0; i < var_2; ++i) {
     float tmp_3 = -1.8137E35f;
     comp += tmp_3 / var_16 / (+1.9024E-43f * (var_17 / (var_18 - var_19)));
     float tmp_4 = var_20 * var_21 / (-1.7084E-1f + +1.0482E-43f / (+1.1885E35f / -1.3962E36f));
     comp = tmp_4 * (var_22 - -1.2407E35f);
   }
   // Final value printed with full precision for cross-run comparison.
   printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer and fill every slot with v.
// Caller owns the returned memory (free() when done).
float* initPointer(float v) {
  float *buf = (float*) malloc(sizeof(float)*10);
  int k = 0;
  while (k < 10) {
    buf[k] = v;
    ++k;
  }
  return buf;
}
int main(int argc, char** argv) {
  /* Program variables */
  // FIX: the original dereferenced argv[1..23] unconditionally; validate the
  // argument count before parsing.
  if (argc < 24) {
    fprintf(stderr, "usage: %s <23 numeric arguments>\n", argv[0]);
    return 1;
  }
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  // Single-thread launch: the kernel prints its result itself.
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  cudaDeviceSynchronize();
  return 0;
}
|
2,052
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
// Binary functor computing a*x + y, usable on host and device (thrust idiom).
struct saxpy_functor
{
    const float a;   // scale factor captured at construction
    saxpy_functor(float _a) : a(_a) {}
    __host__ __device__
    // FIX: marked const so the functor can be invoked through const copies,
    // as thrust algorithms are entitled to do.
    float operator()(float x, float y) const {
        return a*x+y;
    }
};
// In-place SAXPY on the device: y[i] <- a*x[i] + y[i] via thrust::transform.
void saxpy(float a, thrust::device_vector<float>& x,
           thrust::device_vector<float>& y) {
    thrust::transform(x.begin(), x.end(), y.begin(), y.begin(),
                      saxpy_functor(a));
}
#include <thrust/fill.h>
#include <thrust/sequence.h>
int main(){
    // Demo: y = 3*x + y over 2^24 elements, computed on the device.
    const size_t n = 1 << 24;
    thrust::host_vector<float> hx(n);
    thrust::host_vector<float> hy(n);
    thrust::sequence(hx.begin(), hx.end());    // hx = 0, 1, 2, ...
    thrust::fill(hy.begin(), hy.end(), 0.87);  // hy = 0.87 everywhere
    // Upload, compute, download.
    thrust::device_vector<float> dx = hx;
    thrust::device_vector<float> dy = hy;
    saxpy(3.0, dx, dy);
    hy = dy;
    hx = dx;
    // Print the first 256 (index, x, y) triples for inspection.
    for(int i = 0; i < (1 << 8); i++){
        printf("%d\t%g\t%g\n",i, hx[i], hy[i]);
    }
    return 0;
}
|
2,053
|
// 2D convolution: one thread per output pixel; taps falling outside the image
// are skipped (zero-padding semantics). filterWidth is assumed odd.
__global__ void convolution(int filterWidth,float *filter,int imageHeight,int imageWidth,float *inputImage,float *outputImage)
{
    int i= blockIdx.x * blockDim.x + threadIdx.x;
    int j= blockIdx.y * blockDim.y + threadIdx.y;
    // FIX: bounds guard so tail-block threads (grid is now ceil-divided in
    // hostFE) do not write outside the image.
    if (i >= imageWidth || j >= imageHeight)
        return;
    int halffilterSize = filterWidth >> 1 ;
    float sum = 0; // accumulator for this output pixel
    // Apply the filter to the neighborhood, skipping out-of-image taps.
    for (int k = -halffilterSize; k <= halffilterSize; k++)
    {
        for (int l = -halffilterSize; l <= halffilterSize; l++)
        {
            if (j + k >= 0 && j + k < imageHeight &&
                i + l >= 0 && i + l < imageWidth)
            {
                sum += inputImage[(j + k) * imageWidth + i + l] *
                       filter[(k + halffilterSize) * filterWidth +
                              l + halffilterSize];
            }
        }
    }
    outputImage[j * imageWidth + i] = sum;
}
// Host wrapper: uploads image and filter, convolves with one thread per
// pixel, and downloads the result into outputImage.
extern "C" void hostFE(int filterWidth, float *filter, int imageHeight, int imageWidth,
                       float *inputImage, float *outputImage)
{
    float * d_filter,*d_inputImage,*d_outputImage;
    int filterSize = filterWidth * filterWidth * sizeof(float);
    // FIX: the buffers hold floats; the original sized them with sizeof(int)
    // (same byte count on common ABIs, but the wrong element type).
    int inputImageSize = imageHeight * imageWidth * sizeof(float);
    int outputImageSize = inputImageSize;
    cudaMalloc((void**)&d_filter,filterSize);
    cudaMalloc((void**)&d_inputImage,inputImageSize);
    cudaMalloc((void**)&d_outputImage,outputImageSize);
    cudaMemcpy(d_filter,filter,filterSize,cudaMemcpyHostToDevice);
    cudaMemcpy(d_inputImage,inputImage,inputImageSize,cudaMemcpyHostToDevice);
    int block_size_x = 16;
    int block_size_y = 16;
    dim3 blockSize(block_size_x,block_size_y);
    // FIX: ceil-divide so images whose dimensions are not multiples of 16
    // still get their edge pixels processed (the original floor-divided and
    // silently skipped the right/bottom borders; the kernel now bounds-checks
    // the tail threads this creates).
    dim3 numBlock((imageWidth  + block_size_x - 1) / block_size_x,
                  (imageHeight + block_size_y - 1) / block_size_y);
    convolution<<<numBlock,blockSize>>>(filterWidth,d_filter,imageHeight,imageWidth,d_inputImage,d_outputImage);
    cudaMemcpy(outputImage,d_outputImage,outputImageSize,cudaMemcpyDeviceToHost);
    cudaFree(d_outputImage);
    cudaFree(d_inputImage);
    cudaFree(d_filter);
}
|
2,054
|
#include "includes.h"
//Udacity HW 4
//Radix Sorting
// Radix-sort digit histogram: each block builds a private histogram of the
// digit ((in[i] >> current_bits) & mask) in shared memory, then writes its
// counts to global memory in bin-major layout: hist[bin * gridDim.x + block].
// Dynamic shared memory: nBins * sizeof(unsigned int).
__global__ void histogram(unsigned int* in, unsigned int* hist, int n, unsigned int nBins, unsigned int mask, unsigned int current_bits)
{
    extern __shared__ unsigned int blockHist[];

    // Cooperatively zero the shared histogram.
    for (unsigned int b = threadIdx.x; b < nBins; b += blockDim.x)
        blockHist[b] = 0;
    __syncthreads();

    // Each thread classifies at most one input element.
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < n)
    {
        const unsigned int digit = (in[gid] >> current_bits) & mask;
        atomicAdd(&blockHist[digit], 1);
    }
    __syncthreads();

    // Publish this block's per-bin counts.
    for (unsigned int b = threadIdx.x; b < nBins; b += blockDim.x)
        hist[b * gridDim.x + blockIdx.x] = blockHist[b];
}
|
2,055
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Element-wise sum dst = src1 + src2.
// NOTE(review): no bounds guard — the caller must launch exactly one thread
// per element.
__global__ void simpleKernel(float *dst, float *src1, float *src2)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    dst[i] = src1[i] + src2[i];
}
// Demonstrates unified virtual addressing / peer-to-peer access between two
// GPUs: src1 lives on device 0, src2 on device 1, and the kernel (launched
// on device 0) reads device 1's buffer over the peer link.
//   copy : if true, first P2P-copy src2 into a device-0 buffer and include
//          the allocation + copy overhead in the timed region.
//   print: if true, dump every result element.
// Fix: the timing events were created but never destroyed (resource leak).
int execute_uva(bool copy=false, bool print=false)
{
    float *src1, *src2, *dst;
    float *dsrc1, *dsrc2, *ddst, *dsrc2_1;
    size_t rsize = 256;
    size_t size = sizeof(float) * rsize * rsize;

    // Host buffers.
    src1 = (float *)malloc(size);
    src2 = (float *)malloc(size);
    dst = (float *)malloc(size);
    for (int i = 0; i < rsize * rsize; ++i) {
        src1[i] = (float)i;
        src2[i] = (float)(2 * i);
    }

    // Device 0: allow access to device 1's memory, allocate dst and src1.
    cudaSetDevice(0);
    cudaDeviceEnablePeerAccess(1, 0);
    cudaMalloc(&ddst, size);
    cudaMalloc(&dsrc1, size);
    cudaMemcpy(dsrc1, src1, size, cudaMemcpyHostToDevice);

    // Device 1: allow access back to device 0, allocate src2 there.
    cudaSetDevice(1);
    cudaDeviceEnablePeerAccess(0, 0);
    cudaMalloc(&dsrc2, size);
    cudaMemcpy(dsrc2, src2, size, cudaMemcpyHostToDevice);

    // Launch on device 0 and time with events.
    cudaSetDevice(0);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    if (copy) {
        // Include the allocation and P2P copy overhead in the timing.
        cudaMalloc(&dsrc2_1, size);
        cudaMemcpy(dsrc2_1, dsrc2, size, cudaMemcpyDefault);
        simpleKernel<<<rsize, rsize>>>(ddst, dsrc2_1, dsrc2);
    } else {
        simpleKernel<<<rsize, rsize>>>(ddst, dsrc1, dsrc2);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float time_ms = 0;
    cudaEventElapsedTime(&time_ms, start, stop);

    cudaMemcpy(dst, ddst, size, cudaMemcpyDeviceToHost);
    if (print)
        for(int i = 0; i < rsize*rsize; ++i) {
            printf("%d: %f\n", i, dst[i]);
        }
    printf("Last item: %f\n", dst[rsize*rsize-1]);
    printf("Elapsed time: %f\n", time_ms);

    // Clean up device 0 resources (including the previously leaked events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(ddst);
    cudaFree(dsrc1);
    if (copy) cudaFree(dsrc2_1);
    // Free device 1's buffer from its own context.
    cudaSetDevice(1);
    cudaFree(dsrc2);
    // Host buffers.
    free(src1);
    free(src2);
    free(dst);
    return 0;
}
// Runs the UVA/peer-to-peer demo only when peer access between devices 0
// and 1 is available in both directions.
int main()
{
    int peer10 = 0;
    int peer01 = 0;
    cudaDeviceCanAccessPeer(&peer10, 1, 0);
    printf("Access status: %d\n", peer10);
    cudaDeviceCanAccessPeer(&peer01, 0, 1);
    printf("Access status: %d\n", peer01);
    if (peer10 && peer01)
        execute_uva(true);
    return 0;
}
|
2,056
|
// to use CUDA, uncomment the following line
#define USE_CUDA
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuComplex.h>
#define COLOR_DEPTH 256
#define DEFAULT_WIDTH 3840
#define DEFAULT_HEIGHT 2160
#define MAX_THREAD 65536
#ifndef USE_CUDA
// CPU fallback: escape-time fractal render of the full
// DEFAULT_WIDTH x DEFAULT_HEIGHT frame.
//   a, b : complex affine transform applied to each pixel coordinate
//          (c = a*(x + i*y) + b) — controls zoom/rotation/offset.
//   n    : maximum iteration count.
//   r    : output RGB buffer, 3 bytes per pixel, grayscale escape shade.
void computePixelNoGPU(cuDoubleComplex a, cuDoubleComplex b, unsigned short n, unsigned char* r)
{
int i, j;
cuDoubleComplex c, z;
for(unsigned int x=0; x<DEFAULT_WIDTH; x++)
{
for(unsigned int y=0; y<DEFAULT_HEIGHT; y++)
{
// Linear pixel index; each pixel occupies 3 output bytes.
i = x + y * DEFAULT_WIDTH;
c = make_cuDoubleComplex((double)x,(double)y);
c = cuCadd(cuCmul(a, c), b);
z = make_cuDoubleComplex(0.0,0.0);
// Iterate z = z^2 + c until escape (|z| >= 2) or n iterations.
for(j = 0; j < n && cuCabs(z) < 2; j++)
{
z = cuCadd(cuCmul(z, z), c);
}
if(cuCabs(z) > 2)
{
// Escaped: shade by how quickly it diverged.
r[i*3] = j * COLOR_DEPTH / n;
r[i*3+1] = r[i*3];
r[i*3+2] = r[i*3];
}
else
{
// Did not escape within n iterations: black pixel.
r[i*3] = 0;
r[i*3+1] = 0;
r[i*3+2] = 0;
}
}
}
}
#else
// GPU escape-time fractal render: one thread per pixel of the
// DEFAULT_WIDTH x DEFAULT_HEIGHT frame. a_d/b_d hold the complex affine
// transform (c = a*(x + i*y) + b); n is the iteration cap; r_d is the RGB
// output buffer (3 bytes per pixel).
// Fix: added a tail guard so a grid rounded up past the pixel count cannot
// write out of bounds.
__global__ void computePixel(cuDoubleComplex* a_d, cuDoubleComplex* b_d, unsigned short n, unsigned char* r_d)
{
int i, j, x, y;
cuDoubleComplex z_d, c_d;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= DEFAULT_WIDTH * DEFAULT_HEIGHT)
return;
x = i % DEFAULT_WIDTH;
y = i / DEFAULT_WIDTH;
c_d = make_cuDoubleComplex((double)x,(double)y);
c_d = cuCadd(cuCmul(a_d[0], c_d), b_d[0]);
z_d = make_cuDoubleComplex(0.0,0.0);
// Iterate z = z^2 + c until escape (|z| >= 2) or n iterations.
for(j = 0; j < n && cuCabs(z_d) < 2; j++)
{
z_d = cuCadd(cuCmul(z_d, z_d), c_d);
}
if(cuCabs(z_d) > 2)
{
// Escaped: shade by how quickly it diverged.
r_d[i*3] = j * COLOR_DEPTH / n;
r_d[i*3+1] = r_d[i*3];
r_d[i*3+2] = r_d[i*3];
}
else
{
// Did not escape within n iterations: black pixel.
r_d[i*3] = 0;
r_d[i*3+1] = 0;
r_d[i*3+2] = 0;
}
}
#endif
// Renders an escape-time fractal of DEFAULT_WIDTH x DEFAULT_HEIGHT RGB
// pixels (GPU or CPU depending on USE_CUDA), writes the raw bytes to
// "test.data", and reports wall-clock time.
// Fix: the kernel was launched with MAX_THREAD (65536) threads per block,
// which exceeds the hardware limit of 1024 threads, so the launch failed
// and uninitialized memory was written out. It now uses 256 threads per
// block with a grid rounded up to cover every pixel.
int main()
{
// initializations
struct timespec start, end;
double time_elapsed;
unsigned char* r;
cuDoubleComplex a;
cuDoubleComplex b;
#ifdef USE_CUDA
unsigned char* r_device;
cuDoubleComplex* a_device;
cuDoubleComplex* b_device;
#endif
// starting time measure
clock_gettime(CLOCK_MONOTONIC, &start);
// zoom and rotation parameters
a = make_cuDoubleComplex(1.0/DEFAULT_WIDTH*3,0.0005);
b = make_cuDoubleComplex(-1.5,-1.6);
// host pixel buffer: 3 bytes (RGB) per pixel
r = (unsigned char*) malloc(sizeof(unsigned char)*DEFAULT_WIDTH*DEFAULT_HEIGHT*3);
#ifdef USE_CUDA
// allocating memory on GPU
cudaMalloc((void**) &r_device, sizeof(unsigned char)*DEFAULT_WIDTH*DEFAULT_HEIGHT*3);
cudaMalloc((void**) &a_device, sizeof(cuDoubleComplex));
cudaMalloc((void**) &b_device, sizeof(cuDoubleComplex));
// copy a and b
cudaMemcpy(a_device, &a, sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
cudaMemcpy(b_device, &b, sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
// launch: 256 threads per block, grid rounded up over all pixels
{
const int threadsPerBlock = 256;
const int totalPixels = DEFAULT_WIDTH*DEFAULT_HEIGHT;
const int numBlocks = (totalPixels + threadsPerBlock - 1) / threadsPerBlock;
computePixel<<<numBlocks, threadsPerBlock>>>(a_device, b_device, 256, r_device);
}
// blocking copy back also synchronizes with the kernel
cudaMemcpy(r, r_device, sizeof(unsigned char)*DEFAULT_WIDTH*DEFAULT_HEIGHT*3, cudaMemcpyDeviceToHost);
#else
// making the CPU do the job
computePixelNoGPU(a, b, 256, r);
#endif
// writing result in a file
FILE* file = fopen("test.data", "wb");
if(file == NULL)
{
return -1;
}
fwrite(r, sizeof(unsigned char), DEFAULT_WIDTH*DEFAULT_HEIGHT*3, file);
fclose(file);
// freeing GPU's and CPU's memory
#ifdef USE_CUDA
cudaFree(a_device);
cudaFree(b_device);
cudaFree(r_device);
#endif
free(r);
// ending the timer and print the result
clock_gettime(CLOCK_MONOTONIC, &end);
time_elapsed = end.tv_nsec - start.tv_nsec;
time_elapsed /= 1000000000.0;
time_elapsed += (double) (end.tv_sec - start.tv_sec);
printf("time elapsed : %f\n", time_elapsed);
// return success state
return 0;
}
|
2,057
|
#define uint unsigned int
#define HX(i,j,n) Hx[i+IHx*(j)+IHx*JHx*n]
#define BX(i,j,n) Bx[i+IHx*(j)+IHx*JHx*n]
#define HY(i,j,n) Hy[i+IHy*(j)+IHy*JHy*n]
#define BY(i,j,n) By[i+IHy*(j)+IHy*JHy*n]
#define EZ(i,j,n) Ez[i+IEz*(j)+IEz*JEz*n]
#define DZ(i,j,n) Dz[i+IEz*(j)+IEz*JEz*n]
#define DZX(i,j,n) Dzx[i+IEz*(j)+IEz*JEz*n]
#define DZY(i,j,n) Dzy[i+IEz*(j)+IEz*JEz*n]
// One half-step of a 2-D FDTD (finite-difference time-domain) field update
// with split-field PML absorbing layers of width PMLw along the y boundaries.
// Launched with BlockX x BlockY thread blocks, one thread per (i, j) grid
// node. `flag` selects which half-step runs: 0 updates the magnetic fields
// (Hx/Bx, Hy/By), non-zero updates the electric field (Ez/Dz, and the
// split components Dzx/Dzy inside the PML). n0/n1/n2 index the three time
// slices of each ring-buffered field array (see the HX/BX/... macros).
// NOTE(review): field/coefficient array sizes (IHx*JHx etc.) must match the
// macro indexing; out-of-range (i, j) are only partially guarded — confirm
// launch geometry against the array extents in the host code.
template <unsigned int BlockX, unsigned int BlockY> __global__ void FDTD2DKernel(
float *Hx,
float *Bx,
float *Hy,
float *By,
float *Ez,
float *Dz,
float *Dzx,
float *Dzy,
float *urHx,
float *urHy,
float *erEz,
float *ScmHx,
float *ScmHy,
float *Sc,
float *Scsx,
float *Scsy,
float *ScmsmxHy,
float *ScmsmyHx,
const float delta,
const float dtscalar,
const float dt,
const uint PMLw,
const float e0,
const float u0,
const float Two_pi_f_deltat,
const uint NHW,
const uint Is,
const uint Js,
const uint IHx,
const uint JHx,
const uint IHy,
const uint JHy,
const uint IEz,
const uint JEz,
const uint n,
const uint n0,
const uint n1,
const uint n2,
const uint flag)
{
uint i = BlockX*blockIdx.x+threadIdx.x;
uint j = BlockY*blockIdx.y+threadIdx.y;
// Half time step flag is either 0 or 1 indicating whether magnetic field or electric field is to be calculated, respectively.
if (flag == 0)
{
if (i < IHx)
{
// Normal space.
if (j >= PMLw && j < JHx-PMLw)
{
BX(i,j,n2) = (1-ScmHx[i+IHx*j])/(1+ScmHx[i+IHx*j]) * BX(i,j,n1) + ( (dt/delta)/(1+ScmHx[i+IHx*j]) * (EZ(i,j,n1)-EZ(i,j+1,n1)) );
HX(i,j,n2) = BX(i,j,n2)/(u0*urHx[i+IHx*j]);
BY(i+1,j+1,n2) = (1-ScmHy[(i+1)+IHy*(j+1)])/(1+ScmHy[(i+1)+IHy*(j+1)]) * BY(i+1,j+1,n1) + ( (dt/delta)/(1+ScmHy[(i+1)+IHy*(j+1)]) * (EZ(i+1,j+1,n1)-EZ(i,j+1,n1)) );
HY(i+1,j+1,n2) = BY(i+1,j+1,n2)/(u0*urHy[(i+1)+IHy*(j+1)]);
}
// Lower PML region.
if (j < PMLw)
{
BX(i,j,n2) = (1-ScmsmyHx[i+IHx*j])/(1+ScmsmyHx[i+IHx*j]) * BX(i,j,n1) + ( (dt/delta)/(1+ScmsmyHx[i+IHx*j]) * (EZ(i,j,n1)-EZ(i,j+1,n1)) );
HX(i,j,n2) = BX(i,j,n2)/(u0*urHx[i+IHx*j]);
BY(i+1,j+1,n2) = (1-ScmsmxHy[(i+1)+IHy*(j+1)])/(1+ScmsmxHy[(i+1)+IHy*(j+1)]) * BY(i+1,j+1,n1) + ( (dt/delta)/(1+ScmsmxHy[(i+1)+IHy*(j+1)]) * (EZ(i+1,j+1,n1)-EZ(i,j+1,n1)) );
HY(i+1,j+1,n2) = BY(i+1,j+1,n2)/(u0*urHy[(i+1)+IHy*(j+1)]);
}
// Upper PML region.
if (j >= JHx-PMLw && j < JHx)
{
BX(i,j,n2) = (1-ScmsmyHx[i+IHx*j])/(1+ScmsmyHx[i+IHx*j]) * BX(i,j,n1) + ( (dt/delta)/(1+ScmsmyHx[i+IHx*j]) * (EZ(i,j,n1)-EZ(i,j+1,n1)) );
HX(i,j,n2) = BX(i,j,n2)/(u0*urHx[i+IHx*j]);
BY(i+1,j+1,n2) = (1-ScmsmxHy[(i+1)+IHy*(j+1)])/(1+ScmsmxHy[(i+1)+IHy*(j+1)]) * BY(i+1,j+1,n1) + ( (dt/delta)/(1+ScmsmxHy[(i+1)+IHy*(j+1)]) * (EZ(i+1,j+1,n1)-EZ(i,j+1,n1)) );
HY(i+1,j+1,n2) = BY(i+1,j+1,n2)/(u0*urHy[(i+1)+IHy*(j+1)]);
}
}
}
else
{
if (i < IEz)
{
// Interior Ez update from the curl of H (skip the j = 0 and j = JEz-1 rows).
if (j != 0 && j < JEz-1 )
{
DZ(i,j,n2) = (1-Sc[i+IEz*j])/(1+Sc[i+IEz*j]) * DZ(i,j,n1) + ( (dt/delta)/(1+Sc[i+IEz*j]) * ( HY(i+1,j,n2) - HY(i,j,n2) - HX(i,j,n2) + HX(i,j-1,n2)) );
EZ(i,j,n2) = DZ(i,j,n2)/(e0*erEz[i+IEz*j]);
}
// Source.
// Sinusoidal hard source on row Js, active for the first NHW time steps.
if (j == Js && n < NHW)
{
EZ(i,j,n2) = EZ(i,j,n2) + 1 * sin (Two_pi_f_deltat * n) / dtscalar;
DZ(i,j,n2) = e0 * EZ(i,j,n2);
}
// Lower PML region.
// Split-field update: Dz = Dzx + Dzy with separate x/y loss profiles.
if (j > 0 && j < PMLw+1)
{
DZX(i,j,n2) = (1-Scsx[i+IEz*j])/(1+Scsx[i+IEz*j]) * DZX(i,j,n1) + ( (dt/delta)/(1+Scsx[i+IEz*j]) * ( HY(i+1,j,n2) - HY(i,j,n2)) );
DZY(i,j,n2) = (1-Scsy[i+IEz*j])/(1+Scsy[i+IEz*j]) * DZY(i,j,n1) + ( (dt/delta)/(1+Scsy[i+IEz*j]) * (- HX(i,j,n2) + HX(i,j-1,n2)) );
DZ(i,j,n2) = DZX(i,j,n2) + DZY(i,j,n2);
EZ(i,j,n2) = DZ(i,j,n2)/(e0*erEz[i+IEz*j]);
}
// Upper PML region.
if (j >= JEz-PMLw-1 && j < JEz-1)
{
DZX(i,j,n2) = (1-Scsx[i+IEz*j])/(1+Scsx[i+IEz*j]) * DZX(i,j,n1) + ( (dt/delta)/(1+Scsx[i+IEz*j]) * ( HY(i+1,j,n2) - HY(i,j,n2)) );
DZY(i,j,n2) = (1-Scsy[i+IEz*j])/(1+Scsy[i+IEz*j]) * DZY(i,j,n1) + ( (dt/delta)/(1+Scsy[i+IEz*j]) * (- HX(i,j,n2) + HX(i,j-1,n2)) );
DZ(i,j,n2) = DZX(i,j,n2) + DZY(i,j,n2);
EZ(i,j,n2) = DZ(i,j,n2)/(e0*erEz[i+IEz*j]);
}
}
}
}
|
2,058
|
#include "includes.h"
using namespace std;
int threads;
// Each thread strides over the candidates [threadIdx.x, integer_m) with step
// blockDim.x, and counts how many of them are coprime to integer_m (Euclid's
// gcd), accumulating into its own slot d_out[threadIdx.x].
// NOTE(review): the stride ignores gridDim — presumably launched with a
// single block; confirm against the caller.
__global__ void gcd_vector(int * d_out, int integer_m){
    const int tid = threadIdx.x;
    for (int candidate = tid; candidate < integer_m; candidate += blockDim.x) {
        // gcd(candidate, integer_m) via Euclid's algorithm.
        int a = candidate;
        int b = integer_m;
        while (b != 0) {
            const int rem = a % b;
            a = b;
            b = rem;
        }
        if (a == 1) {
            d_out[tid]++;
        }
    }
}
|
2,059
|
#include "includes.h"
// Exposure adjustment: adds p_Exp * 0.01 to the first channel of each
// 4-float (RGBA-layout) pixel of p_Input, in place. One thread per pixel,
// guarded for grids that over-cover the image.
__global__ void FilmGradeKernelA( float* p_Input, int p_Width, int p_Height, float p_Exp) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= p_Width || row >= p_Height)
        return;
    const int idx = (row * p_Width + col) * 4;
    p_Input[idx] += p_Exp * 0.01f;
}
|
2,060
|
//pass
//--blockDim=512 --gridDim=1 --warp-sync=32 --no-inline
#include <cuda.h>
#include <stdio.h>
#define N 32
// Inclusive Hillis-Steele scan over one 32-element warp's worth of data.
// Fix: the original relied on implicit warp lockstep — each step reads a
// value another lane wrote in the previous step, which is a data race under
// independent thread scheduling (Volta and newer). Values are now read into
// a register with explicit __syncwarp() barriers separating the read and
// write phases of each step. (requires SM30+ for __syncwarp on CUDA 9+)
__global__ void scan (int* A)
{
    int tid = threadIdx.x;
    unsigned int lane = tid & 31;
    for (int offset = 1; offset <= 16; offset <<= 1)
    {
        int partner = 0;
        if (lane >= offset) partner = A[tid - offset];
        __syncwarp();
        if (lane >= offset) A[tid] = partner + A[tid];
        __syncwarp();
    }
}
|
2,061
|
#include "includes.h"
__global__ void VecAdd(int n, const float *A, const float *B, float* C) {
    //DEVICE(GPU)CODE
    /********************************************************************
     * Compute C = A + B, where A, B and C are (1 * n) vectors.
     ********************************************************************/
    // Busy-wait ~20000 clock cycles per thread to artificially inflate
    // compute time (kept from the original for timing experiments).
    const long long t0 = clock64();
    while (clock64() - t0 < 20000) {
        /* spin */
    }
    // Guarded element-wise addition.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        C[idx] = A[idx] + B[idx];
    }
}
|
2,062
|
//#include <stdlib.h>
//#include <stdio.h>
//#include <cuda_runtime.h>
//#include <helper_functions.h>
//#include <curand_kernel.h>
//#include "device_launch_parameters.h"
//#include "../../common/book.h"
//#include "../../common/cpu_anim.h"
//#include "../../common/Utils.h"
//#include "../../common/BlockUtils.h"
//#include "../../common/Texutils.h"
//#include "../../common/Rando.h"
//#include "../../common/PlyBlock.h"
//
//struct AppBlock {
// unsigned char *output_bitmap;
// CPUAnimBitmap *cPUAnimBitmap;
//
// cudaEvent_t start, stop;
// float totalTime;
// float frames;
//
// PlyBlock *plyBlock;
//};
//
//
//__device__ __inline__ int2 TileSize();
//
//__device__ __inline__ int2 NodeInTilePos();
//
//__device__ __inline__ int NodeIndexInPly(int2 nitDex, int2 tipDex, int2 tileSize, int2 plySize);
//
//__device__ __inline__ int2 NodeInPlyPos(int2 nitDex, int2 tipDex, int2 tileSize);
//
//__global__ void blend_kernel(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut, curandState *dev_curandStates, int width, float speed);
//
//__global__ void loco_kernel(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut, int width, float speed);
//
//__global__ void loco_kernel2(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut, int2 gridsize, float speed);
//
//__global__ void noiseTrimKernel(float *dst, curandState *dev_curandStates, float speed);
//
//__global__ void noiseTrimKernel2(float *dst, curandState *dev_curandStates, float speed, int area);
//
//void anim_gpu(AppBlock *d, int ticks);
//
//void anim_exit(AppBlock *d);
//
//__device__ __inline__ int2 TileSize()
//{
// int2 step;
// step.x = blockDim.x* gridDim.x;
// step.y = blockDim.y* gridDim.y;
// return step;
//}
//
//__device__ __inline__ int2 NodeInTilePos()
//{
// int2 step;
// step.x = threadIdx.x + blockIdx.x * blockDim.x;
// step.y = threadIdx.y + blockIdx.y * blockDim.y;
// return step;
//}
//
//__device__ __inline__ int NodeIndexInPly(int2 nitDex, int2 tipDex, int2 tileSize, int2 plySize)
//{
// return
// tileSize.y + plySize.x * tipDex.y +
// tileSize.y + tileSize.x * tipDex.x +
// tileSize.x + nitDex.y +
// nitDex.x;
//}
//
//__device__ __inline__ int2 NodeInPlyPos(int2 nitDex, int2 tipDex, int2 tileSize)
//{
// int2 step;
// step.x = tileSize.x * tipDex.x + nitDex.x;
// step.y = tileSize.y * tipDex.y + nitDex.y;
// return step;
//}
//
//void MakeAppBlock(AppBlock **out, unsigned int width, unsigned int seed)
//{
// AppBlock *appBlock = (AppBlock *)malloc(sizeof(AppBlock));
// *out = appBlock;
//
// MakePlyBlock(&appBlock->plyBlock, width, seed);
// unsigned int plyMemSize = appBlock->plyBlock->area * sizeof(float);
//
// appBlock->totalTime = 0;
// appBlock->frames = 0;
// HANDLE_ERROR(cudaEventCreate(&appBlock->start));
// HANDLE_ERROR(cudaEventCreate(&appBlock->stop));
//
// // intialize the constant data
// float *temp = RndFloat0to1(width*width);
// //HANDLE_ERROR(cudaMemcpy(appBlock->plyBlock->dev_constSrc, temp, plyMemSize, cudaMemcpyHostToDevice));
// HANDLE_ERROR(cudaMemcpy(appBlock->plyBlock->dev_inSrc, temp, plyMemSize, cudaMemcpyHostToDevice));
// free(temp);
//}
//
//int main(int argc, const char **argv)
//{
// int gridWidth = 512;
// float speed = 0.1;
// float noise = 0.5;
//
// if (checkCmdLineFlag(argc, (const char **)argv, "gridwidth"))
// {
// gridWidth = getCmdLineArgumentInt(argc, (const char **)argv, "gridwidth");
// }
// if (checkCmdLineFlag(argc, (const char **)argv, "speed"))
// {
// speed = getCmdLineArgumentFloat(argc, (const char **)argv, "speed");
// }
// if (checkCmdLineFlag(argc, (const char **)argv, "noise"))
// {
// noise = getCmdLineArgumentFloat(argc, (const char **)argv, "noise");
// }
//
//
// AppBlock *appBlock;
// MakeAppBlock(&appBlock, gridWidth, 1283);
// CPUAnimBitmap cPUAnimBitmap(gridWidth, gridWidth, appBlock);
// appBlock->cPUAnimBitmap = &cPUAnimBitmap;
// HANDLE_ERROR(cudaMalloc((void**)&appBlock->output_bitmap, cPUAnimBitmap.image_size()));
//
// cPUAnimBitmap.anim_and_exit((void(*)(void*, int))anim_gpu,
// (void(*)(void*))anim_exit);
//
//}
//
//
//#define SPEED 0.5f
//
////// this kernel takes in a 2-d array of floats
////// it updates the value-of-interest by a scaled value based
////// on itself and its nearest neighbors
//__global__ void blend_kernel(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut, curandState *dev_curandStates, int width, float speed) {
// // map from threadIdx/BlockIdx to pixel position
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// int offset = x + y * blockDim.x * gridDim.x;
//
// int left = offset - 1;
// int right = offset + 1;
// if (x == 0) left++;
// if (x == width - 1) right--;
//
// int top = offset - width;
// int bottom = offset + width;
// if (y == 0) top += width;
// if (y == width - 1) bottom -= width;
//
// float t, l, c, r, b;
// if (dstOut) {
// t = tex1Dfetch<float>(texIn, top);
// l = tex1Dfetch<float>(texIn, left);
// c = tex1Dfetch<float>(texIn, offset);
// r = tex1Dfetch<float>(texIn, right);
// b = tex1Dfetch<float>(texIn, bottom);
//
// }
// else {
// t = tex1Dfetch<float>(texOut, top);
// l = tex1Dfetch<float>(texOut, left);
// c = tex1Dfetch<float>(texOut, offset);
// r = tex1Dfetch<float>(texOut, right);
// b = tex1Dfetch<float>(texOut, bottom);
// }
//
// curandState localState = dev_curandStates[offset];
// float randy = curand_normal(&localState);
//
// float res = c + speed * (t + b + r + l - 4 * c) + randy * 0.0015;
//
// if (res < -1)
// {
// dst[offset] = -1;
// }
// else if (res > 1)
// {
// dst[offset] = 1;
// }
// else
// {
// dst[offset] = res;
// }
//}
//
//
//__global__ void noiseTrimKernel(float *dst, curandState *dev_curandStates, float speed) {
// // map from threadIdx/BlockIdx to pixel position
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// int offset = x + y * blockDim.x * gridDim.x;
//
// curandState localState = dev_curandStates[offset];
// float randy = curand_normal(&localState);
//
// float oriug = dst[offset];
// float res = oriug + speed * randy;
//
// if (res < -1)
// {
// dst[offset] = -1;
// }
// else if (res > 1)
// {
// dst[offset] = 1;
// }
// else
// {
// dst[offset] = res;
// }
//}
//
//
//__global__ void noiseTrimKernel2(float *dst, curandState *dev_curandStates, float speed, int area)
//{
// int step = gridDim.x * blockDim.x * gridDim.y;
// int start = threadIdx.x + blockIdx.y * blockDim.x * gridDim.x + blockIdx.x * blockDim.x;
// curandState localState = dev_curandStates[start];
// for (int i = start; i < area; i += step)
// {
// float randy = curand_normal(&localState);
// float oriug = dst[i];
// float res = oriug + speed * randy;
//
// if (res < -1)
// {
// dst[i] = 1;
// }
// else if (res > 1)
// {
// dst[i] = -1;
// }
// else
// {
// dst[i] = res;
// }
// }
//}
//
//
//__global__ void loco_kernel(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut, int width, float speed) {
// // map from threadIdx/BlockIdx to pixel position
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// int offset = x + y * blockDim.x * gridDim.x;
//
// int left = offset - 1;
// int right = offset + 1;
// if (x == 0) left++;
// if (x == width - 1) right--;
//
// int top = offset - width;
// int bottom = offset + width;
// if (y == 0) top += width;
// if (y == width - 1) bottom -= width;
//
// float t, l, c, r, b;
// if (dstOut) {
// t = tex1Dfetch<float>(texIn, top);
// l = tex1Dfetch<float>(texIn, left);
// c = tex1Dfetch<float>(texIn, offset);
// r = tex1Dfetch<float>(texIn, right);
// b = tex1Dfetch<float>(texIn, bottom);
//
// }
// else {
// t = tex1Dfetch<float>(texOut, top);
// l = tex1Dfetch<float>(texOut, left);
// c = tex1Dfetch<float>(texOut, offset);
// r = tex1Dfetch<float>(texOut, right);
// b = tex1Dfetch<float>(texOut, bottom);
// }
//
// dst[offset] = c + speed * (t + b + r + l - 4 * c);
//}
//
//__global__ void loco_kernel2(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut, int2 plySize, float speed) {
//
// int2 tileSize = TileSize();
// int2 nitDex = NodeInTilePos();
// int2 tipDex;
// int nodesInPly = plySize.x * plySize.y;
//
//
// for (int i = nitDex.x; i < plySize.x; i += tileSize.x)
// {
// tipDex.x = i;
// for (int j = nitDex.y; j < plySize.y; j += tileSize.y)
// {
// tipDex.y = j;
//
// int2 coords = NodeInPlyPos(nitDex, tipDex, tileSize);
// if ((coords.x >= plySize.x) || (coords.y >= plySize.y))
// {
// break;
// }
//
// int offset = coords.y * plySize.x + coords.x;
//
// int left = offset - 1;
// int right = offset + 1;
// if (coords.x == 0) left++;
// if (coords.x == plySize.x - 1) right--;
//
// int top = offset - plySize.x;
// int bottom = offset + plySize.x;
// if (coords.y == 0) top += plySize.x;
// if (coords.y == plySize.y - 1) bottom -= plySize.x;
//
// float t, l, c, r, b;
// if (dstOut) {
// t = tex1Dfetch<float>(texIn, top);
// l = tex1Dfetch<float>(texIn, left);
// c = tex1Dfetch<float>(texIn, offset);
// r = tex1Dfetch<float>(texIn, right);
// b = tex1Dfetch<float>(texIn, bottom);
//
// }
// else {
// t = tex1Dfetch<float>(texOut, top);
// l = tex1Dfetch<float>(texOut, left);
// c = tex1Dfetch<float>(texOut, offset);
// r = tex1Dfetch<float>(texOut, right);
// b = tex1Dfetch<float>(texOut, bottom);
// }
//
// dst[offset] = c + speed * (t + b + r + l - 4 * c);
// }
//
// __syncthreads();
// }
//}
//
//
//
//void anim_gpu(AppBlock *appBlock, int ticks) {
// HANDLE_ERROR(cudaEventRecord(appBlock->start, 0));
// const int chop = 16;
// int width = appBlock->plyBlock->width;
// dim3 blocks(width / chop, width / chop);
// dim3 threads(chop, chop);
// CPUAnimBitmap *cPUAnimBitmap = appBlock->cPUAnimBitmap;
//
// // since tex is global and bound, we have to use a flag to
// // select which is in/out per iteration
// volatile bool dstOut = true;
// for (int i = 0; i<250; i++) {
// float *in, *out;
// if (dstOut) {
// in = appBlock->plyBlock->dev_inSrc;
// out = appBlock->plyBlock->dev_outSrc;
// }
// else {
// out = appBlock->plyBlock->dev_inSrc;
// in = appBlock->plyBlock->dev_outSrc;
// }
//
// //blend_kernel << <blocks, threads >> >(
// // out, dstOut, *appBlock->plyBlock->texIn, *appBlock->plyBlock->texOut,
// // appBlock->plyBlock->randData->dev_curandStates,
// // width, SPEED*2.65);
// int2 gridSize;
// gridSize.x = width;
// gridSize.y = width;
//
// loco_kernel2 << <blocks, threads >> >(
// out, dstOut, *appBlock->plyBlock->texIn, *appBlock->plyBlock->texOut,
// gridSize, SPEED*0.5);
//
// //loco_kernel << <blocks, threads >> >(
// // out, dstOut, *appBlock->plyBlock->texIn, *appBlock->plyBlock->texOut,
// // width, SPEED*0.5);
//
// //HANDLE_ERROR(cudaDeviceSynchronize());
// //dim3 blocks2(32,32);
// //dim3 threads2(256);
//
// noiseTrimKernel << <blocks, threads>> >(
// out, appBlock->plyBlock->randData->dev_curandStates,
// SPEED);
//
// HANDLE_ERROR(cudaDeviceSynchronize());
//
// dstOut = !dstOut;
// }
//
// float_to_color << <blocks, threads >> >(appBlock->output_bitmap, appBlock->plyBlock->dev_outSrc);
//
// void *q = cPUAnimBitmap->get_ptr();
// unsigned char *r = appBlock->output_bitmap;
// int sz = cPUAnimBitmap->image_size();
//
// HANDLE_ERROR(cudaMemcpy(cPUAnimBitmap->get_ptr(), appBlock->output_bitmap, cPUAnimBitmap->image_size(), cudaMemcpyDeviceToHost));
//
// HANDLE_ERROR(cudaEventRecord(appBlock->stop, 0));
// HANDLE_ERROR(cudaEventSynchronize(appBlock->stop));
// float elapsedTime;
// HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, appBlock->start, appBlock->stop));
// appBlock->totalTime += elapsedTime;
// ++appBlock->frames;
// printf("Average Time per frame: %3.1f ms\n", appBlock->totalTime / appBlock->frames);
// printf("tic: %d\n\n", ticks);
//}
//
//
//void anim_exit(AppBlock *appBlock) {
//
// cudaDestroyTextureObject(*(appBlock->plyBlock->texIn));
// cudaDestroyTextureObject(*appBlock->plyBlock->texOut);
// //cudaDestroyTextureObject(*appBlock->plyBlock->texConst);
//
// HANDLE_ERROR(cudaFree(appBlock->plyBlock->dev_inSrc));
// HANDLE_ERROR(cudaFree(appBlock->plyBlock->dev_outSrc));
// HANDLE_ERROR(cudaFree(appBlock->plyBlock->dev_constSrc));
//
// HANDLE_ERROR(cudaEventDestroy(appBlock->start));
// HANDLE_ERROR(cudaEventDestroy(appBlock->stop));
//}
|
2,063
|
#include <iostream>
#include <cmath>
#include <cstdlib>
#include <climits>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
#define SIZE 14*14
#define X_SIZE 14
#define NUM_EXAMPLES 3
#define BLOCK_SIZE 3
//Function prototype
//int SAT(int, int, int, int, int);
//Global variable
//Create big ass array for test char
unsigned char image[3*SIZE] = {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 191, 127, 255, 255, 191, 127, 20 , 255, 255, 255,
255, 255, 255, 255, 127, 0 , 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 127, 0 , 40 , 40 , 230, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 175, 127, 127, 127, 235, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 245, 220, 245, 255, 255, 255, 255, 255,
255, 255, 255, 255, 127, 120, 180, 125, 120, 255, 255, 255, 255, 255,
255, 255, 255, 255, 245, 255, 255, 255, 100, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 110, 252, 252, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 120, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 128, 0 , 45 , 127, 255, 255, 255, 255, 255,
255, 255, 255, 255, 250, 20 , 240, 30 , 252, 255, 255, 255, 255, 255,
255, 255, 255, 255, 250, 15 , 15 , 235, 205, 135, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 150, 50 , 255, 255, 191, 127, 30 , 255, 255, 255,
255, 255, 255, 255, 120, 0 , 255, 235, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 113, 0 , 40 , 40 , 230, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 175, 150, 127, 127, 235, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 127, 0 , 235, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 45 , 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255};
//Define constant memory for device image
__constant__ unsigned char _image[3*SIZE];
__constant__ int _label[NUM_EXAMPLES];
//Function SAT
// Brute-force rectangle sum: adds up the pixel intensities of image `img`
// (stored in constant memory `_image`) over the inclusive region
// [x1, x2] x [y1, y2].
__device__ int SAT(int x1, int y1, int x2, int y2, int img)
{
    unsigned int total = 0;
    for (int row = y1; row <= y2; row++)
    {
        for (int col = x1; col <= x2; col++)
        {
            total += _image[img*SIZE + row*X_SIZE + col];
        }
    }
    return total;
}
// Evaluates one two-rectangle Haar-like feature at (x, y) with size (w, h)
// and threshold theta on every training image (one thread per image), and
// atomically accumulates the number of misclassifications into *error.
// NOTE(review): parameters f and comp are passed by value and used only as
// scratch — their assignments are invisible to the caller.
// NOTE(review): there is no i-vs-example-count guard; the launch must supply
// exactly one thread per training image — confirm against the host code.
__global__ void compute(int *error, int x, int y, int w, int h, int theta, int f, int comp)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
//Compute features and classify with them
// Feature value: left rectangle sum minus right rectangle sum.
f = SAT(x, y, x+w-1, y+h-1, i) - SAT(x+w, y, x+2*w-1, y+h-1, i);
comp = abs(f - 255*w*h);
if(comp < theta)
{
// Classified positive; count an error when the true label is 0.
if(_label[i] == 0)
{
//printf("label[%i] == 0\n", i);
atomicAdd(error, 1);
}
}
else
{
// Classified negative; count an error when the true label is 1.
if(_label[i] == 1)
{
//printf("label[%i] == 1\n", i);
atomicAdd(error, 1);
}
}
__syncthreads();
//printf("error: %i\n", error[0]);
}
// Exhaustively searches all two-rectangle feature positions/sizes (x, y, w,
// h) and thresholds theta <= hi, classifying the training images on the GPU
// and reporting the best threshold per feature.
// Usage: prog <label0> <label1> <label2> <theta_hi>
// Fixes: dev_e was host-malloc'd and immediately overwritten by cudaMalloc
// (host leak); dev_in/dev_out/dev_e were never freed; argv was read without
// checking argc.
int main(int argc, char*argv[])
{
if(argc < 5)
{
cout << "usage: " << argv[0] << " <label0> <label1> <label2> <theta_hi>" << endl;
return 1;
}
int label[3];
label[0] = atoi(argv[1]);
label[1] = atoi(argv[2]);
label[2] = atoi(argv[3]);
int hi = atoi(argv[4]);
int i = 0;
int theta = 0;
int min_theta = 0;
int e[1] = {0};
int min_e = INT_MAX;
int f = 0;
int comp = 0;
//CUDA stuffs
dim3 dimGrid(ceil(NUM_EXAMPLES/BLOCK_SIZE), 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
unsigned char* dev_in;
unsigned char* dev_out;
int* dev_e = NULL; // device pointer; previously leaked a host malloc here
int size = 3*SIZE*sizeof(unsigned char);
cudaError_t error[3];
error[0] = cudaMalloc((void**) &dev_in, size);
error[1] = cudaMalloc((void**) &dev_out, size);
error[2] = cudaMalloc((void**) &dev_e, sizeof(int));
cout << "cudaMalloc dev_in error: " << cudaGetErrorString(error[0]) << endl;
cout << "cudaMalloc dev_out error: " << cudaGetErrorString(error[1]) << endl;
cout << "cudaMalloc dev_e error: " << cudaGetErrorString(error[2]) << endl;
// Upload image and label data to constant memory.
error[1] = cudaMemcpyToSymbol(_image, image, size);
error[2] = cudaMemcpyToSymbol(_label, label, 3*sizeof(int));
cout << "cudaMemcpyToSymbol image error: " << cudaGetErrorString(error[1]) << endl;
cout << "cudaMemcpyToSymbol label error: " << cudaGetErrorString(error[2]) << endl;
//END CUDA stuffs
for(int x = 0; x < 14; x++)
{
for(int y = 0; y < 14; y++)
{
for(int h = 1; h <= 15 - y; h++)
{
for(int w = 1; w <= (15 - x)/2; w++)
{
theta = hi;
i++;
while(theta > 0)
{
// Reset the device error counter, classify, and read the count back.
e[0] = 0;
error[0] = cudaMemcpy(dev_e, e, sizeof(int), cudaMemcpyHostToDevice);
compute<<<dimGrid, dimBlock>>>(dev_e, x, y, w, h, theta, f, comp);
cudaDeviceSynchronize();
error[0] = cudaMemcpy(e, dev_e, sizeof(int), cudaMemcpyDeviceToHost);
//Keep track of current best theta value
if(e[0] <= min_e)
{
min_e = e[0];
min_theta = theta;
}
theta--;
}
cout << "==========================" << endl;
cout << "feature(" << x << ", " << y << ", " << w << ", " << h << ")" << endl;
cout << "Best theta classifier: " << min_theta << endl;
cout << "Number misclassified: " << min_e << endl;
cout << "==========================" << endl << endl;
}
}
}
}
// Release device memory (previously never freed).
cudaFree(dev_in);
cudaFree(dev_out);
cudaFree(dev_e);
cout << "Total number of features: " << i << endl;
cout << "Memory size for array to hold feature values: " << i*sizeof(int) << " B" << endl << endl;
return 0;
}
|
2,064
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <limits.h>
#include <float.h>
#include <iostream>
#include <sys/time.h>
#include <stack>
#define G 6.67408E-11 //Gravitational constant
#define lvl 9 //depth of quad tree till which we'll divide plane
using namespace std;
struct vect //Structure for a 2D coordinate (also used for 2D force vectors)
{
float x; // X coordinate / component
float y; // Y coordinate / component
};
struct node //Structure for each node of the Barnes-Hut quad tree
{
vect body; //centre of mass of bodies in current node
float mass; //total mass of bodies in current node
int child[4]; //children indices in nodes array (-1 when absent)
int l,r; //inclusive index range in the body array of bodies in this node
vect min, max; //min and max X and Y coordinates of bodies belonging to current node
};
//Newtonian gravitational force between two point masses.
//Returns the force vector directed along (a - b), i.e. the pull that mass m1
//at position a exerts on mass m2 at position b. Coincident bodies (zero
//distance) yield a zero-magnitude, unnormalised result, matching the guards.
vect gravity (vect a, vect b, float m1, float m2)
{
    float dx = a.x - b.x;
    float dy = a.y - b.y;
    float r2 = dx*dx + dy*dy;          // squared separation
    float mag = G*m1*m2;
    if (r2 > 0) mag /= r2;             // inverse-square law
    float dist = sqrt(r2);
    vect f;
    f.x = dx;
    f.y = dy;
    if (dist > 0) { f.x /= dist; f.y /= dist; } // unit direction vector
    f.x *= mag;
    f.y *= mag;
    return f;
}
//This function will construct particular level of the tree.
//Each node will be divided further into four new nodes and bodies in the array will be swapped so that bodies belonging to same node remain together in the array
//Partition order: first split [l, r] around xmid (x-coordinates), then split
//each half around ymid (y-coordinates), producing the 4 quadrant children.
void construct(vect *body, float *mass, node *nodes, int level, int tot)
{
int index;
int total = 1<<(2*level); //total nodes in current level
int offset=((1<<(2*level))-1)/3; //total nodes in tree upto previous level
int off=offset+total; //total nodes upto current level in tree
for (int m=0; m<total; m++)
{
int tid=m*4; //base index of this node's 4 children within the next level
index=m+offset; //actual index in nodes array
node nd=nodes[index];
if (nodes[index].l<=nodes[index].r) //node owns at least one body
{
float xl=nd.min.x, xr=nd.max.x;
float yl=nd.min.y, yr=nd.max.y;
float xmid=xl+(xr-xl)/2;
float ymid=yl+(yr-yl)/2;
float l=nd.l, r=nd.r;
node child[4];
for (int i=0;i<4;i++)
{
for (int j=0;j<4;j++) child[i].child[j]=-1;
// NOTE(review): FLT_MIN is the smallest POSITIVE float, not the most
// negative value; this max-init only works because every coordinate in
// this program is non-negative -- confirm before reusing with signed data.
child[i].min.x=child[i].min.y=FLT_MAX, child[i].max.x=child[i].max.y=FLT_MIN;
}
int i=l-1;
float m=0, x=0, y=0, mm=0, xx=0, yy=0; //running mass / position accumulators
for (int j=l;j<=r;j++) //swapping of bodies belonging to current node based on x-coordinates creating two children
{
if (body[j].x<=xmid)
{
i++;
vect temp=body[i];
body[i]=body[j];
body[j]=temp;
float t=mass[i];
mass[i]=mass[j];
mass[j]=t;
}
}
child[2].l=l, child[2].r=i;
child[3].l=i+1, child[3].r=r;
for (int k=2;k<=3;k++)
{
m=mm=x=xx=y=yy=0;
l=child[k].l, r=child[k].r;
i=l-1;
int cnt=0;
for (int j=l;j<=r;j++) //swapping of bodies in two children created previously based on y-coordinates, each creating two new children
{
x+=body[j].x;
y+=body[j].y;
m+=mass[j];
if (body[j].y<=ymid)
{
xx+=body[j].x, yy+=body[j].y, mm+=mass[j];
cnt++;
i++;
vect temp=body[i];
body[i]=body[j];
body[j]=temp;
float t=mass[i];
mass[i]=mass[j];
mass[j]=t;
}
}
// NOTE(review): child.body is the arithmetic mean of positions (xx/cnt),
// not a mass-weighted centroid, although node::body is documented as a
// centre of mass -- verify which is intended.
if(cnt>0) child[k].mass=mm, child[k].body.x=xx/cnt, child[k].body.y=yy/cnt;
child[k].l=l, child[k].r=i;
mm=m-mm, xx=x-xx, yy=y-yy, cnt=r-l+1-cnt; //remainder belongs to the upper-y sibling
if(cnt>0) child[k-2].mass=mm, child[k-2].body.x=xx/cnt, child[k-2].body.y=yy/cnt;
child[k-2].l=i+1, child[k-2].r=r;
}
for (int i=0;i<4;i++) //assign quadrant bounds and publish children
{
if (i%2) child[i].min.x=xmid, child[i].max.x=xr;
else child[i].min.x=xl, child[i].max.x=xmid;
if (i<2) child[i].min.y=ymid, child[i].max.y=yr;
else child[i].min.y=yl, child[i].max.y=ymid;
if (off+tid+i<tot) nodes[off+tid+i]=child[i];
nd.child[i]=off+tid+i;
}
}
else //empty node: mark all four children as empty ranges (l > r)
{
for (int i=0;i<4;i++)
{
if (off+tid+i<tot)
{
nodes[off+tid+i].l=0;
nodes[off+tid+i].r=-1;
}
nd.child[i]=off+tid+i;
}
}
nodes[index]=nd;
}
}
//This function calculates force on bodies
//Barnes-Hut evaluation: for every body, an iterative DFS walks the quad
//tree; a node whose opening ratio s/d is below theta is treated as one
//pseudo-body, otherwise its non-empty children are pushed and expanded.
void calculate(vect *body, float *mass, node *nodes, vect *force, int n, float theta)
{
int l=((1<<(2*(lvl-1)))-1)/3; //total nodes in tree upto max depth; indices >= l are leaves
for (int index=0;index<n;index++)
{
stack <int> st; //explicit DFS stack of node indices
st.push(0); //start at the root
vect bd=body[index];
while (!st.empty()) //for each body do DFS until reached leaf
{
int t=st.top();
st.pop();
node nd=nodes[t];
float s=fmax(nd.max.x-nd.min.x, nd.max.y-nd.min.y); //cell extent
float x=bd.x-nd.body.x, y=bd.y-nd.body.y;
float dist=sqrt(x*x+y*y);
float val=FLT_MAX; //force expansion when body coincides with the node centroid
if (dist>0) val=s/dist;
if (val<theta) //Barnes-Hutt approximation
{
vect frc=gravity(nd.body, bd, nd.mass, mass[index]);
force[index].x+=frc.x;
force[index].y+=frc.y;
}
else
{
if (t>=l) //if reached leaf
{
vect frc=gravity(nd.body, bd, nd.mass, mass[index]);
force[index].x+=frc.x;
force[index].y+=frc.y;
continue;
}
for (int i=0;i<4;i++) //descend into non-empty children only
{
int temp=nd.child[i];
if (temp==-1 || nodes[temp].l>nodes[temp].r) continue;
st.push(temp);
}
}
}
}
}
// Returns the larger of two floats; on a tie the first argument is returned.
float maxx(float a, float b)
{
    if (a < b) return b;
    return a;
}
// Returns the smaller of two floats; on a tie the second argument is returned.
float minn(float a, float b)
{
    if (a < b) return a;
    return b;
}
//Driver: generates n random bodies, builds the quad tree level by level on
//the CPU, runs the Barnes-Hut force evaluation and reports elapsed time.
int main()
{
int n;
printf("n : ");
scanf("%d", &n);
// NOTE(review): variable-length arrays of size n live on the stack;
// a large n will overflow it -- consider heap allocation.
vect body[n];
float mass[n];
float m=0, x=0, y=0;
for (int i=0;i<n;i++)
{ //Here we're going to take random inputs
body[i].x=rand()%1000000;
body[i].y=rand()%1000000;
mass[i]=rand()%1000000;
m+=mass[i], x+=body[i].x, y+=body[i].y;
}
x/=n, y/=n; //centre of mass of the system
vect force[n];
vect mn, mx;
// NOTE(review): FLT_MIN is the smallest positive float; this max-init works
// here only because all generated coordinates are non-negative.
mn.x=mn.y=FLT_MAX, mx.x=mx.y=FLT_MIN;
struct timeval start, stop;
gettimeofday(&start, NULL);
for (int i=0;i<n;i++) //zero forces and find the global bounding box
{
force[i].x=force[i].y=0;
mn.x=min(mn.x, body[i].x);
mx.x=max(mx.x, body[i].x);
mn.y=min(mn.y, body[i].y);
mx.y=max(mx.y, body[i].y);
}
gettimeofday(&stop, NULL);
long seconds=(stop.tv_sec-start.tv_sec);
float t=seconds*1000+float(stop.tv_usec-start.tv_usec)/1000; //elapsed milliseconds
vect gmin=mn, gmax=mx;
printf("%f %f\n %f %f\n", gmin.x, gmin.y, gmax.x, gmax.y);
int tot=1<<(2*lvl);
tot=(tot-1)/3; //node count of a complete 4-ary tree of depth lvl
node h_nodes[tot];
for (int i=0;i<tot;i++)
{
for (int j=0;j<4;j++) h_nodes[i].child[j]=-1;
}
vect temp;
temp.x=x, temp.y=y;
//root node covers every body and the whole bounding box
h_nodes[0].body=temp, h_nodes[0].mass=m, h_nodes[0].l=0, h_nodes[0].r=n-1, h_nodes[0].min=gmin, h_nodes[0].max=gmax;
for (int i=0;i<lvl-1;i++) //creation of tree level by level
{
gettimeofday(&start, NULL);
construct(body, mass, h_nodes, i, tot);
gettimeofday(&stop, NULL);
t+=((stop.tv_sec-start.tv_sec)*1000+float(stop.tv_usec-start.tv_usec)/1000);
}
float theta;
printf("theta : ");
scanf("%f", &theta);
gettimeofday(&start, NULL);
calculate(body, mass, h_nodes, force, n, theta);
gettimeofday(&stop, NULL);
t+=((stop.tv_sec-start.tv_sec)*1000+float(stop.tv_usec-start.tv_usec)/1000);
//t shows total number of time this serial code take
x=0, y=0;
for(int i=0;i<n;i++)
{
printf("%d : x %f y %f m %f : %.15f %.15f\n", i, body[i].x, body[i].y, mass[i], force[i].x, force[i].y);
x+=force[i].x, y+=force[i].y;
}
// printf("%f %f\n", x, y);
printf("cpu time : %f\n", t);
}
|
2,065
|
#include <stdio.h>
#include <ctime>
#include <cassert>
#include <cmath>
#include <utility>
#include <vector>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Converts radians to degrees in single precision.
// Fix: the original multiplied by the double expression 180.0 / M_PI,
// forcing a float->double->float round trip per call on the device;
// use a single-precision constant instead.
float __device__ rad2deg(float radians)
{
    return radians * (180.0f / (float)M_PI);
}
// Converts degrees to radians in single precision.
// Fix: avoid the double-precision M_PI / 180.0 promotion of the original
// by folding the factor into a float constant.
float __device__ deg2rad(float degrees)
{
    return degrees * ((float)M_PI / 180.0f);
}
// Returns the polar angle of (_x, _y) in degrees, mapped into [0, 360) by
// quadrant. Fixes over the original:
//  - logical && instead of bitwise & on boolean comparisons;
//  - single-precision atanf instead of double atan (device perf);
//  - a guaranteed return on every path (the original could fall off the
//    end -- undefined behaviour -- e.g. for NaN input).
float __device__ xy2theta(const float & _x, const float & _y )
{
    const float k = 180.0f / (float)M_PI; // radians -> degrees
    if ( _x >= 0 && _y >= 0)              // first quadrant
        return k * atanf(_y / _x);
    else if ( _x < 0 && _y >= 0)          // second quadrant
        return 180.0f - ( k * atanf(_y / (-_x)) );
    else if ( _x < 0 && _y < 0)           // third quadrant
        return 180.0f + ( k * atanf(_y / _x) );
    else                                  // fourth quadrant (and any leftover case)
        return 360.0f - ( k * atanf((-_y) / _x) );
} // xy2theta
// convert pointcloud to grid map
// Bins every point of an SoA cloud (x block, then y block, then z block,
// each d_size floats long) into a (ring, sector, height) cell index.
// One thread per point; launch with >= d_size threads.
// Fixes over the original:
//  - removed the trailing __syncthreads(): it followed a divergent early
//    return (undefined behaviour per the CUDA programming guide) and
//    synchronised nothing useful at the end of the kernel;
//  - single-precision math (sqrtf, x*x) instead of double pow()/sqrt();
//  - sector and height indices are clamped like the ring index already was,
//    so boundary points cannot produce out-of-range cell indices.
void __global__ point2gridmap(float* point, int* ring, int* sector, int* height, int d_size, int max_length, int max_height, int num_ring, int num_sector, int num_height)
{
    int gid = threadIdx.x + blockDim.x*blockIdx.x;
    if(gid >= d_size) return; // tail guard
    // cell sizes along each axis
    float gap_ring = (float)max_length / (float)num_ring;
    float gap_sector = 360.0f / (float)num_sector;
    float gap_height = 2.0f * (float)max_height / (float)num_height;
    float x = point[gid];
    float y = point[gid + d_size];
    float z = point[gid + 2 * d_size];
    // nudge exact zeros so the quadrant tests in xy2theta stay well defined
    if(x == 0.0f)
        x = 0.0001f;
    if(y == 0.0f)
        y = 0.0001f;
    if(z == 0.0f)
        z = 0.0001f;
    float theta = xy2theta(x, y);             // angle in degrees, [0, 360)
    float faraway = sqrtf(x*x + y*y);         // planar distance from origin
    int idx_ring = (int)floorf(faraway / gap_ring);
    int idx_sector = (int)floorf(theta / gap_sector);
    int idx_height = (int)floorf((z + (float)max_height) / gap_height);
    // clamp all three indices into their valid ranges
    if(idx_ring >= num_ring)
        idx_ring = num_ring - 1;
    if(idx_sector >= num_sector)
        idx_sector = num_sector - 1;
    if(idx_height >= num_height)
        idx_height = num_height - 1;
    height[gid] = idx_height;
    ring[gid] = idx_ring;
    sector[gid] = idx_sector;
}
|
2,066
|
#include<iostream>
#include<time.h>
#include<climits>
#include<stdlib.h>
using namespace std;
// Each thread tid scans the window a[tid .. min(tid+256, n)) and stores the
// smallest value it saw into b[tid].
// NOTE(review): windows of consecutive threads overlap almost entirely, so
// b holds sliding-window minima, not disjoint-chunk minima; only b[0] is the
// global minimum, and only when n <= 256 -- confirm intended design.
// NOTE(review): every thread writes its own b slot, so the caller must
// allocate b with at least blockDim.x ints.
__global__ void minimum(int *a, int *b, int n) {
    int tid = threadIdx.x;
    int min_limit = INT_MAX;  // identity element for min
    for(int i=tid; i<min(tid+256, n); i++) {
        if(min_limit > a[i])
            min_limit = a[i];
    }
    b[tid] = min_limit;
}
// Fills an array with random ints, finds its minimum on the GPU via the
// `minimum` kernel, then cross-checks with a serial CPU scan and reports
// both timings.
// Fixes over the original:
//  - dev_b was allocated sizeof(int) while the kernel writes b[tid] for all
//    n threads, an out-of-bounds device write; allocate n ints;
//  - cudaEventSynchronize(stop) before cudaEventElapsedTime, which requires
//    the stop event to have completed;
//  - device and host allocations are now released.
int main(){
    int n = 128;
    int *a = (int *)malloc(n * sizeof(int));
    srand(time(0));
    for(int i=0;i<n;i++) {
        a[i] = rand();
    }
    for(int i=0; i<n; i++){
        cout<<a[i]<<" ";
    }
    cout<<endl;
    //creating the device array
    int *dev_a, *dev_b;
    int size = n * sizeof(int);
    float total_elapsed_time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc(&dev_a, size);
    // one output slot per thread (the kernel writes b[tid] for tid in [0, n))
    cudaMalloc(&dev_b, size);
    int * ans;
    ans = (int *)malloc(sizeof(int));
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    minimum<<<1, n>>>(dev_a, dev_b, n);
    cudaDeviceSynchronize();
    // b[0] covers a[0..min(256,n)) and n <= 256, so it is the global minimum
    cudaMemcpy(ans, dev_b, sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop); // wait for stop before querying elapsed time
    cudaEventElapsedTime(&total_elapsed_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout<<"Minimum of the array is "<<ans[0]<<endl;
    cout<<"total elapsed time "<<total_elapsed_time<<"ms"<<endl;
    //calculating the serial way
    clock_t start_cpu = clock();
    int min = INT_MAX;
    for(int i=0;i<n;i++) {
        if(a[i] < min)
            min = a[i];
    }
    clock_t stop_cpu = clock();
    cout<<"Min by CPU "<<min<<endl;
    clock_t total_time = (stop_cpu - start_cpu) * 1000 / CLOCKS_PER_SEC;
    cout<<total_time<<endl;
    // release device and host memory (the original leaked all of it)
    cudaFree(dev_a);
    cudaFree(dev_b);
    free(ans);
    free(a);
    return 0;
}
|
2,067
|
#include <iostream>
#include <time.h>
#define N 50000
#define BLK_SIZE 256
using namespace std;
// Array-of-Structures test record for the coalescing benchmark: only x/y/z
// are used by the kernels; a..f are padding that widens the struct so that
// the per-thread AoS accesses stride farther apart in memory.
struct Atom
{
    int x;
    int y;
    int z;
    int a, b, c, d, e, f; // unused padding fields
};
__global__ void AtomKernel(Atom *atoms, int *sum);
__global__ void CoalescedKernel(int *x, int *y, int *z, int *sum);
// Benchmark harness comparing an AoS kernel against an SoA (coalesced)
// kernel over 10000 launches each.
// Fixes over the original:
//  - the launch uses N / BLK_SIZE + 1 blocks, and neither kernel guards
//    idx < N, so device buffers are padded to the grid size to keep the
//    tail threads' sum[idx] writes in bounds;
//  - memset/cudaMemset fill BYTES, so memset(x, 10, ...) stored 0x0A0A0A0A
//    per int instead of 10; values are now assigned explicitly and uploaded
//    with cudaMemcpy (the original never copied x/y/z to the device at all);
//  - all allocations are released before exit.
int main()
{
    int i;
    // padded element count = total threads launched by both kernels
    const int padded = (N / BLK_SIZE + 1) * BLK_SIZE;
    //host
    Atom *atoms = (Atom*) malloc(N * sizeof(Atom));
    for (i = 0; i < N; i++)
    {
        atoms[i].x = 10;
        atoms[i].y = 10;
        atoms[i].z = 10;
    }
    int *x = (int*) malloc(N * sizeof(int));
    int *y = (int*) malloc(N * sizeof(int));
    int *z = (int*) malloc(N * sizeof(int));
    for (i = 0; i < N; i++)
    {
        x[i] = 10; // intended VALUE 10, not byte-fill
        y[i] = 10;
        z[i] = 10;
    }
    //device
    Atom *atomsD;
    cudaMalloc(&atomsD, padded * sizeof(Atom));
    cudaMemcpy(atomsD, atoms, N * sizeof(Atom), cudaMemcpyHostToDevice);
    int *xD;
    int *yD;
    int *zD;
    int *sumD;
    cudaMalloc(&xD, padded * sizeof(int));
    cudaMalloc(&yD, padded * sizeof(int));
    cudaMalloc(&zD, padded * sizeof(int));
    cudaMalloc(&sumD, padded * sizeof(int));
    cudaMemcpy(xD, x, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(yD, y, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(zD, z, N * sizeof(int), cudaMemcpyHostToDevice);
    // AoS variant: one Atom per thread, strided loads
    for (i = 0; i < 10000; i++)
    {
        AtomKernel<<<N / BLK_SIZE + 1, BLK_SIZE>>>(atomsD, sumD);
        cudaDeviceSynchronize();
    }
    // SoA variant: adjacent threads read adjacent ints, coalesced loads
    for (i = 0; i < 10000; i++)
    {
        CoalescedKernel<<<N / BLK_SIZE + 1, BLK_SIZE>>>(xD, yD, zD, sumD);
        cudaDeviceSynchronize();
    }
    // release all resources
    cudaFree(atomsD);
    cudaFree(xD);
    cudaFree(yD);
    cudaFree(zD);
    cudaFree(sumD);
    free(atoms);
    free(x);
    free(y);
    free(z);
    return 0;
}
// Sum of squared coordinates, AoS version: each thread reads three fields of
// one Atom, so a warp's loads are strided by sizeof(Atom) (poorly coalesced).
// NOTE(review): no idx bound check -- the caller must size atoms and sum to
// the full grid (gridDim.x * blockDim.x elements) or tail threads write
// out of bounds.
__global__ void AtomKernel(Atom *atoms, int *sum)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    sum[idx] = 0;
    sum[idx] += atoms[idx].x * atoms[idx].x;
    sum[idx] += atoms[idx].y * atoms[idx].y;
    sum[idx] += atoms[idx].z * atoms[idx].z;
}
// Sum of squared coordinates, SoA version: x, y and z live in separate
// arrays, so adjacent threads load adjacent ints (coalesced accesses).
// NOTE(review): same missing idx bound check as AtomKernel -- caller must
// pad all four arrays to the grid size.
__global__ void CoalescedKernel(int *x, int *y, int *z, int *sum)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    sum[idx] = 0;
    sum[idx] += x[idx] * x[idx];
    sum[idx] += y[idx] * y[idx];
    sum[idx] += z[idx] * z[idx];
}
|
2,068
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <string>
#include <array>
#include <iomanip>
#include <chrono>
#include <fstream>
typedef std::chrono::high_resolution_clock Clock;
#define pi 3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521f
#define omega (2.f*pi)
#define omega0 (omega*3.f/2.f)
#define beta (omega0/4.f)
#define roll_num 16
#define wrarp_num 32
#define grid_num 4
#define time 10
#define h_exp_3 5
#define t_step_exp 8
#define batch_size ( wrarp_num * grid_num * 32)
#define t_step (1.f/(1<<t_step_exp))
#define h_level_3 (1<<h_exp_3)
#define h (1.f/(1<<(h_exp_3*3+t_step_exp)))
#define t_size (sizeof(unsigned long) * batch_size)
#define p_size (sizeof(float)*(4 + 2 * roll_num) * batch_size)
__device__ void __RK4(unsigned long t_index, float x, float v, float *outx, float *outv, float y, float dy);
__device__ void RK4(unsigned long t_index, float x, float v, float *outx, float *outv, float y, float dy);
void RK4_CPU(unsigned t_index, float y, float dy, float& x, float& v);
// One thread advances one parameter set through roll_num consecutive RK4
// macro-steps; the output state of step i becomes the input of step i+1.
// p layout (each slice strided by batch_size):
//   p[0..]           gamma,  p[batch_size..] dgamma,
//   then roll_num+1 (x, v) state slices at offsets 2*i+2 / 2*i+3.
__global__ void kernel(unsigned long *index, float * p) {
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned i = 0; i < roll_num; ++i) {
        __RK4(index[id] + i,
            p[batch_size*(2 * i + 2) + id], p[batch_size*(2 * i + 3) + id],
            p + batch_size * (2 * i + 4) + id, p + batch_size * (2 * i + 5) + id,
            p[id], p[batch_size + id]);
    }
}
// Bundles the host/device buffers and the private CUDA stream for one batch
// of trajectories; constructor allocates, destructor releases (RAII-style --
// note the host buffers are pageable malloc memory, so the async copies
// below are not truly asynchronous w.r.t. the host).
// Accessors index the flat p buffer: gamma/dgamma first, then roll_num+1
// (x, v) state slices, each strided by batch_size.
struct workinggroup
{
    float *p_h, *p_d;                     // host / device parameter+state buffers
    unsigned long *t_index_h, *t_index_d; // host / device start time indices
    cudaStream_t stream;                  // private stream for copy + launch
    int gpu;                              // device ordinal this group is bound to
    workinggroup(int gpu_id) :gpu(gpu_id) {
        t_index_h = (unsigned long*)malloc(t_size);
        p_h = (float*)malloc(p_size);
        cudaSetDevice(gpu);
        cudaStreamCreate(&stream);
        cudaMalloc((void**)&t_index_d, t_size);
        cudaMalloc((void**)&p_d, p_size);
    }
    ~workinggroup() {
        free(t_index_h);
        free(p_h);
        cudaSetDevice(gpu);
        cudaStreamDestroy(stream);
        cudaFree(t_index_d);
        cudaFree(p_d);
    }
    inline unsigned long &t_index(unsigned long index) { return t_index_h[index]; }
    inline float &gamma(unsigned long index) { return p_h[0 * batch_size + index]; }
    inline float &dgamma(unsigned long index) { return p_h[1 * batch_size + index]; }
    inline float &x(unsigned long t_index, unsigned long index) { return p_h[(2 * t_index + 2) * batch_size + index]; }
    inline float &v(unsigned long t_index, unsigned long index) { return p_h[(2 * t_index + 3) * batch_size + index]; }
    inline float &x_0(unsigned long index) { return x(0, index); }
    inline float &v_0(unsigned long index) { return v(0, index); }
    inline float &x_n(unsigned long index) { return x(roll_num, index); }
    inline float &v_n(unsigned long index) { return v(roll_num, index); }
    // Enqueues H2D copies, the kernel launch and the D2H copy on this
    // group's stream without blocking the host ("lunch" sic -- the name is
    // part of the interface used by main, so it is kept).
    inline void lunch_plan() {
        cudaSetDevice(gpu);
        cudaMemcpyAsync((void*)t_index_d, (void*)t_index_h, t_size, cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync((void*)p_d, (void*)p_h, p_size, cudaMemcpyHostToDevice, stream);
        kernel <<<grid_num, wrarp_num * 32, 0, stream >>> (t_index_d, p_d);
        cudaMemcpyAsync((void*)p_h, (void*)p_d, p_size, cudaMemcpyDeviceToHost, stream);
    }
    // Blocks the host until all work queued on this group's stream is done.
    inline void sync() { cudaSetDevice(gpu); cudaStreamSynchronize(stream); }
    // Non-blocking completion probe for this group's stream.
    inline bool check() { cudaSetDevice(gpu); return cudaStreamQuery(stream) != cudaErrorNotReady; }
};
// Run configuration plus progress record; serialized verbatim ("info.dat")
// so an interrupted run can be resumed from t_current.
struct info
{
    unsigned long gpu; //gpu using
    unsigned long t_file; //time step per file
    unsigned long t_num; //time step per second
    unsigned long t_max; //time range [0, t_max]
    unsigned long t_n; //total sample number
    unsigned long t_current; //time step finished
    unsigned long gamma_num; //testing gamma's number
    float init_gamma; //gamma start
    float d_gamma; //gamma step
    float x0; //init x
    float v0; //init v
};
// Host driver: configures the sweep over gamma (CLI args or resume from
// info.dat), triple-buffers three workinggroups on one GPU, and streams the
// per-step (x, v) results to rolling data<k>.dat files with a checkpoint in
// info.dat after every macro step.
// Fix: the checkpoint write line contained the mojibake character `¶`
// (an HTML-entity-garbled `&para`) which did not compile; restored to
// `(char*)&para` to match the sibling read call above.
int main(int argc, char *argv[])
{
info para;
bool continue_working = false;
unsigned long init_t = 0;
unsigned long start_t = 0;
unsigned long file_id = 0;
float gamma_range = 0.2;
para.t_num = (1 << t_step_exp);
para.t_file = 128 * para.t_num;
para.gpu = 0;
para.x0 = 0;
para.v0 = 0;
para.t_max = 10;
para.init_gamma = 1.f;
para.d_gamma = gamma_range / (batch_size * 3);
para.t_n = para.t_max * para.t_num;
para.gamma_num = batch_size * 3;
if (argc > 1) {
// command line overrides; any argument also disables auto-resume
for (int i = 0; i < argc; ++i) {
std::string arg = argv[i];
if (arg == "-h" || arg == "--help" || arg == "-?") {
std::cout << "-g:\tset the gpu for calculate" << std::endl;
std::cout << "-x:\tset init x0" << std::endl;
std::cout << "-v:\tset init v0" << std::endl;
std::cout << "-t:\tset time range" << std::endl;
std::cout << "-y:\tset minimal gamma" << std::endl;
std::cout << "-yr:\tset the gamma searching range (d_gamma = gamma_range / " << para.gamma_num << ")" << std::endl;
std::cout << "-h, --help, -?: " << std::endl;
return 0;
}
else if (arg == "-g")
para.gpu = std::stoi(argv[++i]);
else if (arg == "-x")
para.x0 = std::stof(argv[++i]);
else if (arg == "-v")
para.v0 = std::stof(argv[++i]);
else if (arg == "-t") {
para.t_max = std::stoul(argv[++i]);
para.t_n = para.t_max * para.t_num;
}
else if (arg == "-y")
para.init_gamma = std::stof(argv[++i]);
else if (arg == "-yr") {
gamma_range = std::stof(argv[++i]);
para.d_gamma = gamma_range / (batch_size * 3);
}
}
}
else {
// no args: try to resume an unfinished run from the checkpoint file
std::ifstream info_file("info.dat", std::ios::binary);
if (info_file.is_open()) {
info tmp;
info_file.read((char*)&tmp, sizeof(info));
if (tmp.t_current != tmp.t_n) {
continue_working = true;
start_t = (tmp.t_current / roll_num);
init_t = start_t * roll_num - 1;
file_id = init_t / tmp.t_file;
para = tmp;
std::cout << "Find unfinished task! Auto start continue working from t id " << start_t * roll_num << " ...\n(disable by adding any arg)\n\n\n\n" << std::endl;
}
}
}
std::cout << "Starting calculation:\n" << "\tx0: " << para.x0 << " v0: " << para.v0
<< "\n\tgamma: [ " << para.init_gamma << " , " << (para.init_gamma + para.gamma_num*para.d_gamma) << " ) -- " << para.gamma_num << " steps with step length: " << para.d_gamma
<< "\n\tt_max: " << para.t_max << "\n\ton GPU " << para.gpu << "\n\n" << std::endl;
float *tmp_data = new float[para.gamma_num * roll_num * 2];
if (continue_working) {
// reload the last completed (x, v) snapshot to seed the resumed run
std::ifstream data(std::string("data") + std::to_string(file_id) + std::string(".dat"), std::ios::binary);
data.seekg(para.gamma_num * 2 * (init_t % para.t_file) * sizeof(float));
data.read((char*)tmp_data, para.gamma_num * 2 * sizeof(float));
}
// three groups cover gamma_num = 3 * batch_size parameter sets
workinggroup a(para.gpu), b(para.gpu), c(para.gpu);
workinggroup *g[] = { &a, &b, &c };
for (int i = 0; i < 3; ++i) {
for (unsigned long j = 0; j < batch_size; ++j) {
g[i]->t_index(j) = start_t * roll_num;
g[i]->gamma(j) = para.init_gamma;
g[i]->dgamma(j) = para.d_gamma * (batch_size*i + j);
g[i]->x_0(j) = continue_working ? tmp_data[2 * (batch_size * i + j)] : para.x0;
g[i]->v_0(j) = continue_working ? tmp_data[2 * (batch_size * i + j) + 1] : para.v0;
}
g[i]->lunch_plan();
}
auto start = Clock::now();
for (unsigned long t = start_t; t < para.t_n / roll_num; ++t) {
auto s = Clock::now();
para.t_current = (t + 1) * roll_num;
for (int i = 0; i < 3; ++i) {
g[i]->sync();
// harvest the roll_num new states and requeue the group for the next step
for (unsigned long j = 0; j < batch_size; ++j) for (unsigned long k = 0; k < roll_num; ++k) {
tmp_data[2 * (para.gamma_num * k + i * batch_size + j)] = g[i]->x(k + 1, j);
tmp_data[2 * (para.gamma_num * k + i * batch_size + j) + 1] = g[i]->v(k + 1, j);
}
if (t != (para.t_n / roll_num - 1)) {
for (unsigned long j = 0; j < batch_size; ++j) {
g[i]->t_index(j) = para.t_current;
g[i]->x_0(j) = g[i]->x_n(j);
g[i]->v_0(j) = g[i]->v_n(j);
}
g[i]->lunch_plan();
}
}
// append this step's results at the right offset of the rolling data file
std::ofstream data_file;
data_file.open(std::string("data") + std::to_string(t * roll_num / para.t_file) + std::string(".dat"), std::ios::binary | std::ios::out | std::ios::in);
if (data_file.is_open())
data_file.seekp(t * roll_num % para.t_file * para.gamma_num * 2 * sizeof(float));
else
data_file.open(std::string("data") + std::to_string(t * roll_num / para.t_file) + std::string(".dat"), std::ios::binary | std::ios::out);
data_file.write((char*)tmp_data, para.gamma_num * roll_num * 2 * sizeof(float));
data_file.close();
// checkpoint progress so an interrupted run can resume
std::ofstream info_file("info.dat", std::ios::binary);
info_file.write((char*)&para, sizeof(para));
info_file.close();
auto e = Clock::now();
std::cout.setf(std::ios::fixed);
std::cout << "Finish calculation: 0 - " << ((t + 1) * roll_num - 1) << "\t(current t ID: " << (t + 1) * roll_num << ") / " << para.t_n
<< " |\tfrom " << std::setprecision(3) << t * roll_num * t_step << "s to " << std::setprecision(3) << (t + 1) * roll_num * t_step << "s |\tused "
<< std::setprecision(2) << (static_cast<double>(std::chrono::duration_cast<std::chrono::nanoseconds>(e - s).count()) / 1000000) << "ms\t("
<< std::setprecision(4) << ((t + 1) * roll_num * 100.f / para.t_n) << "% used: "
<< std::setprecision(3)
<< (static_cast<double>(std::chrono::duration_cast<std::chrono::nanoseconds>(e - start).count()) / 3600000000000) << "hours ETA: "
<< std::setprecision(3)
<< (para.t_n - (t + 1) * roll_num) *(static_cast<double>(std::chrono::duration_cast<std::chrono::nanoseconds>(e - start).count()) / ((t + 1 - start_t) * roll_num) / 3600000000000) << " hours)"
<< std::endl;
}
delete[] tmp_data;
return 0;
}
// Device RK4 integrator for the driven damped pendulum, fast-math variant
// (__cosf/__sinf intrinsics; the RK4 twin below is the accurate cosf/sinf
// version). Advances (x, v) by one macro step t_step split into h_level_3^3
// micro-steps of size h; dx1/dx2/dx3 and dv1/dv2/dv3 appear to be split
// accumulators across the three nested loop levels -- presumably to limit
// floating-point accumulation error (TODO confirm with the author).
// See the equation block below for the perturbation expansion in dgamma.
__device__ void __RK4(unsigned long t_index, float x, float v, float *outx, float *outv, float y, float dy) {
/**********************************************************************
(v' , x') = f((v+dv+ddv , x+dx+ddx) , t+dt): gamma = gamma + dgamma
ot = omega * t;
odt = omega * dt;
bv = -2*beta* v
y_cos_t = gamma*omega0^2* cos(ot)
y_sin_t = gamma*omega0^2* sin(ot)
dy_cos_t = dgamma*omega0^2* cos(ot)
dy_sin_t = dgamma*omega0^2* sin(ot)
cos_x = omega0^2* cos(x)
sin_x = omega0^2* sin(x)
tmp_1 = bv + y_cos_t * cos(odt)
tmp_2 = - y_sin_t * sin(odt) + dy_cos_t * cos(odt)
tmp_3 = - dy_sin_t * sin(odt)
v' = (h^1) tmp_1 - sin_x * cos(dx) * cos(ddx)
(h^2) tmp_2 - cos_x * sin(dx) * cos(ddx) - 2*beta * dv
(h^3) tmp_3 - cos_x * cos(dx) * sin(ddx) + sin_x * sin(dx) * sin(ddx) - 2*beta * ddv
x' = (h^1) + v
(h^2) + dv
(h^2) + ddv
**********************************************************************/
const float T = fmodf(t_step * t_index, 2 * pi / omega);
const float betav = -2.f * beta * v;
const float beta2 = -2.f * beta;
const float cos_x = omega0 * omega0 * cosf(x);
const float sin_x = omega0 * omega0 * sinf(x);
const float hv = h * (v);
const float hhv = .5f * hv;
float dx1 = 0, dv1 = 0;
for (unsigned long i = 0; i < h_level_3; ++i) {
float dx2 = 0, dv2 = 0;
for (unsigned long j = 0; j < h_level_3; ++j) {
float dx3 = 0, dv3 = 0;
const float ot = omega * (T + (h * h_level_3 * h_level_3) * i + (h * h_level_3) * j);
const float y_cos_t = y * (omega0 * omega0) * cosf(ot);
const float y_sin_t = y * (omega0 * omega0) * sinf(ot);
const float dy_cos_t = dy * (omega0 * omega0) * cosf(ot);
const float dy_sin_t = dy * (omega0 * omega0) * sinf(ot);
for (unsigned long k = 0; k < h_level_3; ++k) {
float dx2_ = 0, dx3_ = 0;
float dv1_ = 0, dv2_ = 0, dv3_ = 0;
float dx, ddx, dv, ddv, odt;
float tmp_dx2, tmp_dx3;
float tmp_dv1, tmp_dv2, tmp_dv3;
float tmp_1, tmp_2, tmp_3;
//K1
odt = (omega * h) * k;
tmp_1 = betav + y_cos_t * __cosf(odt);
tmp_2 = dy_cos_t * __cosf(odt) - y_sin_t * __sinf(odt);
tmp_3 = -dy_sin_t * __sinf(odt);
dv = dv1; ddv = dv2 + dv3;
dx = dx1; ddx = dx2 + dx3;
dv1_ += h * (tmp_1 - sin_x * __cosf(dx) * __cosf(ddx));
dv2_ += h * (tmp_2 - cos_x * __sinf(dx) * __cosf(ddx) - beta2 * dv);
dv3_ += h * (tmp_3 - cos_x * __cosf(dx) * __sinf(ddx) + sin_x * __sinf(dx) * __sinf(ddx) - beta2 * ddv);
dx2_ += h * dv;
dx3_ += h * ddv;
//t of K2 & K3
odt = (omega * h) * (k + .5f);
tmp_1 = betav + y_cos_t * __cosf(odt);
tmp_2 = dy_cos_t * __cosf(odt) - y_sin_t * __sinf(odt);
tmp_3 = -dy_sin_t * __sinf(odt);
//K2
dv = dv1 + .5f * dv1_; ddv = (dv2 + .5f * dv2_) + (dv3 + .5f * dv3_);
dx = dx1 + hhv; ddx = (dx2 + .5f * dx2_) + (dx3 + .5f * dx3_);
dv1_ += 2.f * (tmp_dv1 = h * (tmp_1 - sin_x * __cosf(dx) * __cosf(ddx)));
dv2_ += 2.f * (tmp_dv2 = h * (tmp_2 - cos_x * __sinf(dx) * __cosf(ddx) - beta2 * dv));
dv3_ += 2.f * (tmp_dv3 = h * (tmp_3 - cos_x * __cosf(dx) * __sinf(ddx) + sin_x * __sinf(dx) * __sinf(ddx) - beta2 * ddv));
dx2_ += 2.f * (tmp_dx2 = h * dv);
dx3_ += 2.f * (tmp_dx3 = h * ddv);
//K3
dv = dv1 + .5f * tmp_dv1; ddv = (dv2 + .5f * tmp_dv2) + (dv3 + .5f * tmp_dv3);
dx = dx1 + hhv; ddx = (dx2 + .5f * tmp_dx2) + (dx3 + .5f * tmp_dx3);
dv1_ += 2.f * (tmp_dv1 = h * (tmp_1 - sin_x * __cosf(dx) * __cosf(ddx)));
dv2_ += 2.f * (tmp_dv2 = h * (tmp_2 - cos_x * __sinf(dx) * __cosf(ddx) - beta2 * dv));
dv3_ += 2.f * (tmp_dv3 = h * (tmp_3 - cos_x * __cosf(dx) * __sinf(ddx) + sin_x * __sinf(dx) * __sinf(ddx) - beta2 * ddv));
dx2_ += 2.f * (tmp_dx2 = h * dv);
dx3_ += 2.f * (tmp_dx3 = h * ddv);
//K4
odt = (omega * h) * (k + 1);
tmp_1 = betav + y_cos_t * __cosf(odt);
tmp_2 = dy_cos_t * __cosf(odt) - y_sin_t * __sinf(odt);
tmp_3 = -dy_sin_t * __sinf(odt);
dv = dv1 + tmp_dv1; ddv = (dv2 + tmp_dv2) + (dv3 + tmp_dv3);
dx = dx1 + hv; ddx = (dx2 + tmp_dx2) + (dx3 + tmp_dx3);
dv1_ += h * (tmp_1 - sin_x * __cosf(dx) * __cosf(ddx));
dv2_ += h * (tmp_2 - cos_x * __sinf(dx) * __cosf(ddx) - beta2 * dv);
dv3_ += h * (tmp_3 - cos_x * __cosf(dx) * __sinf(ddx) + sin_x * __sinf(dx) * __sinf(ddx) - beta2 * ddv);
dx2_ += h * dv;
dx3_ += h * ddv;
//sum
dv1 += dv1_ / 6.f;
dv2 += dv2_ / 6.f;
dv3 += dv3_ / 6.f;
dx1 += hv;
dx2 += dx2_ / 6.f;
dx3 += dx3_ / 6.f;
}
dx2 += dx3;
dv2 += dv3;
}
dx1 += dx2;
dv1 += dv2;
}
(*outx) = x + dx1;
(*outv) = v + dv1;
}
// Accurate-math twin of __RK4 above: identical control flow and arithmetic,
// but uses cosf/sinf library calls instead of the __cosf/__sinf fast-math
// intrinsics. Kept in sync manually -- if one changes, change the other.
__device__ void RK4(unsigned long t_index, float x, float v, float *outx, float *outv, float y, float dy) {
/**********************************************************************
(v' , x') = f((v+dv+ddv , x+dx+ddx) , t+dt): gamma = gamma + dgamma
ot = omega * t;
odt = omega * dt;
bv = -2*beta* v
y_cos_t = gamma*omega0^2* cos(ot)
y_sin_t = gamma*omega0^2* sin(ot)
dy_cos_t = dgamma*omega0^2* cos(ot)
dy_sin_t = dgamma*omega0^2* sin(ot)
cos_x = omega0^2* cos(x)
sin_x = omega0^2* sin(x)
tmp_1 = bv + y_cos_t * cos(odt)
tmp_2 = - y_sin_t * sin(odt) + dy_cos_t * cos(odt)
tmp_3 = - dy_sin_t * sin(odt)
v' = (h^1) tmp_1 - sin_x * cos(dx) * cos(ddx)
(h^2) tmp_2 - cos_x * sin(dx) * cos(ddx) - 2*beta * dv
(h^3) tmp_3 - cos_x * cos(dx) * sin(ddx) + sin_x * sin(dx) * sin(ddx) - 2*beta * ddv
x' = (h^1) + v
(h^2) + dv
(h^2) + ddv
**********************************************************************/
const float T = fmodf(t_step * t_index, 2 * pi / omega);
const float betav = -2.f * beta * v;
const float beta2 = -2.f * beta;
const float cos_x = omega0 * omega0 * cosf(x);
const float sin_x = omega0 * omega0 * sinf(x);
const float hv = h * (v);
const float hhv = .5f * hv;
float dx1 = 0, dv1 = 0;
for (unsigned long i = 0; i < h_level_3; ++i) {
float dx2 = 0, dv2 = 0;
for (unsigned long j = 0; j < h_level_3; ++j) {
float dx3 = 0, dv3 = 0;
const float ot = omega * (T + (h * h_level_3 * h_level_3) * i + (h * h_level_3) * j);
const float y_cos_t = y * (omega0 * omega0) * cosf(ot);
const float y_sin_t = y * (omega0 * omega0) * sinf(ot);
const float dy_cos_t = dy * (omega0 * omega0) * cosf(ot);
const float dy_sin_t = dy * (omega0 * omega0) * sinf(ot);
for (unsigned long k = 0; k < h_level_3; ++k) {
float dx2_ = 0, dx3_ = 0;
float dv1_ = 0, dv2_ = 0, dv3_ = 0;
float dx, ddx, dv, ddv, odt;
float tmp_dx2, tmp_dx3;
float tmp_dv1, tmp_dv2, tmp_dv3;
float tmp_1, tmp_2, tmp_3;
//K1
odt = (omega * h) * k;
tmp_1 = betav + y_cos_t * cosf(odt);
tmp_2 = dy_cos_t * cosf(odt) - y_sin_t * sinf(odt);
tmp_3 = -dy_sin_t * sinf(odt);
dv = dv1; ddv = dv2 + dv3;
dx = dx1; ddx = dx2 + dx3;
dv1_ += h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx));
dv2_ += h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv);
dv3_ += h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv);
dx2_ += h * dv;
dx3_ += h * ddv;
//t of K2 & K3
odt = (omega * h) * (k + .5f);
tmp_1 = betav + y_cos_t * cosf(odt);
tmp_2 = dy_cos_t * cosf(odt) - y_sin_t * sinf(odt);
tmp_3 = -dy_sin_t * sinf(odt);
//K2
dv = dv1 + .5f * dv1_; ddv = (dv2 + .5f * dv2_) + (dv3 + .5f * dv3_);
dx = dx1 + hhv; ddx = (dx2 + .5f * dx2_) + (dx3 + .5f * dx3_);
dv1_ += 2.f * (tmp_dv1 = h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx)));
dv2_ += 2.f * (tmp_dv2 = h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv));
dv3_ += 2.f * (tmp_dv3 = h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv));
dx2_ += 2.f * (tmp_dx2 = h * dv);
dx3_ += 2.f * (tmp_dx3 = h * ddv);
//K3
dv = dv1 + .5f * tmp_dv1; ddv = (dv2 + .5f * tmp_dv2) + (dv3 + .5f * tmp_dv3);
dx = dx1 + hhv; ddx = (dx2 + .5f * tmp_dx2) + (dx3 + .5f * tmp_dx3);
dv1_ += 2.f * (tmp_dv1 = h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx)));
dv2_ += 2.f * (tmp_dv2 = h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv));
dv3_ += 2.f * (tmp_dv3 = h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv));
dx2_ += 2.f * (tmp_dx2 = h * dv);
dx3_ += 2.f * (tmp_dx3 = h * ddv);
//K4
odt = (omega * h) * (k + 1);
tmp_1 = betav + y_cos_t * cosf(odt);
tmp_2 = dy_cos_t * cosf(odt) - y_sin_t * sinf(odt);
tmp_3 = -dy_sin_t * sinf(odt);
dv = dv1 + tmp_dv1; ddv = (dv2 + tmp_dv2) + (dv3 + tmp_dv3);
dx = dx1 + hv; ddx = (dx2 + tmp_dx2) + (dx3 + tmp_dx3);
dv1_ += h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx));
dv2_ += h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv);
dv3_ += h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv);
dx2_ += h * dv;
dx3_ += h * ddv;
//sum
dv1 += dv1_ / 6.f;
dv2 += dv2_ / 6.f;
dv3 += dv3_ / 6.f;
dx1 += hv;
dx2 += dx2_ / 6.f;
dx3 += dx3_ / 6.f;
}
dx2 += dx3;
dv2 += dv3;
}
dx1 += dx2;
dv1 += dv2;
}
(*outx) = x + dx1;
(*outv) = v + dv1;
}
// Host-side reference implementation of the same RK4 integrator (see the
// equation block in __RK4 above); updates x and v in place instead of
// through out-pointers. Kept in sync with RK4 manually.
// NOTE(review): uses double-precision fmod where the device versions use
// fmodf -- results may differ in the last ulp; confirm that is acceptable.
void RK4_CPU(unsigned t_index, float y, float dy, float& x, float& v) {
const float T = fmod(t_step * t_index, 2 * pi / omega);
const float betav = -2.f * beta * v;
const float beta2 = -2.f * beta;
const float cos_x = omega0 * omega0 * cosf(x);
const float sin_x = omega0 * omega0 * sinf(x);
const float hv = h * v;
const float hhv = .5f * hv;
float dx1 = 0, dv1 = 0;
for (unsigned long i = 0; i < h_level_3; ++i) {
float dx2 = 0, dv2 = 0;
for (unsigned long j = 0; j < h_level_3; ++j) {
float dx3 = 0, dv3 = 0;
const float ot = omega * (T + (h * h_level_3 * h_level_3) * i + (h * h_level_3) * j);
const float y_cos_t = y * (omega0 * omega0) * cosf(ot);
const float y_sin_t = y * (omega0 * omega0) * sinf(ot);
const float dy_cos_t = dy * (omega0 * omega0) * cosf(ot);
const float dy_sin_t = dy * (omega0 * omega0) * sinf(ot);
for (unsigned long k = 0; k < h_level_3; ++k) {
float dx2_ = 0, dx3_ = 0;
float dv1_ = 0, dv2_ = 0, dv3_ = 0;
float dx, ddx, dv, ddv, odt;
float tmp_dx2, tmp_dx3;
float tmp_dv1, tmp_dv2, tmp_dv3;
float tmp_1, tmp_2, tmp_3;
//K1
odt = (omega * h) * k;
tmp_1 = betav + y_cos_t * cosf(odt);
tmp_2 = dy_cos_t * cosf(odt) - y_sin_t * sinf(odt);
tmp_3 = -dy_sin_t * sinf(odt);
dv = dv1; ddv = dv2 + dv3;
dx = dx1; ddx = dx2 + dx3;
dv1_ += h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx));
dv2_ += h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv);
dv3_ += h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv);
dx2_ += h * dv;
dx3_ += h * ddv;
//t of K2 & K3
odt = (omega * h) * (k + .5f);
tmp_1 = betav + y_cos_t * cosf(odt);
tmp_2 = dy_cos_t * cosf(odt) - y_sin_t * sinf(odt);
tmp_3 = -dy_sin_t * sinf(odt);
//K2
dv = dv1 + .5f * dv1_; ddv = (dv2 + .5f * dv2_) + (dv3 + .5f * dv3_);
dx = dx1 + hhv; ddx = (dx2 + .5f * dx2_) + (dx3 + .5f * dx3_);
dv1_ += 2.f * (tmp_dv1 = h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx)));
dv2_ += 2.f * (tmp_dv2 = h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv));
dv3_ += 2.f * (tmp_dv3 = h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv));
dx2_ += 2.f * (tmp_dx2 = h * dv);
dx3_ += 2.f * (tmp_dx3 = h * ddv);
//K3
dv = dv1 + .5f * tmp_dv1; ddv = (dv2 + .5f * tmp_dv2) + (dv3 + .5f * tmp_dv3);
dx = dx1 + hhv; ddx = (dx2 + .5f * tmp_dx2) + (dx3 + .5f * tmp_dx3);
dv1_ += 2.f * (tmp_dv1 = h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx)));
dv2_ += 2.f * (tmp_dv2 = h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv));
dv3_ += 2.f * (tmp_dv3 = h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv));
dx2_ += 2.f * (tmp_dx2 = h * dv);
dx3_ += 2.f * (tmp_dx3 = h * ddv);
//K4
odt = (omega * h) * (k + 1);
tmp_1 = betav + y_cos_t * cosf(odt);
tmp_2 = dy_cos_t * cosf(odt) - y_sin_t * sinf(odt);
tmp_3 = -dy_sin_t * sinf(odt);
dv = dv1 + tmp_dv1; ddv = (dv2 + tmp_dv2) + (dv3 + tmp_dv3);
dx = dx1 + hv; ddx = (dx2 + tmp_dx2) + (dx3 + tmp_dx3);
dv1_ += h * (tmp_1 - sin_x * cosf(dx) * cosf(ddx));
dv2_ += h * (tmp_2 - cos_x * sinf(dx) * cosf(ddx) - beta2 * dv);
dv3_ += h * (tmp_3 - cos_x * cosf(dx) * sinf(ddx) + sin_x * sinf(dx) * sinf(ddx) - beta2 * ddv);
dx2_ += h * dv;
dx3_ += h * ddv;
//sum
dv1 += dv1_ / 6.f;
dv2 += dv2_ / 6.f;
dv3 += dv3_ / 6.f;
dx1 += hv;
dx2 += dx2_ / 6.f;
dx3 += dx3_ / 6.f;
}
dx2 += dx3;
dv2 += dv3;
}
dx1 += dx2;
dv1 += dv2;
}
x += dx1;
v += dv1;
}
|
2,069
|
/*
* makeProjection()
* Forms E and E^T matrices from eigenvectors
* float** eT: E^T, populated by function
* float** e: E, populated by function
* float** eigenvec: matrix of eigenvectors, unsorted
* int* indices: indices to accept from eigenvectors
* int N: degrees of freedom
*/
__global__ void makeProjection( float *eT, float *e, float *eigenvec, int *indices, int M, int N ) {
    // One thread per element of the M x N projection; launch with at least
    // M * N threads total.
    int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
    if( elementNum >= M * N ) { // tail guard for the last block
        return;
    }
    int m = elementNum / N;  // selected eigenvector (row of E^T)
    int n = elementNum % N;  // degree of freedom (column of E^T)
    // Gather: slot m of E takes eigenvector number indices[m]; both e and
    // eigenvec are indexed as [n * M + ...], so the layouts match.
    e[n * M + m] = eigenvec[n * M + indices[m]];
    // E^T mirrors the same element with the transposed indexing.
    eT[m * N + n] = e[n * M + m];
}
|
2,070
|
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
struct Target
{
	int2 min;
	int2 max;
	// True when pos lies inside the inclusive rectangle [min, max].
	__device__ bool contains(int2 pos) const
	{
		const bool insideX = (pos.x >= min.x) && (pos.x <= max.x);
		const bool insideY = (pos.y >= min.y) && (pos.y <= max.y);
		return insideX && insideY;
	}
};
// Steps a probe launched from the origin with velocity init_vel until it
// falls below the target's lower edge; returns true if any step lands
// inside the target rectangle.
__device__ bool simulate(Target target, int2 init_vel)
{
	int2 pos = make_int2(0, 0);
	int2 vel = init_vel;
	int peak = 0;
	while (pos.y >= target.min.y) {
		pos.x += vel.x;
		pos.y += vel.y;
		// Drag: x velocity decays toward zero by one per step.
		if (vel.x > 0)      --vel.x;
		else if (vel.x < 0) ++vel.x;
		--vel.y;                       // gravity
		peak = max(pos.y, peak);       // tracked, but not reported to callers
		if (target.contains(pos)) {
			return true;
		}
	}
	return false;
}
// One thread per candidate initial velocity (x0+x, y0+y).
// NOTE(review): despite the name, this kernel COUNTS the number of initial
// velocities that hit the target (atomicAdd of 1 per hit); it does not
// compute a maximum height.
__global__ void find_max_y(Target target, int x0, int y0, int *p_max_y)
{
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
bool hit = simulate(target, make_int2(x0 + x, y0 + y));
if (hit) {
// Contended counter; fine for a one-shot puzzle kernel.
atomicAdd(p_max_y, 1);
}
}
// Reads the puzzle's target area from stdin, brute-forces a grid of initial
// velocities on the GPU, and prints the number of velocities that hit.
int main(int argc, char **argv)
{
	Target target;
	// BUG FIX: the scanf result was ignored; malformed input previously left
	// `target` uninitialized and the search ran on garbage bounds.
	if (scanf("target area: x=%d..%d, y=%d..%d",
	          &target.min.x, &target.max.x, &target.min.y, &target.max.y) != 4) {
		fprintf(stderr, "bad input\n");
		return 1;
	}
	int mx = abs(target.min.x);
	int my = abs(target.min.y);
	int *p_max_y;
	cudaMallocManaged(&p_max_y, sizeof(int));
	*p_max_y = 0;
	// Search a 4*mx by 4*my window of starting velocities around the origin.
	dim3 block_threads = { 8, 8 };
	dim3 num_blocks = { (mx*4+block_threads.x-1)/block_threads.x, (my*4+block_threads.y-1)/block_threads.y };
	int x0 = mx * -2, y0 = my * -2;
	find_max_y<<<num_blocks, block_threads>>>(target, x0, y0, p_max_y);
	// Surface launch-configuration errors explicitly.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
		return 1;
	}
	int max_y = 0;
	// cudaMemcpy synchronizes with the default stream, so the kernel has
	// finished before the counter is read back.
	cudaMemcpy(&max_y, p_max_y, sizeof(int), cudaMemcpyDefault);
	cudaFree(p_max_y);
	printf("%d\n", max_y);
	return 0;
}
|
2,071
|
#include <stdio.h>
__device__ int x;
// Deliberately performs a misaligned int store (address of x plus one byte)
// so that cuda-memcheck / compute-sanitizer reports an alignment violation.
// Do NOT "fix" this: the bug is the point of the demo.
__global__ void unaligned_kernel(void) {
*(int*) ((char*)&x + 1) = 42;
}
// Deliberately writes to a bogus fixed address to trigger an illegal-address
// error under the memory checker.  Intentional — part of the demo.
__device__ void out_of_bounds_function(void) {
*(int*) 0x87654320 = 42;
}
// Kernel wrapper so the deliberate bad store above executes on the device.
__global__ void out_of_bounds_kernel(void) {
out_of_bounds_function();
}
// Launches the intentionally-misaligned kernel, then reports both the launch
// status and the asynchronous execution status observed at synchronization.
void run_unaligned(void) {
printf("Running unaligned_kernel\n");
unaligned_kernel<<<1,1>>>();
printf("Ran unaligned_kernel: %s\n",
cudaGetErrorString(cudaGetLastError()));
// FIX: cudaThreadSynchronize() has been deprecated since CUDA 4.0;
// cudaDeviceSynchronize() is the supported equivalent.
printf("Sync: %s\n", cudaGetErrorString(cudaDeviceSynchronize()));
}
// Launches the intentionally out-of-bounds kernel and reports both the
// launch status and the execution status observed at synchronization.
void run_out_of_bounds(void) {
printf("Running out_of_bounds_kernel\n");
out_of_bounds_kernel<<<1,1>>>();
printf("Ran out_of_bounds_kernel: %s\n",
cudaGetErrorString(cudaGetLastError()));
// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
printf("Sync: %s\n", cudaGetErrorString(cudaDeviceSynchronize()));
}
// Drives the two deliberate-error kernels for a memory-checker demo.
int main() {
int *devMem;
printf("Mallocing memory\n");
cudaMalloc((void**)&devMem, 1024);
run_unaligned();
run_out_of_bounds();
// BUG FIX: free the allocation BEFORE cudaDeviceReset().  After the reset
// the allocation no longer belongs to a live context, so the original
// cudaFree-after-reset call was invalid.
cudaFree(devMem);
cudaDeviceReset();
return 0;
}
|
2,072
|
#include "includes.h"
// Converts primitive variables (Rho, Vx, Vy, Vz, Etot) to conserved form,
// applies the conserved-variable increments dU*, and writes the updated
// primitives back in place.  The two cells at each domain boundary are
// left untouched.  One thread per grid cell.
__global__ void HydroUpdatePrim_CUDA3_kernel(float *Rho, float *Vx, float *Vy, float *Vz, float *Etot, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float dt, int size)
{
	// Flatten (block.y, block.x, thread.x) into a single cell index.
	const int igrid = threadIdx.x
	                + blockIdx.x * CUDA_BLOCK_SIZE
	                + blockIdx.y * CUDA_BLOCK_SIZE * CUDA_GRID_SIZE;
	// Skip the two ghost cells on each end of the domain.
	if (igrid < 2 || igrid > size - 3)
		return;
	// Primitive -> conserved.
	const float rho = Rho[igrid];
	float mass = rho;
	float momx = rho * Vx[igrid];
	float momy = rho * Vy[igrid];
	float momz = rho * Vz[igrid];
	float ener = rho * Etot[igrid];
	// Apply the conserved-variable increments.
	mass += dUD[igrid];
	momx += dUS1[igrid];
	momy += dUS2[igrid];
	momz += dUS3[igrid];
	ener += dUTau[igrid];
	// Conserved -> primitive.
	// NOTE(review): assumes the updated density is nonzero — confirm upstream.
	Rho[igrid]  = mass;
	Vx[igrid]   = momx / mass;
	Vy[igrid]   = momy / mass;
	Vz[igrid]   = momz / mass;
	Etot[igrid] = ener / mass;
}
|
2,073
|
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <math.h>
#include <chrono>
/*
* Used for multiplying two square matrices of the same size.
* Uses shared memory to store matrix c until it is time to copy
* the final array out to the CPU.
*/
// Creates a CUDA event and records it on the default stream.  The caller
// owns the returned event and must eventually destroy it.
__host__ cudaEvent_t get_time(void)
{
	cudaEvent_t stamp;
	cudaEventCreate(&stamp);
	cudaEventRecord(stamp);
	return stamp;
}
/*
 * Element-wise C = A*B + A + B, staging A through dynamic shared memory.
 * Launch with at least blockDim.x * blockDim.y floats of shared memory.
 *
 * BUG FIX: the shared-memory tile was indexed with the GLOBAL element
 * index, but shared memory is per-block — every block past the first wrote
 * and read outside its own tile.  The tile is now indexed with the
 * thread's block-local id, and a bounds guard protects the grid tail.
 */
__global__ void dotMultAddShared(float *A, float *B, float *C, const int sideLen) {
	const uint cola = (blockIdx.x * blockDim.x) + threadIdx.x;
	const uint rowa = (blockIdx.y * blockDim.y) + threadIdx.y;
	const uint idx = (rowa*sideLen) + cola;
	const uint tid = threadIdx.y * blockDim.x + threadIdx.x;  // block-local slot
	extern __shared__ float a[];
	const uint total = (uint)(sideLen * sideLen);
	if (idx < total) {
		a[tid] = A[idx];
	}
	// Hold all threads in the block until the staging copy is complete.
	__syncthreads();
	if (idx < total) {
		C[idx] = a[tid] * B[idx] + a[tid] + B[idx];
	}
}
/*
* Used for multiplying two square matrices of the same size.
* Uses global memory for matrix c until it is time to copy
* the final array out to the CPU.
*/
/*
 * Element-wise C = A*B + A + B using only global memory.
 * FIX: added a bounds guard so the kernel stays correct when the grid
 * overshoots the sideLen x sideLen matrix (the original relied on an
 * exactly-sized launch).
 */
__global__ void dotMultAdd(float *A, float *B, float *C, const int sideLen) {
	const uint cola = (blockIdx.x * blockDim.x) + threadIdx.x;
	const uint rowa = (blockIdx.y * blockDim.y) + threadIdx.y;
	const uint idx = (rowa * sideLen) + cola;
	if (idx < (uint)(sideLen * sideLen)) {
		C[idx] = A[idx] * B[idx] + A[idx] + B[idx];
	}
}
// Benchmarks the shared-memory vs global-memory element-wise kernels on a
// square matrix whose size is derived from the requested thread count.
int main(int argc, char** argv)
{
	// read command line arguments
	int totalThreads = (1 << 20);
	int blockSize = 256;
	if (argc >= 2) {
		totalThreads = atoi(argv[1]);
	}
	if (argc >= 3) {
		blockSize = atoi(argv[2]);
	}
	int numBlocks = totalThreads/blockSize;
	// validate command line arguments
	if (totalThreads % blockSize != 0) {
		++numBlocks;
		totalThreads = numBlocks*blockSize;
		printf("Warning: Total thread count is not evenly divisible by the block size\n");
		printf("The total number of threads will be rounded up to %d\n", totalThreads);
	}
	// Square matrices for ease: shrink totalThreads to a perfect square.
	const int side = static_cast<int>(sqrt(totalThreads));
	printf("Matrix size: %d x %d\n", side, side);
	if (totalThreads!=(side*side)) {
		totalThreads = side*side;
		numBlocks = totalThreads/blockSize;
	}
	printf("Total threads: %d\n", totalThreads);
	printf("Block size: %d\n", blockSize);
	printf("Number of blocks: %d\n", numBlocks);
	// Host and device buffers.
	float *h_A, *h_B, *h_C1, *h_C2;
	float *d_a, *d_b, *d_c;
	h_A = (float*)malloc(totalThreads*sizeof(float));
	h_B = (float*)malloc(totalThreads*sizeof(float));
	h_C1 = (float*)malloc(totalThreads*sizeof(float));
	h_C2 = (float*)malloc(totalThreads*sizeof(float));
	cudaMalloc((void**)&d_a, totalThreads*sizeof(float));
	cudaMalloc((void**)&d_b, totalThreads*sizeof(float));
	cudaMalloc((void**)&d_c, totalThreads*sizeof(float));
	for(int i=0; i<totalThreads; i++) {
		h_A[i] = 2.0f;
		h_B[i] = 1.0f;
		h_C1[i] = 0.0f;
		h_C2[i] = 0.0f;
	}
	cudaEvent_t kernel_start, kernel_stop;
	cudaEventCreate(&kernel_start,0);
	cudaEventCreate(&kernel_stop,0);
	cudaMemcpy(d_a, h_A, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_B, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_c, h_C1, totalThreads*sizeof(float), cudaMemcpyHostToDevice);
	cudaEventRecord(kernel_start, 0);
	// BUG FIX: the dynamic shared allocation was sized with sizeof(int);
	// the kernel stages floats (same byte count, but wrong type).
	// NOTE(review): totalThreads floats can exceed the per-block shared
	// memory limit for large inputs — confirm against the device limit.
	dotMultAddShared<<<numBlocks, blockSize, totalThreads*sizeof(float)>>>(d_a, d_b, d_c, side);
	// FIX: surface launch-configuration failures (previously silent).
	cudaError_t launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		printf("shared kernel launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop, 0);
	cudaEventSynchronize(kernel_stop);
	cudaMemcpy(h_C1, d_c, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
	float delta = 0.0f;
	cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
	// BUG FIX: the unit suffix was misspelled "msn".
	printf("duration using shared memory: %fms\n", delta);
	cudaEventRecord(kernel_start, 0);
	dotMultAdd<<<numBlocks, blockSize>>>(d_a, d_b, d_c, side);
	launchErr = cudaGetLastError();
	if (launchErr != cudaSuccess) {
		printf("global kernel launch failed: %s\n", cudaGetErrorString(launchErr));
	}
	cudaEventRecord(kernel_stop, 0);
	cudaEventSynchronize(kernel_stop);
	cudaMemcpy(h_C2, d_c, totalThreads*sizeof(float), cudaMemcpyDeviceToHost);
	cudaEventElapsedTime(&delta, kernel_start, kernel_stop);
	printf("duration using global memory: %fms\n", delta);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	cudaEventDestroy(kernel_start);
	cudaEventDestroy(kernel_stop);
	// BUG FIX: report the index that actually differs (it was hard-coded 0).
	int firstDiff = -1;
	for(int i=0; i<totalThreads; i++) {
		if(h_C1[i] != h_C2[i]) {
			firstDiff = i;
			break;
		}
	}
	if(firstDiff >= 0)
		printf("Different values detected at [%d]: h_C1=%f h_C2=%f\n", firstDiff, h_C1[firstDiff], h_C2[firstDiff]);
	else
		printf("%2.1f*%2.1f + %2.1f + %2.1f = %2.1f\n", h_A[0], h_B[0], h_A[0], h_B[0], h_C1[0]);
	// BUG FIX: the host buffers were leaked.
	free(h_A);
	free(h_B);
	free(h_C1);
	free(h_C2);
}
|
2,074
|
#include "includes.h"
/*
 * For every 5-D point in xyz (b batches of n points), finds the squared
 * distance to — and index of — its nearest neighbour in xyz2 (b batches of
 * m points).  xyz2 is streamed through shared memory in tiles of `batch`
 * points; the inner candidate loop is manually unrolled by 4.
 *
 * Grid layout: blockIdx.x strides over batches, blockIdx.y * blockDim.x
 * strides over query points.
 *
 * BUG FIX: the unrolled-loop bound was computed as end_k - (end_k & 5);
 * the mask for rounding down to a multiple of 4 is 3, not 5.  With the old
 * mask (e.g. end_k == 3) the 4-wide body read one candidate past the data
 * loaded into the shared tile.  The two previously duplicated branches
 * (end_ka == batch vs. partial tile) executed identical code and have been
 * collapsed into one loop.
 */
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
	const int batch=2048;
	__shared__ float buf[batch*5];
	for (int i=blockIdx.x;i<b;i+=gridDim.x){
		for (int k2=0;k2<m;k2+=batch){
			int end_k=min(m,k2+batch)-k2;
			// Cooperatively load this tile of xyz2 into shared memory.
			for (int j=threadIdx.x;j<end_k*5;j+=blockDim.x){
				buf[j]=xyz2[(i*m+k2)*5+j];
			}
			__syncthreads();
			for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
				float x1=xyz[(i*n+j)*5+0];
				float y1=xyz[(i*n+j)*5+1];
				float r1=xyz[(i*n+j)*5+2];
				float g1=xyz[(i*n+j)*5+3];
				float b1=xyz[(i*n+j)*5+4];
				int best_i=0;
				float best=0;
				int end_ka=end_k-(end_k&3);   // round DOWN to a multiple of 4
				for (int k=0;k<end_ka;k+=4){
					// Unrolled x4 over candidates k .. k+3.
					{
						float x2=buf[k*5+0]-x1;
						float y2=buf[k*5+1]-y1;
						float r2=buf[k*5+2]-r1;
						float g2=buf[k*5+3]-g1;
						float b2=buf[k*5+4]-b1;
						float d=x2*x2+y2*y2+r2*r2+g2*g2+b2*b2;
						if (k==0 || d<best){
							best=d;
							best_i=k+k2;
						}
					}
					{
						float x2=buf[k*5+5]-x1;
						float y2=buf[k*5+6]-y1;
						float r2=buf[k*5+7]-r1;
						float g2=buf[k*5+8]-g1;
						float b2=buf[k*5+9]-b1;
						float d=x2*x2+y2*y2+r2*r2+g2*g2+b2*b2;
						if (d<best){
							best=d;
							best_i=k+k2+1;
						}
					}
					{
						float x2=buf[k*5+10]-x1;
						float y2=buf[k*5+11]-y1;
						float r2=buf[k*5+12]-r1;
						float g2=buf[k*5+13]-g1;
						float b2=buf[k*5+14]-b1;
						float d=x2*x2+y2*y2+r2*r2+g2*g2+b2*b2;
						if (d<best){
							best=d;
							best_i=k+k2+2;
						}
					}
					{
						float x2=buf[k*5+15]-x1;
						float y2=buf[k*5+16]-y1;
						float r2=buf[k*5+17]-r1;
						float g2=buf[k*5+18]-g1;
						float b2=buf[k*5+19]-b1;
						float d=x2*x2+y2*y2+r2*r2+g2*g2+b2*b2;
						if (d<best){
							best=d;
							best_i=k+k2+3;
						}
					}
				}
				// Remainder loop for the last end_k % 4 candidates.
				for (int k=end_ka;k<end_k;k++){
					float x2=buf[k*5+0]-x1;
					float y2=buf[k*5+1]-y1;
					float r2=buf[k*5+2]-r1;
					float g2=buf[k*5+3]-g1;
					float b2=buf[k*5+4]-b1;
					float d=x2*x2+y2*y2+r2*r2+g2*g2+b2*b2;
					if (k==0 || d<best){
						best=d;
						best_i=k+k2;
					}
				}
				// Merge this tile's best into the global result.
				if (k2==0 || result[(i*n+j)]>best){
					result[(i*n+j)]=best;
					result_i[(i*n+j)]=best_i;
				}
			}
			// All readers must finish before the next tile overwrites buf.
			__syncthreads();
		}
	}
}
|
2,075
|
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <cuda.h>
const int A = 1920;
const int B = 132;
const int C = 396;
// CPU reference gather: for each of the A rows of x (width B), collect the
// C entries named by `indices` into the corresponding output row.
std::vector<float> TakeNaive(const std::vector<float>& x,
                             const std::vector<int>& indices) {
  std::vector<float> y(A * C);
  for (int row = 0; row < A; ++row) {
    const float* src = &x[row * B];
    float* dst = &y[row * C];
    for (int col = 0; col < C; ++col) {
      dst[col] = src[indices[col]];
    }
  }
  return y;
}
// cudaMalloc wrapper: returns the allocation or aborts with a message.
void* GPUMalloc(size_t sz) {
  void* ptr = nullptr;
  const cudaError_t status = cudaMalloc(&ptr, sz);
  if (status == cudaSuccess) {
    return ptr;
  }
  fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
  abort();
}
// cudaMemcpy wrapper: aborts with a message on failure.
void GPUMemcpy(void* dst, const void* src, size_t cnt, enum cudaMemcpyKind kind) {
  const cudaError_t status = cudaMemcpy(dst, src, cnt, kind);
  if (status == cudaSuccess) {
    return;
  }
  fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
  abort();
}
// One block per output row (A blocks), one thread per output column
// (C threads): y[row, col] = x[row, indices[col]].
// NOTE(review): the `n` parameter is unused.
__global__ void TakeCudaKernel(float* x, int* indices, float* y, int n) {
  int i = blockIdx.x;
  int j = threadIdx.x;
  y[i * C + j] = x[i * B + indices[j]];
}
// Runs the gather kernel n times on the GPU, timing the launches, and
// returns the result of the final run.
// BUG FIX: the three device buffers were never released, leaking GPU
// memory on every call (main() calls this function twice).
std::vector<float> TakeCuda(const std::vector<float>& x,
                            const std::vector<int>& indices,
                            int n) {
  std::vector<float> y(A * C);
  float* xg = (float*)GPUMalloc(x.size() * sizeof(x[0]));
  int* ig = (int*)GPUMalloc(indices.size() * sizeof(indices[0]));
  float* yg = (float*)GPUMalloc(y.size() * sizeof(y[0]));
  fprintf(stderr, "memcpy xg\n");
  GPUMemcpy(xg, x.data(), x.size() * sizeof(x[0]), cudaMemcpyHostToDevice);
  fprintf(stderr, "memcpy ig\n");
  GPUMemcpy(ig, indices.data(), indices.size() * sizeof(indices[0]), cudaMemcpyHostToDevice);
  fprintf(stderr, "take\n");
  clock_t start = clock();
  for (int i = 0; i < n; ++i) {
    TakeCudaKernel<<<A, C>>>(xg, ig, yg, 0);
  }
  cudaError_t err = cudaDeviceSynchronize();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    abort();
  }
  fprintf(stderr, "Elapsed %f\n",
          ((double)(clock() - start)) / CLOCKS_PER_SEC);
  fprintf(stderr, "memcpy yg\n");
  GPUMemcpy(&y[0], yg, y.size() * sizeof(y[0]), cudaMemcpyDeviceToHost);
  // Release device buffers before returning.
  cudaFree(xg);
  cudaFree(ig);
  cudaFree(yg);
  fprintf(stderr, "done\n");
  return y;
}
// Compares the CPU and GPU gathers on deterministic data, then reruns the
// GPU path 40 times for timing.
int main() {
  std::vector<float> x(A * B);
  std::vector<int> indices(C);
  for (size_t i = 0; i < x.size(); ++i) {
    x[i] = i;
  }
  for (size_t i = 0; i < indices.size(); ++i) {
    // BUG FIX: indices were taken modulo C (396), but each row of x is only
    // B (132) wide — gathers overflowed into neighbouring rows and, on the
    // last row, past the end of the buffer.  Indices must be < B.
    indices[i] = i * i % B;
  }
  std::vector<float> ey = TakeNaive(x, indices);
  std::vector<float> ay = TakeCuda(x, indices, 1);
  for (size_t i = 0; i < ey.size(); ++i) {
    //fprintf(stderr, "%zu %f %f\n", i, ey[i], ay[i]);
    // BUG FIX: `abs` can resolve to the integer overload here, truncating
    // the difference to 0 and making the assertion pass vacuously; fabs
    // always performs the intended floating-point comparison.
    assert(fabs(ey[i] - ay[i]) < 1e-10);
  }
  TakeCuda(x, indices, 40);
}
|
2,076
|
/*****************************************************************************
Example : cuda-matrix-matrix-multiplication.cu
Objective : Write a CUDA Program to perform Matrix Matrix multiplication.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : Aug 2011
E-mail : RarchK
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#define BLOCKSIZE 16
#define SIZE 128
cudaDeviceProp deviceProp;
cudaEvent_t start,stop;
cudaError_t ret;
double *host_MatA,*host_MatB,*host_MatC,*CPU_Result;
double *device_MatA,*device_MatB,*device_MatC;
int size = SIZE;
float elapsedTime;
double Tsec,gflops;
/*kernel funtion*/
/*
 * Tiled matrix-matrix multiply: C = A * B (doubles).
 * A has width wA, B has width wB; launched with BLOCKSIZE x BLOCKSIZE
 * thread blocks, one block per BLOCKSIZE x BLOCKSIZE tile of C.
 * Requires wA and wB to be multiples of BLOCKSIZE (no edge guards).
 */
__global__ void Muld(double* A, double* B, int wA, int wB, double* C)
{
	// Block/thread coordinates: which tile, and which element within it.
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	// First/last/step indices of the A tiles swept along this block row.
	int aBegin = wA * BLOCKSIZE * by;
	int aEnd = aBegin + wA - 1;
	int aStep = BLOCKSIZE;
	// First/step indices of the matching B tiles down this block column.
	int bBegin = BLOCKSIZE * bx;
	int bStep = BLOCKSIZE * wB;
	double Csub = 0;   // running dot product for this thread's C element
	for(int a = aBegin, b = bBegin; a <= aEnd ; a += aStep, b += bStep)
	{
		// Stage one tile of A and one tile of B in shared memory.
		__shared__ double As[BLOCKSIZE][BLOCKSIZE];
		__shared__ double Bs[BLOCKSIZE][BLOCKSIZE];
		As[ty][tx] = A[a + wA * ty + tx];
		Bs[ty][tx] = B[b+ wB * ty + tx];
		// Every load must land before any thread reads the tiles.
		__syncthreads();
		for(int k= 0; k< BLOCKSIZE; ++k)
			Csub += As[ty][k] * Bs[k][tx];
		// Keep fast threads from overwriting tiles still being read.
		__syncthreads();
	}
	// Linear offset of this block's tile in C, then the element itself.
	int c = wB * BLOCKSIZE * by + BLOCKSIZE * bx;
	C[ c+ wB * ty + tx] = Csub;
}/* end of Muld device code */
/*mem error*/
/* Reports a failed host allocation (which array, which benchmark, how many
 * elements of what type were requested) and terminates the program. */
void mem_error(char *arrayname, char *benchmark, int len, char *type)
{
	printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type);
	exit(-1);
}
/*cuda safe call*/
/* Checks the status of a CUDA runtime call; silently returns on success and
 * exits with a diagnostic (status code + error string) on any failure.
 * NOTE(review): __LINE__ below reports this helper's own line, not the
 * caller's — a macro wrapper would be needed to report the call site. */
void CUDA_SAFE_CALL(cudaError_t call)
{
	cudaError_t ret = call;
	//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
	switch(ret)
	{
		case cudaSuccess:
		//	printf("Success\n");
			break;
		/* 	case cudaErrorInvalidValue:
			{
				printf("ERROR: InvalidValue:%i.\n",__LINE__);
				exit(-1);
				break;
			}
			case cudaErrorInvalidDevicePointer:
			{
				printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
				exit(-1);
				break;
			}
			case cudaErrorInvalidMemcpyDirection:
			{
				printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
				exit(-1);
				break;
			} */
		default:
		{
			printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,cudaGetErrorString(ret));
			exit(-1);
			break;
		}
	}
}
/* void SetUp_CUDA_Exe_Config() */
/*
 * Validates a launch configuration against the device limits and exits on
 * violation.  BUG FIX: the comparisons used >=, which rejected
 * configurations exactly equal to the documented maximums even though
 * those are valid launch dimensions.
 */
void check_block_grid_dim(cudaDeviceProp devProp,dim3 blockDim,dim3 gridDim)
{
	if( blockDim.x > (unsigned)devProp.maxThreadsDim[0] || blockDim.y > (unsigned)devProp.maxThreadsDim[1] || blockDim.z > (unsigned)devProp.maxThreadsDim[2] )
	{
		printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
		exit(-1);
	}
	if( gridDim.x > (unsigned)devProp.maxGridSize[0] || gridDim.y > (unsigned)devProp.maxGridSize[1] || gridDim.z > (unsigned)devProp.maxGridSize[2] )
	{
		printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
		exit(-1);
	}
}
/*function to free memory*/
/* Frees `len` device allocations through the checked wrapper. */
void dfree(double * arr[],int len)
{
	int i = 0;
	while (i < len) {
		CUDA_SAFE_CALL(cudaFree(arr[i]));
		++i;
	}
	printf("mem freed\n");
}
/*calculate Gflops*/
/* Converts the kernel time (seconds) into GFLOP/s, counting size^3
 * operations for the square matrix multiply. */
double calculate_gflops(double &Tsec)
{
	const double ops = 1.0 * size * size * size;
	return 1.0e-9 * (ops / Tsec);
}
/*get device count*/
/* Returns the number of CUDA devices visible to the runtime. */
int get_DeviceCount()
{
	int n = 0;
	cudaGetDeviceCount(&n);
	return n;
}
/*launch kernel function is called in main()*/
/* Configures a BLOCKSIZE x BLOCKSIZE block grid covering the size x size
 * matrices (globals), validates it against the device limits, launches the
 * Muld kernel, and records the elapsed time into the globals Tsec /
 * elapsedTime via CUDA events. */
void launch_kernel_MatMatMult()
{
	/* threads_per_block= BLOCKSIZE, blocks_per_grid=size/dimBlock */
	printf("in launch kernel\n");
	dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
	dim3 dimGrid(size/dimBlock.x,size/dimBlock.y);
	//checking the maximum limit of blocksize and gridsize-------------------
	check_block_grid_dim(deviceProp,dimBlock,dimGrid);
	printf("after check\n");
	cudaEventRecord(start,0);
	Muld<<<dimGrid,dimBlock>>>(device_MatA,device_MatB,size,size,device_MatC);
	// Event-based timing: the stop event completes only after the kernel.
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime,start,stop);
	Tsec=elapsedTime*1.0e-3;
	calculate_gflops(Tsec);
}
/* Fill in the vector with double precision values */
/* Fills vec[0..size) with uniform doubles in [0, 1) from drand48(). */
void fill_dp_vector(double* vec,int size)
{
	for (int i = 0; i < size; ++i) {
		vec[i] = drand48();
	}
}
/*function to print on the screen*/
/*
 * Prints one benchmark result row.  flag==1 means tsec/gflops are valid.
 * BUG FIX: the flag==0 branch passed string literals ("---") to %lf
 * conversions — undefined behaviour; they are now printed with %s.
 */
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
	printf("\n---------------%s----------------\n",program_name);
	printf("\tSIZE\t TIME_SEC\t Gflops\n");
	if(flag==1)
		printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
	else
		printf("\t%d\t%s\t%s\t",size,"---","---");
}
/*-----main()-----*/
/* Allocates host/device matrices, runs the GEMM kernel once, and reports
 * timing.  BUG FIX: the host matrices are allocated with new[] but were
 * released with free() — mismatched allocator/deallocator is undefined
 * behaviour; they are now released with delete[]. */
int main()
{
	int device_Count=get_DeviceCount();
	printf("\n\nNUmber of Devices : %d\n\n", device_Count);
	// Device Selection, Device 1: Tesla C1060
	// NOTE(review): hard-coded device index 1 fails on single-GPU systems —
	// confirm this is intentional for the target machine.
	cudaSetDevice(1);
	int device;
	// Current Device Detection
	cudaGetDevice(&device);
	cudaGetDeviceProperties(&deviceProp,device);
	printf("Using device %d: %s \n", device, deviceProp.name);
	//event creation...
	CUDA_SAFE_CALL(cudaEventCreate (&start));
	CUDA_SAFE_CALL(cudaEventCreate (&stop));
	/*allocating the memory for each matrix */
	host_MatA = new double[size*size];
	host_MatB = new double[size*size];
	host_MatC = new double[size*size];
	CPU_Result= new double[size*size];
	// NOTE(review): plain new[] throws on failure, so these checks are
	// effectively dead; kept for parity with the original flow.
	if(host_MatA==NULL)
		mem_error("host_MatA","MatMatMult",size,"double");
	if(host_MatB==NULL)
		mem_error("host_MatB","MatMatMult",size,"double");
	if(host_MatC==NULL)
		mem_error("host_MatC","MatMatMult",size,"double");
	if(CPU_Result==NULL)
		mem_error("CPU_Result","MatMatMult",size,"double");
	//--------filling the matrix with double precision-----------
	fill_dp_vector(host_MatA,size*size);
	fill_dp_vector(host_MatB,size*size);
	//allocating memory on GPU
	CUDA_SAFE_CALL(cudaMalloc( (void**)&device_MatA,size*size*sizeof(double)));
	CUDA_SAFE_CALL(cudaMalloc( (void**)&device_MatB, size*size*sizeof(double)));
	CUDA_SAFE_CALL(cudaMalloc( (void**)&device_MatC,size*size*sizeof(double)));
	// copying host matrix to device matrix
	CUDA_SAFE_CALL(cudaMemcpy((void*)device_MatA, (void*)host_MatA, size*size* sizeof(double) , cudaMemcpyHostToDevice ));
	CUDA_SAFE_CALL(cudaMemcpy((void*)device_MatB, (void*)host_MatB, size*size*sizeof(double) , cudaMemcpyHostToDevice ));
	launch_kernel_MatMatMult(); //launching the kernel
	//retriving result from device
	CUDA_SAFE_CALL(cudaMemcpy((void*)host_MatC, (void*)device_MatC, size*size*sizeof(double) , cudaMemcpyDeviceToHost ));
	//printing the result on screen
	print_on_screen("MAT MAT Mult",Tsec,calculate_gflops(Tsec),size,1);
	//free the device memory----------
	double *array[3];
	array[0]=device_MatA;
	array[1]=device_MatB;
	array[2]=device_MatC;
	dfree(array,3);
	// free host memory with delete[] to match the new[] allocations above
	delete[] host_MatA;
	delete[] host_MatB;
	delete[] host_MatC;
	delete[] CPU_Result;
}// end of main
|
2,077
|
#include "user_device.cuh"
__device__ float global_cache[GLOBAL_CACHE_SIZE];
/**
* function name: atomicMax_float
* Return Type: float
* Description:
* "atomicMax" for float.
* Compare old value (*maxVal) and new value (value).
* If new value is larger than old value, than new value will overwrite old value.
* This function will return old value.
*/
/**
 * function name: atomicMax_float
 * Return Type: float
 * Description:
 *   Atomic maximum for float via a compare-and-swap loop.  Returns the
 *   value observed in *maxVal before this thread's (possible) update.
 *
 * BUG FIX: the original attempted a SINGLE compare-and-swap against a
 * value read non-atomically; if any other thread updated *maxVal between
 * the read and the CAS, this thread's larger value was silently lost.
 * The loop retries until either the CAS succeeds or the stored value is
 * already >= value.
 */
__device__
float atomicMax_float(float *maxVal, float value) {
	int *addr = (int *)maxVal;
	int old = *addr;
	while (__int_as_float(old) < value) {
		int assumed = old;
		old = atomicCAS(addr, assumed, __float_as_int(value));
		if (old == assumed) break;   // our value was installed
	}
	return __int_as_float(old);
}
/**
* function name: device_maxValueVector
* Return Type: void
* Description:
* This kernel function find max value from large vector.
* Stage 1: Find max value from partial vector which allocated to each thread.
* Stage 2: Find max value from thread block.
* Stage 3: Find max value from grid.
*/
/**
 * function name: device_maxValueVector
 * Return Type: void
 * Description:
 *   Finds the maximum of a large vector in three stages:
 *   Stage 1: each thread reduces its strided slice of the vector.
 *   Stage 2: thread 0 of each block reduces the block's candidates (shared
 *            memory) and publishes the block maximum to global_cache[].
 *   Stage 3: the LAST block to finish reduces the per-block maxima.
 *
 * BUG FIXES:
 *   - global_cache[] is now made visible to other blocks with
 *     __threadfence() BEFORE the completion counter is incremented
 *     (release ordering); the original had no fence.
 *   - Last-block detection now uses the value RETURNED by atomicAdd, so
 *     exactly one block runs stage 3.  The original re-read *p_block_cnt
 *     non-atomically after the add, so zero or several blocks could
 *     observe the final count.
 */
__global__
void device_maxValueVector(float *vec, float *p_maxVal, int vector_size, int *p_block_cnt, int numOps) {
	extern __shared__ float cache[];
	int index = blockDim.x * blockIdx.x + threadIdx.x;
	int offset = gridDim.x * blockDim.x;
	float tmpMax = -INFINITY, tmpCmp;
	// Stage 1: Thread. Get max value of this thread's partial vector.
	for (int i = 0; i < numOps; i++) {
		tmpCmp = (index < vector_size) ? vec[index] : -INFINITY;
		tmpMax = (tmpMax < tmpCmp) ? tmpCmp : tmpMax;
		index += offset;
	}
	cache[threadIdx.x] = tmpMax;
	__syncthreads();
	// Stage 2: thread 0 reduces the block and publishes its maximum.
	if (threadIdx.x != 0) {
		return;
	}
	for (int i = 0; i < blockDim.x; i++) {
		tmpCmp = cache[i];
		tmpMax = (tmpMax < tmpCmp) ? tmpCmp : tmpMax;
	}
	global_cache[blockIdx.x] = tmpMax;
	// Make the global_cache write visible device-wide before signalling.
	__threadfence();
	int arrived = atomicAdd(p_block_cnt, 1);
	// Stage 3: only the last block to arrive reduces the block maxima.
	if (arrived == (int)gridDim.x - 1) {
		for (int i = 0; i < gridDim.x; i++) {
			tmpCmp = global_cache[i];
			tmpMax = (tmpMax < tmpCmp) ? tmpCmp : tmpMax;
		}
		(*p_maxVal) = tmpMax;
	}
}
|
2,078
|
#include "includes.h"
/*
 * Permutes PV-ordered weights [arbor][outF][y][x][inF] into cuDNN order
 * [arbor][outF][inF][y][x] with the spatial kernel rotated 180 degrees.
 * One thread per source element.
 *
 * BUG FIX: the bounds guard omitted the arbor dimension, so for
 * numArbors > 1 every element past the first arbor was skipped even
 * though kA and kDest already account for the arbor index.  Behaviour for
 * numArbors == 1 is unchanged.
 */
__global__ void CudaPermuteWeightsPVToCudnn( float *dest, float *src, int numArbors, int outFeatures, int ny, int nx, int inFeatures) {
   // Parameter dimensions are PV source dimensions
   int kSrc = (blockIdx.x * blockDim.x) + threadIdx.x;
   if (kSrc < numArbors * outFeatures * ny * nx * inFeatures) {
      int kA = kSrc / (outFeatures * ny * nx * inFeatures);
      int kOF = (kSrc % (outFeatures * ny * nx * inFeatures)) / (ny * nx * inFeatures);
      int kY = (kSrc % (ny * nx * inFeatures)) / (nx * inFeatures);
      int kX = (kSrc % (nx * inFeatures)) / inFeatures;
      int kIF = (kSrc % inFeatures);
      // Destination strides for [arbor][outF][inF][y][x].
      int sA = outFeatures * inFeatures * ny * nx;
      int sOF = inFeatures * ny * nx;
      int sIF = ny * nx;
      int sY = nx;
      // (ny-kY-1, nx-kX-1) performs the 180-degree spatial rotation.
      int kDest = kA * sA + kOF * sOF + kIF * sIF + (ny - kY - 1) * sY + (nx - kX - 1);
      dest[kDest] = src[kSrc];
   }
}
|
2,079
|
#include "Utils.cuh"
#include <curand.h>
#include <algorithm>
#include <cuda_profiler_api.h>
// Total number of elements described by the shape.
size_t Shape3d::size()const { return depth * height * width; }
// Constructors delegate down to the 3-argument form; omitted leading
// dimensions default to 1.
Shape3d::Shape3d() : Shape3d(1) {}
Shape3d::Shape3d(size_t width) : Shape3d(1, width) {}
Shape3d::Shape3d(size_t height, size_t width) : Shape3d(1, height, width) {}
Shape3d::Shape3d(size_t depth, size_t height, size_t width) : depth(depth), height(height), width(width) {}
Shape3d::Shape3d(const Shape3d& other) : Shape3d(other.depth, other.height, other.width) { }
// Uniform pseudo-random value in [min, max] derived from rand().
val_t getRand(val_t min, val_t max) {
	const val_t unit = rand() / (val_t)RAND_MAX;   // in [0, 1]
	return (unit * (max - min)) + min;
}
// Builds a vector of `size` random values drawn from [min, max].
std::vector<val_t> randomVector(size_t size, val_t min, val_t max) {
	std::vector<val_t> out(size);
	for (size_t i = 0; i < size; ++i) {
		out[i] = getRand(min, max);
	}
	return out;
}
// Convenience overload: random values in the default range [-1, 1].
std::vector<val_t> randomVector(size_t size) {
	return randomVector(size, -1, 1);
}
// Decodes four big-endian bytes into a uint32_t.
uint32_t intFromBytes(unsigned char* bytes) {
	uint32_t value = 0;
	for (int i = 0; i < 4; ++i) {
		value = (value << 8) | bytes[i];
	}
	return value;
}
// Allocates `size` bytes of CUDA managed memory (host+device visible).
// BUG FIX: size_t was printed with %ld (undefined on LLP64 platforms);
// use %zu, and include the CUDA error string for diagnosis.
void devAlloc(void** devPtr, size_t size) {
	//cudaError_t err = cudaMalloc(devPtr, size);
	cudaError_t err = cudaMallocManaged(devPtr, size);
	if (err != cudaSuccess) {
		printf("error allocating memory of size: %zu (%s)\n", size, cudaGetErrorString(err));
	}
}
// Frees memory obtained from devAlloc.
// BUG FIX: the failure path printed cudaGetLastError() — which reads and
// CLEARS the sticky error and may differ from this call's status — instead
// of the error actually returned by cudaFree.
void devDelete(void* devPtr) {
	cudaError err = cudaFree(devPtr);
	if (err != cudaSuccess) {
		printf("error %d to free memory: %p\n", err, devPtr);
	}
}
struct cuRandData {
curandGenerator_t gen;
bool isInit = false;
};
struct cuRandData cuRand;
// Grid-stride loop remapping uniform samples in [0, 1) (as written by the
// curand generator) into [min, min + |max - min|), in place.
__global__ void randomInRange_Kernal(val_t* arr, val_t min, val_t max, size_t size) {
	for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < size; idx += blockDim.x * gridDim.x) {
		val_t span = max - min;
		if (!(span > 0)) span = -span;   // use the magnitude of the range
		arr[idx] = span * arr[idx] + min;
	}
}
/**
void devRandArr(val_t* arr, size_t size, val_t min, val_t max) {
if (arr == nullptr || size == 0)
return;
if (cuRand.isInit == false) {
curandCreateGenerator(&cuRand.gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(cuRand.gen, 1234ULL);
cuRand.isInit = true;
}
if (typeid(val_t) == typeid(float)) {
curandGenerateUniform(cuRand.gen, (float*)arr, size);
}
else {
curandGenerateUniformDouble(cuRand.gen, (double*)arr, size);
}
int threads = DEFAULT_THREAD_SIZE;
int blocks = std::min((int)ceilf(size / (float)threads), DEFAULT_THREAD_SIZE);
randomInRange_Kernal << <blocks, threads >> > (arr, min, max, size);
cudaDeviceSynchronize();
}
*/
std::map<std::string, struct Times> Timer::timers;
// Cancels a running timer without recording a sample; exits if the timer
// was never started.
void Timer::abort(const std::string& timerName) {
	auto& t = timers[timerName];
	if (!t.isRunning) {
		printf("%s timer can't be aborted without been started\n", timerName.data());
		exit(1);
	}
	t.isRunning = false;
	//timer.currentTimer = clock();
}
// Starts a named timer; exits if it is already running.
void Timer::start(const std::string& timerName) {
	auto& t = timers[timerName];
	if (t.isRunning) {
		printf("%s timer was already started\n", timerName.data());
		exit(1);
	}
	t.startTime = clock();
	t.isRunning = true;
}
// Stops a named timer and folds the elapsed seconds into its
// min/max/total statistics; exits if the timer was never started.
void Timer::stop(const std::string& timerName) {
	auto& t = timers[timerName];
	if (!t.isRunning) {
		printf("%s timer can't stop if it wasn't started\n", timerName.data());
		exit(1);
	}
	const double elapsed = (clock() - t.startTime) / (double)CLOCKS_PER_SEC;
	t.maxTime = (elapsed > t.maxTime) ? elapsed : t.maxTime;
	t.minTime = (elapsed < t.minTime) ? elapsed : t.minTime;
	t.totalTime += elapsed;
	t.epocs++;
	t.isRunning = false;
	//timer.currentTimer = time_since_epoc();
}
// Prints average/min/max duration for every registered timer.
// FIX: iterate by const reference — the map entry (including its
// std::string key) was previously copied on every iteration.
void Timer::printTimers() {
	for (const auto& timePair : timers) {
		printf("%s => arv: %lf min: %lf max: %lf\n",
			timePair.first.data(),
			timePair.second.totalTime / timePair.second.epocs,
			timePair.second.minTime,
			timePair.second.maxTime);
	}
}
// Returns the main stream at streamIdx (index asserted in range).
cudaStream_t CudaStreamProvider::get(size_t streamIdx) {
	assert(streamIdx < maxMainStreams);
	cudaStream_t stream = mainStreams.at(streamIdx);
	return stream;
}
// Returns sub-stream subStreamIdx of main stream streamIdx
// (both indices asserted in range).
cudaStream_t CudaStreamProvider::get(size_t streamIdx, size_t subStreamIdx) {
	assert(streamIdx < maxMainStreams);
	assert(subStreamIdx < maxSubStreams);
	cudaStream_t stream = subStreams[streamIdx][subStreamIdx];
	return stream;
}
// Makes main stream streamIdx wait for all work currently enqueued on its
// sub-streams.  Re-recording the same event for each sub-stream is safe:
// cudaStreamWaitEvent captures the event's state at call time, so waits
// already issued are not invalidated by the next cudaEventRecord.
void CudaStreamProvider::wait(size_t streamIdx) {
	for (int i = 0; i < maxSubStreams; i++) {
		cudaEventRecord(event, subStreams[streamIdx][i]);
		cudaStreamWaitEvent(mainStreams[streamIdx], event, 0);
	}
}
// Construction is trivial; all resources are created in Init().
CudaStreamProvider::CudaStreamProvider() {}
// Creates maxMainStreams main streams, each with maxSubStreams sub-streams,
// plus one timing-disabled event reused by wait() for cross-stream ordering.
void CudaStreamProvider::Init(size_t maxMainStreams, size_t maxSubStreams) {
	this->maxMainStreams = maxMainStreams;
	this->maxSubStreams = maxSubStreams;
	mainStreams.resize(maxMainStreams);
	subStreams.resize(maxMainStreams);
	for (int i = 0; i < maxMainStreams; i++) {
		cudaStreamCreate(&mainStreams[i]);
		subStreams[i].resize(maxSubStreams);
		for (int j = 0; j < subStreams[i].size(); j++) {
			cudaStreamCreate(&subStreams[i][j]);
		}
	}
	// Two-argument cudaEventCreate is the cuda_runtime.h overload that
	// forwards to cudaEventCreateWithFlags.
	cudaEventCreate(&event, cudaEventDisableTiming);
}
// Accessors for the configured stream-pool dimensions.
size_t CudaStreamProvider::getMaxSubStreams()const { return maxSubStreams; }
size_t CudaStreamProvider::getMaxMainStreams()const { return maxMainStreams; }
|
2,080
|
#include <iostream>
#include <cuda_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
// Fills the N x N matrix with pseudo-random digits in [0, 9].
void random_inits(int a[N][N]){
	for (int row = 0; row < N; ++row)
		for (int col = 0; col < N; ++col)
			a[row][col] = rand() % 10;
}
// Element-wise matrix addition: c = a + b, one thread per element.
// FIX: added a bounds guard so the kernel stays correct if it is ever
// launched with more than N x N threads per block.
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
	int i = threadIdx.x;
	int j = threadIdx.y;
	if (i < N && j < N)
		c[i][j] = a[i][j] + b[i][j];
}
// Prints every element of the sum alongside its two operands.
void show(int a[N][N], int b[N][N], int c[N][N]){
	for (int row = 0; row < N; ++row){
		for (int col = 0; col < N; ++col){
			cout << "matrix[" << row << "][" << col << "]" << " = " << a[row][col] << " + " << b[row][col] << "=" << c[row][col] <<"\t";
		}
		cout << endl;
	}
}
// Fills two N x N matrices with random digits, adds them on the GPU with a
// single N x N thread block, and prints the result.
int main(void){
	int a[N][N];
	int b[N][N];
	int c[N][N];
	random_inits(a);
	random_inits(b);
	int (*d_a)[N], (*d_b)[N], (*d_c)[N];
	cudaMalloc((void**)&d_a, (N*N)*sizeof(int));
	cudaMalloc((void**)&d_b, (N*N)*sizeof(int));
	cudaMalloc((void**)&d_c, (N*N)*sizeof(int));
	cudaMemcpy(d_a, a, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
	// BUG FIX: the original also copied the UNINITIALIZED host array c to
	// the device; the kernel overwrites every element of d_c, so that copy
	// only read indeterminate memory.  It has been removed.
	int numBlocks = 1;
	dim3 threadsPerBlock(N,N);
	add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
	// Blocking copy back also serves as synchronization with the kernel.
	cudaMemcpy(c, d_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
	show(a, b, c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	cout<< endl;
	return 0;
}
|
2,081
|
#include <iostream>
#include <cuda_runtime.h>
using namespace std;
// Derived class
// Derived class
// Rectangle whose dimensions live in CUDA managed memory so getArea() can
// dereference them from either the host or the device.  Allocation happens
// in build_function() rather than the constructor, because the object
// itself is placed in managed memory by the caller first.
class Rectangle
{
public:
//don't use constructor
Rectangle()
{
// width = (int *)malloc(sizeof(int));
// height = (int *)malloc(sizeof(int));
// *width = 10;
// *height = 30;
}
// Allocates the managed dimension buffers and sets fixed demo values.
void build_function()
{
cudaMallocManaged(&width,sizeof(int));
cudaMallocManaged(&height,sizeof(int));
*width = 10;
*height = 30;
}
// Computes width*height into `product`; callable on host or device.
__device__ __host__ void getArea()
{
product = *width * *height;
}
// Result of the last getArea() call.
int product;
private:
int* width;
int* height;
};
// Base class
// Base class
// Owns a Rectangle in managed memory.  As with Rectangle, allocation is
// deferred to build_function() so the whole object graph ends up in
// CUDA managed memory rather than host heap.
class Shape
{
public:
//don't use constructor
Shape()
{
// rect = new Rectangle();
}
// Allocates the managed Rectangle and initializes it.
void build_function()
{
cudaMallocManaged(&rect,sizeof(Rectangle));
rect->build_function();
}
Rectangle* rect;
};
// Runs getArea() on the device; sha, sha->rect and its dimension buffers
// are all in managed memory, so both host and device can dereference them.
__global__ void kernel(Shape* sha)
{
sha->rect->getArea();
}
// Builds the managed object graph, computes the area on the device, and
// prints the result on the host.
int main(void)
{
	// Shape* sha = new Shape();
	Shape* sha;
	cudaMallocManaged(&sha,sizeof(Shape));
	sha->build_function();
	kernel<<<1,1,0>>>(sha);
	// Required before the host reads managed memory written by the kernel.
	cudaDeviceSynchronize();
	cout << "Total area: " << sha->rect->product << endl;
	// BUG FIX: the managed allocations were never released.  The
	// Rectangle's private width/height buffers still leak here —
	// NOTE(review): consider adding a cleanup method to Rectangle.
	cudaFree(sha->rect);
	cudaFree(sha);
	return 0;
}
|
2,082
|
#include<iostream>
__constant__ float M[10];
// Copies a 10-float table into the __constant__ array M.
int main(){
	// BUG FIX: h_M previously held only 9 initializers (the value 6 was
	// missing) while 10*sizeof(float) bytes were copied to the 10-element
	// constant array M — reading one float past the end of h_M.
	float h_M[]={1,2,3,4,5,6,7,8,9,0};
	cudaMemcpyToSymbol(M,h_M,10*sizeof(float));
	std::cout<< "yo"<<std::endl;
}
|
2,083
|
// Device helper returning a fixed dirt-evaluation score.
__device__ int evalDirt() {
	const int kDirtScore = 4;
	return kDirtScore;
}
|
2,084
|
#include "includes.h"
// Reverse/generic 2-D convolution accumulating alpha-scaled results into
// `output`.  Grid: blockIdx.x selects the kernel index kk, blockIdx.y the
// input plane ii.  Threads: x spans partial dot products (kid), y spans
// output rows, z spans the batch.  Each (batch, kid) pair produces a
// partial output plane in shared memory, which is then reduced by the
// yy == 0 threads.  Requires nthreads * output_h * output_w floats to fit
// in CUDA_SHARED_MEM_SIZE.
__global__ void conv2genericrev(float *input, float *kernel, float *output, int input_n, int input_h, int input_w, int kernel_n, int kernel_h, int kernel_w, float alpha, int stride_h, int stride_w)
{
// output dimensions
int output_h = input_h - (kernel_h - 1) * stride_h;
int output_w = input_w - (kernel_w - 1) * stride_w;
// this thread only processes one output, defined by the block Ids
int kk = blockIdx.x;
int ii = blockIdx.y;
// batch id
int batch = threadIdx.z;
// kernel id
int kid = threadIdx.x;
int nkids = blockDim.x;
// thread ID
int tid = kid + batch*blockDim.x;
int nthreads = blockDim.x * blockDim.z;
// one thread only sees one output
output = output + (kk * input_n + ii) * output_h*output_w;
// put the output in shared memory
__shared__ float shared_output[CUDA_SHARED_MEM_SIZE];
// generate tid outputs in shared memory: each (batch, kid) thread group
// owns its own partial output plane
float *output_s = shared_output + tid*output_w*output_h;
// convolution loop
int xx, yy, kx, ky;
yy = threadIdx.y;
float *output_p = output_s + yy * output_w;
for(xx=0; xx<output_w; xx++) {
// Dot product in two dimensions... (between input image and kernel);
// each kid handles a strided subset of kernel columns, summed later
// by the reduction below
float *input_p = input + (ii + batch*input_n)*input_h*input_w + yy*stride_h*input_w + xx*stride_w;
float *kernel_p = kernel + (kk + batch*kernel_n)*kernel_w*kernel_h;
float sum = 0;
for(ky=0; ky<kernel_h; ky++) {
for(kx=kid; kx<kernel_w; kx+=nkids) {
sum += input_p[kx]*kernel_p[kx];
}
input_p += input_w;
kernel_p += kernel_w;
}
*(output_p++) = sum;
}
__syncthreads();
// reduce and write back
if (yy == 0) {
// reduce outputs: fold every other thread group's partial plane into
// the first nthreads slots
for (int k=1; k<nthreads; k++) {
for (int i=tid; i<output_w*output_h; i+=nthreads) {
shared_output[i] += shared_output[k*output_h*output_w + i];
}
}
// NOTE(review): this __syncthreads() sits inside the `yy == 0` branch —
// only part of the block reaches it; confirm blockDim.y == 1 or that
// this is safe on the target architecture.
__syncthreads();
// add existing output, and write back (scaled by alpha)
for (int i=tid; i<output_w*output_h; i+=nthreads) {
output[i] += alpha*shared_output[i];
}
}
}
|
2,085
|
#include "includes.h"
// Element-wise vector addition: y[i] = a[i] + b[i] for i in [0, n).
// Supports 2-D grids: the global index folds gridDim.x into the block id.
__global__ void _bcnn_vadd_kernel(int n, float *a, float *b, float *y)
{
    const int gid = ((blockIdx.y * gridDim.x) + blockIdx.x) * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    y[gid] = a[gid] + b[gid];
}
|
2,086
|
//This is simple naive programme(brute force)
//which runs in O(N^2)
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <sys/time.h>
#define G 6.67408E-11
// Plain 2-D vector (position or force components).
struct vector
{
    float x, y;
};
// Newtonian gravitational pull between two point masses at positions a and b.
// Returns the force vector along the direction (a - b). When the two bodies
// coincide (r == 0) the divisions are skipped and a zero vector is returned.
vector gravity (vector a, vector b, float m1, float m2)
{
    vector f;
    f.y = a.y - b.y;
    f.x = a.x - b.x;

    float r2 = f.y * f.y + f.x * f.x;   // squared distance
    float magnitude = G * m1 * m2;      // F = G*m1*m2 / r^2
    if (r2 > 0) magnitude /= r2;

    float r = sqrt(r2);
    if (r > 0)                          // normalise the direction vector
    {
        f.y /= r;
        f.x /= r;
    }
    f.y *= magnitude;
    f.x *= magnitude;
    return f;
}
// O(N^2) brute-force n-body force computation on the CPU.
// Reads n from stdin, generates random positions/masses, accumulates pairwise
// gravitational forces (using Newton's third law to halve the work), prints
// per-body results, and reports the wall-clock time in milliseconds.
int main()
{
int n;
scanf("%d", &n);
// VLAs: positions, masses and accumulated forces for the n bodies.
vector body[n];
float mass[n];
for (int i=0;i<n;i++)
{
body[i].x=rand()%1000000;
body[i].y=rand()%1000000;
mass[i]=rand()%1000000;
// scanf("%lf %lf %lf", &body[i].x, &body[i].y, &mass[i]);
}
vector force[n];
for (int i=0;i<n;i++) force[i].x=force[i].y=0;
struct timeval start, stop;
gettimeofday(&start, NULL);
// x,y accumulate the total net force (should be ~0 by symmetry — used as a
// sanity check at the end).
float x=0, y=0;
for (int i=0;i<n;i++)
{
// Each pair (i,j) is computed once; the reaction force is subtracted from j.
for (int j=i+1;j<n;j++)
{
vector temp=gravity(body[i], body[j], mass[i], mass[j]);
force[i].x+=temp.x;
force[i].y+=temp.y;
force[j].x-=temp.x;
force[j].y-=temp.y;
}
// force[i] is complete here: all j<i contributions were applied in earlier
// outer iterations, all j>i in the inner loop above.
printf("%d : %f %f %f : %.15f %.15f\n", i, body[i].x, body[i].y, mass[i], force[i].x, force[i].y);
x+=force[i].x, y+=force[i].y;
}
gettimeofday(&stop, NULL);
// Elapsed wall time in milliseconds.
float t=((stop.tv_sec-start.tv_sec)*1000+float(stop.tv_usec-start.tv_usec)/1000);
printf("naiive : %f\n", t);
printf("%f %f\n", x, y);
}
|
2,087
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Non-local-means style filter: for each interior pixel (i,j) of the n x m
// image A, compare its (2*patchSize+1)^2 patch (Gaussian-weighted by G)
// against every other interior pixel's patch, and write the similarity-
// weighted average into B (which excludes the patch-sized border).
// Launch layout expected: 2-D grid/blocks covering i in [0,n), j in [0,m).
// Fix: the original guard bounded j (the column index, dimension m) with
// patchSize_x instead of patchSize_y; for asymmetric patches this read out
// of range / skipped valid pixels. (Identical behavior when the two patch
// sizes are equal.)
__global__ void Babis_Kernel(double const *A, double *B, double const *G, int n, int m, int patchSize_x, int patchSize_y, double filtSigma)
{
    int x, y, area_x, area_y;
    double norm, w_temp, diff = 0, W = 0, Products = 0;
    // Pixel coordinates handled by this thread.
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    // Only interior pixels (a full patch fits around them) produce output.
    if ( ( i>=patchSize_x ) && ( j>=patchSize_y ) && ( i<=(n-patchSize_x) -1 ) && ( j<=(m-patchSize_y) -1 ) )
    {
        for (x = patchSize_x; x < (n - patchSize_x); x++)
        {
            for (y = patchSize_y; y < (m - patchSize_y); y++)
            {
                norm = 0;
                w_temp = 0;
                // (i,j): this thread's pixel; (x,y): the candidate neighbor;
                // (area_x,area_y): offsets sweeping the patch window.
                for (area_x = -patchSize_x; area_x <= patchSize_x; area_x++)
                {
                    for (area_y = -patchSize_y; area_y <= patchSize_y; area_y++)
                    {
                        // abs() is redundant before squaring but kept for
                        // bit-for-bit parity with the original arithmetic.
                        diff = abs(A[(i+area_x)*m+(j+area_y)] - A[(x+area_x)*m+(y+area_y)]);
                        diff = diff * G[(area_x+patchSize_x)*((patchSize_y*2)+1)+(area_y+patchSize_y)];
                        diff = diff * diff;
                        norm += diff;
                    }
                }
                // Patch similarity -> weight; accumulate weighted average.
                w_temp = exp(-norm/filtSigma);
                W += w_temp;
                Products += w_temp * A[x*m+y];
            }
        }
        // B is (n-2*patchSize_x) x (m-2*patchSize_y), border removed.
        B[(i-patchSize_x)*(m-(2*patchSize_y)) + (j-patchSize_y)] = Products/W;
    }
}
|
2,088
|
#include <stdio.h>
#define N 512
// One block per element: block b computes c[b] = a[b] + b[b].
__global__ void add(int *a, int *b, int *c){
    const int idx = blockIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Fill a[0..n) with pseudo-random integers in the range [1, 10].
void random_ints(int *a, int n){
    for (int idx = 0; idx != n; ++idx)
        a[idx] = 1 + rand() % 10;
}
// Classic "vector add" demo: N random pairs summed on the GPU with one
// block per element, then printed. No CUDA error checking is performed;
// the blocking cudaMemcpy after the launch doubles as the synchronization
// point before c[] is read.
int main(void){
int *a,*b,*c;
int *d_a,*d_b,*d_c;
int size = N*sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
a=(int*)malloc(size); random_ints(a, N);
b=(int*)malloc(size); random_ints(b, N);
c=(int*)malloc(size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// N blocks of 1 thread each; add() indexes by blockIdx.x.
add<<<N,1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
for(int i=0; i<N; i++)
printf("Hello World!%d %d %d\n", a[i], b[i], c[i]);
free(a); free(b);free(c);
return 0;
}
|
2,089
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
void Algorithm2(int m, int n, int l);
// Block size used in algorithm 2 of GEMM
#define BLOCK_SIZE_x 32
#define BLOCK_SIZE_y 16
__device__ unsigned long long totThr = 0;
// Naive column-major GEMM: C (m x l) = A (m x n) * B (n x l).
// One thread per element of C; every launched thread also bumps the
// device-side counter totThr before the bounds check.
__global__ void device_Matrix_multi(const double* const device_matrix_A, const double* const device_matrix_B,double* device_matrix_C, const int m,const int n,const int l)
{
    atomicAdd(&totThr, 1);
    const int row = threadIdx.x + blockDim.x * blockIdx.x;   // in [0, m)
    const int col = threadIdx.y + blockDim.y * blockIdx.y;   // in [0, l)
    if (row < m && col < l)
    {
        double acc = 0.0;
        for (int k = 0; k < n; ++k)
            acc += device_matrix_A[k * m + row] * device_matrix_B[col * n + k];
        device_matrix_C[col * m + row] = acc;
    }
}
// Run the GEMM benchmark for square matrices at every power-of-two
// size from 32 up to 4096 (same call sequence as listing each size).
int main()
{
    for (int s = 32; s <= 4096; s *= 2)
        Algorithm2(s, s, s);
}
// Benchmark one naive GEMM launch: C (m x l) = A (m x n) * B (n x l),
// column-major, using a BLOCK_SIZE_x x BLOCK_SIZE_y thread grid.
// Fixes over the original:
//  - host matrix_B was malloc'd as m*l while n*l bytes were copied;
//  - totThr accumulated across calls, so later sizes reported cumulative
//    counts — it is now reset per launch;
//  - %lu was used for an unsigned long long (now %llu);
//  - cudaEventElapsedTime was called without cudaEventSynchronize(stop),
//    so the reported time could be invalid;
//  - the CUDA events were never destroyed; a dead empty loop was removed.
// NOTE: the initialization loop still indexes all three matrices as m x n,
// which is only correct for the square (m == n == l) calls made by main().
void Algorithm2(const int m, const int n, const int l)
{
    double *device_matrix_A;
    double *device_matrix_B;
    double *device_matrix_C;
    double *matrix_A;
    double *matrix_B;
    double *matrix_C;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;

    // Host allocations sized consistently with the device buffers.
    matrix_A = (double*)malloc(m * n * sizeof(double));
    matrix_B = (double*)malloc(n * l * sizeof(double));
    matrix_C = (double*)malloc(m * l * sizeof(double));
    cudaMalloc(&device_matrix_A, m * n * sizeof(double));
    cudaMalloc(&device_matrix_B, n * l * sizeof(double));
    cudaMalloc(&device_matrix_C, m * l * sizeof(double));

    // Random inputs in [0, 9]; C zeroed.
    for (int i = 0; i < m; i++)
    {
        for (int j = 0; j < n; j++) {
            matrix_A[i * n + j] = rand() % 10;
            matrix_B[i * n + j] = rand() % 10;
            matrix_C[i * n + j] = 0;
        }
    }

    // Copy data from the host memory to the device memory
    cudaMemcpy(device_matrix_A, matrix_A, m * n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_matrix_B, matrix_B, n * l * sizeof(double), cudaMemcpyHostToDevice);

    // Reset the device-side thread counter so each call reports only its
    // own launch.
    unsigned long long zero = 0;
    cudaMemcpyToSymbol(totThr, &zero, sizeof(unsigned long long));

    dim3 nthreads(BLOCK_SIZE_x, BLOCK_SIZE_y);
    dim3 nblocks((m + nthreads.x - 1) / nthreads.x,
                 (l + nthreads.y - 1) / nthreads.y);

    cudaEventRecord(start);
    // Launch the kernel
    device_Matrix_multi <<<nblocks, nthreads>>> (device_matrix_A,device_matrix_B,device_matrix_C,m,n,l);

    // cudaMemcpyFromSymbol is synchronous, so the kernel has finished by
    // the time `total` is read.
    unsigned long long total;
    cudaMemcpyFromSymbol(&total, totThr, sizeof(unsigned long long));
    printf("Total threads counted in: %llu\n", total);

    // Copy data from the device memory to the host memory
    cudaMemcpy(matrix_C, device_matrix_C, m * l * sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    // The stop event must complete before its timestamp is valid.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("elaspsed for algorithm 2 = %f ms\n\n\n", milliseconds);

    // Free the device memory and timing events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(device_matrix_A);
    cudaFree(device_matrix_B);
    cudaFree(device_matrix_C);
    free(matrix_A);
    free(matrix_B);
    free(matrix_C);
}
|
2,090
|
#include <stdlib.h>
#include <stdio.h>
extern int HI; // the total number of point patterns
extern int HN; // the total number of points
extern int HK_star; // the total number of covariates
extern int Hd; // the dimensionality of the problem
extern int HV; // the total number of elements in the grid
// Load the model inputs from fixed-path text files:
//   foci        - HN x (Hd+1) integer point coordinates (+ pattern id column,
//                 presumably — confirm file layout)
//   Z           - HI x HK_star float design matrix
//   foci_counts - points per pattern (HI entries)
//   brain       - HV-entry binary brain mask
//   author      - HI-entry publication identifier
// NOTE(review): none of the fopen() results are checked for NULL; a missing
// input file would crash in fscanf.
void read_files(int ** foci , float ** Z , int * foci_counts , int * brain , int* author) {
int i , j; // those are for the for loops
// read the file with the points
FILE *file;
file = fopen("./inputs/foci.txt","r");
for (i=0 ; i<HN ; i++) {
for (j=0 ; j<(Hd+1) ; j++) {
if(!fscanf(file,"%d",&foci[i][j]))
break;
}
}
fclose(file);
// read the design matrix
file = fopen("./inputs/Z.txt","r");
for (i=0 ; i<HI ; i++) {
for (j=0 ; j<HK_star ; j++) {
if(!fscanf(file,"%f",&Z[i][j]))
break;
}
}
fclose(file);
// read the total number of points per pattern
file = fopen("./inputs/counts.txt","r");
for (i=0 ; i<HI ; i++) {
if(!fscanf(file,"%d",&foci_counts[i]))
break;
}
fclose(file);
/* Binary vector indicating which voxels in the grid are within the brain mask */
file = fopen("mask.txt","r");
for (i=0 ; i<HV ; i++) {
if(!fscanf(file,"%d",&brain[i]))
break;
}
fclose(file);
/* Read the publication identifier. Studies from the same paper must appear consecutive */
file = fopen("./inputs/paper.txt","r");
for (i=0 ; i<HI ; i++) {
if(!fscanf(file,"%d",&author[i]))
break;
}
fclose(file);
}
|
2,091
|
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Element-wise sum: c[i] = a[i] + b[i] for every i < len; extra threads
// from the rounded-up grid do nothing.
__global__ void vectAdd(int *a, int *b, int *c, int len)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= len)
        return;
    c[gid] = a[gid] + b[gid];
}
/* Function computing the final string to print */
/* Copy a and b to the device, run vectAdd over `length` elements, copy the
 * result back into c, and report the time spent in each phase.
 * Fix: kernel launches are asynchronous, so the original "Processing" window
 * closed immediately after the launch and only measured launch overhead;
 * cudaDeviceSynchronize() now makes that number the actual kernel runtime.
 * A dead trailing gettimeofday (its result was never used) was removed. */
void vector_add(int *c, int *a, int *b, int length)
{
    int *d_a, *d_b, *d_c;
    struct timeval t1, t2;
    cudaMalloc(&d_a, length * sizeof(int));
    cudaMalloc(&d_b, length * sizeof(int));
    cudaMalloc(&d_c, length * sizeof(int));

    gettimeofday(&t1, NULL);
    cudaMemcpy(d_a, a, length * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, length * sizeof(int), cudaMemcpyHostToDevice);
    gettimeofday(&t2, NULL);
    printf("Transfer done in %ld us\n", (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec));

    gettimeofday(&t1, NULL);
    // One thread per element: enough 1024-wide blocks to cover `length`.
    dim3 dimGrid(length / 1024 + 1);
    dim3 dimBlock(1024);
    vectAdd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, length);
    cudaDeviceSynchronize();   // wait for the kernel so the timing is real
    gettimeofday(&t2, NULL);
    printf("Processing done in %ld us\n", (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec));

    gettimeofday(&t1, NULL);
    cudaMemcpy(c, d_c, length * sizeof(int), cudaMemcpyDeviceToHost);
    gettimeofday(&t2, NULL);
    printf("Tranfer back done in %ld us\n", (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec));

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
/* Driver: parse N (array size) and S (RNG seed), build two random vectors,
 * add them on the GPU via vector_add(), and verify against the CPU sum.
 * Fixes: the argument check accepted argc == 2 while argv[2] was read
 * (out-of-bounds); a verification failure exited with status 0 (success);
 * and the host buffers were leaked. */
int main(int argc, char **argv)
{
    int N, S;
    if (argc < 3)   /* both N and S are required */
    {
        printf("Usage: %s N S\n", argv[0]);
        printf("\tS: seed for pseudo-random generator\n");
        printf("\tN: size of the array\n");
        exit(1);
    }
    N = atoi(argv[1]);
    S = atoi(argv[2]);
    srand48(S);
    int *A, *B, *C;
    int i;
    A = (int *)malloc(N * sizeof(int));
    B = (int *)malloc(N * sizeof(int));
    C = (int *)malloc(N * sizeof(int));
    /* Initialize the arrays */
    for (i = 0; i < N; i++)
    {
        A[i] = lrand48();
        B[i] = lrand48();
    }
    vector_add(C, A, B, N);
    /* Checking the result */
    printf("Checking the result...\n");
    for (i = 0; i < N; i++)
    {
        if (C[i] != A[i] + B[i])
        {
            printf("Wrong res for i=%d\n", i);
            free(A); free(B); free(C);
            return 1;   /* signal failure to the shell */
        }
    }
    printf("Res OK!\n");
    free(A);
    free(B);
    free(C);
    return 0;
}
|
2,092
|
#include <stdio.h>
#include <time.h>
#define MIN(a,b) (a < b ? a : b)
#define PRINT 0
static const int N = 50000;
// One odd-even transposition step over `array`.
// Even iterations compare pairs (2i, 2i+1); odd iterations compare
// (2i+1, 2i+2). The host launches N/2 total threads for an N-element array,
// so the array length can be recovered as 2 * (total threads).
// Fix: on odd iterations the last thread (id = N/2-1) touched
// array[2*id+2] == array[N], one past the end — now guarded.
__global__ void bubble_sort(int *array, int iteracio)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int n = 2 * blockDim.x * gridDim.x;   // array length implied by the launch
    if (iteracio % 2 == 0) {
        if (array[2*id] > array[2*id+1])
        {
            int aux = array[2*id];
            array[2*id] = array[2*id+1];
            array[2*id+1] = aux;
        }
    } else if (2*id + 2 < n) {            // skip the out-of-range last pair
        if (array[2*id+1] > array[2*id+2]) {
            int aux = array[2*id+1];
            array[2*id+1] = array[2*id+2];
            array[2*id+2] = aux;
        }
    }
}
//swap values at memory locations
// Exchange the integers stored at elem1 and elem2 through a temporary.
void swap(int *elem1, int *elem2)
{
    int tmp = *elem2;
    *elem2 = *elem1;
    *elem1 = tmp;
}
//Bubble sort algorithm to sort arrays in ascending order
// In-place ascending bubble sort: after pass p the largest p+1 elements are
// in their final positions, so each pass scans one element fewer.
void bubbleSort(int * const array, const int size)
{
    for (int pass = 0; pass < size - 1; ++pass)
    {
        for (int k = 0; k + 1 < size - pass; ++k)
        {
            if (array[k] > array[k + 1])
                swap(array + k, array + k + 1);
        }
    }
}
// Compare CPU bubble sort against a GPU odd-even transposition sort over the
// same N-element random array, timing both with clock().
// NOTE(review): the array is sorted on the CPU first and the already-sorted
// data is then copied to the GPU, so the GPU pass sorts sorted input.
// NOTE(review): the kernel is launched as <<<1, N/2>>>; with N = 50000 that
// is 25000 threads in one block, far above the 1024-thread block limit, so
// the launch presumably fails silently (no error checking) — confirm.
int main(int argc, char const *argv[])
{
srand(time(NULL));
// N-int array on the stack (~200 KB for N = 50000).
int a[N];
int *dev_a;
for(int i=0;i<N;i++)
a[i] = (int)rand()/(int)(RAND_MAX/300.0);
#if PRINT
printf("desordenat\n");
for(int i=0;i<N;i++)
printf("%d ", a[i]);
#endif
// CPU execution
clock_t t_host = clock();
bubbleSort(a,N);
t_host = clock() - t_host;
double time_taken_host = ((double)t_host)/CLOCKS_PER_SEC;
printf("CPU: %f segons\n",time_taken_host);
// GPU execution
// device memory
cudaMalloc((void**)&dev_a, N*sizeof(int) );
cudaMemcpy(dev_a,a,N*sizeof(int),cudaMemcpyHostToDevice);
// NOTE(review): threads_block is computed but never used in the launches.
int threads_block = MIN(512,N);
while(N%threads_block != 0)--threads_block;
// run 2N+1 alternating odd/even phases
clock_t t_device = clock();
for (int it = 0; it <= 2*N; it++) {
// kernel call
// NOTE(review): both branches launch the identical configuration; the
// phase parity is carried by `it` inside the kernel.
if(it%2 == 0){
bubble_sort<<<1,(N/2)>>>(dev_a,it);
}else{
bubble_sort<<<1,(N/2)>>>(dev_a,it);
}
}
cudaMemcpy(a,dev_a,N*sizeof(int),cudaMemcpyDeviceToHost);
t_device = clock() - t_device;
double time_taken_device = ((double)t_device)/CLOCKS_PER_SEC;
printf("GPU %f segons \n", time_taken_device);
#if PRINT
printf("\nOrdenat\n");
for(int i=0;i<N;i++)
printf("%d ", a[i]);
#endif
cudaFree(dev_a);
return 0;
}
|
2,093
|
#define MAX_THREADS 1024
#define MULT_TILE_WIDTH 16
// Tiled shared-memory GEMM: C = A * B, row-major, with
// MULT_TILE_WIDTH x MULT_TILE_WIDTH tiles staged in shared memory.
// Launch layout expected: blockDim = (MULT_TILE_WIDTH, MULT_TILE_WIDTH),
// grid covering ceil(numCColumns/TILE) x ceil(numCRows/TILE).
// Out-of-range tile loads are padded with zeros so the inner product needs
// no per-element bounds checks.
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns) {
// Shared tiles of A (M) and B (N) for the current tile step.
__shared__ float M[MULT_TILE_WIDTH][MULT_TILE_WIDTH];
__shared__ float N[MULT_TILE_WIDTH][MULT_TILE_WIDTH];
// Need to linearize the block matrix
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Row and Col of C's element that is being worked on
int Row = by*MULT_TILE_WIDTH + ty;
int Col = bx*MULT_TILE_WIDTH + tx;
float sum = 0.0;
// The loop will be over a linearized tile index along A's columns / B's rows.
for (int tile_x = 0; tile_x < ceil(numAColumns/(float)MULT_TILE_WIDTH); tile_x++) {
// First load the values of M and N that this thread is reponsible for,
// substituting 0 for elements outside the matrices.
if ((Row < numARows) && (tile_x*MULT_TILE_WIDTH + tx) < numAColumns) {
M[ty][tx] = A[Row*numAColumns + tile_x*MULT_TILE_WIDTH + tx];
} else {
M[ty][tx] = 0.0;
}
//__syncthreads();
if((Col < numBColumns) && (tile_x*MULT_TILE_WIDTH + ty) < numBRows) {
N[ty][tx] = B[(tile_x*MULT_TILE_WIDTH + ty)*numBColumns + Col];
} else {
N[ty][tx] = 0.0;
}
// Make sure all threads in block have loaded their values
__syncthreads();
// Partial dot product over this tile.
for (int k = 0; k < MULT_TILE_WIDTH; k++) {
sum += M[ty][k]*N[k][tx];
}
// Barrier before the tiles are overwritten in the next iteration.
__syncthreads();
}
// Only in-range threads write their accumulated element of C.
if (Row < numCRows && Col < numCColumns) {
C[Row*numCColumns + Col] = sum;
}
}
// Launch the tiled GEMM for batch element b:
// y[b] (M x H_out*W_out) = w (M x C*K*K) * x (C*K*K x H_out*W_out).
void matrixMult(int M, int C, int K, int b, int H_out, int W_out, float *w, float *x, float *y){
    const int rows  = M;
    const int inner = C * K * K;
    const int cols  = H_out * W_out;
    dim3 block(MULT_TILE_WIDTH, MULT_TILE_WIDTH, 1);
    dim3 grid(ceil(cols/(1.0*MULT_TILE_WIDTH)), ceil(rows/(1.0*MULT_TILE_WIDTH)), 1);
    matrixMultiplyShared<<<grid, block>>>(w, x, y + b*rows*cols, rows, inner, inner, cols, rows, cols);
}
// Generic launcher for the tiled GEMM: y (a_rows x b_cols) = w * x.
// Note: only a_rows, a_cols and b_cols feed the launch, mirroring the
// assumption a_cols == b_rows; the remaining dimensions are ignored.
void matrixMult_launcher(int a_rows, int a_cols, int b_rows, int b_cols, int c_rows, int c_cols, float *w, float *x, float *y){
    const int rows  = a_rows;
    const int inner = a_cols;
    const int cols  = b_cols;
    dim3 block(MULT_TILE_WIDTH, MULT_TILE_WIDTH, 1);
    dim3 grid(ceil(cols/(1.0*MULT_TILE_WIDTH)), ceil(rows/(1.0*MULT_TILE_WIDTH)), 1);
    matrixMultiplyShared<<<grid, block>>>(w, x, y, rows, inner, inner, cols, rows, cols);
}
|
2,094
|
#include <stdio.h>
#include <stdlib.h>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// kernel: reverses the n-element array in place.
__global__ void kernel(int *arr1, int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x; // global index of the element this thread handles
if (idx < n/2) { // only the first half swaps; otherwise the array would be swapped back to its original order
printf("block №%d, thread №%d: обмен местами эл-та №%d и №%d\n", blockIdx.x, threadIdx.x, idx, n - idx - 1);
int tmp = arr1[idx];
arr1[idx] = arr1[n - idx - 1];
arr1[n - idx - 1] = tmp;
}
}
// Host driver: build the array 1..512, reverse it on the GPU with 16 blocks
// of 32 threads, time the kernel with CUDA events, and print the result.
int main(){
int *arrHost = (int *)malloc(512*sizeof(int));
int *arrDev;
int i;
for (i = 0; i < 512; i++)
arrHost[i] = i + 1;
cudaEvent_t before, after;
CSC(cudaEventCreate(&before)); // create the two CUDA timing events
CSC(cudaEventCreate(&after));
CSC(cudaMalloc(&arrDev, 512*sizeof(int)));
CSC(cudaMemcpy(arrDev, arrHost, 512*sizeof(int), cudaMemcpyHostToDevice));
CSC(cudaEventRecord(before)); // record the kernel start time
kernel<<<16, 32>>>(arrDev, 512);
CSC(cudaGetLastError());
CSC(cudaEventRecord(after)); // record the kernel end time
CSC(cudaEventSynchronize(after));
float t;
CSC(cudaEventElapsedTime(&t, before, after)); // compute the kernel's elapsed time (ms)
CSC(cudaEventDestroy(before));
CSC(cudaEventDestroy(after));
printf("time = %f\n", t); // print the measured elapsed time
CSC(cudaMemcpy(arrHost, arrDev, 512*sizeof(int), cudaMemcpyDeviceToHost));
for (i = 0; i < 512; i++){
printf("%d ", arrHost[i]);
}
printf("\n");
CSC(cudaFree(arrDev));
free(arrHost);
return 0;
}
|
2,095
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
// Draw a centered triangle of '*' into the (2k-1) x (2k-1) canvas `a`
// (flattened row-major, row stride k*2-1). Thread tid fills row tid with
// 2*tid+1 stars starting at column k - tid - 1. Expects a <<<1, k>>> launch.
__global__ void star(char *a, int *n)
{ int i, j, t;
int k = *n;
int tid=threadIdx.x;
j = k - tid - 1;          // starting column of this row's stars
t = (tid + 1) * 2 - 1;    // number of stars on row tid
for(i = 0; i < t; i++){
a[tid * (k * 2 - 1) + j + i] = '*';
}
}
// Read N, have the GPU draw an N-row star triangle on a (2N-1) x (2N-1)
// canvas, and print it (stars as-is, everything else as a blank).
// Fixes: the canvas was copied to the device — and later tested for '*' —
// without ever being initialized (undefined behavior: rows N..2N-2 were
// never written by the kernel); the device buffers were leaked.
int main(void)
{
    int i, N, j;
    scanf("%d", &N);
    int l = 2 * N - 1;
    char a[l][l];   // VLA canvas
    char *d_a;
    int *d_b;
    int size = sizeof(char);
    int s = sizeof(int);
    // Fill the canvas with spaces before it is sent to the device.
    for (i = 0; i < l; i++)
        for (j = 0; j < l; j++)
            a[i][j] = ' ';
    cudaMalloc((void **)&d_a, size * l * l);
    cudaMalloc((void **)&d_b, s);
    cudaMemcpy(d_a, &a, size * l * l, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &N, s, cudaMemcpyHostToDevice);
    star<<<1, N>>>(d_a, d_b);
    cudaMemcpy(a, d_a, size * l * l, cudaMemcpyDeviceToHost);
    for (i = 0; i < l; i++)
    {
        for (j = 0; j < l; j++) {
            if (a[i][j] == '*') {
                printf("%c", a[i][j]);
            } else {
                printf(" ");
            }
        }
        printf("\n");
    }
    // Release the device buffers (the original leaked them).
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
2,096
|
// Write 1.0f into array[i]; only the first global thread performs the store,
// every other thread is a no-op.
__global__ void set_one(float *array, int i) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid != 0)
        return;
    array[i] = 1.0f;
}
// Logistic sigmoid: 1 / (1 + e^{-x}).
__device__ float gpu_logistic(float x) {
    const float denom = 1 + expf(-x);
    return 1 / denom;
}
// Apply the logistic sigmoid elementwise to x[0..n).
__global__ void activation_function(float x[], int n) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n)
        return;
    x[gid] = gpu_logistic(x[gid]);
}
|
2,097
|
// Zero the red and green channels of a width x height image, leaving only
// blue. Expects a 2-D launch covering the image; out-of-range threads exit.
__global__ void apply_blue(unsigned char *red_channel,unsigned char *green_channel,
const unsigned int width, const unsigned int height) {
    const unsigned int y = threadIdx.y + blockIdx.y * blockDim.y;
    const unsigned int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (y >= height || x >= width)
        return;
    const int idx = x + y * width;
    red_channel[idx] = 0;
    green_channel[idx] = 0;
}
|
2,098
|
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
/* macro to index a 1D memory array with 2D indices in column-major order */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
/* linear size of the matrices */
#define SIZE 1024
/* CPU matrix multiply function */
/* CPU reference GEMM: C += A * B for column-major A (m x k) and C (m x n).
 * Callers zero C beforehand to obtain the plain product.
 * NOTE(review): B is indexed via INDX(kk, col, n), i.e. with leading
 * dimension n rather than k — correct only when k == n, as in this file's
 * square SIZE x SIZE usage. */
void host_dgemm( const int m, const int n, const int k,
double const * const a, double const * const b, double *c )
{
    for( int col = 0; col < n; col++ )
    {
        for( int row = 0; row < m; row++ )
        {
            /* accumulate the dot product into the existing C element;
             * same addition order as updating memory each step */
            double acc = c[INDX( row, col, m )];
            for( int kk = 0; kk < k; kk++ )
            {
                acc += a[INDX( row, kk, m )] * b[INDX( kk, col, n )];
            }
            c[INDX( row, col, m )] = acc;
        }
    }
}
// CPU DGEMM benchmark: allocate three SIZE x SIZE double matrices, fill A and
// B with uniform [0,1) values, time host_dgemm with CUDA events, and report
// seconds and GFlop/s.
// NOTE(review): memset/rand/malloc are used without including <string.h> /
// <stdlib.h> here — presumably pulled in transitively by the CUDA headers;
// confirm the build.
int main( int argc, char *argv[] )
{
int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
/* declare host pointers */
double *h_a, *h_b, *h_cdef;
size_t numbytes = size * size * sizeof( double );
/* allocate host pointers */
h_a = (double *) malloc( numbytes );
if( h_a == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_b = (double *) malloc( numbytes );
if( h_b == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
h_cdef = (double *) malloc( numbytes );
if( h_cdef == NULL )
{
fprintf(stderr,"Error in host malloc\n");
return 911;
}
/* set C to zero (host_dgemm accumulates into C) */
memset( h_cdef, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize A and B on the host with values in [0, 1) */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* start timers (CUDA events on the default stream also time host work
 * between record calls) */
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
/* call host dgemm */
host_dgemm( size, size, size, h_a, h_b, h_cdef );
/* stop the timers */
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
/* print the results: 2*n^3 flops for an n x n GEMM */
fprintf(stdout, "Total time CPU is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* cleanup */
free( h_a );
free( h_b );
free( h_cdef );
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
2,099
|
#include "includes.h"
// Per-element streaming (Welford-style) update: fold sample vector X (the
// (b+1)-th observation, b zero-based) into running means M1 and running sums
// of squared deviations M2, elementwise over nele entries.
__global__ void var(float * M1, float * M2, float * X, int b, size_t nele) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nele) {
        const float delta = X[i] - M1[i];
        M1[i] += delta / (b + 1);             // updated mean
        M2[i] += delta * (X[i] - M1[i]);      // uses old AND new mean
    }
}
|
2,100
|
#include "file_reader.cuh"
#include "reader.cuh"
// Construct a FileReader bound to `filename` for the given probe layout.
// NOTE(review): filename_ is set twice — once in the init list and again via
// set_filename(), which also computes file_size_. Harmless but redundant.
template<class T>
FileReader<T>::FileReader(std::string &filename, Probe &probe)
: Reader<T>(probe), filename_(filename), file_size_(0) {
set_filename(filename);
}
/**
* @brief Acquire samples_ from the file, so many frames at a time.
* @tparam T The type of samples_ stored in the underlying file.
* @param frame_offset Number of frames after the beginning to start acquiring.
* @param n_frames Number of frames to acquire.
* @param buf Buffer where the acquired samples_ will be stored.
* @return The number of frames read.
*/
template<class T>
uint32_t
FileReader<T>::AcquireFrames(std::vector<T> &buf,
uint64_t frame_offset,
uint32_t n_frames) {
if (n_frames == 0) {
return 0;
}
Open(); // no-op if already Open
// One frame = one sample per channel; size the buffer to the request.
auto n_channels = this->probe_.n_total();
auto n_samples = n_frames * n_channels;
if (buf.size() != n_samples) {
buf.resize(n_samples);
}
auto nb = sizeof(T);
// Byte offset of the first requested frame; clamp the read so it never
// extends past the end of the file.
// NOTE(review): if frame_offset lies beyond EOF, file_size_ - fpos
// underflows (unsigned) and the clamp fails — confirm callers keep
// frame_offset in range.
auto fpos = frame_offset * n_channels * nb;
auto n_bytes = nb * n_samples < file_size_ - fpos ? nb * n_samples : file_size_ - fpos;
fp.seekg(fpos, std::ios::beg);
fp.read((char *) buf.data(), n_bytes);
// Return the number of whole frames actually read.
return n_bytes / (nb * this->probe_.n_total());
}
/**
* @brief Open the underlying file for reading.
* @tparam T The type of samples_ stored in the underlying file.
*/
template<class T>
void FileReader<T>::Open() {
  // Already open: nothing to do.
  if (fp.is_open())
    return;
  fp.open(filename_, std::ios::in | std::ios::binary);
}
/**
* @brief Close the underlying file.
* @tparam T The type of samples_ stored in the underlying file.
*/
template<class T>
void FileReader<T>::Close() {
  // Closing an unopened stream would set failbit; skip it.
  if (!fp.is_open())
    return;
  fp.close();
}
/**
* @brief Compute and return the number of frames in the underlying data file.
* @tparam T The type of data stored in the underlying file.
* @return The number of frames in the underlying data file.
*/
template<class T>
uint64_t FileReader<T>::n_frames() const {
  // One frame holds one sample of type T per channel.
  const uint64_t bytes_per_frame = Reader<T>::probe_.n_total() * sizeof(T);
  return file_size_ / bytes_per_frame;
}
/**
* @brief Set the path for the underlying file.
* @tparam T The type of data stored in the underlying file.
* @param filename
*/
// Point the reader at a new file and refresh the cached file size.
template<class T>
void FileReader<T>::set_filename(std::string &filename) {
filename_ = filename;
// ComputeFileSize() opens/reads/closes the new file, so it must run after
// filename_ is updated.
ComputeFileSize();
}
// Cache the size of the underlying file (in bytes) into file_size_.
// NOTE(review): unconditionally closes the stream afterwards, even if the
// file was already open before the call — confirm callers don't rely on the
// stream staying open across this.
template<class T>
void FileReader<T>::ComputeFileSize() {
FileReader<T>::Open();
// seek to the end to get the size in bytes
fp.seekg(0, std::ios::end);
file_size_ = fp.tellg();
FileReader<T>::Close();
}
template
class FileReader<short>;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.