serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
23,801 | /*==========================================================================
MD5 KERNEL
* Copyright (c) 2008, NetSysLab at the University of British Columbia
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NetSysLab ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NetSysLab BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
DESCRIPTION
GPU (CUDA) version of the MD5 kernel of the storeGPU library.
==========================================================================*/
/*==========================================================================
INCLUDES
==========================================================================*/
#include <string.h>
#include <stdio.h>
#include "cust.h"
/*==========================================================================
DATA DECLARATIONS
==========================================================================*/
/*--------------------------------------------------------------------------
TYPE DEFINITIONS
--------------------------------------------------------------------------*/
/* Running state for an incremental MD5 computation (RFC 1321).
   Filled by md5_starts(), advanced by md5_update(), sealed by md5_finish(). */
typedef struct {
unsigned long total[2]; /*!< number of bytes processed (64-bit count kept as low/high 32-bit halves) */
unsigned long state[4]; /*!< intermediate digest state (A, B, C, D) */
unsigned char buffer[64]; /*!< data block being processed (partial block carried between updates) */
} md5_context;
/*--------------------------------------------------------------------------
FUNCTION PROTOTYPES
--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
CONSTANTS
--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
GLOBAL VARIABLES
--------------------------------------------------------------------------*/
/* Standard MD5 padding block: a single 0x80 marker byte followed by
   zeros. md5_finish() appends 1..64 bytes of this, then the 8-byte
   message length, so the padded message is a multiple of 64 bytes. */
__device__
const unsigned char md5_padding[64] =
{
0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/*--------------------------------------------------------------------------
MACROS
--------------------------------------------------------------------------*/
// 32-bit integer manipulation macros (little endian)
#ifndef GET_UINT32_LE
/* Decode a little-endian 32-bit value from bytes b[i..i+3] into n.
   NOTE(review): evaluated in unsigned long arithmetic; only the low
   32 bits of n are meaningful on LP64 targets. */
#define GET_UINT32_LE(n,b,i) \
{ \
(n) = ( (unsigned long) (b)[(i) ] ) \
| ( (unsigned long) (b)[(i) + 1] << 8 ) \
| ( (unsigned long) (b)[(i) + 2] << 16 ) \
| ( (unsigned long) (b)[(i) + 3] << 24 ); \
}
#endif
/* Encode the low 32 bits of n as little-endian bytes b[i..i+3]. */
#ifndef PUT_UINT32_LE
#define PUT_UINT32_LE(n,b,i) \
{ \
(b)[(i) ] = (unsigned char) ( (n) ); \
(b)[(i) + 1] = (unsigned char) ( (n) >> 8 ); \
(b)[(i) + 2] = (unsigned char) ( (n) >> 16 ); \
(b)[(i) + 3] = (unsigned char) ( (n) >> 24 ); \
}
#endif
#ifdef FEATURE_SHARED_MEMORY
// current thread stride.
/* Lane-interleaved shared-memory layout: word `index` of the lane
   (threadIdx.x & 0x1F) within a warp lives at 32*index + lane, so the
   32 lanes' copies of the same word occupy consecutive slots. */
#define SHARED_MEMORY_INDEX(index) (32 * (index) + (threadIdx.x & 0x1F))
#endif /* FEATURE_SHARED_MEMORY */
/*==========================================================================
FUNCTIONS
==========================================================================*/
/*--------------------------------------------------------------------------
LOCAL FUNCTIONS
--------------------------------------------------------------------------*/
#ifndef FEATURE_SHARED_MEMORY
/*===========================================================================
FUNCTION <Name>
DESCRIPTION
MD5 context setup
DEPENDENCIES
<dep.>
RETURN VALUE
<return>
===========================================================================*/
/* Initialize an MD5 context: zero the running byte count and load the
   standard RFC 1321 initialization vector into the digest state. */
__device__
static void md5_starts( md5_context *ctx ) {
    /* No bytes processed yet. */
    ctx->total[0] = ctx->total[1] = 0;

    /* A, B, C, D initial chaining values from the MD5 specification. */
    ctx->state[0] = 0x67452301;
    ctx->state[1] = 0xEFCDAB89;
    ctx->state[2] = 0x98BADCFE;
    ctx->state[3] = 0x10325476;
}
/*===========================================================================
FUNCTION MD5_PROCESS
DESCRIPTION
<Desc.>
DEPENDENCIES
<dep.>
RETURN VALUE
<return>
===========================================================================*/
/* Core MD5 compression function: folds one 64-byte block into
   ctx->state using the four rounds of RFC 1321.  `data` is read-only. */
__device__
static void md5_process( md5_context *ctx, unsigned char data[64] ) {
    unsigned long A, B, C, D;

    /* Decode the block into sixteen little-endian 32-bit words.
       FIX: the original aliased `data` through an `unsigned long *` and
       stored the decoded words back through it.  On LP64 targets (the
       norm for 64-bit CUDA builds, where unsigned long is 8 bytes) that
       mis-strides and writes far out of bounds of the 64-byte block.
       A private 32-bit scratch array is correct on every target. */
    unsigned int X[16];
    GET_UINT32_LE( X[ 0], data,  0 );
    GET_UINT32_LE( X[ 1], data,  4 );
    GET_UINT32_LE( X[ 2], data,  8 );
    GET_UINT32_LE( X[ 3], data, 12 );
    GET_UINT32_LE( X[ 4], data, 16 );
    GET_UINT32_LE( X[ 5], data, 20 );
    GET_UINT32_LE( X[ 6], data, 24 );
    GET_UINT32_LE( X[ 7], data, 28 );
    GET_UINT32_LE( X[ 8], data, 32 );
    GET_UINT32_LE( X[ 9], data, 36 );
    GET_UINT32_LE( X[10], data, 40 );
    GET_UINT32_LE( X[11], data, 44 );
    GET_UINT32_LE( X[12], data, 48 );
    GET_UINT32_LE( X[13], data, 52 );
    GET_UINT32_LE( X[14], data, 56 );
    GET_UINT32_LE( X[15], data, 60 );

#undef S
/* 32-bit left rotate; the 0xFFFFFFFF mask keeps the right shift well
   defined when the operand type is wider than 32 bits. */
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))

#undef P
/* One MD5 step: a = ((a + F(b,c,d) + X[k] + t) <<< s) + b.
   FIX: the original definition ended with a stray line continuation
   that swallowed the following statement into the macro body. */
#define P(a,b,c,d,k,s,t) { \
    a += F(b,c,d) + X[k] + t; a = S(a,s) + b; \
}

    A = ctx->state[0];
    B = ctx->state[1];
    C = ctx->state[2];
    D = ctx->state[3];

/* Round 1 */
#define F(x,y,z) (z ^ (x & (y ^ z)))
    P( A, B, C, D,  0,  7, 0xD76AA478 );
    P( D, A, B, C,  1, 12, 0xE8C7B756 );
    P( C, D, A, B,  2, 17, 0x242070DB );
    P( B, C, D, A,  3, 22, 0xC1BDCEEE );
    P( A, B, C, D,  4,  7, 0xF57C0FAF );
    P( D, A, B, C,  5, 12, 0x4787C62A );
    P( C, D, A, B,  6, 17, 0xA8304613 );
    P( B, C, D, A,  7, 22, 0xFD469501 );
    P( A, B, C, D,  8,  7, 0x698098D8 );
    P( D, A, B, C,  9, 12, 0x8B44F7AF );
    P( C, D, A, B, 10, 17, 0xFFFF5BB1 );
    P( B, C, D, A, 11, 22, 0x895CD7BE );
    P( A, B, C, D, 12,  7, 0x6B901122 );
    P( D, A, B, C, 13, 12, 0xFD987193 );
    P( C, D, A, B, 14, 17, 0xA679438E );
    P( B, C, D, A, 15, 22, 0x49B40821 );
#undef F

/* Round 2 */
#define F(x,y,z) (y ^ (z & (x ^ y)))
    P( A, B, C, D,  1,  5, 0xF61E2562 );
    P( D, A, B, C,  6,  9, 0xC040B340 );
    P( C, D, A, B, 11, 14, 0x265E5A51 );
    P( B, C, D, A,  0, 20, 0xE9B6C7AA );
    P( A, B, C, D,  5,  5, 0xD62F105D );
    P( D, A, B, C, 10,  9, 0x02441453 );
    P( C, D, A, B, 15, 14, 0xD8A1E681 );
    P( B, C, D, A,  4, 20, 0xE7D3FBC8 );
    P( A, B, C, D,  9,  5, 0x21E1CDE6 );
    P( D, A, B, C, 14,  9, 0xC33707D6 );
    P( C, D, A, B,  3, 14, 0xF4D50D87 );
    P( B, C, D, A,  8, 20, 0x455A14ED );
    P( A, B, C, D, 13,  5, 0xA9E3E905 );
    P( D, A, B, C,  2,  9, 0xFCEFA3F8 );
    P( C, D, A, B,  7, 14, 0x676F02D9 );
    P( B, C, D, A, 12, 20, 0x8D2A4C8A );
#undef F

/* Round 3 */
#define F(x,y,z) (x ^ y ^ z)
    P( A, B, C, D,  5,  4, 0xFFFA3942 );
    P( D, A, B, C,  8, 11, 0x8771F681 );
    P( C, D, A, B, 11, 16, 0x6D9D6122 );
    P( B, C, D, A, 14, 23, 0xFDE5380C );
    P( A, B, C, D,  1,  4, 0xA4BEEA44 );
    P( D, A, B, C,  4, 11, 0x4BDECFA9 );
    P( C, D, A, B,  7, 16, 0xF6BB4B60 );
    P( B, C, D, A, 10, 23, 0xBEBFBC70 );
    P( A, B, C, D, 13,  4, 0x289B7EC6 );
    P( D, A, B, C,  0, 11, 0xEAA127FA );
    P( C, D, A, B,  3, 16, 0xD4EF3085 );
    P( B, C, D, A,  6, 23, 0x04881D05 );
    P( A, B, C, D,  9,  4, 0xD9D4D039 );
    P( D, A, B, C, 12, 11, 0xE6DB99E5 );
    P( C, D, A, B, 15, 16, 0x1FA27CF8 );
    P( B, C, D, A,  2, 23, 0xC4AC5665 );
#undef F

/* Round 4 */
#define F(x,y,z) (y ^ (x | ~z))
    P( A, B, C, D,  0,  6, 0xF4292244 );
    P( D, A, B, C,  7, 10, 0x432AFF97 );
    P( C, D, A, B, 14, 15, 0xAB9423A7 );
    P( B, C, D, A,  5, 21, 0xFC93A039 );
    P( A, B, C, D, 12,  6, 0x655B59C3 );
    P( D, A, B, C,  3, 10, 0x8F0CCC92 );
    P( C, D, A, B, 10, 15, 0xFFEFF47D );
    P( B, C, D, A,  1, 21, 0x85845DD1 );
    P( A, B, C, D,  8,  6, 0x6FA87E4F );
    P( D, A, B, C, 15, 10, 0xFE2CE6E0 );
    P( C, D, A, B,  6, 15, 0xA3014314 );
    P( B, C, D, A, 13, 21, 0x4E0811A1 );
    P( A, B, C, D,  4,  6, 0xF7537E82 );
    P( D, A, B, C, 11, 10, 0xBD3AF235 );
    P( C, D, A, B,  2, 15, 0x2AD7D2BB );
    P( B, C, D, A,  9, 21, 0xEB86D391 );
#undef F

    /* Accumulate this block's result into the running digest state. */
    ctx->state[0] += A;
    ctx->state[1] += B;
    ctx->state[2] += C;
    ctx->state[3] += D;
}
/*===========================================================================
FUNCTION MD5_UPDATE
DESCRIPTION
MD5 process buffer
DEPENDENCIES
<dep.>
RETURN VALUE
<return>
===========================================================================*/
/* Stream `ilen` bytes of `input` into the digest.  Partial 64-byte
   blocks are carried in ctx->buffer between calls; full blocks are
   compressed immediately via md5_process(). */
__device__
static void md5_update( md5_context *ctx, unsigned char *input, int ilen ) {
int fill;
unsigned long left;
if( ilen <= 0 )
return;
/* Bytes already pending in ctx->buffer (= total byte count mod 64). */
left = ctx->total[0] & 0x3F;
fill = 64 - left;
/* Maintain the 64-bit message length as two 32-bit halves. */
ctx->total[0] += ilen;
ctx->total[0] &= 0xFFFFFFFF;
if( ctx->total[0] < (unsigned long) ilen )
ctx->total[1]++;
/* First, top up and flush a previously buffered partial block. */
if( left && ilen >= fill ) {
//<ELSN>
/*memcpy( (void *) (ctx->buffer + left),
(void *) input, fill );*/
/* Byte-wise copy in place of memcpy (device-side code). */
for (int i = 0; i < fill; i++) {
ctx->buffer[i+left] = input[i];
}
//</ELSN>
md5_process( ctx, ctx->buffer );
input += fill;
ilen -= fill;
left = 0;
}
/* Compress full 64-byte blocks straight from the input. */
while( ilen >= 64 ) {
md5_process( ctx, input );
input += 64;
ilen -= 64;
}
/* Stash any trailing partial block for the next update/finish call. */
if( ilen > 0 ) {
//<ELSN>
/* memcpy( (void *) (ctx->buffer + left),
(void *) input, ilen );*/
for (int i = 0; i < ilen; i++) {
ctx->buffer[i+left] = input[i];
}
//</ELSN>
}
}
/*===========================================================================
FUNCTION MD5_FINISH
DESCRIPTION
MD5 final digest
DEPENDENCIES
None.
RETURN VALUE
<return>
===========================================================================*/
/* Finalize the digest: append standard MD5 padding and the 8-byte
   bit-length trailer, then write the digest to `output` little-endian.
   Output is 16 bytes, or only 4 when FEATURE_REDUCED_HASH_SIZE is set. */
__device__
void md5_finish( md5_context *ctx, unsigned char *output ) {
unsigned long last, padn;
unsigned long high, low;
unsigned char msglen[8];
/* Message length in BITS, as two 32-bit little-endian words. */
high = ( ctx->total[0] >> 29 ) | ( ctx->total[1] << 3 );
low = ( ctx->total[0] << 3 );
PUT_UINT32_LE( low, msglen, 0 );
PUT_UINT32_LE( high, msglen, 4 );
/* Pad out to 56 mod 64 so the length trailer completes a block. */
last = ctx->total[0] & 0x3F;
padn = ( last < 56 ) ? ( 56 - last ) : ( 120 - last );
md5_update( ctx, (unsigned char *) md5_padding, padn );
md5_update( ctx, msglen, 8 );
PUT_UINT32_LE( ctx->state[0], output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_LE( ctx->state[1], output, 4 );
PUT_UINT32_LE( ctx->state[2], output, 8 );
PUT_UINT32_LE( ctx->state[3], output, 12 );
#endif
}
/*===========================================================================
FUNCTION MD5_INTERNAL
DESCRIPTION
Does the real md5 algorithm
DEPENDENCIES
None
RETURN VALUE
output is the hash result
===========================================================================*/
/* Convenience one-shot MD5: hash `ilen` bytes of `input` into `output`
   using a context kept in this thread's local storage. */
__device__
static void md5_internal( unsigned char *input, int ilen,
unsigned char *output ) {
    md5_context scratch;

    md5_starts( &scratch );
    md5_update( &scratch, input, ilen );
    md5_finish( &scratch, output );
}
#endif /* #ifndef FEATURE_SHARED_MEMORY */
#ifdef FEATURE_SHARED_MEMORY
/*===========================================================================
FUNCTION MD5_INTERNAL
DESCRIPTION
Does the real md5 algorithm.
DEPENDENCIES
None
RETURN VALUE
output is the hash result
===========================================================================*/
/* Shared-memory MD5 over one `chunkSize`-byte chunk (16-word loads per
   512-bit pass).  Each thread stages its current block into a
   lane-interleaved stripe of `sharedMemory` (see SHARED_MEMORY_INDEX).
   NOTE(review): the final pass places 0x80 in word 13 and the bit
   length in words 14/15 — a storeGPU-specific block layout, not
   byte-exact RFC 1321 padding; confirm against the host-side verifier. */
__device__
static void md5_internal( unsigned int *input, unsigned int *sharedMemory,
int chunkSize, unsigned char *output ) {
/* Number of passes (512 bit blocks) we have to do */
int numberOfPasses = chunkSize / 64 + 1;
/* Used during the hashing process */
unsigned long A, B, C, D;
/* Needed to do the little endian stuff */
unsigned char *data = (unsigned char *)sharedMemory;
/* Will hold the hash value through the
intermediate stages of MD5 algorithm */
unsigned int state0 = 0x67452301;
unsigned int state1 = 0xEFCDAB89;
unsigned int state2 = 0x98BADCFE;
unsigned int state3 = 0x10325476;
/* Used to cache the shared memory index calculations, but testing showed
that it has no performance effect. */
int x0 = SHARED_MEMORY_INDEX(0);
int x1 = SHARED_MEMORY_INDEX(1);
int x2 = SHARED_MEMORY_INDEX(2);
int x3 = SHARED_MEMORY_INDEX(3);
int x4 = SHARED_MEMORY_INDEX(4);
int x5 = SHARED_MEMORY_INDEX(5);
int x6 = SHARED_MEMORY_INDEX(6);
int x7 = SHARED_MEMORY_INDEX(7);
int x8 = SHARED_MEMORY_INDEX(8);
int x9 = SHARED_MEMORY_INDEX(9);
int x10 = SHARED_MEMORY_INDEX(10);
int x11 = SHARED_MEMORY_INDEX(11);
int x12 = SHARED_MEMORY_INDEX(12);
int x13 = SHARED_MEMORY_INDEX(13);
int x14 = SHARED_MEMORY_INDEX(14);
int x15 = SHARED_MEMORY_INDEX(15);
#undef GET_CACHED_INDEX
#define GET_CACHED_INDEX(index) (x##index)
for( int index = 0 ; index < (numberOfPasses) ; index++ ) {
/* Move data to the thread's shared memory space */
sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index];
sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index];
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index];
sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index];
sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index];
sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index];
sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index];
sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index];
sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index];
sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index];
sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index];
sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index];
sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index];
/* Testing the code with and without this if statement shows that
it has no effect on performance. */
if(index == numberOfPasses -1 ) {
/* The last pass will contain the size of the chunk size (according to
official MD5 algorithm). */
/* 0x80 terminator word, then bit length = chunkSize*8 split across
words 14 (low) and 15 (high). */
sharedMemory[GET_CACHED_INDEX(13)] = 0x00000080;
sharedMemory[GET_CACHED_INDEX(14)] = chunkSize << 3;
sharedMemory[GET_CACHED_INDEX(15)] = chunkSize >> 29;
} else {
sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index];
sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index];
sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index];
}
/* Get the little endian stuff done. */
/* NOTE(review): these read and rewrite the same shared words in place;
on a little-endian device this byte reinterpretation is effectively
an identity — confirm if big-endian support was ever intended. */
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(0)],
data, GET_CACHED_INDEX(0) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(1)],
data, GET_CACHED_INDEX(1) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(2)],
data, GET_CACHED_INDEX(2) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(3)],
data, GET_CACHED_INDEX(3) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(4)],
data, GET_CACHED_INDEX(4) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(5)],
data, GET_CACHED_INDEX(5) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(6)],
data, GET_CACHED_INDEX(6) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(7)],
data, GET_CACHED_INDEX(7) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(8)],
data, GET_CACHED_INDEX(8) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(9)],
data, GET_CACHED_INDEX(9) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(10)],
data, GET_CACHED_INDEX(10) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(11)],
data, GET_CACHED_INDEX(11) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(12)],
data, GET_CACHED_INDEX(12) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(13)],
data, GET_CACHED_INDEX(13) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(14)],
data, GET_CACHED_INDEX(14) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(15)],
data, GET_CACHED_INDEX(15) * 4 );
/* Start the MD5 permutations */
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef P
#define P(a,b,c,d,k,s,t) { \
a += F(b,c,d) + sharedMemory[GET_CACHED_INDEX(k)] + t; a = S(a,s) + b; \
} \
/* end of P() — this comment terminates the macro's line continuation
   (the original presumably had a blank line here). */
A = state0;
B = state1;
C = state2;
D = state3;
#undef F
#define F(x,y,z) (z ^ (x & (y ^ z)))
P( A, B, C, D, 0, 7, 0xD76AA478 );
P( D, A, B, C, 1, 12, 0xE8C7B756 );
P( C, D, A, B, 2, 17, 0x242070DB );
P( B, C, D, A, 3, 22, 0xC1BDCEEE );
P( A, B, C, D, 4, 7, 0xF57C0FAF );
P( D, A, B, C, 5, 12, 0x4787C62A );
P( C, D, A, B, 6, 17, 0xA8304613 );
P( B, C, D, A, 7, 22, 0xFD469501 );
P( A, B, C, D, 8, 7, 0x698098D8 );
P( D, A, B, C, 9, 12, 0x8B44F7AF );
P( C, D, A, B, 10, 17, 0xFFFF5BB1 );
P( B, C, D, A, 11, 22, 0x895CD7BE );
P( A, B, C, D, 12, 7, 0x6B901122 );
P( D, A, B, C, 13, 12, 0xFD987193 );
P( C, D, A, B, 14, 17, 0xA679438E );
P( B, C, D, A, 15, 22, 0x49B40821 );
#undef F
#define F(x,y,z) (y ^ (z & (x ^ y)))
P( A, B, C, D, 1, 5, 0xF61E2562 );
P( D, A, B, C, 6, 9, 0xC040B340 );
P( C, D, A, B, 11, 14, 0x265E5A51 );
P( B, C, D, A, 0, 20, 0xE9B6C7AA );
P( A, B, C, D, 5, 5, 0xD62F105D );
P( D, A, B, C, 10, 9, 0x02441453 );
P( C, D, A, B, 15, 14, 0xD8A1E681 );
P( B, C, D, A, 4, 20, 0xE7D3FBC8 );
P( A, B, C, D, 9, 5, 0x21E1CDE6 );
P( D, A, B, C, 14, 9, 0xC33707D6 );
P( C, D, A, B, 3, 14, 0xF4D50D87 );
P( B, C, D, A, 8, 20, 0x455A14ED );
P( A, B, C, D, 13, 5, 0xA9E3E905 );
P( D, A, B, C, 2, 9, 0xFCEFA3F8 );
P( C, D, A, B, 7, 14, 0x676F02D9 );
P( B, C, D, A, 12, 20, 0x8D2A4C8A );
#undef F
#define F(x,y,z) (x ^ y ^ z)
P( A, B, C, D, 5, 4, 0xFFFA3942 );
P( D, A, B, C, 8, 11, 0x8771F681 );
P( C, D, A, B, 11, 16, 0x6D9D6122 );
P( B, C, D, A, 14, 23, 0xFDE5380C );
P( A, B, C, D, 1, 4, 0xA4BEEA44 );
P( D, A, B, C, 4, 11, 0x4BDECFA9 );
P( C, D, A, B, 7, 16, 0xF6BB4B60 );
P( B, C, D, A, 10, 23, 0xBEBFBC70 );
P( A, B, C, D, 13, 4, 0x289B7EC6 );
P( D, A, B, C, 0, 11, 0xEAA127FA );
P( C, D, A, B, 3, 16, 0xD4EF3085 );
P( B, C, D, A, 6, 23, 0x04881D05 );
P( A, B, C, D, 9, 4, 0xD9D4D039 );
P( D, A, B, C, 12, 11, 0xE6DB99E5 );
P( C, D, A, B, 15, 16, 0x1FA27CF8 );
P( B, C, D, A, 2, 23, 0xC4AC5665 );
#undef F
#define F(x,y,z) (y ^ (x | ~z))
P( A, B, C, D, 0, 6, 0xF4292244 );
P( D, A, B, C, 7, 10, 0x432AFF97 );
P( C, D, A, B, 14, 15, 0xAB9423A7 );
P( B, C, D, A, 5, 21, 0xFC93A039 );
P( A, B, C, D, 12, 6, 0x655B59C3 );
P( D, A, B, C, 3, 10, 0x8F0CCC92 );
P( C, D, A, B, 10, 15, 0xFFEFF47D );
P( B, C, D, A, 1, 21, 0x85845DD1 );
P( A, B, C, D, 8, 6, 0x6FA87E4F );
P( D, A, B, C, 15, 10, 0xFE2CE6E0 );
P( C, D, A, B, 6, 15, 0xA3014314 );
P( B, C, D, A, 13, 21, 0x4E0811A1 );
P( A, B, C, D, 4, 6, 0xF7537E82 );
P( D, A, B, C, 11, 10, 0xBD3AF235 );
P( C, D, A, B, 2, 15, 0x2AD7D2BB );
P( B, C, D, A, 9, 21, 0xEB86D391 );
#undef F
state0 += A;
state1 += B;
state2 += C;
state3 += D;
}
/* Got the hash, store it in the output buffer. */
PUT_UINT32_LE( state0, output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_LE( state1, output, 4 );
PUT_UINT32_LE( state2, output, 8 );
PUT_UINT32_LE( state3, output, 12 );
#endif
}
/* Shared-memory MD5 for overlapping chunks: like md5_internal() above
   but the final pass zero-pads the tail of a chunk whose length is not
   a multiple of 64, instead of assuming pre-zeroed input.
   NOTE(review): the padding math assumes the 0x80 word and the two
   length words fit in the last block ((64-12)-byte budget); confirm
   callers never pass a final-block remainder larger than 52 bytes. */
__device__
static void md5_internal_overlap( unsigned int *input, unsigned int *sharedMemory,
int chunkSize, unsigned char *output ) {
/* Number of passes (512 bit blocks) we have to do */
int numberOfPasses = chunkSize / 64 + 1;
/* Used during the hashing process */
unsigned long A, B, C, D;
/* Needed to do the little endian stuff */
unsigned char *data = (unsigned char *)sharedMemory;
// number of padding bytes.
int numPadBytes = 0;
int numPadInt = 0;
//int numPadRemain = 0;
/* Will hold the hash value through the
intermediate stages of MD5 algorithm */
unsigned int state0 = 0x67452301;
unsigned int state1 = 0xEFCDAB89;
unsigned int state2 = 0x98BADCFE;
unsigned int state3 = 0x10325476;
/* Used to cache the shared memory index calculations, but testing showed
that it has no performance effect. */
int x0 = SHARED_MEMORY_INDEX(0);
int x1 = SHARED_MEMORY_INDEX(1);
int x2 = SHARED_MEMORY_INDEX(2);
int x3 = SHARED_MEMORY_INDEX(3);
int x4 = SHARED_MEMORY_INDEX(4);
int x5 = SHARED_MEMORY_INDEX(5);
int x6 = SHARED_MEMORY_INDEX(6);
int x7 = SHARED_MEMORY_INDEX(7);
int x8 = SHARED_MEMORY_INDEX(8);
int x9 = SHARED_MEMORY_INDEX(9);
int x10 = SHARED_MEMORY_INDEX(10);
int x11 = SHARED_MEMORY_INDEX(11);
int x12 = SHARED_MEMORY_INDEX(12);
int x13 = SHARED_MEMORY_INDEX(13);
int x14 = SHARED_MEMORY_INDEX(14);
int x15 = SHARED_MEMORY_INDEX(15);
#undef GET_CACHED_INDEX
#define GET_CACHED_INDEX(index) (x##index)
for( int index = 0 ; index < (numberOfPasses) ; index++ ) {
if(index == numberOfPasses - 1 ) {
/* Final pass: zero whole words from word 13 downward to cover the
unused tail of the chunk, copy the remaining data words, then write
the 0x80 terminator and the 64-bit bit length (words 14/15). */
numPadBytes = (64-12) - (chunkSize - (numberOfPasses-1)*64);
numPadInt = numPadBytes/sizeof(int);
/*numPadRemain = numPadBytes-numPadInt*sizeof(int);
printf("\nLast loop chunkSize = %d, numberOfPasses= %d and \nnumPadBytes = %d, numPadInt =%d, numPadRemain = %d\n",
chunkSize,numberOfPasses,numPadBytes,numPadInt,numPadRemain);*/
int i=0;
for(i = 0 ; i < numPadInt ; i++){
sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0;
}
int j=0;
for(j=0;j<(16-3-numPadInt);j++){
//printf("j= %d\n",j);
sharedMemory[SHARED_MEMORY_INDEX(j)] = input[j + 16 * index];
}
/* The last pass will contain the size of the chunk size (according to
official MD5 algorithm). */
sharedMemory[SHARED_MEMORY_INDEX(13-i)] = 0x00000080;
//printf("the last one at %d\n",13-i);
sharedMemory[GET_CACHED_INDEX(14)] = chunkSize << 3;
sharedMemory[GET_CACHED_INDEX(15)] = chunkSize >> 29;
} else {
/* Move data to the thread's shared memory space */
//printf("Not last loop\n");
sharedMemory[GET_CACHED_INDEX(0)] = input[0 + 16 * index];
sharedMemory[GET_CACHED_INDEX(1)] = input[1 + 16 * index];
sharedMemory[GET_CACHED_INDEX(2)] = input[2 + 16 * index];
sharedMemory[GET_CACHED_INDEX(3)] = input[3 + 16 * index];
sharedMemory[GET_CACHED_INDEX(4)] = input[4 + 16 * index];
sharedMemory[GET_CACHED_INDEX(5)] = input[5 + 16 * index];
sharedMemory[GET_CACHED_INDEX(6)] = input[6 + 16 * index];
sharedMemory[GET_CACHED_INDEX(7)] = input[7 + 16 * index];
sharedMemory[GET_CACHED_INDEX(8)] = input[8 + 16 * index];
sharedMemory[GET_CACHED_INDEX(9)] = input[9 + 16 * index];
sharedMemory[GET_CACHED_INDEX(10)] = input[10 + 16 * index];
sharedMemory[GET_CACHED_INDEX(11)] = input[11 + 16 * index];
sharedMemory[GET_CACHED_INDEX(12)] = input[12 + 16 * index];
sharedMemory[GET_CACHED_INDEX(13)] = input[13 + 16 * index];
sharedMemory[GET_CACHED_INDEX(14)] = input[14 + 16 * index];
sharedMemory[GET_CACHED_INDEX(15)] = input[15 + 16 * index];
}
/* Get the little endian stuff done. */
/* NOTE(review): in-place byte reinterpretation; effectively an identity
on little-endian devices — see the note in md5_internal(). */
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(0)],
data, GET_CACHED_INDEX(0) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(1)],
data, GET_CACHED_INDEX(1) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(2)],
data, GET_CACHED_INDEX(2) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(3)],
data, GET_CACHED_INDEX(3) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(4)],
data, GET_CACHED_INDEX(4) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(5)],
data, GET_CACHED_INDEX(5) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(6)],
data, GET_CACHED_INDEX(6) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(7)],
data, GET_CACHED_INDEX(7) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(8)],
data, GET_CACHED_INDEX(8) * 4 );
GET_UINT32_LE( sharedMemory[ GET_CACHED_INDEX(9)],
data, GET_CACHED_INDEX(9) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(10)],
data, GET_CACHED_INDEX(10) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(11)],
data, GET_CACHED_INDEX(11) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(12)],
data, GET_CACHED_INDEX(12) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(13)],
data, GET_CACHED_INDEX(13) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(14)],
data, GET_CACHED_INDEX(14) * 4 );
GET_UINT32_LE( sharedMemory[GET_CACHED_INDEX(15)],
data, GET_CACHED_INDEX(15) * 4 );
/* Start the MD5 permutations */
#undef S
#define S(x,n) ((x << n) | ((x & 0xFFFFFFFF) >> (32 - n)))
#undef P
#define P(a,b,c,d,k,s,t) { \
a += F(b,c,d) + sharedMemory[GET_CACHED_INDEX(k)] + t; a = S(a,s) + b; \
} \
/* end of P() — this comment terminates the macro's line continuation
   (the original presumably had a blank line here). */
A = state0;
B = state1;
C = state2;
D = state3;
#undef F
#define F(x,y,z) (z ^ (x & (y ^ z)))
P( A, B, C, D, 0, 7, 0xD76AA478 );
P( D, A, B, C, 1, 12, 0xE8C7B756 );
P( C, D, A, B, 2, 17, 0x242070DB );
P( B, C, D, A, 3, 22, 0xC1BDCEEE );
P( A, B, C, D, 4, 7, 0xF57C0FAF );
P( D, A, B, C, 5, 12, 0x4787C62A );
P( C, D, A, B, 6, 17, 0xA8304613 );
P( B, C, D, A, 7, 22, 0xFD469501 );
P( A, B, C, D, 8, 7, 0x698098D8 );
P( D, A, B, C, 9, 12, 0x8B44F7AF );
P( C, D, A, B, 10, 17, 0xFFFF5BB1 );
P( B, C, D, A, 11, 22, 0x895CD7BE );
P( A, B, C, D, 12, 7, 0x6B901122 );
P( D, A, B, C, 13, 12, 0xFD987193 );
P( C, D, A, B, 14, 17, 0xA679438E );
P( B, C, D, A, 15, 22, 0x49B40821 );
#undef F
#define F(x,y,z) (y ^ (z & (x ^ y)))
P( A, B, C, D, 1, 5, 0xF61E2562 );
P( D, A, B, C, 6, 9, 0xC040B340 );
P( C, D, A, B, 11, 14, 0x265E5A51 );
P( B, C, D, A, 0, 20, 0xE9B6C7AA );
P( A, B, C, D, 5, 5, 0xD62F105D );
P( D, A, B, C, 10, 9, 0x02441453 );
P( C, D, A, B, 15, 14, 0xD8A1E681 );
P( B, C, D, A, 4, 20, 0xE7D3FBC8 );
P( A, B, C, D, 9, 5, 0x21E1CDE6 );
P( D, A, B, C, 14, 9, 0xC33707D6 );
P( C, D, A, B, 3, 14, 0xF4D50D87 );
P( B, C, D, A, 8, 20, 0x455A14ED );
P( A, B, C, D, 13, 5, 0xA9E3E905 );
P( D, A, B, C, 2, 9, 0xFCEFA3F8 );
P( C, D, A, B, 7, 14, 0x676F02D9 );
P( B, C, D, A, 12, 20, 0x8D2A4C8A );
#undef F
#define F(x,y,z) (x ^ y ^ z)
P( A, B, C, D, 5, 4, 0xFFFA3942 );
P( D, A, B, C, 8, 11, 0x8771F681 );
P( C, D, A, B, 11, 16, 0x6D9D6122 );
P( B, C, D, A, 14, 23, 0xFDE5380C );
P( A, B, C, D, 1, 4, 0xA4BEEA44 );
P( D, A, B, C, 4, 11, 0x4BDECFA9 );
P( C, D, A, B, 7, 16, 0xF6BB4B60 );
P( B, C, D, A, 10, 23, 0xBEBFBC70 );
P( A, B, C, D, 13, 4, 0x289B7EC6 );
P( D, A, B, C, 0, 11, 0xEAA127FA );
P( C, D, A, B, 3, 16, 0xD4EF3085 );
P( B, C, D, A, 6, 23, 0x04881D05 );
P( A, B, C, D, 9, 4, 0xD9D4D039 );
P( D, A, B, C, 12, 11, 0xE6DB99E5 );
P( C, D, A, B, 15, 16, 0x1FA27CF8 );
P( B, C, D, A, 2, 23, 0xC4AC5665 );
#undef F
#define F(x,y,z) (y ^ (x | ~z))
P( A, B, C, D, 0, 6, 0xF4292244 );
P( D, A, B, C, 7, 10, 0x432AFF97 );
P( C, D, A, B, 14, 15, 0xAB9423A7 );
P( B, C, D, A, 5, 21, 0xFC93A039 );
P( A, B, C, D, 12, 6, 0x655B59C3 );
P( D, A, B, C, 3, 10, 0x8F0CCC92 );
P( C, D, A, B, 10, 15, 0xFFEFF47D );
P( B, C, D, A, 1, 21, 0x85845DD1 );
P( A, B, C, D, 8, 6, 0x6FA87E4F );
P( D, A, B, C, 15, 10, 0xFE2CE6E0 );
P( C, D, A, B, 6, 15, 0xA3014314 );
P( B, C, D, A, 13, 21, 0x4E0811A1 );
P( A, B, C, D, 4, 6, 0xF7537E82 );
P( D, A, B, C, 11, 10, 0xBD3AF235 );
P( C, D, A, B, 2, 15, 0x2AD7D2BB );
P( B, C, D, A, 9, 21, 0xEB86D391 );
#undef F
state0 += A;
state1 += B;
state2 += C;
state3 += D;
}
/* Got the hash, store it in the output buffer. */
PUT_UINT32_LE( state0, output, 0 );
#ifndef FEATURE_REDUCED_HASH_SIZE
PUT_UINT32_LE( state1, output, 4 );
PUT_UINT32_LE( state2, output, 8 );
PUT_UINT32_LE( state3, output, 12 );
#endif
}
#endif
/*--------------------------------------------------------------------------
GLOBAL FUNCTIONS
--------------------------------------------------------------------------*/
/*===========================================================================
FUNCTION MD5
DESCRIPTION
Main md5 hash function
DEPENDENCIES
GPU must be initialized
RETURN VALUE
output: the hash result
===========================================================================*/
/* Entry kernel: thread t hashes chunk t (chunkSize bytes of `input`)
   into scratch[t * MD5_HASH_SIZE].  The last thread first zeroes its
   final `padSize` bytes.  Launch: 1-D grid, any block size; with
   FEATURE_SHARED_MEMORY, threads of a warp share a 512-word stripe of
   the static shared array, so blockDim.x must not exceed 8 warps
   (NOTE(review): confirm launch config at the call site). */
__global__
void md5( unsigned char *input, int chunkSize, int totalThreads,
int padSize, unsigned char *scratch) {
int threadIndex = threadIdx.x + blockDim.x * blockIdx.x;
int chunkIndex = threadIndex * chunkSize;
int hashIndex = threadIndex * MD5_HASH_SIZE;
/* Guard the grid tail. */
if(threadIndex >= totalThreads)
return;
/* The last chunk may be short: zero-fill its tail before hashing. */
if ((threadIndex == (totalThreads - 1)) && (padSize > 0)) {
for(int i = 0 ; i < padSize ; i++)
input[chunkIndex + chunkSize - padSize + i] = 0;
}
#ifdef FEATURE_SHARED_MEMORY
__shared__ unsigned int sharedMemory[4 * 1024 - 32];
// 512 words are allocated for every warp of 32 threads
unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512);
unsigned int *inputIndex = (unsigned int *)(input + chunkIndex);
md5_internal(inputIndex, sharedMemoryIndex, chunkSize,
scratch + hashIndex );
#else
md5_internal(input + chunkIndex, chunkSize, scratch + hashIndex );
#endif /* FEATURE_SHARED_MEMORY */
}
/* Variant for overlapping chunks: consecutive threads start `offset`
   bytes apart (offset < chunkSize means chunks overlap).  The last
   thread shortens its chunk by `padSize` instead of zero-filling. */
__global__
void md5_overlap( unsigned char *input, int chunkSize, int offset,
int totalThreads, int padSize, unsigned char *output ) {
int threadIndex = threadIdx.x + blockDim.x * blockIdx.x;
int chunkIndex = threadIndex * offset;
int hashIndex = threadIndex * MD5_HASH_SIZE;
if(threadIndex >= totalThreads)
return;
/* The last chunk is shorter rather than padded with zeros. */
if ((threadIndex == (totalThreads - 1))) {
chunkSize-= padSize;
}
#ifdef FEATURE_SHARED_MEMORY
__shared__ unsigned int sharedMemory[4 * 1024 - 32];
/* 512 shared words per warp of 32 threads. */
unsigned int *sharedMemoryIndex = sharedMemory + ((threadIdx.x >> 5) * 512);
unsigned int *inputIndex = (unsigned int *)(input + chunkIndex);
md5_internal_overlap(inputIndex, sharedMemoryIndex, chunkSize,
output + hashIndex );
#else
md5_internal(input + chunkIndex, chunkSize, output + hashIndex );
#endif /* FEATURE_SHARED_MEMORY */
}
|
23,802 |
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
/* Pack four input scalars into one float4, permuting the middle
   elements: out[0] = (in[0], in[3], in[1], in[2]).
   NOTE(review): every launched thread performs the same store to
   outdata[0]; benign here since all write the identical value. */
__global__ void getValue(float4 *outdata, float *indata) {
    outdata[0] = make_float4(indata[0], indata[3], indata[1], indata[2]);
}
int main(int argc, char *argv[]) {
    // Demo: copy four floats to the device, permute them into a float4
    // on the GPU, copy the buffer back, and print the first four values.
    //
    // FIX: the CUDA driver API requires cuInit() and a current context
    // before any other cu* call; the original invoked cuStreamCreate()
    // with no initialization, which fails with
    // CUDA_ERROR_NOT_INITIALIZED.  Driver-API results are now checked.
    int N = 1024;

    CUresult rc = cuInit(0);
    assert(rc == CUDA_SUCCESS);
    CUdevice device;
    rc = cuDeviceGet(&device, 0);
    assert(rc == CUDA_SUCCESS);
    // Use the primary context so the runtime-API kernel launch below
    // (<<<...>>>) shares the same context as the driver-API allocations.
    CUcontext context;
    rc = cuDevicePrimaryCtxRetain(&context, device);
    assert(rc == CUDA_SUCCESS);
    rc = cuCtxSetCurrent(context);
    assert(rc == CUDA_SUCCESS);

    CUstream stream;
    rc = cuStreamCreate(&stream, 0);
    assert(rc == CUDA_SUCCESS);

    // Pinned host buffers so the async copies can proceed asynchronously.
    float *hostFloats1;
    float *hostFloats2;
    rc = cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
    assert(rc == CUDA_SUCCESS);
    rc = cuMemHostAlloc((void **)&hostFloats2, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
    assert(rc == CUDA_SUCCESS);

    CUdeviceptr deviceFloats1;
    CUdeviceptr deviceFloats2;
    rc = cuMemAlloc(&deviceFloats1, N * sizeof(float));
    assert(rc == CUDA_SUCCESS);
    rc = cuMemAlloc(&deviceFloats2, N * sizeof(float));
    assert(rc == CUDA_SUCCESS);

    hostFloats1[0] = 123.456f;
    hostFloats1[1] = 9;
    hostFloats1[2] = 12;
    hostFloats1[3] = 19;

    // H2D copy, kernel, and D2H copy are ordered by the shared stream,
    // so only one synchronize at the end is required.
    rc = cuMemcpyHtoDAsync(deviceFloats1, hostFloats1, N * sizeof(float), stream);
    assert(rc == CUDA_SUCCESS);

    getValue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>((float4 *)deviceFloats2, (float *)deviceFloats1);
    assert(cudaGetLastError() == cudaSuccess);

    // Copy back the entire buffer and wait for completion.
    rc = cuMemcpyDtoHAsync(hostFloats2, deviceFloats2, N * sizeof(float), stream);
    assert(rc == CUDA_SUCCESS);
    rc = cuStreamSynchronize(stream);
    assert(rc == CUDA_SUCCESS);

    // Check the values: expect 123.456, 19, 9, 12.
    cout << hostFloats2[0] << endl;
    cout << hostFloats2[1] << endl;
    cout << hostFloats2[2] << endl;
    cout << hostFloats2[3] << endl;

    cuMemFreeHost(hostFloats1);
    cuMemFreeHost(hostFloats2);
    cuMemFree(deviceFloats1);
    cuMemFree(deviceFloats2);
    cuStreamDestroy(stream);
    cuDevicePrimaryCtxRelease(device);
    return 0;
}
|
// Placeholder for CPU-side tests; intentionally a no-op.
// NOTE(review): presumably a stub to be filled in later — confirm.
void TestCpuFunctions()
{
}
|
/* Elementwise vector addition: d_C[i] = d_A[i] + d_B[i]. */
__global__ void Sample1Kernel(float *d_A, float *d_B, float *d_C) {
// Step 1. Compute this CUDA thread's global ID.
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
// Step 2. Use the thread ID to load the operands from global memory and
// store their sum.
// NOTE(review): no bounds guard — assumes the launch covers exactly the
// array length; confirm at the call site.
d_C[thread_id] = d_A[thread_id] + d_B[thread_id];
}
|
23,805 | #define EIGEN_USE_GPU
#include <cuda.h>
#include <stdio.h>
/* Anti-diagonal sweep of a 2-D recurrence over pdes_sol, one sample per
   block (blockIdx.x) and one grid row per thread (threadIdx.x).  Each
   iteration updates the cells on one anti-diagonal; __syncthreads()
   orders consecutive diagonals, which carry the data dependency. */
__global__ void UntruncCovKernel(
const double* incr,
const int nb_incr,
const int paths_length,
const int paths_length_,
const int nb_diagonals,
double* pdes_sol)
{
unsigned int p = blockIdx.x;
unsigned int idx = threadIdx.x;
for (int diag=0; diag<nb_diagonals; diag++){
/* NOTE(review): idx is unsigned, so diag - idx wraps to a huge value
   when idx > diag; min/max then clamp it and the idx+J==diag guard
   below rejects the cell — works, but relies on the wraparound. */
unsigned int J = max(0, min(diag - idx, paths_length - 1));
unsigned int i = idx + 1;
unsigned int j = J + 1;
if( idx+J==diag && (idx<paths_length && J<paths_length)){
/* NOTE(review): incr is double but increment is float — the update is
   evaluated in double yet seeded from a truncated value; confirm the
   precision loss is intended. */
float increment = incr[(nb_incr*nb_incr)*p + (i-1)*nb_incr + (j-1)];
pdes_sol[(paths_length_*paths_length_)*p + i*paths_length_ + j] = ( pdes_sol[(paths_length_*paths_length_)*p + (i-1)*paths_length_ + j] + pdes_sol[(paths_length_*paths_length_)*p + i*paths_length_ + j-1] )*(1.+0.5*increment+(1./12)*increment*increment) - pdes_sol[(paths_length_*paths_length_)*p + (i-1)*paths_length_ + j-1]*(1.-(1./12)*increment*increment);
}
__syncthreads();
}
}
/* Reverse variant of UntruncCovKernel: same anti-diagonal sweep and
   indexing, but with a first-order update factor (1 - increment) on the
   corner term instead of the second-order expansion. */
__global__ void UntruncCovRevKernel(
const double* incr,
const int nb_incr,
const int paths_length,
const int paths_length_,
const int nb_diagonals,
double* pdes_sol)
{
unsigned int p = blockIdx.x;
unsigned int idx = threadIdx.x;
for (int diag=0; diag<nb_diagonals; diag++){
/* See the wraparound / truncation notes in UntruncCovKernel. */
unsigned int J = max(0, min(diag - idx, paths_length - 1));
unsigned int i = idx + 1;
unsigned int j = J + 1;
if( idx+J==diag && (idx<paths_length && J<paths_length)){
float increment = incr[(nb_incr*nb_incr)*p + (i-1)*nb_incr + (j-1)];
pdes_sol[(paths_length_*paths_length_)*p + i*paths_length_ + j] = pdes_sol[(paths_length_*paths_length_)*p + (i-1)*paths_length_ + j] + pdes_sol[(paths_length_*paths_length_)*p + i*paths_length_ + j-1] - pdes_sol[(paths_length_*paths_length_)*p + (i-1)*paths_length_ + j-1]*(1.-increment);
}
__syncthreads();
}
}
// Host wrapper: launches one block per batch sample with one thread per
// interior grid row of the solution.
void UntruncCovKernelLauncher(
const double* incr,
const int batch_samples,
const int nb_incr,
const int paths_length,
const int paths_length_,
const int nb_diagonals,
double* pdes_sol)
{
dim3 grid(batch_samples);
dim3 block(paths_length);
UntruncCovKernel<<<grid, block>>>(incr, nb_incr, paths_length, paths_length_, nb_diagonals, pdes_sol);
}
// Host wrapper for the first-order variant: one block per batch sample,
// one thread per interior grid row.
void UntruncCovRevKernelLauncher(
const double* incr,
const int batch_samples,
const int nb_incr,
const int paths_length,
const int paths_length_,
const int nb_diagonals,
double* pdes_sol)
{
dim3 grid(batch_samples);
dim3 block(paths_length);
UntruncCovRevKernel<<<grid, block>>>(incr, nb_incr, paths_length, paths_length_, nb_diagonals, pdes_sol);
}
|
23,806 | #include <stdio.h>
// Each GPU thread prints one greeting. Device-side printf output is buffered
// and only flushed at a host synchronization point (sync/reset).
__global__ void helloFromGPU(void){
printf("hello world from gpu!\n");
}
// Host entry: prints from the CPU, then launches 10 GPU threads that each
// print a greeting, and waits for them before tearing down the context.
int main(void){
// hello from cpu
printf("hello from cpu!\n");
// 1 thread block and 10 threads
helloFromGPU <<<1,10>>>();
// Improvement: explicitly wait for the kernel so the device printf buffer
// is flushed (and any launch/runtime error would surface) before reset.
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
23,807 | /*************************************************************************************************************
* FILE: lakegpu_mpi.cu
*
* AUTHORS: attiffan Aurora T. Tiffany-Davis
* ssbehera Subhendu S. Behera
* wpmoore2 Wade P. Moore
*
* DESCRIPTION: Assist with modeling the surface of a slice of a lake,
* where some pebbles have been thrown onto the surface.
* The energy level at any point on the lake is influenced by
* the energy level on that point in the past,
* and by the current energy levels at neighboring points.
* This program takes into account all 8 neighboring points,
* and parallelizes the simulation by using EXACTLY ONE compute node,
* using multiple GPU threads.
*
* TO RUN: srun -N4 -n4 -p opteron -x c[53,101,102] --pty /bin/bash
* make -f p3.Makefile lake-mpi
* prun ./lake [lake size] [# pebbles] [duration of simulation in seconds] [# GPU threads]
*************************************************************************************************************/
// INCLUDES
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
// DEFINES
#define __DEBUG
#define TSCALE 1.0
#define VSQR 0.1
// Declare globals
double *aDeviceEnergy, *aDeviceEnergyStepOld, *aDeviceEnergyStepCurrent, *aDevicePebbleSizes;
/*************************************************************************************************************
* FUNCTION: kf
*
* DESCRIPTION: Get the energy impact of a given pebble size on the lake based on time.
* Impact decreases as time increases.
*
* ARGUMENTS: nPebbleSize - The size of a given pebble
* nTime - The amount of time that has elapsed in the simulation
*
* AUTHORS: ssbehera Subhendu S. Behera
*************************************************************************************************************/
__device__ double kf(double nPebbleSize, double nTime)
{
// Exponentially decaying pebble impulse: -e^(-TSCALE * t) * size.
// Bug fix: use double-precision exp(); the previous expf() demoted the
// whole computation to float even though inputs and output are double.
return -exp(-TSCALE * nTime) * nPebbleSize;
}
/*************************************************************************************************************
* FUNCTION: evolve
*
* DESCRIPTION: Update the energy levels in this node's slice of the lake for every lake point therein.
* Each lake point's new energy level depends upon
* old energy levels and the energy levels of neighboring points.
* This version uses 9 points (point of interest and 8 neighboring points).
* This code runs on 1 thread and is responsible for updating the energy level of 1 lake point.
*
* ARGUMENTS: aDeviceEnergy - Array representing new energy levels at every point in the lake
* aDeviceEnergyStepOld - Array representing current energy levels at every point in the lake
* aDeviceEnergyStepCurrent - Array representing old energy levels at every point in the lake
* aDevicePebbleSizes - Array representing the pebble sizes at every point in the lake (sparse)
* nTime - The amount of time that has elapsed in the simulation
* nLakePointsOneAxis - Number of lake points in this node's slice of the lake, on the x axis
* nPointsY - Number of lake points in this node's slice of the lake, on the y axis
* nTimeStep - The amount of time between one simulation step and the next
* nPointSpacing - The spacing between two points on the lake
*
* RETURNS: None
*
* AUTHOR: ssbehera Subhendu S. Behera
* attiffan Aurora T. Tiffany-Davis
*************************************************************************************************************/
__global__ void evolve(
double *aDeviceEnergy,
double *aDeviceEnergyStepOld,
double *aDeviceEnergyStepCurrent,
double *aDevicePebbleSizes,
double nTime,
int nPointsX,
int nPointsY,
double nTimeStep,
double nPointSpacing,
int nMyRank,
int nNumTaskPoints
)
{
// Declare variables
int nIndexInMemory;
int idx, idy;
// Calculate thread index: idx = column, idy = row within this rank's slice
idx = threadIdx.x + blockIdx.x * blockDim.x;
idy = threadIdx.y + blockIdx.y * blockDim.y;
int nLakePointsOneAxis = nPointsX;
// If the thread is actually needed to help with the calculation
if (idx <= nPointsX - 1 && idy <= nPointsY - 1) {
// (idy + 1) skips the ghost/boundary row received from the neighbouring
// rank, so slice row 0 maps to the buffer's second row.
nIndexInMemory = (idy + 1) * (nPointsX) + idx;
// Zero the physical lake edges: top edge on rank 0, bottom edge on the
// last rank. NOTE(review): rank 3 is hard-coded — this assumes exactly
// 4 MPI tasks; confirm against the launcher.
if (idy == 0 && nMyRank == 0) {
aDeviceEnergy[nIndexInMemory] = 0.;
}
else if (idy == nPointsY - 1 && nMyRank == 3)
aDeviceEnergy[nIndexInMemory] = 0.;
else if (idx == 0 || idx == nPointsX - 1)
aDeviceEnergy[nIndexInMemory] = 0.;
else {
// 9-point stencil: leap-frog in time, 4 edge neighbours weighted 1,
// 4 diagonal neighbours weighted 0.25, plus the pebble source kf().
aDeviceEnergy[nIndexInMemory] =
2 * aDeviceEnergyStepCurrent[nIndexInMemory] -
aDeviceEnergyStepOld[nIndexInMemory] +
VSQR * (nTimeStep * nTimeStep) * (
(
aDeviceEnergyStepCurrent[nIndexInMemory - 1] +
aDeviceEnergyStepCurrent[nIndexInMemory + 1] +
aDeviceEnergyStepCurrent[nIndexInMemory + nLakePointsOneAxis] +
aDeviceEnergyStepCurrent[nIndexInMemory - nLakePointsOneAxis] +
0.25 * (
aDeviceEnergyStepCurrent[nIndexInMemory + nLakePointsOneAxis - 1] +
aDeviceEnergyStepCurrent[nIndexInMemory + nLakePointsOneAxis + 1] +
aDeviceEnergyStepCurrent[nIndexInMemory - nLakePointsOneAxis - 1] +
aDeviceEnergyStepCurrent[nIndexInMemory - nLakePointsOneAxis + 1]
) -
5 * aDeviceEnergyStepCurrent[nIndexInMemory]
) /
(nPointSpacing * nPointSpacing) +
kf(aDevicePebbleSizes[nIndexInMemory], nTime)
);
}
}
// NOTE(review): nNumTaskPoints is accepted but never used by this kernel.
}
/*************************************************************************************************************
* FUNCTION: gpu_memory_setup
*
* DESCRIPTION: Allocate memory used by the GPU, and copy over pebble sizes array
* (which does not change during execution).
*
* ARGUMENTS: nNumTaskPointsWithBoundaries - The total number of lake points that this node cares about
* Includes this node's own lake points,
* plus boundaries above / north and below / south
* aPebbleSizes - Array representing pebbles in this node's slice of the lake
*
* RETURNS: None
*
* AUTHOR: ssbehera Subhendu S. Behera
*************************************************************************************************************/
// Allocate the four device buffers sized to this node's slice plus the two
// ghost rows. Aborts on failure so later kernels never see NULL device
// pointers (the original ignored every cudaMalloc return code).
void gpu_memory_setup(int nNumTaskPointsWithBoundaries, double *aPebbleSizes)
{
size_t nBytes = sizeof(double) * nNumTaskPointsWithBoundaries;
if (cudaMalloc((void **)&aDeviceEnergy, nBytes) != cudaSuccess ||
cudaMalloc((void **)&aDeviceEnergyStepOld, nBytes) != cudaSuccess ||
cudaMalloc((void **)&aDeviceEnergyStepCurrent, nBytes) != cudaSuccess ||
cudaMalloc((void **)&aDevicePebbleSizes, nBytes) != cudaSuccess) {
fprintf(stderr, "gpu_memory_setup: cudaMalloc failed\n");
exit(1);
}
// NOTE(review): despite the header comment, aPebbleSizes is NOT copied
// here; run_gpu() performs that host->device copy on every call.
(void)aPebbleSizes;
}
/*************************************************************************************************************
* FUNCTION: gpu_memory_free
*
* DESCRIPTION: Free memory allocated by the GPU
*
* ARGUMENTS: None
*
* RETURNS: None
*
* AUTHOR: ssbehera Subhendu S. Behera
*************************************************************************************************************/
/*
 * Release every device-side buffer allocated by gpu_memory_setup().
 * The frees are independent, so order does not matter.
 */
void gpu_memory_free(void)
{
cudaFree(aDevicePebbleSizes);
cudaFree(aDeviceEnergyStepCurrent);
cudaFree(aDeviceEnergyStepOld);
cudaFree(aDeviceEnergy);
}
/*************************************************************************************************************
* FUNCTION: run_gpu
*
* DESCRIPTION: Simulate the energy changes over time in this node's slice of the lake, on the GPU
*
* ARGUMENTS: aEnergyStepOld - Array representing energy levels at every point in the lake at the previous time step
* aEnergyStepCurrent - Array representing energy levels at every point in the lake at the current time step
* nLakePointsOneAxis - The number of points in the map of the lake (one axis)
* nNumTaskPointsWithBoundaries - The total number of lake points that this node cares about
* Includes this node's own lake points,
* plus boundaries above / north and below / south
* nPointSpacing - The spacing between two points on the lake
* nTime - The current time value within the lake simulation
* nThreads - The number of threads to be used per axis of 2D block (if block is 2D)
* The intention is to have the total number of threads
* equal the total number of lake points that this node is responsible for
*
* RETURNS: None
*
* AUTHORS: ssbehera Subhendu S. Behera
*************************************************************************************************************/
void run_gpu(
double *aEnergyStepOld,
double *aEnergyStepCurrent,
int nLakePointsOneAxis,
int nNumTaskPointsWithBoundaries,
double nPointSpacing,
double nTime,
int nThreads, int nMyRank,
int nNumTasks,
double *aPebbleSizes
)
{
// Declare variables
double nTimeStep;
int nNumTaskPoints;
int nPointsY;
/* Calculate block dimensions
* One drawback of our design decision to split up the lake in slices instead of quadrants
* is that this could result in some unused threads
*/
int blockDimensionX = (nLakePointsOneAxis / nThreads);
if (blockDimensionX == 0)
blockDimensionX++;
int blockDimensionY = nLakePointsOneAxis / nNumTasks / nThreads;
if (blockDimensionY == 0)
blockDimensionY++;
dim3 threadsPerBlock(nThreads, nThreads);
dim3 noOfBlocks(blockDimensionX, blockDimensionY);
// Calculate time step (CFL-style: half the grid spacing)
nTimeStep = nPointSpacing / 2;
// Interior points = total minus the two ghost rows (one above, one below)
nNumTaskPoints = nNumTaskPointsWithBoundaries - (2 * nLakePointsOneAxis);
nPointsY = nLakePointsOneAxis / nNumTasks;
/*
* copy data (host -> device): both time levels plus the pebble map.
* NOTE(review): blocking copies every call; pinned memory + streams would
* let transfers overlap compute if this is per-timestep.
*/
cudaMemcpy(aDeviceEnergyStepOld, aEnergyStepOld, sizeof(double) * nNumTaskPointsWithBoundaries,
cudaMemcpyHostToDevice);
cudaMemcpy(aDeviceEnergyStepCurrent, aEnergyStepCurrent, sizeof(double) * nNumTaskPointsWithBoundaries,
cudaMemcpyHostToDevice);
cudaMemcpy(aDevicePebbleSizes, aPebbleSizes, sizeof(double) * nNumTaskPointsWithBoundaries,
cudaMemcpyHostToDevice);
/*
* Make the kernel call (asynchronous with respect to the host).
*/
//nPointsY = nNumTaskPoints / nLakePointsOneAxis;
evolve<<<noOfBlocks, threadsPerBlock>>>(
aDeviceEnergy,
aDeviceEnergyStepOld,
aDeviceEnergyStepCurrent,
aDevicePebbleSizes,
nTime,
nLakePointsOneAxis,
nPointsY,
nTimeStep,
nPointSpacing,
nMyRank,
nNumTaskPoints
);
/*
* copy the current energy to old energy as cpu is free.
* (Deliberately runs concurrently with the kernel launched above.)
*/
memcpy(aEnergyStepOld, aEnergyStepCurrent, sizeof(double) * nNumTaskPointsWithBoundaries);
/*
* copy the new energy to current energy directly from the device.
* This blocking cudaMemcpy on the default stream also acts as the
* synchronization point for the kernel; only the interior rows (offset by
* one ghost row) are copied back.
*/
cudaMemcpy(aEnergyStepCurrent + nLakePointsOneAxis, aDeviceEnergy + nLakePointsOneAxis, sizeof(double) * nNumTaskPoints,
cudaMemcpyDeviceToHost);
}
|
23,808 | //#include"cuda_helper.cuh"
//
//#include"npp.h"
//#include"nppcore.h"
//#include"nppdefs.h"
//#include"nppi.h"
//#include"npps.h"
//#include"nppversion.h"
//#define NPP_CALL(x){const NppStatus a=(x);if (a!=NPP_SUCCESS){printf("\nNPP Error(err_num=%d) \n", a);cudaDeviceReset();ASSERT(0);}}
//
//#define NUM_STREAMS 4
//typedef unsigned char u8;
//
//#include <npp.h>
//#pragma comment(lib, "cudart.lib")
//#pragma comment(lib, "nppi.lib")
//
//template<class T>
//__host__ void fillHostArray(T* data, const u32 num_elements){
// for (u32 i = 0; i < num_elements; i++)
// data[i] = rand() % (num_elements + 1);
//}
//void _nppTest(){
//
// checkCudaErrors(cudaSetDevice(0));
// const u32 num_bytes = (1024u * 255u*256)*sizeof(Npp8u);
//
// Npp8u *host_src_ptr0;
// Npp8u *host_src_ptr1;
// Npp8u *host_dst_ptr0[NUM_STREAMS];
//
//
// Npp8u *device_src_ptr0[NUM_STREAMS];
// Npp8u *device_src_ptr1[NUM_STREAMS];
// Npp8u *device_dst_ptr0[NUM_STREAMS];
//
//
// checkCudaErrors(cudaHostAlloc(&host_src_ptr0,num_bytes,cudaHostAllocDefault));
// checkCudaErrors(cudaHostAlloc(&host_src_ptr1, num_bytes, cudaHostAllocDefault));
//
//
// cudaStream_t stream[NUM_STREAMS];
// cudaEvent_t start, stop;
// checkCudaErrors(cudaEventCreate(&start));
// checkCudaErrors(cudaEventCreate(&stop));
// for (u32 str = 0; str < NUM_STREAMS; str++){
// checkCudaErrors(cudaHostAlloc(&host_dst_ptr0[str], num_bytes, cudaHostAllocDefault));
// checkCudaErrors(cudaMalloc(&device_src_ptr0[str],num_bytes));
// checkCudaErrors(cudaMalloc(&device_src_ptr1[str], num_bytes));
// checkCudaErrors(cudaMalloc(&device_dst_ptr0[str], num_bytes));
// checkCudaErrors(cudaStreamCreate(&stream[str]));
//
// }
// fillHostArray(host_src_ptr0, num_bytes);
// fillHostArray(host_src_ptr1, num_bytes);
//
// checkCudaErrors(cudaEventRecord(start,0));
//
// for (u32 str = 0; str < NUM_STREAMS; str++){
//
// nppSetStream(stream[str]);
// checkCudaErrors(cudaMemcpyAsync(device_src_ptr0[str], host_src_ptr0,num_bytes,cudaMemcpyHostToDevice,stream[str]));
// checkCudaErrors(cudaMemcpyAsync(device_src_ptr1[str], host_src_ptr1, num_bytes, cudaMemcpyHostToDevice, stream[str]));
// nppsXor_8u(device_src_ptr0[str], device_src_ptr1[str], device_dst_ptr0[str],num_bytes);
//
// }
// for (u32 str = 0; str < NUM_STREAMS; str++){
// nppSetStream(stream[str]);
// checkCudaErrors(cudaMemcpyAsync(host_dst_ptr0[str], device_dst_ptr0[str], num_bytes,cudaMemcpyDeviceToHost,stream[str]));
// checkCudaErrors(cudaStreamSynchronize(stream[str]));
// }
// checkCudaErrors(cudaEventRecord(stop, 0));
//
// float elapasedTime = 0.f;
// checkCudaErrors(cudaEventElapsedTime(&elapasedTime,start,stop));
// printf("> Execute time:%3.1f\n", elapasedTime);
// for (u32 str = 0; str < NUM_STREAMS; str++){
// checkCudaErrors(cudaStreamDestroy(stream[str]));
// checkCudaErrors(cudaFree(device_src_ptr0[str]));
// checkCudaErrors(cudaFree(device_src_ptr1[str]));
// checkCudaErrors(cudaFree(device_dst_ptr0[str]));
// checkCudaErrors(cudaFreeHost(host_dst_ptr0[str]));
// }
// checkCudaErrors(cudaFreeHost(host_src_ptr0));
// checkCudaErrors(cudaFreeHost(host_src_ptr1));
//
//}
//
//int main(int argc, char **argv){
// printf("> %s Starting...\n\n", argv[0]);
// srand((unsigned)time(NULL));
// _nppTest();
// checkCudaErrors(cudaDeviceReset());
// EXIT_SUCCESS;
//}; |
/* SAXPY: y[i] += a * x[i] for every i < n; surplus threads fall through. */
__global__
void saxpy_kernel(int n, float a, float *x, float *y)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    y[idx] += a * x[idx];
}
// Host launcher for saxpy_kernel: y <- a*x + y over n elements.
extern "C" void saxpy(int n ,float a, float *x, float *y)
{
    dim3 griddim, blockdim;
    blockdim = dim3(128,1,1);
    // Bug fix: round the block count UP. The previous n/blockdim.x
    // truncated, silently skipping up to 127 trailing elements whenever
    // n was not a multiple of 128 (the kernel already guards i < n).
    griddim = dim3((n + blockdim.x - 1) / blockdim.x, 1, 1);
    saxpy_kernel<<<griddim,blockdim>>>(n,a,x,y);
}
|
23,810 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define BLOCK_SIZE 1024 // You can change this
//#define NUM_OF_ELEMS 1e6 // You can change this
// Tree reduction intended to leave an extreme value of `input` in output[0].
// NOTE(review): several defects to confirm/fix before trusting results:
//  * tid = tid_x + tid_y*tid_x is almost certainly meant to be
//    tid_x + tid_y * (gridDim.x * blockDim.x) for a 2D flattening;
//  * __syncthreads() only synchronizes WITHIN a block, but the stride loop
//    spans the whole array across many blocks, so the reduction races;
//  * despite the "sum" comments, the compare-and-keep-smaller step computes
//    a MINIMUM, not a sum.
__global__ void total(float * input, float * output, int len)
{
int tid_x = blockIdx.x * blockDim.x + threadIdx.x ;
int tid_y= blockIdx.y * blockDim.y + threadIdx.y;
int tid= tid_x+tid_y*tid_x;
// Traverse reduction tree
for (unsigned int stride = len/2; stride > 0; stride /= 2)
{
// Keep the smaller of the pair (min-reduction step, in place)
if ((tid < stride) && (input[tid] > input[tid + stride]) ){
input[tid] = input[tid + stride];}
__syncthreads();
}
// Thread 0 publishes the reduced value
if (tid == 0)
{
output[0] = input[0];
}
}
// Driver: fills 2^24 random floats, runs the `total` reduction kernel, and
// times it with CUDA events.
// NOTE(review): malloc/cudaMalloc/cudaMemcpy return codes are unchecked, the
// events are never destroyed, and the printed label says "Sum" although the
// kernel (see notes above it) performs a min-style reduction.
int main(int argc, char ** argv)
{
float * hostInput; // The input 1D vector
float * hostOutput; // The output vector
float * deviceInput;
float * deviceOutput;
int NUM_OF_ELEMS=pow(2,24);
cudaEvent_t start=0;
cudaEvent_t stop=0;
float timef=0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int numInputElements = NUM_OF_ELEMS; // number of elements in the input list
//int numOutputElements; // number of elements in the output list
hostInput = (float *) malloc(sizeof(float) * numInputElements);
srand(time(NULL));
for (int i=0; i < NUM_OF_ELEMS; i++)
{
hostInput[i] = rand();
}
printf("host %f and %f and %f \n", hostInput[10] , hostInput[20] , hostInput[30]);
hostOutput = (float*) malloc(sizeof(float));
//@@ Allocate GPU memory here
cudaMalloc((void **)&deviceInput, numInputElements * sizeof(float));
cudaMalloc((void **)&deviceOutput, sizeof(float));
// Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements * sizeof(float), cudaMemcpyHostToDevice);
// Initialize the grid and block dimensions here
// (8192 * 2 blocks of 1024 threads = 2^24 threads, one per element)
dim3 DimGrid(8192, 2, 1);
dim3 DimBlock(BLOCK_SIZE, 1, 1);
// Launch the GPU Kernel here, bracketed by timing events
cudaEventRecord(start,0);
total<<<DimGrid, DimBlock>>>(deviceInput, deviceOutput, numInputElements);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
// Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, sizeof(float), cudaMemcpyDeviceToHost);
printf("Reduced Sum from GPU = %f \n", hostOutput[0]);
cudaEventElapsedTime(&timef,start,stop);
printf("time of the Kernel %f \n",timef );
// Free the GPU memory here
cudaFree(deviceInput);
cudaFree(deviceOutput);
free(hostInput);
free(hostOutput);
return 0;
}
|
23,811 | /* Monte Carlo simulation of the Ising model using CUDA */
/* Author: Jorge Fernandez de Cossio Diaz */
/* March, 2019 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cassert>
#include <cuda.h>
#include <curand_kernel.h>
//#include <curand.h>
#define RANDSEED 5 // random seed
/* Linear dimension of square grid. The block-size in my laptop is 1024.
Therefore we set L=32 so that there are a total of L^2 = 1024 spins. */
#define L 32 // linear dimension of square grid.
#define N (L*L) // total number of spins
#define ITERATIONS 10001 // number of iterations
/* Flattened linear index of the current block within the 3-D grid. */
__device__ int globalBlockIdx() {
    int planeSize = gridDim.x * gridDim.y;
    return blockIdx.z * planeSize + blockIdx.y * gridDim.x + blockIdx.x;
}
/* Flattened linear index of the current thread within its own block. */
__device__ int threadIdxInBlock() {
    int planeSize = blockDim.x * blockDim.y;
    return threadIdx.z * planeSize + threadIdx.y * blockDim.x + threadIdx.x;
}
/* Number of threads contained in a single block. */
__device__ int blockVolume() {
    int volume = blockDim.x;
    volume *= blockDim.y;
    volume *= blockDim.z;
    return volume;
}
/* Global linear index of the current thread across the whole grid. */
__device__ int globalThreadIdx() {
    int base = globalBlockIdx() * blockVolume();
    return base + threadIdxInBlock();
}
/* Sets up random number generation in each thread: one curandState per
   thread, same seed but distinct sequence numbers, so each thread draws an
   independent, reproducible stream. */
__global__ void initialize_rand(curandState *rngState) {
// Each thread gets same seed, a different sequence number, no offset
int idx = globalThreadIdx();
//assert(0 <= idx && idx < N);
curand_init(RANDSEED, idx, 0, rngState + idx);
}
/* Random uniform real. Note: curand_uniform returns values in (0, 1] —
   it excludes 0.0 and includes 1.0. */
__device__ float randreal(curandState *rngState) {
return curand_uniform(rngState + globalThreadIdx());
}
/* Returns a random -1 or +1: rounds a uniform draw to 0 or 1, then maps
   {0, 1} -> {-1, +1}. */
__device__ short randspin(curandState *rngState) {
return 2 * (short)roundf(randreal(rngState)) - 1;
}
/* Wrap Cartesian coordinates (x, y) onto the periodic L x L grid and
   return the corresponding row-major linear index in [0, N). */
__host__ __device__ short linear_index(int x, int y) {
    int wx = ((x % L) + L) % L;   // wrap into [0, L)
    int wy = ((y % L) + L) % L;
    return wx + L * wy;
}
/* Read the spin at periodic Cartesian coordinates (x, y). */
__host__ __device__ short get(short *spins, int x, int y) {
return spins[linear_index(x, y)];
}
/* Write `state` to the spin at periodic coordinates (x, y); returns the
   written state for chaining. */
__host__ __device__ short set(short *spins, int x, int y, short state) {
spins[linear_index(x, y)] = state;
return state;
}
/* Initializes the spins to random +/-1 states, one spin per thread. */
__global__ void initialize_spins(short *spins, curandState *rngState) {
spins[globalThreadIdx()] = randspin(rngState);
}
/* Sum of the four nearest-neighbour spins (periodic boundaries) of the
   lattice site owned by this thread, (threadIdx.x, threadIdx.y). */
__device__ short neighbor_sum(short *spins) {
    int x = threadIdx.x;
    int y = threadIdx.y;
    return get(spins, x - 1, y) + get(spins, x + 1, y) +
           get(spins, x, y - 1) + get(spins, x, y + 1);
}
/* Updates all spins synchronously with a Metropolis-style rule at inverse
   temperature beta (one thread per spin, single block). */
__device__ void metropolis(short *spins, curandState *rngState, float beta) {
int idx = globalThreadIdx();
short state = spins[idx];
//assert(state == 1 || state == -1);
// Metropolis update rule: flipping spin i costs deltaE = 2*s_i*sum(neighbours)
float deltaE = 2.0f * state * neighbor_sum(spins);
//if (idx == 10) { printf("thread %i, beta %f, deltaE %f, expf(-beta * deltaE) %f\n", idx, beta, deltaE, expf(-beta * deltaE)); }
//if (deltaE <= 0) { assert(randreal(rngState) < expf(-beta * deltaE)); }
float u = randreal(rngState);
// NOTE(review): the extra `u < 0.5` factor caps the acceptance probability
// at 1/2 (a "lazy" chain). Presumably this damps the oscillation artefacts
// of the synchronous all-spin update below, which on its own does not
// satisfy detailed balance — confirm this is intentional.
if (u < 0.5 && u < expf(-beta * deltaE)) {
state = -state;
}
// synchronous update
__syncthreads(); // wait for all threads to compute new state
spins[idx] = state;
__syncthreads(); // wait for all threads to update spins
}
/* Copies spin states between two arrays in device memory, one element per
   thread, with a barrier so all copies complete before anyone proceeds. */
__device__ void spinsCpy(short *to, short *from) {
int idx = globalThreadIdx();
to[idx] = from[idx];
__syncthreads();
}
/* Simulates the system of spins in shared memory. Must be launched as a
   SINGLE block with one thread per spin (blockDim = L x L), since the
   __shared__ array spans the whole lattice and metropolis() relies on
   block-wide __syncthreads(). */
__global__ void simulate(short *spinsGlob, curandState *rngState, float beta) {
__shared__ short spinsShared[N];
// copy spins from global to shared memory
spinsCpy(spinsShared, spinsGlob);
// simulate ITERATIONS synchronous sweeps
for (int iter = 0; iter < ITERATIONS; ++iter) {
metropolis(spinsShared, rngState, beta);
}
// copy spins back to global memory
spinsCpy(spinsGlob, spinsShared);
}
/* Magnetization per site: mean of all N spins (each +1 or -1), in [-1, 1].
   A short accumulator suffices since |sum| <= N = 1024. */
__host__ __device__ float magnetization(short* spins) {
    short total = 0;
    for (int k = 0; k < N; ++k) {
        total += spins[k];
    }
    return total / (float)N;
}
/* Prints the grid of spins: '+' for +1, '-' for -1, and the raw value for
   anything else (which would indicate corruption). */
__host__ __device__ void print_spins(short *spins) {
for (int x = 0; x < L; ++x) {
for (int y = 0; y < L; ++y) {
short s = get(spins, x, y);
if (s == 1) {
printf("+ ");
} else if (s == -1) {
printf("- ");
} else {
printf("%i ", s);
}
}
printf("\n");
}
}
// Driver: initializes per-thread RNG and a random spin configuration, then
// sweeps beta from 0 to 1 (annealing-style: each temperature starts from the
// previous final state), printing |magnetization| at each beta.
int main(void) {
printf("Simulating %i spins, on a square grid of length %i\n", N, L);
// random setup: one curandState per spin/thread
curandState *rngStatesDev;
cudaMalloc(&rngStatesDev, N * sizeof(curandState));
dim3 blockSize(L,L);
initialize_rand<<<1, blockSize>>>(rngStatesDev);
// allocate managed memory for spins so the host can read them directly
short *spins;
cudaMallocManaged(&spins, N * sizeof(short));
// initialize spins to random configurations
initialize_spins<<<1, blockSize>>>(spins, rngStatesDev);
cudaDeviceSynchronize();
// simulate
printf("beta\tabsolute magnetization\n");
for (float beta = 0.0f; beta <= 1.0f; beta += 0.01f) {
simulate<<<1, blockSize>>>(spins, rngStatesDev, beta);
cudaDeviceSynchronize();
float m = magnetization(spins);
// Bug fix: use fabsf() for the float magnetization. Plain abs() can bind
// to the integer overload (depending on headers/host compiler), which
// truncates every |m| < 1 to 0 and makes the whole sweep print 0.
printf("%f\t%f\n", beta, fabsf(m));
fflush(stdout);
}
cudaFree(rngStatesDev);
cudaFree(spins);
return 0;
}
23,812 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#define THREADS 1024
// Adds 1.0f to one float element per thread of a cudaMalloc3D allocation.
// NOTE(review): the pitch returned by cudaMalloc3D is ignored and the buffer
// is indexed as if it were dense. That is only correct when the allocated
// pitch equals the requested row width (sizeof(float)*size*THREADS here,
// large and alignment-friendly, so presumably unpadded) — confirm, or index
// through pitch properly.
__global__ void kernel(struct cudaPitchedPtr pitchedPointer){
int id;
float *d;
id = blockDim.x*blockIdx.x + threadIdx.x;
d = (float*)pitchedPointer.ptr;
d[id] += 1.0f;
}
// Driver: allocates a size^3 * THREADS float volume with cudaMalloc3D,
// zero-fills it, runs the increment kernel `ite` times, and verifies every
// element equals `ite`.
// NOTE(review): both cudaMemcpy calls treat the pitched allocation as dense
// linear memory — valid only if the returned pitch equals extent.width
// (see the kernel note above). The float-vs-int comparison below is exact
// because the values are small whole numbers (<= 100).
int main(){
int i,ite = 100,size = 40;
cudaError_t res;
struct cudaExtent extent;
struct cudaPitchedPtr pitchedPointer;
float* ans;
ans = (float*)malloc(sizeof(float)*size*size*size*THREADS);
for(i = 0 ; i < size*size*size*THREADS ; i ++){
ans[i] = 0.0f;
}
// extent: rows of size*THREADS floats, size columns, size slices
extent.width = sizeof(float)*size*THREADS;
extent.height = size;
extent.depth = size;
res = cudaMalloc3D(&pitchedPointer,extent);
if(res != cudaSuccess){
printf("Oh...(%d)\n",res);
exit(-1);
}
printf("Pointer : %p\n",pitchedPointer.ptr);
res = cudaMemcpy(pitchedPointer.ptr,ans,sizeof(float)*size*size*size*THREADS,cudaMemcpyHostToDevice);
if(res != cudaSuccess){
printf("Oh...(%d)\n",res);
exit(-1);
}
// one thread per float: size^3 blocks of THREADS threads, launched ite times
dim3 threads(THREADS,1,1);
dim3 blocks(size*size*size,1,1);
for(i = 0 ; i < ite ; i ++){
kernel<<<blocks,threads>>>(pitchedPointer);
}
res = cudaMemcpy(ans,pitchedPointer.ptr,sizeof(float)*size*size*size*THREADS,cudaMemcpyDeviceToHost);
if(res != cudaSuccess){
printf("Oops ...\n");
exit(-1);
}
for(i = 0 ; i < size*size*size*THREADS ; i ++){
if(ans[i] != ite){
printf("ans[%d] == %f\n",i,ans[i]);
exit(-1);
}
}
printf("Elements : %d\n",size*size*size*THREADS);
// keep the allocation alive briefly (presumably to observe it in a
// profiler/monitor) before freeing — TODO confirm intent
sleep(10);
res = cudaFree(pitchedPointer.ptr);
if(res != cudaSuccess){
printf("Oops ...\n");
exit(-1);
}
return 0;
}
|
23,813 | #include <stdio.h>
#include <cuda.h>
#define N 500
#define BLOCKSIZE 64
#define ELEPERTHREAD 20
__device__ const unsigned delta = ELEPERTHREAD / 5;
// Work-donation demo: threads with more than `delta` surplus elements above
// the average push work items into a shared "donation box" that all threads
// then drain.
// NOTE(review): this looks like teaching code with several deliberate or
// latent defects — confirm before reuse:
//  * `id == 0` is a GLOBAL id, so sum/avg/donationboxindex are only ever
//    initialised/written in block 0; every other block reads its own
//    uninitialised __shared__ copies;
//  * `surplus = nelements[id] - avg` is unsigned and underflows to a huge
//    value whenever nelements[id] < avg, which then passes `surplus > delta`;
//  * donationbox has only N slots but indices can reach N*ELEPERTHREAD;
//  * the last block's threads with id >= N read nelements out of bounds
//    (N = 500 is not a multiple of BLOCKSIZE = 64).
__global__ void k1(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned sum;
__shared__ unsigned avg;
__shared__ unsigned donationbox[N], donationboxindex;
if (id == 0) {
sum = 0;
donationboxindex = 0;
}
__syncthreads();
// compute sum.
atomicAdd(&sum, nelements[id]);
__syncthreads();
// compute average.
if (id == 0) avg = sum / blockDim.x;
__syncthreads();
// check if I need to donate.
unsigned surplus = nelements[id] - avg;
if (surplus > delta) {
// donate: reserve `surplus` slots, then fill them.
unsigned index = atomicAdd(&donationboxindex, surplus);
for (unsigned ii = 0; ii < surplus; ++ii) {
donationbox[index + ii] = id; // some work.
}
}
// process.
// some processing here.
//__syncthreads(); // this is wrong.
// empty donation box: claim items by atomically decrementing the index.
while (donationboxindex < N * ELEPERTHREAD) {
unsigned index = atomicDec(&donationboxindex, N * ELEPERTHREAD + blockDim.x); // to ensure that wrap-around does not cause confusion.
if (index < N * ELEPERTHREAD) {
unsigned work = donationbox[index];
// process with work.
}
}
}
// Driver for k1: random per-thread element counts, copied to the device,
// one grid covering all N ids.
int main() {
unsigned hnelements[N];
// Random counts in [0, ELEPERTHREAD); rand() is deliberately unseeded,
// so runs are reproducible.
for (unsigned ii = 0; ii < N; ++ii) {
hnelements[ii] = rand() % ELEPERTHREAD;
}
unsigned *nelements;
cudaMalloc(&nelements, N * sizeof(unsigned));
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
// Round the block count up so every id in [0, N) is covered.
unsigned nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
k1<<<nblocks, BLOCKSIZE>>>(nelements);
cudaDeviceSynchronize();
// Bug fix: release the device buffer (it was previously leaked).
cudaFree(nelements);
//k2<<<1, 1>>>();
//cudaDeviceSynchronize();
return 0;
}
|
23,814 | #include "median_tree_node.cuh"
|
23,815 | #include "includes.h"
// Block-partitioned histogram: each block owns a contiguous range of
// (bins - cpu_bins)/gridDim.x bins starting at cpu_bins (the first cpu_bins
// bins are handled on the host), accumulates votes for its range in dynamic
// shared memory, then folds them into the global histogram.
// Launch with at least bins_per_wg * sizeof(unsigned) dynamic shared memory.
// NOTE(review): (data[i] * bins) >> 12 assumes 12-bit input samples.
__global__ void Histogram_kernel(int size, int bins, int cpu_bins, unsigned int *data, unsigned int *histo) {
extern __shared__ unsigned int l_mem[];
unsigned int* l_histo = l_mem;
// Block and thread index
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int bD = blockDim.x;
const int gD = gridDim.x;
// Output partition: this block's half-open bin range [my_bins_start, my_bins_end)
int bins_per_wg = (bins - cpu_bins) / gD;
int my_bins_start = bx * bins_per_wg + cpu_bins;
int my_bins_end = my_bins_start + bins_per_wg;
// Constants for read access: every block scans the whole input
const int begin = tx;
const int end = size;
const int step = bD;
// Sub-histograms initialization (shared memory is uninitialised)
for(int pos = tx; pos < bins_per_wg; pos += bD) {
l_histo[pos] = 0;
}
__syncthreads(); // Intra-block synchronization
// Main loop
for(int i = begin; i < end; i += step) {
// Global memory read
unsigned int d = ((data[i] * bins) >> 12);
if(d >= my_bins_start && d < my_bins_end) {
// Atomic vote in shared memory
atomicAdd(&l_histo[d - my_bins_start], 1);
}
}
__syncthreads(); // Intra-block synchronization
// Merge and write to global memory.
// NOTE(review): this inner loop's condition makes it execute exactly once
// (sum == l_histo[pos]); it looks like a leftover from a multi-sub-histogram
// variant.
for(int pos = tx; pos < bins_per_wg; pos += bD) {
unsigned int sum = 0;
for(int base = 0; base < (bins_per_wg); base += (bins_per_wg))
sum += l_histo[base + pos];
// Plain (non-atomic) read-modify-write: safe only because each block owns
// a disjoint bin range, so no two blocks touch the same histo entry.
histo[pos + my_bins_start] += sum;
}
}
23,816 | #include "includes.h"
// Forward-warps an equirectangular point cloud into an output image with
// per-quad inverse bilinear resampling and depth-based occlusion tests.
// Input layout: (h x w) pixels with 3 floats each in points3d_polar
// (index 0: depth, indices 1/2: normalized target x'/y' in [-1, 1]).
// Output: render (packed 0xRRGGBB ints) and selection (confidence ~ 1/|det|),
// both oh x ow. Grid is TILE_DIM wide with BLOCK_ROWS rows per thread
// (TILE_DIM/BLOCK_ROWS come from includes.h).
// NOTE(review): neighbours (iw+1, ih+1) are read without clamping at the
// right/bottom edge, relying on staying inside the buffer — confirm callers
// guarantee this.
__global__ void render_final(float *points3d_polar, float * selection, float * depth_render, int * img, int * render, int oh, int ow)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int w = gridDim.x * TILE_DIM;
int h = w /2;
int maxsize = oh * ow;
for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
{
int iw = x;
int ih = y + j;
// Target pixel of this source point, and the fractional offsets of the
// quad spanned with its right/bottom/diagonal neighbours.
int tx = round((points3d_polar[(ih * w + iw) * 3 + 1] + 1)/2 * ow - 0.5);
int ty = round((points3d_polar[(ih * w + iw) * 3 + 2] + 1)/2 * oh - 0.5);
float tx_offset = ((points3d_polar[(ih * w + iw) * 3 + 1] + 1)/2 * ow - 0.5);
float ty_offset = ((points3d_polar[(ih * w + iw) * 3 + 2] + 1)/2 * oh - 0.5);
float tx00 = 0;
float ty00 = 0;
float tx01 = ((points3d_polar[(ih * w + iw + 1) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset;
float ty01 = ((points3d_polar[(ih * w + iw + 1) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset;
float tx10 = ((points3d_polar[((ih + 1) * w + iw) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset;
float ty10 = ((points3d_polar[((ih + 1) * w + iw) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset;
float tx11 = ((points3d_polar[((ih+1) * w + iw + 1) * 3 + 1] + 1)/2 * ow - 0.5) - tx_offset;
float ty11 = ((points3d_polar[((ih+1) * w + iw + 1) * 3 + 2] + 1)/2 * oh - 0.5) - ty_offset;
// 2x2 linear approximation of the quad mapping, then its inverse
// (det padded by 1e-10 to avoid division by zero for degenerate quads).
float t00 = 0 * (float)tx00 + (float)tx01 * -1.0/3 + (float)tx10 * 2.0/3 + (float)tx11 * 1.0/3;
float t01 = 0 * (float)ty00 + (float)ty01 * -1.0/3 + (float)ty10 * 2.0/3 + (float)ty11 * 1.0/3;
float t10 = 0 * (float)tx00 + (float)tx01 * 2.0/3 + (float)tx10 * -1.0/3 + (float)tx11 * 1.0/3;
float t11 = 0 * (float)ty00 + (float)ty01 * 2.0/3 + (float)ty10 * -1.0/3 + (float)ty11 * 1.0/3;
float det = t00 * t11 - t01 * t10 + 1e-10;
//printf("%f %f %f %f %f\n", t00, t01, t10, t11, det);
float it00, it01, it10, it11;
it00 = t11/det;
it01 = -t01/det;
it10 = -t10/det;
it11 = t00/det;
//printf("inverse %f %f %f %f\n", it00, it01, it10, it11);
// Occlusion test: quantized source depths vs the pre-rendered depth buffer
// at the four quad corners (12800/128 == 100, matching the 100x scale).
int this_depth = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]);
int delta00 = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]) - (int)(100 * depth_render[(ty * ow + tx)]);
int delta01 = (int)(12800/128 * points3d_polar[(ih * w + iw + 1) * 3 + 0]) - (int)(100 * depth_render[(ty * ow + tx + 1)]);
int delta10 = (int)(12800/128 * points3d_polar[((ih + 1) * w + iw) * 3 + 0]) - (int)(100 * depth_render[((ty+1) * ow + tx)]);
int delta11 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw + 1) * 3 + 0]) - (int)(100 * depth_render[((ty+1) * ow + tx + 1)]);
int mindelta = min(min(delta00, delta01), min(delta10, delta11));
int maxdelta = max(max(delta00, delta01), max(delta10, delta11));
int depth00 = (int)(12800/128 * points3d_polar[(ih * w + iw) * 3 + 0]);
int depth01 = (int)(12800/128 * points3d_polar[(ih * w + iw + 1) * 3 + 0]);
int depth10 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw) * 3 + 0]);
int depth11 = (int)(12800/128 * points3d_polar[((ih+1) * w + iw+1) * 3 + 0]);
int max_depth = max(max(depth00, depth10), max(depth01, depth11));
int min_depth = min(min(depth00, depth10), min(depth01, depth11));
int delta_depth = max_depth - min_depth;
// Output-space bounding box of the warped quad
int txmin = floor(tx_offset + min(min(tx00, tx11), min(tx01, tx10)));
int txmax = ceil(tx_offset + max(max(tx00, tx11), max(tx01, tx10)));
int tymin = floor(ty_offset + min(min(ty00, ty11), min(ty01, ty10)));
int tymax = ceil(ty_offset + max(max(ty00, ty11), max(ty01, ty10)));
float newx, newy;
int r,g,b;
int itx, ity;
//render[(ty * ow + tx)] = img[ih * w + iw];
//selection[(ty * ow + tx)] = 1.0;
// Depth-proportional tolerances (floored at 10 quantized units); quads
// spanning a depth discontinuity or outside the vertical band are skipped.
float tolerance = 0.1 * this_depth > 10? 0.1 * this_depth : 10;
float tolerance2 = 0.05 * max_depth > 10? 0.05 * max_depth: 10;
float flank = 0.01;
if ((delta_depth < tolerance2) && (y > 1 * h/8) && (y < (h*7)/8))
if (((mindelta > - tolerance) && (maxdelta < tolerance)) && (this_depth < 10000)) {
// Reject implausibly large warped quads (area/extent caps)
if (((txmax - txmin) * (tymax - tymin) < 1600) && (txmax - txmin < 40) && (tymax - tymin < 40))
{
for (itx = txmin; itx < txmax; itx ++)
for (ity = tymin; ity < tymax; ity ++)
{ if (( 0 <= itx) && (itx < ow) && ( 0 <= ity) && (ity < oh))
{
// Map the output pixel back into the quad's unit square
newx = (itx - tx_offset) * it00 + it10 * (ity - ty_offset);
newy = (itx - tx_offset) * it01 + it11 * (ity - ty_offset);
//printf("%f %f\n", newx, newy);
if ((newx > -flank) && (newx < 1 + flank) && (newy > -flank) && (newy < 1 + flank))
{
if (newx < 0) newx = 0;
if (newy < 0) newy = 0;
if (newx > 1) newx = 1;
if (newy > 1) newy = 1;
// Bilinear blend of the four corner colors, per packed channel
r = img[(ih * w + iw)] / (256*256) * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] / (256*256) * (1-newx) * (newy) + img[((ih+1) * w + iw)] / (256*256) * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] / (256*256) * newx * newy;
g = img[(ih * w + iw)] / 256 % 256 * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] / 256 % 256 * (1-newx) * (newy) + img[((ih+1) * w + iw)] / 256 % 256 * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] / 256 % 256 * newx * newy;
b = img[(ih * w + iw)] % 256 * (1-newx) * (1-newy) + img[(ih * w + iw + 1)] % 256 * (1-newx) * (newy) + img[((ih+1) * w + iw)] % 256 * (newx) * (1-newy) + img[((ih+1) * w + iw + 1)] % 256 * newx * newy ;
if (r > 255) r = 255;
if (g > 255) g = 255;
if (b > 255) b = 255;
if ((ity * ow + itx > 0) && (ity * ow + itx < maxsize)) {
render[(ity * ow + itx)] = r * 256 * 256 + g * 256 + b;
selection[(ity * ow + itx)] = 1.0 / abs(det);
}
}
}
}
}
}
}
}
23,817 |
/************************************************************************
Source Code : vectorModel.cu
Program : GPU as SIMD Processor using Vector Programming model
Objective : To demonstrate that better bandwidth can be achieved if each
thread handles more than one element using GPU as a 32-way SIMD
processor. This Program measures the bandwidth of global memory
for simple initialization kernel operation [a(i) = value].
Input : None
Output : Bandwidth achieved and timing (average)
Modified : Aug 2011
Author : RarchK
*******************************************************************************/
#include <stdio.h>
#include <cuda.h>
#define ARRAY_SIZE 1280000
#define BLOCK_SIZE 32
#define FACTOR 4
#define NTIMES 10
#define HLINE "--------------------------------------------------------------\n"
void printResults();
void printDeviceDetails();
void cudaSafeMalloc(void ** , size_t );
void CudaGetDeviceProperties(cudaDeviceProp *, int);
void CudaGetDevice(int *);
void checkCudaErrors();
float avgTime[2] = {0};
char *label[] = {"Normal ","Vector Model"};
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Kernel for initializing the array - straightforward
///////////////////////////////////////////////////////////////////////////////////////////////////////
// One element per thread: each thread stores `value` into its own slot.
__global__ void initializationNormally(float *array, float value, int size)
{
        // Flat global index across the 1D grid.
        int gid = blockIdx.x * blockDim.x + threadIdx.x;
        // Guard the tail when the grid overshoots the array length.
        if (gid < size)
                array[gid] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Kernel for initializing the array using Vector Model
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Vector-model initialization: each block owns a contiguous span of
// blockDim.x * FACTOR elements; thread t writes elements t, t+blockDim.x,
// t+2*blockDim.x, ... within its block's span (keeps accesses coalesced).
__global__ void initializationWithVectorModel(float *array, float value, int size)
{
        int idx = threadIdx.x + blockIdx.x * blockDim.x * FACTOR;
        // BUG FIX: bounds-check every element, not just the first. The
        // original single `if (idx < size)` guard let iterations i > 0 write
        // past the end of the array whenever `size` was not a multiple of
        // blockDim.x * FACTOR.
        for (int i = 0; i < FACTOR; i++)
        {
                int elem = idx + i * blockDim.x;
                if (elem < size)
                        array[elem] = value;
        }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Main function to time both the kernels
///////////////////////////////////////////////////////////////////////////////////////////////////////
/* Benchmarks the two initialization kernels NTIMES each, averages all runs
 * but the first (warm-up), then prints the bandwidth/timing table. */
int main()
{
        float *d_array;
        size_t size = ARRAY_SIZE * sizeof(float);
        int i,j;
        float elapsedTimes[2][NTIMES];
        cudaEvent_t start,stop;
        /* events used for timing the kernels */
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaSafeMalloc((void **) &d_array, size);
        /* Ceiling division for both launch configurations. The vector-model
         * kernel covers BLOCK_SIZE * FACTOR elements per block.
         * BUG FIX: the remainder test for gridSize2 used BLOCK_SIZE instead
         * of BLOCK_SIZE * FACTOR, giving a wrong grid for non-multiple sizes. */
        int gridSize1 = ARRAY_SIZE / BLOCK_SIZE;
        int gridSize2 = ARRAY_SIZE / (BLOCK_SIZE * FACTOR);
        if(ARRAY_SIZE % BLOCK_SIZE != 0) gridSize1 += 1;
        if(ARRAY_SIZE % (BLOCK_SIZE * FACTOR) != 0) gridSize2 += 1;
        dim3 grid1, grid2, block;
        block.x = BLOCK_SIZE;
        grid1.x = gridSize1;
        grid2.x = gridSize2;
        for(i=0; i<NTIMES; i++)
        {
                /* time the one-element-per-thread kernel */
                cudaEventRecord(start,0);
                initializationNormally<<<grid1, block>>>(d_array, 1, ARRAY_SIZE);
                cudaEventRecord(stop,0);
                cudaEventSynchronize(stop);
                cudaEventElapsedTime(&elapsedTimes[0][i],start,stop);
                checkCudaErrors();
                /* time the FACTOR-elements-per-thread (vector model) kernel */
                cudaEventRecord(start,0);
                initializationWithVectorModel<<<grid2, block>>>(d_array, 1, ARRAY_SIZE);
                cudaEventRecord(stop,0);
                cudaEventSynchronize(stop);
                cudaEventElapsedTime(&elapsedTimes[1][i],start,stop);
                checkCudaErrors();
        }
        /* average, skipping iteration 0 (warm-up) */
        for(i=0; i<2; i++)
        {
                for(j=1; j<NTIMES; j++)
                {
                        avgTime[i] += elapsedTimes[i][j];
                }
                avgTime[i] = avgTime[i]/(NTIMES-1);
        }
        printResults();
        /* release GPU resources (BUG FIX: d_array was never freed) */
        cudaFree(d_array);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host Function to print the results
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
/* Prints the benchmark banner, device summary, configuration, and the
 * bandwidth/average-time table for both kernels. */
void printResults()
{
        printf("\n\n");
        printf(HLINE);
        printf("VECTOR MODEL DEMONSTRATION\n");
        printf(HLINE);
        printDeviceDetails();
        printf(HLINE);
        printf("Array Size = %llu\n",(unsigned long long)ARRAY_SIZE);
        printf("Block Size = %d\n",(int)BLOCK_SIZE);
        printf("No. of data elements per thread = %d\n",(int)FACTOR);
        printf(HLINE);
        printf("Initialization Rate (GB/s) Avg time \n");
        /* bytes / ms * 1e-6 == GB/s */
        int row = 0;
        while (row < 2)
        {
                printf("%s%11.4f %11.4f \n", label[row], 1.0E-06 * (ARRAY_SIZE * sizeof(float))/avgTime[row],avgTime[row]);
                row++;
        }
        printf(HLINE);
}
/* Queries the current CUDA device and prints a short capability summary.
 * (Many more fields are available in cudaDeviceProp if needed.) */
void printDeviceDetails()
{
        cudaDeviceProp prop;
        int deviceId;
        CudaGetDevice(&deviceId);
        CudaGetDeviceProperties(&prop, deviceId);
        printf("Device Name is %s\n", prop.name);
        printf("Compute Capability of this device is %d.%d\n",prop.major,prop.minor);
        printf("Total Global Memory available = %f GB\n",(float)prop.totalGlobalMem * 1.0E-09);
        printf("Warp Size in threads = %d\n",prop.warpSize);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Wrapper Fuctions for error checking
//////////////////////////////////////////////////////////////////////////////////////////////////////////
/* Allocates `size` bytes of device memory into *devicePtr.
 * On failure, prints the CUDA error and aborts the process. */
void cudaSafeMalloc(void ** devicePtr, size_t size)
{
        /* Check the call's own return code instead of the sticky last-error,
         * which could report an unrelated earlier failure. */
        cudaError_t error = cudaMalloc(devicePtr, size);
        if(error != cudaSuccess)
        {
                printf("Cuda Error: %s\n",cudaGetErrorString(error));
                cudaDeviceReset(); /* cudaThreadExit() is deprecated; cudaDeviceReset() replaces it */
                exit(-1);
        }
}
/* Fetches the properties of device `deviceId` into *devicePropPtr.
 * On failure, prints the CUDA error and aborts the process. */
void CudaGetDeviceProperties(cudaDeviceProp *devicePropPtr, int deviceId)
{
        /* check this call's own return, not the sticky last-error */
        cudaError_t error = cudaGetDeviceProperties(devicePropPtr, deviceId);
        if(error != cudaSuccess)
        {
                printf("Cuda Error: %s\n",cudaGetErrorString(error));
                cudaDeviceReset(); /* replaces deprecated cudaThreadExit() */
                exit(-1);
        }
}
/* Stores the id of the currently active CUDA device into *deviceIdPtr.
 * On failure, prints the CUDA error and aborts the process. */
void CudaGetDevice(int *deviceIdPtr)
{
        /* check this call's own return, not the sticky last-error */
        cudaError_t error = cudaGetDevice(deviceIdPtr);
        if(error != cudaSuccess)
        {
                printf("Cuda Error: %s\n",cudaGetErrorString(error));
                cudaDeviceReset(); /* replaces deprecated cudaThreadExit() */
                exit(-1);
        }
}
/* Aborts with a message if a previous kernel launch or API call left an
 * error behind. Note cudaGetLastError() also clears the error state. */
void checkCudaErrors()
{
        cudaError_t error = cudaGetLastError();
        if(error != cudaSuccess)
        {
                printf("Cuda Error: %s\n",cudaGetErrorString(error));
                cudaDeviceReset(); /* replaces deprecated cudaThreadExit() */
                exit(-1);
        }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
23,818 | #include "includes.h"
/*
 * TO RUN IT:
 * $ export LD_LIBRARY_PATH=/usr/local/cuda/lib
 * $ export PATH=$PATH:/usr/local/cuda/bin
 * $ nvcc -o matrixTrans matrixTrans.cu -O2 -lc -lm
 * $ ./matrixTrans n
 */
/*
 * UNSIGNED INT --> Integer type for whole numbers (no decimal point),
 * restricted to non-negative values. Its width is platform-dependent:
 * the standard guarantees at least 16 bits (0..65535), and on current
 * platforms it is commonly 32 bits (0..4294967295).
 *
 * SIZE_T --> an unsigned integer type guaranteed to support the longest
 * object for the platform you use. It is also the result type of the
 * sizeof operator; sizeof returns the size of a type in bytes, so in
 * both contexts here a size_t is what gets passed to malloc.
 */
#define NUMBER_THREADS 32           /* threads per block dimension used by the host launch code */
float elapsed_time_ms;              /* kernel timing result, filled in by host code elsewhere in the file */
int gpudev = 1;                     /* index of the CUDA device to use */
char *dev_mat_in, *dev_mat_out;     /* device buffers: input matrix and its transpose */
//---------------------------------------------------------------------------
// Naive matrix transpose: mat_out[col][row] = mat_in[row][col] for a
// rows x cols char matrix, one thread per input element (2D launch).
__global__ void kernelTransposeMatrix(const char *mat_in, char *mat_out, unsigned int rows, unsigned int cols){
	unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
	unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
	// Ignore threads that fall outside the matrix.
	if (col >= cols || row >= rows)
		return;
	mat_out[col * rows + row] = mat_in[row * cols + col];
}
23,819 | #include "includes.h"
// Accumulates velocity auto-correlation contributions for every atom pair.
// One thread handles one (atom1, atom2) pair; for each stored history step k
// it adds v(atom1, now) * v(atom2, k) into the output slot for the wrapped
// lag index, separately for the x, y and z components.
__global__ void gpu_find_vac( const int num_atoms, const int correlation_step, const int num_correlation_steps, const float* g_vx, const float* g_vy, const float* g_vz, const float* g_vx_all, const float* g_vy_all, const float* g_vz_all, float* g_vac_x, float* g_vac_y, float* g_vac_z)
{
  const int pair_count = num_atoms * num_atoms;
  const int pair = blockIdx.x * blockDim.x + threadIdx.x;
  if (pair >= pair_count)
    return;
  const int atom1 = pair / num_atoms;
  const int atom2 = pair % num_atoms;
  for (int k = 0; k < num_correlation_steps; ++k) {
    // Wrap the lag index into [0, num_correlation_steps).
    int lag = correlation_step - k;
    if (lag < 0)
      lag += num_correlation_steps;
    const int out = lag * pair_count + pair;
    const int in = k * num_atoms + atom2;
    g_vac_x[out] += g_vx[atom1] * g_vx_all[in];
    g_vac_y[out] += g_vy[atom1] * g_vy_all[in];
    g_vac_z[out] += g_vz[atom1] * g_vz_all[in];
  }
}
23,820 | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
using namespace std;
//prodotto puntuale tra 2 matrici, usando il padding di memoria (pitch)
//input size matrici (m,n) , dimensioni blocchi (righe di thread, colonne di thread)
// Fills the m x n host matrix with random integers in [1, 10].
__host__
void inizializzaCPU(int *a,int m,int n){
	srand((unsigned int)time(NULL));
	for(int r=0;r<m;r++)
		for(int c=0;c<n;c++)
			a[r*n+c]=1+rand()%10;
}
// Prints the m x n host matrix, one row per line, preceded by a separator.
__host__
void stampaCPU(int *a,int m,int n){
	cout<<"--------------------------------"<<endl;
	for(int r=0;r<m;r++){
		for(int c=0;c<n;c++)
			cout<<a[r*n+c]<<" ";
		cout<<endl;
	}
}
// Element-wise product c = a .* b for m x n matrices stored with pitched
// (padded) rows. `pitch` is the real row length in BYTES as returned by
// cudaMallocPitch, so the row stride in elements is pitch / sizeof(int).
__global__
void calcolaProdPuntuale(int *a,int *b,int *c,int m,int n,int pitch){
	const int rowStride = pitch/sizeof(int);
	const int row = threadIdx.x + blockIdx.x * blockDim.x; // global row index of this thread
	const int col = threadIdx.y + blockIdx.y * blockDim.y; // global column index of this thread
	if(row >= m || col >= n)
		return;
	const int at = row * rowStride + col;
	c[at] = a[at] * b[at];
}
/* Element-wise matrix product on the GPU using pitched device memory.
 * Usage: ./matrixTrans m n blockRows blockCols (defaults: 5 5 2 2). */
int main(int argc,char *argv[]){
	int m,n;
	dim3 dimBlocco;  // threads per block (rows x cols)
	dim3 dimGriglia; // blocks in the grid (rows x cols)
	if(argc!=5){
		m=5;
		n=5;
		dimBlocco.x=2; // 2 rows of threads per block
		dimBlocco.y=2; // 2 columns of threads per block
	}
	else{
		sscanf(argv[1],"%d",&m);
		sscanf(argv[2],"%d",&n);
		sscanf(argv[3],"%d",& dimBlocco.x);
		sscanf(argv[4],"%d",& dimBlocco.y);
	}
	// Ceiling division for the grid size.
	// BUG FIX: the original incremented dimBlocco instead of dimGriglia when
	// the size was not an exact multiple of the block size, which could leave
	// part of the matrix unprocessed (e.g. m=5 with blockDim.x=3).
	dimGriglia.x = m / dimBlocco.x;
	if(m % dimBlocco.x != 0)
		dimGriglia.x ++;
	dimGriglia.y = n / dimBlocco.y;
	if(n % dimBlocco.y != 0)
		dimGriglia.y ++;
	// Host matrices.
	int *h_a,*h_b,*h_c;
	h_a=(int *)malloc(n*m*sizeof(int));
	h_b=(int *)malloc(n*m*sizeof(int));
	h_c=(int *)malloc(n*m*sizeof(int));
	// Fill the inputs with random values and show them.
	inizializzaCPU(h_a,m,n);
	inizializzaCPU(h_b,m,n);
	stampaCPU(h_a,m,n);
	stampaCPU(h_b,m,n);
	// Device matrices allocated with padded (pitched) rows; `pitch` receives
	// the real row length in bytes.
	int *d_a,*d_b,*d_c;
	size_t pitch;
	cudaMallocPitch((void **)&d_a,&pitch,n*sizeof(int),m);
	cudaMallocPitch((void **)&d_b,&pitch,n*sizeof(int),m);
	cudaMallocPitch((void **)&d_c,&pitch,n*sizeof(int),m);
	// 2D copies skip the padding bytes at the end of each device row.
	// Parameters: dst, dst row length (bytes), src, src row length (bytes),
	// bytes to copy per row, number of rows, direction.
	cudaMemcpy2D(d_a,pitch,h_a,n*sizeof(int),n*sizeof(int),m,cudaMemcpyHostToDevice);
	cudaMemcpy2D(d_b,pitch,h_b,n*sizeof(int),n*sizeof(int),m,cudaMemcpyHostToDevice);
	// Launch: the kernel needs the pitch to compute the device row stride.
	calcolaProdPuntuale<<<dimGriglia, dimBlocco>>>(d_a,d_b,d_c,m,n,pitch);
	// Copy the pitched result back into a tightly-packed host buffer.
	int *copyFromGPU=(int *)malloc(n*m*sizeof(int));
	cudaMemcpy2D(copyFromGPU,n*sizeof(int),d_c,pitch,n*sizeof(int),m,cudaMemcpyDeviceToHost);
	stampaCPU(copyFromGPU,m,n);
	// Release all memory (BUG FIX: copyFromGPU was never freed).
	free(h_a);
	free(h_b);
	free(h_c);
	free(copyFromGPU);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
23,821 | #include <stdio.h>
#define n 1024
#define NUMTHREADS 256
// One thread per input element: each thread atomically bumps the bin that
// matches its element's value. `n` is the file-level element count macro.
__global__ void histogram_kernel(unsigned int *data, unsigned int *bin) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n)
    return;
  atomicAdd(&bin[data[tid]], 1);
}
/* Builds a 10-class histogram of n values on the GPU, then prints the
 * per-class frequencies and checks the total against n. */
int main(int argc, char *argv[]) {
  const int dataBytes = n * sizeof(int);
  const int binBytes = 10 * sizeof(int);
  unsigned int hostData[n];
  unsigned int hostBin[10];
  unsigned int *devData, *devBin;
  /* input pattern: the values 0..9 repeating */
  for (int k = 0; k < n; k++)
    hostData[k] = k % 10;
  cudaMalloc((void **)&devData, dataBytes);
  cudaMalloc((void **)&devBin, binBytes);
  cudaMemcpy(devData, hostData, dataBytes, cudaMemcpyHostToDevice);
  cudaMemset(devBin, 0, binBytes); /* bins start at zero */
  /* ceiling division so every element gets a thread */
  const int blocks = (n + NUMTHREADS - 1) / NUMTHREADS;
  histogram_kernel<<<blocks, NUMTHREADS>>>(devData, devBin);
  cudaMemcpy(hostBin, devBin, binBytes, cudaMemcpyDeviceToHost);
  cudaFree(devData);
  cudaFree(devBin);
  int total = 0;
  for (int k = 0; k < 10; k++) {
    printf("Freq %d = %d\n", k, hostBin[k]);
    total += hostBin[k];
  }
  printf("#elements = %d\n", total);
}
23,822 | #include <iostream>
#include <chrono>
#include <vector>
#include<curand_kernel.h>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with file/line context; exits when `abort` is set.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
constexpr int MAX_SIZE = 300;
constexpr int GENERATIONS = 100;
constexpr int ISLANDS = 32;
constexpr int POPULATION = 64;
// Exchanges two ints through a temporary.
__device__ __forceinline__ void swap(int& a, int& b){
    const int saved = a;
    a = b;
    b = saved;
}
// Knapsack fitness of a genome: sum of the values of the selected items,
// forced to 0 when the summed weight exceeds the capacity W.
__device__ __forceinline__ int cal_fitness(int* weights, int* values, char* genom, int W, int n){
    int totalValue = 0;
    int totalWeight = 0;
    for(int i = 0; i < n; i++){
        const int gene = (int)genom[i];
        totalValue += values[i] * gene;
        totalWeight += weights[i] * gene;
    }
    return (totalWeight > W) ? 0 : totalValue;
}
// Copies genome b into a (elite individuals survive to the next generation).
__device__ __forceinline__ void selection(char* a, char* b, int n){
    int i = 0;
    while(i < n){
        a[i] = b[i];
        ++i;
    }
}
// Per-gene uniform crossover with mutation: each gene comes from the father
// with probability 0.45, the mother with probability 0.45, or is re-rolled
// randomly (mutation) with probability 0.10.
__device__ __forceinline__ void crossover(char* mother, char* father, char* child, curandState_t* state, int n){
    for(int i = 0; i < n; i++){
        const double roll = curand_uniform(state);
        if(roll < 0.45){
            child[i] = father[i];
        } else if(roll < 0.90){
            child[i] = mother[i];
        } else {
            child[i] = curand(state)%2;
        }
    }
}
// Block-wide bitonic sort of fitness[0..POPULATION) in DESCENDING order,
// permuting pos[] in lockstep so pos[] tracks each value's original slot.
// Must be called by all POPULATION threads of the block (contains
// __syncthreads()). The `n` parameter is kept for interface compatibility
// but unused: the network size is fixed at POPULATION.
// BUG FIX: the original had a dangling-else -- the "ascending" comparison
// attached to the inner `if`, so both comparisons ran in the
// (tid & k) == 0 half while the other half did nothing, breaking the
// bitonic merge. Braces now pair each comparison with the correct half.
__device__ __forceinline__ void sort(int* fitness, int* pos, int n){
    int tid = threadIdx.x;
    for(unsigned int k = 2; k <= POPULATION; k*=2){
        for(unsigned int j = k/2; j > 0; j/=2){
            unsigned int ixj = tid^j;
            if(ixj>tid){
                if((tid & k) == 0){
                    // descending half: larger value moves to the lower index
                    if(fitness[tid] < fitness[ixj]){
                        swap(fitness[tid], fitness[ixj]);
                        swap(pos[tid], pos[ixj]);
                    }
                }
                else{
                    // ascending half of the bitonic sequence
                    if(fitness[tid] > fitness[ixj]){
                        swap(fitness[tid], fitness[ixj]);
                        swap(pos[tid], pos[ixj]);
                    }
                }
            }
            __syncthreads();
        }
    }
}
// Parallel inclusive prefix-maximum over arr[], carrying pos[] along so each
// slot also records where its current maximum came from. Hillis-Steele
// pattern: at offset i, thread tid folds in the value i slots to its left.
// Must be called by all threads of the block (contains __syncthreads()).
// NOTE(review): the caller passes the item count `n` as the scan length
// rather than POPULATION -- confirm that is the intended length.
__device__ __forceinline__ void prefixmax(int* arr, int* pos, int n){
int x, p;
int tid = threadIdx.x;
for(int i = 1; i < n; i*=2){
// Read the left neighbor's value before anyone overwrites it ...
if(tid>=i){
x = arr[tid-i];
p = pos[tid-i];
}
__syncthreads();
// ... then publish the larger of the two (and its origin index).
if(tid>=i&&x>arr[tid]){
arr[tid] = x;
pos[tid] = p;
}
__syncthreads();
}
}
// One CUDA block = one GA island; one thread = one individual, so the launch
// must use blockDim.x == POPULATION. Solves the 0/1 knapsack (n items with
// weights w[] and values v[], capacity W). Each block writes its best genome
// to result[bid*n .. bid*n + n) and the matching fitness to profit[bid].
// Requires n <= MAX_SIZE (shared-memory arrays are statically sized).
__global__ void kernel(int* w, int* v, int n, int W, char* result, int* profit, curandState_t* states) {
__shared__ int weights[MAX_SIZE];
__shared__ int values[MAX_SIZE];
__shared__ char population[POPULATION][MAX_SIZE];
__shared__ char new_population[POPULATION][MAX_SIZE];
__shared__ int fitness[POPULATION];
__shared__ int pos[POPULATION];
int tid = threadIdx.x;
int bid = blockIdx.x;
int id = blockDim.x*bid + tid;
// Top tenth of the population is copied unchanged each generation (elitism).
int frac = POPULATION/10;
int p1, p2;
// Private copy of this thread's RNG state.
curandState_t state = states[id];
// Stage the item data into shared memory, strided across the block's threads.
for(int i = tid; i < n; i+=POPULATION){
weights[i] = w[i];
values[i] = v[i];
}
__syncthreads();
// Random initial genome for this thread's individual (each gene is 0 or 1).
for(int i = 0; i < n; i++){
population[tid][i] = curand(&state)%2;
}
int not_changed = 0;
int prev = 0;
int iter = 0;
for(int g = 0; g < GENERATIONS+1; g++)
//while(not_changed<GENERATIONS)
{
iter++;
// Evaluate this generation and rank individuals by fitness.
// NOTE(review): sort() ignores its third argument (it sorts POPULATION
// entries); the item count `n` passed here is unused -- confirm intent.
fitness[tid] = cal_fitness(weights, values, population[tid], W, n);
pos[tid] = tid;
sort(fitness, pos, n);
__syncthreads();
// if(prev == fitness[0])
// not_changed++;
// else
// not_changed = 0;
// prev = fitness[0];
// __syncthreads();
// Elite threads copy a top-ranked genome verbatim ...
if(tid < frac){
selection(new_population[tid], population[pos[tid]], n);
}
// ... all other threads breed a child from two parents drawn from the
// top half of the ranking (p1, p2 in [1, POPULATION/2]).
if(tid >= frac){
p1 = ceilf(curand_uniform(&state) * (POPULATION/2));
p2 = ceilf(curand_uniform(&state) * (POPULATION/2));
crossover(population[pos[p1]], population[pos[p2]], new_population[tid], &state, n);
}
__syncthreads();
// The new generation replaces the current one.
for(int i = 0; i < n; i++)
population[tid][i] = new_population[tid][i];
__syncthreads();
}
// Final evaluation; the prefix-max leaves the block's best fitness (and the
// index of its owner) in slot POPULATION-1.
// NOTE(review): prefixmax is also called with `n` as the scan length rather
// than POPULATION -- confirm this is intended when n != POPULATION.
fitness[tid] = cal_fitness(weights, values, population[tid], W, n);
pos[tid] = tid;
__syncthreads();
prefixmax(fitness, pos, n);
if(tid == 0){
profit[bid] = fitness[POPULATION-1];
// stats[bid] = iter;
}
__syncthreads();
// Write the winning genome out to global memory, strided across threads.
for(int i = tid; i < n; i+=POPULATION)
result[bid*n+i] = population[pos[POPULATION-1]][i];
}
// Seeds one curand state per thread, using the global thread id as the
// subsequence so every individual gets an independent stream.
__global__ void init(curandState_t* states, unsigned int seed){
    const int tid = threadIdx.x + blockIdx.x*blockDim.x;
    curand_init(seed, tid, 0, &states[tid]);
}
/* Island-model GA for the 0/1 knapsack. Reads n, W and then n (weight,
 * value) pairs from stdin; runs ISLANDS blocks of POPULATION individuals
 * and prints the best island, its profit, and the winning genome. */
int main(){
    cudaSetDevice(0);
    int *d_weights, *d_values, *d_profit;
    // int* d_stats;
    char* d_result;
    curandState_t* states;
    int n, W;
    cin>>n>>W;
    vector<int> weights(n), values(n), profit(ISLANDS);
    vector<char> result(ISLANDS*n);
    // vector<int> stats(ISLANDS);
    for(int i = 0; i < n; i++){
        cin>>weights[i]>>values[i];
    }
    gpuErrchk(cudaMalloc(&d_weights, n*sizeof(int)));
    gpuErrchk(cudaMalloc(&d_values, n*sizeof(int)));
    // BUG FIX: d_result holds chars, so size it with sizeof(char); the
    // original used sizeof(int) and over-allocated the buffer 4x.
    gpuErrchk(cudaMalloc(&d_result, ISLANDS*n*sizeof(char)));
    gpuErrchk(cudaMalloc(&d_profit, ISLANDS*sizeof(int)));
    // gpuErrchk(cudaMalloc(&d_stats, ISLANDS*sizeof(int)));
    gpuErrchk(cudaMalloc(&states, ISLANDS*POPULATION*sizeof(curandState_t)));
    gpuErrchk(cudaMemcpy(d_weights, weights.data(), n*sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_values, values.data(), n*sizeof(int), cudaMemcpyHostToDevice));
    // One RNG state per individual on every island.
    init<<<ISLANDS, POPULATION>>>(states, time(0));
    gpuErrchk(cudaPeekAtLastError()); // surface launch-configuration errors
    gpuErrchk(cudaDeviceSynchronize());
    auto start = chrono::steady_clock::now();
    kernel<<<ISLANDS, POPULATION>>>(d_weights, d_values, n, W, d_result, d_profit, states);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    auto stop = chrono::steady_clock::now();
    gpuErrchk(cudaMemcpy(profit.data(), d_profit, ISLANDS*sizeof(int), cudaMemcpyDeviceToHost));
    // gpuErrchk(cudaMemcpy(stats.data(), d_stats, ISLANDS*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(result.data(), d_result, n*ISLANDS, cudaMemcpyDeviceToHost));
    // Pick the island with the highest profit.
    int best = 0;
    for(int i = 0; i < ISLANDS; i++){
        if(profit[i]>profit[best])
            best = i;
    }
    cout<<"Best island: "<<best<<endl;
    cout<<"Profit: "<<profit[best]<<endl;
    // `+` promotes the char genes so they print as 0/1 digits.
    for(int i = 0; i < n; i++)
        cout<<+result[best*n+i]<<" ";
    cout<<endl;
    cerr << "Elapsed time: " << chrono::duration_cast<chrono::microseconds>(stop - start).count() << "μs\n";
    gpuErrchk(cudaFree(states));
    gpuErrchk(cudaFree(d_weights));
    gpuErrchk(cudaFree(d_values));
    gpuErrchk(cudaFree(d_profit));
    gpuErrchk(cudaFree(d_result));
    // gpuErrchk(cudaFree(d_stats));
    return 0;
}
// Device-global element count read by dev_add's bounds check.
__device__ int N;
// Single-thread kernel that stores the host-supplied length into N.
__global__ void set_N(int n) {
    N = n;
}
// c[i] = a[i] + b[i], one block per element. Indices at or beyond the
// device-global N (set beforehand via set_N) are ignored.
__global__ void dev_add(int *a, int *b, int *c) {
    int i = blockIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Host wrapper: publishes n via set_N, copies a and b to the device, runs
// dev_add with one block per element, and copies the sums back into c.
void add(int a[], int b[], int c[], int n) {
    const size_t bytes = n * sizeof(int);
    int *dev_a, *dev_b, *dev_c;
    set_N<<<1, 1>>>(n);
    cudaMalloc(&dev_a, bytes);
    cudaMalloc(&dev_b, bytes);
    cudaMalloc(&dev_c, bytes);
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    dev_add<<<n, 1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
// c[i] = a[i] + b[i], one block per element; the length is passed as a
// kernel argument instead of living in a device global.
__global__ void dev_add_n(int *a, int *b, int *c, int N) {
    int i = blockIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Host wrapper for dev_add_n: same as add(), but the length travels as a
// kernel parameter rather than through the device-global N.
void add_n(int a[], int b[], int c[], int n) {
    const size_t bytes = n * sizeof(int);
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, bytes);
    cudaMalloc(&dev_b, bytes);
    cudaMalloc(&dev_c, bytes);
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);
    dev_add_n<<<n, 1>>>(dev_a, dev_b, dev_c, n);
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
|
23,824 | /* threads and blocks.
* for blocks = 2, threads = 4:
* a = [0,1,2,3 | 0,1,2,3]
* index = threadIdx.x + blockIdx.x * threads
* = [0,1,2,3 | 4,5,6,7]
*/
#include <stdio.h>
/* this kernel uses threads and blocks. the width of a block
* (number of threads per block) can be accessed with the
* built in variable blockDim.x so that the index can be obtained.
*/
/* res[i] = a[i] + b[i] for this thread's global index.
 * NOTE(review): no bounds guard -- the launch must supply exactly one thread
 * per element (true here, since N == BLOCKS * THREADS_PER_BLOCK). */
__global__ void device_add(int *a, int *b, int *res)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	res[i] = a[i] + b[i];
}
#define N 8
#define THREADS_PER_BLOCK 4
#define BLOCKS (N / THREADS_PER_BLOCK)
/* fills arr[0..n) with pseudo-random non-negative integers from rand(). */
void random_ints(int *arr, int n)
{
	for (int k = 0; k < n; k++)
		arr[k] = rand();
}
/* prints arr[0..n) as comma-separated values, terminated by a newline.
 * BUG FIX: the original computed last = n - 1 unconditionally and printed
 * arr[last], reading arr[-1] for an empty array; n <= 0 now prints just a
 * newline. */
void print_arr(int *arr, int n)
{
	int i;
	if (n <= 0) {
		printf("\n");
		return;
	}
	for (i = 0; i < n - 1; i++)
		printf("%i,", arr[i]);
	printf("%i\n", arr[n - 1]);
}
/* adds two random N-element vectors on the GPU and prints the result. */
int main(void)
{
	const int bytes = N * sizeof(int);
	int *h_a, *h_b, *h_res;
	int *d_a, *d_b, *d_res;
	/* device buffers */
	cudaMalloc((void**) &d_a, bytes);
	cudaMalloc((void**) &d_b, bytes);
	cudaMalloc((void**) &d_res, bytes);
	/* host buffers with random input */
	h_a = (int*) malloc(bytes);
	h_b = (int*) malloc(bytes);
	h_res = (int*) malloc(bytes);
	random_ints(h_a, N);
	random_ints(h_b, N);
	/* upload the operands */
	cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
	/* BLOCKS blocks of THREADS_PER_BLOCK threads cover all N elements */
	device_add<<<BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, d_res);
	/* fetch and show the sums */
	cudaMemcpy(h_res, d_res, bytes, cudaMemcpyDeviceToHost);
	print_arr(h_res, N);
	free(h_a);
	free(h_b);
	free(h_res);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_res);
	return 0;
}
23,825 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* The variable names and the function names of this program is same as provided by the university.
The added variable and function are the only changes made to this program.
* To compile:
* nvcc -o linear61 linear61.cu -lm
*
* To run:
* .linear61
*
*****************************************************************************/
/* One (x, y) observation for the linear-regression fit. */
typedef struct point_t {
	double x; /* independent variable */
	double y; /* dependent variable */
} point_t;
/* number of samples in data[]: host copy and device copy */
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{72.61,122.14},{65.30,113.99},{65.94,115.13},{83.00,119.01},
{73.72,122.37},{83.86,144.91},{73.36,121.80},{81.25,115.76},
{66.88,114.31},{65.05,112.13},{76.57,128.74},{ 3.97,50.44},
{91.69,168.98},{16.79,57.68},{62.41,119.44},{15.56,47.54},
{47.82,103.56},{16.30,72.48},{66.83,107.08},{24.36,64.49},
{23.79,64.11},{57.51,90.08},{26.71,67.38},{12.05,60.97},
{76.99,132.98},{21.52,65.34},{77.23,133.36},{14.60,57.44},
{51.10,102.98},{35.46,85.93},{36.20,96.59},{49.65,88.94},
{73.62,137.70},{43.75,96.00},{84.69,151.10},{33.85,84.44},
{66.14,104.48},{14.70,34.68},{60.19,103.97},{67.77,123.51},
{44.80,96.82},{42.69,107.47},{95.00,161.89},{65.45,118.05},
{40.45,78.14},{34.83,73.11},{52.03,107.13},{16.41,49.38},
{79.78,141.19},{62.20,95.42},{ 6.57,37.64},{82.22,125.07},
{ 2.76,38.32},{60.33,110.92},{12.68,46.07},{45.02,79.62},
{ 2.06,25.84},{ 2.95, 9.74},{58.84,118.16},{34.29,73.77},
{68.67,118.81},{42.06,102.74},{19.82,71.99},{62.06,119.00},
{30.06,71.44},{41.36,85.00},{21.15,79.07},{56.54,125.92},
{11.70,59.76},{36.32,95.43},{29.52,69.98},{ 8.02,42.22},
{20.89,62.86},{60.96,98.93},{60.84,103.12},{31.36,70.69},
{75.19,134.80},{69.89,121.38},{72.42,137.81},{77.26,134.53},
{91.92,153.26},{39.99,81.63},{ 1.00,33.56},{21.46,49.48},
{ 9.18,52.75},{37.36,66.18},{ 8.83,53.58},{97.25,160.25},
{90.05,137.91},{ 3.36,48.25},{22.68,66.50},{47.37,87.34},
{16.58,44.61},{82.64,137.86},{86.10,140.03},{86.79,142.37},
{13.48,71.60},{47.71,95.39},{44.07,90.76},{99.00,156.16},
{86.82,125.22},{70.64,106.68},{ 6.74,34.88},{57.42,100.27},
{63.40,120.60},{64.34,115.87},{ 6.44,28.64},{43.63,96.98},
{96.58,138.76},{79.70,117.68},{12.68,34.45},{77.73,119.56},
{72.37,115.28},{48.92,89.53},{72.34,117.56},{94.57,142.14},
{83.29,128.51},{10.15,29.29},{96.11,157.14},{64.62,122.40},
{23.49,80.38},{52.51,88.73},{97.59,151.15},{62.01,136.34},
{70.75,126.65},{14.18,45.13},{ 1.13,31.52},{68.33,132.65},
{80.27,123.84},{23.82,64.11},{90.89,131.49},{90.07,133.01},
{71.57,125.22},{86.68,134.96},{99.49,154.59},{66.87,120.46},
{63.78,127.60},{30.22,76.41},{42.74,98.86},{50.93,108.47},
{97.66,143.65},{40.23,89.47},{ 9.89,26.96},{31.37,87.03},
{25.53,59.02},{ 9.70,49.23},{64.57,108.10},{55.12,94.01},
{ 7.48,55.96},{ 1.17,42.16},{28.34,63.63},{61.21,108.10},
{84.94,144.04},{11.92,66.38},{61.50,123.90},{83.77,141.83},
{20.50,52.57},{85.89,127.86},{ 6.18,51.59},{47.09,91.84},
{78.40,134.21},{16.65,74.99},{83.61,120.39},{84.45,120.19},
{45.90,90.34},{82.15,132.49},{19.06,60.23},{15.22,56.13},
{21.38,50.83},{39.68,87.21},{75.62,124.13},{ 5.54,35.89},
{13.19,47.90},{41.31,92.83},{77.79,122.26},{17.53,53.95},
{35.24,82.56},{31.33,74.62},{16.56,61.34},{65.49,106.96},
{85.01,134.78},{23.66,64.89},{26.25,73.31},{40.73,73.37},
{ 4.54,59.22},{ 5.60,42.33},{57.72,111.38},{75.00,114.73},
{55.23,114.82},{27.08,77.18},{45.38,60.76},{28.99,62.84},
{42.07,89.21},{14.36,33.23},{94.73,142.59},{17.92,47.35},
{10.68,63.36},{67.05,110.46},{ 2.95,17.96},{67.87,120.05},
{64.06,118.94},{86.24,125.79},{10.52,41.95},{ 4.64,52.52},
{84.65,140.97},{27.38,71.35},{64.03,127.93},{19.15,62.09},
{30.58,65.64},{44.43,80.38},{47.58,92.06},{30.24,67.15},
{91.80,134.31},{73.90,141.68},{94.09,153.98},{96.40,144.62},
{40.03,65.14},{44.77,93.21},{74.74,137.22},{10.49,58.46},
{99.14,160.90},{ 2.53,29.98},{58.80,111.70},{ 7.40,50.19},
{31.54,69.13},{17.69,43.19},{69.03,129.69},{ 1.54,45.83},
{25.46,58.41},{73.24,116.47},{58.18,96.07},{99.58,143.45},
{ 5.68,53.53},{14.07,35.50},{68.58,125.77},{64.66,116.20},
{ 9.26,36.41},{90.59,142.18},{54.39,92.83},{68.73,114.32},
{37.87,82.88},{49.35,100.88},{17.05,59.56},{26.56,53.14},
{83.73,141.78},{86.43,147.25},{15.90,53.34},{33.39,77.22},
{80.37,153.79},{92.13,129.36},{40.66,73.81},{40.66,76.82},
{81.92,134.87},{74.81,123.32},{56.45,102.54},{33.84,93.31},
{92.31,149.13},{45.93,88.84},{13.29,65.57},{ 5.79,35.12},
{ 7.82,53.37},{14.19,28.92},{40.91,99.71},{63.00,117.19},
{14.56,57.61},{55.40,108.59},{33.67,84.59},{77.47,121.30},
{32.74,73.15},{ 8.27,43.56},{ 0.80,36.13},{16.07,57.90},
{44.10,74.78},{25.49,81.59},{ 1.57,49.87},{91.32,152.99},
{91.05,136.42},{30.52,76.52},{20.82,48.58},{16.16,50.64},
{ 9.12,53.02},{16.12,33.92},{42.23,98.66},{ 8.94,52.97},
{92.66,146.94},{49.63,92.93},{18.18,69.02},{40.87,73.68},
{98.32,153.59},{44.15,84.02},{16.88,50.45},{65.43,110.86},
{68.14,119.79},{75.59,99.75},{99.05,147.11},{67.53,130.29},
{75.43,114.67},{37.94,79.26},{94.53,149.31},{88.48,136.32},
{52.91,88.78},{53.14,106.33},{76.65,133.86},{52.26,94.93},
{89.66,129.71},{27.42,72.57},{82.18,130.89},{37.58,90.29},
{44.12,70.27},{86.73,144.47},{44.92,87.96},{32.78,75.89},
{71.50,136.15},{52.72,101.41},{77.55,138.51},{66.70,111.44},
{70.70,118.79},{37.20,80.84},{68.64,94.49},{33.09,72.94},
{24.78,71.66},{ 3.65,41.09},{ 1.39,52.64},{81.80,150.05},
{49.62,87.09},{69.36,111.30},{27.81,56.73},{82.25,132.98},
{87.82,131.61},{81.20,113.89},{15.83,52.35},{11.74,56.93},
{37.81,97.88},{70.58,114.80},{66.85,113.88},{53.07,97.35},
{86.78,127.99},{15.81,42.29},{ 8.44,56.48},{54.20,99.97},
{93.13,154.95},{24.33,76.49},{87.66,154.00},{64.67,110.80},
{19.99,71.37},{19.84,53.55},{ 3.30,26.73},{ 4.40,29.28},
{ 4.92,45.96},{84.36,128.34},{83.76,156.79},{79.92,131.27},
{22.53,74.00},{31.07,52.86},{84.85,138.64},{88.82,133.59},
{10.39,30.82},{78.01,123.36},{ 6.61,26.80},{29.59,61.55},
{22.83,51.56},{49.82,99.26},{78.21,150.36},{91.44,157.75},
{16.38,53.54},{67.17,108.57},{42.44,96.94},{71.98,128.35},
{38.67,84.50},{51.26,80.03},{19.35,57.56},{ 6.43,38.65},
{74.78,107.58},{34.79,69.36},{51.63,109.48},{48.01,98.88},
{ 0.92,46.85},{21.32,58.31},{56.07,88.58},{35.47,75.52},
{13.05,65.67},{54.94,98.10},{15.17,45.46},{55.95,101.47},
{68.04,113.47},{15.49,59.40},{44.82,87.51},{96.78,159.09},
{83.26,132.31},{19.30,49.64},{91.82,127.99},{81.75,127.96},
{65.82,118.94},{98.26,155.68},{83.17,141.85},{14.27,48.18},
{55.40,96.46},{49.57,89.69},{74.81,140.97},{82.16,137.89},
{76.22,136.10},{42.92,87.97},{23.00,64.08},{98.68,174.43},
{20.53,56.05},{ 5.89,47.62},{70.09,109.02},{59.44,124.94},
{57.59,104.84},{85.29,127.63},{29.77,75.36},{60.68,114.37},
{78.68,108.18},{81.78,128.87},{71.01,113.02},{67.71,124.89},
{87.80,128.73},{29.80,65.60},{ 0.47,45.33},{ 5.58,42.47},
{21.53,55.18},{ 1.69,32.91},{44.51,82.89},{44.02,90.22},
{87.02,127.60},{46.73,77.85},{33.19,83.04},{42.65,85.79},
{34.70,79.73},{41.07,106.48},{97.38,154.70},{56.42,96.03},
{84.56,148.81},{18.33,46.90},{35.72,75.87},{53.62,96.55},
{17.73,62.61},{18.40,57.17},{50.32,103.39},{22.19,48.78},
{44.51,92.53},{91.78,153.55},{40.91,88.28},{ 3.90,30.57},
{19.99,61.00},{94.37,141.53},{19.91,55.77},{17.21,55.53},
{20.07,69.73},{11.38,37.48},{22.92,75.56},{38.86,71.38},
{75.82,135.05},{95.31,148.83},{41.22,90.49},{35.00,71.28},
{33.30,79.00},{15.44,47.43},{66.42,108.81},{21.99,85.23},
{16.04,48.08},{ 1.61,27.86},{32.19,52.31},{93.26,158.32},
{46.27,85.71},{92.41,157.29},{52.90,79.45},{78.38,125.95},
{46.14,87.57},{25.29,68.09},{38.44,64.49},{73.11,103.77},
{38.41,88.03},{46.00,83.76},{11.54,65.35},{17.36,69.14},
{21.27,66.48},{91.66,154.52},{76.74,129.80},{56.54,104.12},
{27.26,78.68},{14.25,43.23},{ 2.21,40.84},{91.30,130.69},
{96.01,160.65},{41.31,89.71},{40.54,81.77},{59.19,86.80},
{12.23,60.41},{98.19,145.54},{42.92,79.66},{32.42,71.41},
{65.71,106.18},{ 8.96,38.07},{10.19,42.35},{95.29,140.79},
{23.89,75.73},{ 0.55,27.59},{38.70,95.88},{95.82,149.88},
{65.62,111.52},{10.95,50.72},{55.16,87.28},{85.94,141.81},
{50.30,99.67},{72.48,121.23},{15.97,60.91},{63.30,106.75},
{63.03,111.19},{75.50,116.04},{23.54,66.63},{ 6.87,34.00},
{32.27,87.46},{96.05,154.49},{10.01,45.61},{54.87,109.09},
{90.35,131.14},{43.87,98.42},{54.77,109.64},{73.73,124.30},
{58.45,80.04},{48.37,94.71},{ 7.77,65.60},{27.98,76.09},
{67.71,125.14},{26.33,73.78},{ 6.90,31.05},{ 3.09,50.61},
{25.83,49.88},{72.61,122.60},{19.64,60.19},{65.78,115.50},
{28.20,65.53},{54.20,107.50},{83.82,110.84},{86.05,135.46},
{44.88,100.19},{80.99,140.91},{43.75,101.80},{16.56,58.04},
{67.57,108.05},{35.57,101.45},{14.34,38.17},{30.26,69.15},
{30.20,74.12},{74.53,123.22},{94.97,155.12},{24.24,55.24},
{42.76,91.42},{48.25,71.66},{93.52,153.28},{95.02,135.50},
{25.81,63.31},{50.47,92.78},{19.76,53.70},{74.95,139.33},
{82.71,128.51},{98.05,144.88},{ 1.13,25.40},{75.29,95.58},
{27.77,77.19},{57.67,105.06},{34.22,74.40},{85.47,147.19},
{38.02,81.98},{15.19,67.72},{80.03,133.38},{81.42,140.50},
{78.82,135.16},{66.56,114.52},{ 0.39,46.05},{69.42,105.91},
{46.44,93.74},{48.68,102.13},{88.95,137.05},{85.37,132.32},
{49.80,92.72},{35.02,91.45},{65.01,139.64},{39.34,82.25},
{10.86,49.81},{ 5.96,58.81},{98.04,165.30},{64.93,119.00},
{57.34,97.87},{42.57,90.41},{79.06,132.37},{67.00,110.29},
{22.45,69.44},{83.35,123.75},{79.93,123.11},{51.78,103.50},
{ 6.12,33.48},{18.37,66.08},{34.19,80.97},{89.89,129.35},
{22.76,60.29},{ 8.91,47.60},{53.26,108.67},{12.46,45.82},
{88.52,131.36},{21.97,78.82},{17.32,68.69},{30.54,66.54},
{27.65,78.07},{78.16,131.22},{46.25,93.75},{70.20,92.04},
{33.95,70.94},{64.73,105.51},{93.27,134.72},{52.54,110.50},
{70.91,119.06},{29.69,65.45},{99.90,142.59},{83.82,138.28},
{92.49,142.99},{13.15,55.67},{28.72,76.57},{32.91,55.54},
{32.85,93.49},{96.42,147.04},{37.90,72.33},{55.95,86.84},
{36.87,81.83},{ 4.32,30.88},{52.37,110.14},{26.69,84.96},
{60.27,99.83},{90.68,164.69},{34.57,81.97},{81.14,134.44},
{63.78,113.39},{99.88,146.62},{53.48,89.70},{36.13,78.58},
{79.56,124.45},{48.12,112.53},{80.88,138.11},{92.59,152.07},
{13.99,65.56},{ 9.26,69.18},{ 0.67,29.25},{56.13,96.43},
{ 4.39,54.34},{38.40,82.36},{12.28,62.45},{ 6.55,38.81},
{76.93,139.44},{20.17,63.87},{64.63,125.41},{10.45,49.24},
{14.06,59.29},{40.38,69.01},{57.97,113.60},{94.14,157.37},
{82.46,145.27},{ 7.66,27.72},{41.54,80.55},{96.06,149.78},
{51.95,79.34},{18.96,49.36},{49.18,99.48},{10.91,54.33},
{68.82,132.85},{88.63,138.54},{93.58,164.32},{41.44,68.46},
{35.95,94.55},{94.86,145.40},{91.54,161.25},{ 2.27,35.98},
{61.32,114.66},{58.78,110.41},{48.23,99.87},{11.59,51.28},
{50.59,87.15},{72.48,122.70},{55.35,105.91},{39.90,82.36},
{79.80,141.27},{62.02,123.78},{95.21,161.62},{10.46,42.13},
{41.49,92.82},{67.11,130.87},{18.71,67.76},{68.57,119.24},
{93.28,143.32},{93.40,154.37},{10.99,49.67},{68.17,93.03},
{63.81,124.40},{50.90,87.92},{92.18,133.25},{78.69,124.42},
{22.59,47.76},{87.67,138.29},{92.13,151.06},{32.39,79.20},
{46.25,102.78},{41.20,84.44},{25.90,72.64},{82.20,128.96},
{82.44,119.98},{93.27,142.41},{94.06,143.89},{69.82,97.85},
{48.75,97.67},{38.58,80.98},{68.53,124.50},{83.06,121.14},
{53.03,107.42},{64.13,96.22},{98.59,139.71},{62.89,115.03},
{34.92,72.99},{88.37,149.91},{26.09,71.32},{52.42,100.01},
{92.04,153.85},{55.24,106.92},{43.51,69.94},{45.36,96.66},
{90.15,142.16},{16.14,67.73},{93.19,159.11},{70.19,140.27},
{72.54,123.30},{84.23,123.53},{ 2.10,28.01},{68.12,116.50},
{96.88,144.20},{96.68,154.20},{97.34,155.41},{37.51,78.27},
{12.87,48.95},{92.46,156.53},{67.41,110.36},{86.32,142.74},
{70.72,107.25},{ 4.90,41.60},{50.04,92.56},{28.84,69.66},
{89.50,142.94},{75.91,123.92},{63.95,126.67},{41.90,86.63},
{ 0.62,55.56},{28.51,79.62},{32.99,74.12},{90.92,141.70},
{79.18,132.94},{56.96,121.01},{ 9.05,43.49},{47.43,116.86},
{18.50,71.76},{87.71,140.40},{17.41,50.76},{48.79,85.36},
{63.97,93.04},{41.79,85.37},{11.14,51.47},{68.66,128.66},
{14.40,50.15},{99.40,147.08},{52.75,108.18},{91.81,159.21},
{ 6.86,56.21},{58.06,101.25},{ 9.81,50.66},{70.94,147.47},
{40.16,84.80},{55.72,111.38},{16.61,38.90},{23.51,68.47},
{45.62,83.33},{43.73,85.94},{95.46,164.42},{88.74,117.89},
{75.81,141.81},{47.93,89.47},{ 8.95,50.38},{85.76,152.19},
{26.65,81.46},{47.76,103.71},{56.39,103.75},{96.54,161.13},
{57.49,89.28},{75.88,110.78},{16.16,54.40},{ 5.71,55.19},
{12.29,52.16},{30.92,69.18},{20.20,69.80},{35.67,89.28},
{60.03,116.20},{46.94,85.67},{87.77,129.39},{70.42,124.80},
{16.99,54.76},{18.10,45.82},{80.19,135.86},{95.27,155.69},
{ 9.99,55.39},{97.27,148.90},{ 5.44,39.64},{58.18,108.14},
{68.56,116.61},{88.27,141.73},{ 1.19,32.36},{24.24,71.11},
{67.36,107.10},{78.06,114.60},{27.97,74.85},{10.69,49.54},
{ 2.59,36.83},{45.45,95.15},{64.08,131.95},{20.33,39.99},
{ 1.58,42.82},{15.02,50.08},{55.00,90.11},{25.52,66.46},
{88.80,149.55},{70.08,130.52},{25.53,56.56},{44.07,75.36},
{54.84,100.89},{18.43,60.26},{44.51,90.39},{62.64,105.10},
{31.95,63.27},{52.28,95.72},{76.51,138.46},{24.96,57.71},
{93.51,123.66},{53.73,97.50},{98.07,162.39},{80.65,126.50},
{20.67,62.60},{60.97,115.73},{41.74,92.81},{ 2.84,34.92},
{56.82,113.96},{89.68,152.73},{74.47,125.59},{80.75,124.23},
{87.86,146.47},{46.07,89.78},{57.31,101.06},{51.21,91.07},
{58.38,103.55},{69.18,96.72},{53.61,103.13},{88.91,136.29},
{40.90,82.60},{52.68,88.63},{19.34,60.82},{11.14,45.73},
{52.17,97.70},{26.34,68.26},{98.05,150.46},{22.91,55.22},
{87.82,148.21},{82.63,126.96},{95.51,166.87},{69.38,115.01},
{95.94,153.40},{11.68,45.80},{18.36,46.90},{60.17,114.61},
{50.21,106.85},{98.33,152.85},{ 7.07,51.01},{98.59,155.61},
{34.73,83.82},{34.46,95.27},{42.38,77.67},{50.83,110.71},
{79.24,138.98},{39.45,76.06},{54.91,112.86},{26.40,68.84},
{17.61,55.64},{36.01,71.30},{32.94,65.29},{59.91,91.84},
{47.42,81.16},{23.35,78.50},{19.08,63.62},{72.14,118.11},
{39.46,73.59},{83.55,118.87},{ 6.64,43.57},{92.66,173.60},
{86.07,140.56},{67.54,113.19},{72.04,132.82},{90.26,140.02},
{62.07,105.63},{33.69,73.16},{11.64,40.39},{56.60,115.09},
{22.99,52.46},{89.86,155.65},{17.69,49.44},{12.85,55.41},
{76.64,136.31},{16.86,46.01},{39.01,79.91},{44.51,103.87},
{19.65,39.62},{32.67,74.38},{63.66,105.77},{11.37,48.08},
{77.84,135.38},{14.90,52.82},{69.19,109.97},{40.81,87.48},
{27.82,75.33},{77.26,127.42},{86.83,151.43},{73.46,110.67},
{63.47,110.89},{68.79,101.47},{15.60,44.20},{19.93,56.36},
{61.34,105.73},{30.67,93.18},{ 3.70,33.63},{38.24,80.68},
{98.22,148.31},{38.34,80.19},{79.69,120.18},{14.81,48.67},
{ 7.63,46.02},{13.76,51.40},{36.19,80.37},{60.92,113.38},
{51.14,77.50},{10.91,67.23},{75.16,115.78},{23.36,59.62},
{12.05,42.55},{84.25,135.64},{18.97,71.98},{25.41,70.90},
{25.67,78.76},{18.85,62.82},{88.05,127.78},{ 9.83,48.79},
{76.23,133.73},{48.98,108.31},{41.89,102.26},{47.42,104.54},
{22.56,75.79},{85.32,135.60},{83.56,139.62},{26.94,60.63},
{90.14,140.53},{87.41,145.79},{87.38,125.23},{31.30,63.86},
{99.98,158.89},{ 9.18,33.85},{11.69,40.96},{ 8.06,35.44},
{53.19,95.89},{39.40,95.78},{80.92,125.08},{14.50,57.70},
{24.87,74.78},{32.09,98.88},{31.58,69.31},{81.70,136.05},
{27.81,63.65},{77.62,120.77},{60.07,111.07},{ 5.89,45.80},
{ 1.95,39.98},{85.49,134.87},{17.34,52.05},{37.48,71.82},
{25.73,64.73},{43.73,92.26},{93.57,144.10},{60.26,115.64},
{29.70,58.07},{ 9.83,41.24},{97.45,158.31},{61.30,105.64},
{88.06,139.17},{74.42,111.88},{86.28,124.66},{14.90,60.16},
{32.56,81.98},{42.51,71.47},{67.84,108.16},{25.37,57.68}
};
// Host: squared residual of the point (x, y) against the line y = m*x + c.
double residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
// Device: squared residual of (x, y) against the line y = m*x + c.
__device__ double d_residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
// Host: root-mean-square error of the line y = m*x + c over the global
// data[] array of n_data points.
double rms_error(double m, double c) {
  double total = 0;
  for (int i = 0; i < n_data; i++) {
    total += residual_error(data[i].x, data[i].y, m, c);
  }
  return sqrt(total / n_data);
}
// Kernel: one thread per data point; stores the squared residual of point
// `idx` against the candidate line (*m, *c) into error_sum_arr[idx].
// Caller launches exactly one thread per element of error_sum_arr.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  error_sum_arr[idx] = d_residual_error(d_data[idx].x, d_data[idx].y, *m, *c);
}
// Computes (finish - start) in nanoseconds through *difference.
// Returns zero on success; non-zero when start is not strictly before finish.
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
  long long int sec = finish->tv_sec - start->tv_sec;
  long long int nsec = finish->tv_nsec - start->tv_nsec;
  if (nsec < 0) {
    // Borrow one second so the nanosecond part is non-negative.
    sec -= 1;
    nsec += 1000000000;
  }
  *difference = sec * 1000000000 + nsec;
  return !(*difference > 0);
}
/*
 * Gradient-descent search for the least-RMS-error line y = m*x + c over the
 * global data set. Each iteration evaluates the 8 neighbouring (m, c)
 * candidates on the GPU and steps to the best one until none improves.
 */
int main() {
  int i;
  double bm = 1.3;               /* current best gradient */
  double bc = 10;                /* current best intercept */
  double be;                     /* RMS error of (bm, bc) */
  double dm[8];                  /* candidate gradients */
  double dc[8];                  /* candidate intercepts */
  double e[8];                   /* RMS error of each candidate */
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  /* Offsets of the 8 neighbouring candidates around (bm, bc). */
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  while(!minimum_found) {
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      double h_error_sum_arr[1000];
      /* BUG FIX: error_sum_total was read before ever being initialized on
         the first pass (the original only reset it to 0 at loop END). */
      double error_sum_total = 0;
      double error_sum_mean;
      /* 100 blocks x 10 threads = one thread per data point. */
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      /* cudaThreadSynchronize() is deprecated; use the supported call. */
      cudaDeviceSynchronize();
      /* Pass the array (decays to double*) rather than &array. */
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr,
                         (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                cudaGetErrorString(error));
      }
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      /* No neighbour improves on the current point: local minimum found. */
      minimum_found = 1;
    }
  }
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed/1.0e9));
  return 0;
}
|
23,826 | #include<stdio.h>
#include<math.h>
#define SIZE 1024
// Pairwise reduction step: writes the SMALLER of each adjacent pair
// (A[2i], A[2i+1]) into C[i]. (Kernel is named "max" but it selects the
// minimum — the host driver prints "Min is".)
__global__ void max(int * A, int * C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = (A[2*i] < A[2*i+1]) ? A[2*i] : A[2*i+1];
}
// Host driver: builds a descending array SIZE..1, then repeatedly halves it
// on the GPU with the pairwise-min kernel until two candidates remain, and
// prints the smaller of the two (the overall minimum).
int main()
{
    int A[SIZE];
    int *devA,*devC;
    // A = {SIZE, SIZE-1, ..., 1}, so the true minimum is 1.
    for(int j=0;j<SIZE;j++)
    {
        A[j]=SIZE-j;
    }
    cudaMalloc((void **)&devA,SIZE*sizeof(int));
    cudaMalloc((void **)&devC,SIZE*sizeof(int));
    // Pass j launches SIZE/2^j threads, reducing the live prefix of A from
    // SIZE/2^(j-1) down to SIZE/2^j candidates; the loop stops one pass
    // early (j < log2(SIZE)), leaving two candidates for the host.
    for(int j=1;j<log2((double)SIZE);j++)
    {
        cudaMemcpy(devA,A,SIZE*sizeof(int),cudaMemcpyHostToDevice);
        max<<<1,SIZE/pow(2,j)>>>(devA,devC);
        cudaMemcpy(&A,devC,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
    }
    // Final comparison of the two surviving candidates on the host.
    A[0] < A[1]?printf("Min is:%d",A[0]) : printf("Min is:%d",A[1]);
    cudaFree(devA);
    cudaFree(devC);
    return 0;
}
|
23,827 |
// Cudafy1.Program
extern "C" __global__ void thekernel();
// Cudafy1.Program
// Cudafy-generated stub: the kernel body is intentionally empty.
extern "C" __global__ void thekernel()
{
}
|
23,828 | #include "myqueue.cuh"
#include <iostream>
using namespace std;
// Construct an empty queue: no front/back node, zero element count.
template<class T>
MyQueue<T>::MyQueue() : frontPtr(NULL), backPtr(NULL), count(0)
{
}
// Returns true when the queue contains no elements.
template<class T>
__device__ __host__ bool MyQueue<T>::isEmpty() {
    return count == 0;
}
// Appends `data` at the back of the queue.
template<class T>
__device__ __host__ void MyQueue<T>::push(T data) {
    Node *newOne = new Node;
    // BUG FIX: was `newOne->date = data;`, which does not match the
    // `temp->data` member access used by pop().
    newOne->data = data;
    newOne->next = NULL;
    if (isEmpty()) {
        // First element: it becomes the front as well as the back.
        frontPtr = newOne;
    }
    else {
        backPtr->next = newOne;
    }
    backPtr = newOne;
    count++;
}
// Removes and returns the element at the front of the queue.
// On an empty queue, prints a diagnostic and returns a value-initialized T
// (the original fell off the end of the function on this path, which is
// undefined behavior for a non-void return type).
template<class T>
__device__ __host__ T MyQueue<T>::pop() {
    if (isEmpty()) {
        printf("Nothing inside\n");
        return T();
    }
    Node *temp = frontPtr;
    T tr = temp->data;
    if (frontPtr == backPtr) {
        // Removing the last element: queue becomes empty.
        frontPtr = NULL;
        backPtr = NULL;
    }
    else {
        frontPtr = frontPtr->next;
    }
    delete temp;
    count--;
    return tr;
}
23,829 | /* Block size X: 32 */
/* Block size X: 32 */
// One block per node: for each vertical level, accumulates the positive and
// negative parts of the two adjacent antidiffusive vertical fluxes into
// fct_plus / fct_minus. Flux rows have stride maxLevels + 1; output rows
// have stride maxLevels.
__global__ void fct_ale_b1_vertical(const int maxLevels, const int * __restrict__ nLevels, const double * __restrict__ fct_adf_v, double * __restrict__ fct_plus, double * __restrict__ fct_minus)
{
    const int nodeBase = blockIdx.x * maxLevels;
    const int levelCount = nLevels[blockIdx.x] - 1;
    for ( int level = threadIdx.x; level < levelCount; level += 32 )
    {
        const int fluxIndex = blockIdx.x * (maxLevels + 1) + level;
        const double fluxHere = fct_adf_v[fluxIndex];
        const double fluxBelow = fct_adf_v[fluxIndex + 1];
        fct_plus[nodeBase + level] = fmax(0.0, fluxHere) + fmax(0.0, -fluxBelow);
        fct_minus[nodeBase + level] = fmin(0.0, fluxHere) + fmin(0.0, -fluxBelow);
    }
}
23,830 | #include <stdio.h>
// Element-wise vector addition: C[i] = A[i] + B[i], guarded for the grid tail.
__global__ void VecAdd(double* A, double* B, double* C, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        C[idx] = A[idx] + B[idx];
    }
}
#define N 10000
/*
 * Host driver: fills two N-element vectors, adds them on the GPU, and
 * prints every element-wise sum.
 */
int main() {
    double a[N];
    double b[N];
    double c[N];
    int i;
    for (i = 0; i < N; ++i) {
        a[i] = 3.0*(double)i - 11.4;
        /* BUG FIX: the original computed 42.0 / 0 for i == 0, producing
           infinity; define b[0] explicitly instead. */
        b[i] = (i == 0) ? 0.0 : 42.0 / ((double)i);
    }
    size_t s = N * sizeof(double);
    double* da;
    cudaMalloc((void**)&da, s);
    double* db;
    cudaMalloc((void**)&db, s);
    double* dc;
    cudaMalloc((void**)&dc, s);
    cudaMemcpy(da, a, s, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, s, cudaMemcpyHostToDevice);
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; /* ceil-div */
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(da, db, dc, N);
    /* Blocking D2H copy also synchronizes with the kernel above. */
    cudaMemcpy(c, dc, s, cudaMemcpyDeviceToHost);
    for (i = 0; i < N; ++i) {   /* was a hard-coded 10000; use N */
        printf("%.3lf + %.3lf = %.3lf\n", a[i], b[i], c[i]);
    }
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
|
23,831 | #include <iostream>
#include <math.h>
#define THREADS_PER_BLOCK 1024
// Element-wise in-place addition: y[i] += x[i] for the first n elements.
__global__ void add(int n, float *x, float *y) {
    int idx = THREADS_PER_BLOCK * blockIdx.x + threadIdx.x;
    if (idx < n)
        y[idx] += x[idx];
}
// Unified-memory demo: y[i] += x[i] over 2^20 floats on the GPU, then the
// host verifies every element equals 3.0f and reports the error count.
int main(void) {
  int N = 1 << 20; // N = 2^20 = 1024*1024= 1.048.576
  int N_blocks = 1 + (N-1)/THREADS_PER_BLOCK; // ceiling(N/THREADS_PER_BLOCK)
  float *x; // = new float[N];
  float *y; // = new float[N];
  // Managed allocations are addressable from both host and device.
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  for (int i = 0; i < N; i++){
    x[i]= 1.0f;
    y[i]= 2.0f;
  }
  add<<<N_blocks,THREADS_PER_BLOCK>>>(N, x, y);
  // Kernel launch is asynchronous; synchronize before the host reads y.
  cudaDeviceSynchronize();
  float maxError = 0.0f;
  int contError = 0;
  for (int i = 0; i < N; i++){
    maxError = fmax(maxError,fabs(y[i]-3.0f));
    if (y[i] != 3.0) contError++;
  }
  std::cout << "Suma de " << N << " elementos" << std::endl;
  std::cout << "Número de errores: " <<contError << std::endl;
  std::cout << "Max error: " <<maxError << std::endl;
  cudaFree (x);
  cudaFree (y);
  return 0;
} |
23,832 | #include "includes.h"
// Element-wise vector sum d_c[i] = d_a[i] + d_b[i]; the guard protects the
// grid tail when the launch over-covers the N elements.
__global__ void sumaVectores (float * d_a, float *d_b, float * d_c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        d_c[i] = d_a[i] + d_b[i];
    }
}
23,833 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void printfinal(void);
int nsteps, tpoints;
float values[MAXPOINTS+2];
/*
 * Prompts until the file-scope globals tpoints and nsteps are within their
 * valid ranges. The scanf conversions are width-limited to 19 characters so
 * input can never overflow the 20-byte buffer (the original used a bare
 * "%s", a classic buffer overflow).
 */
void check_param(void) {
    char tchar[20];
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: "
               ,MINPOINTS, MAXPOINTS);
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*
 * Initializes a sine wave on the string and advances it nsteps time steps.
 * One thread per point (single-block launch expected).
 * All arithmetic is now single precision: the original mixed double
 * literals and double sin() into a float kernel, silently forcing every
 * operation through double math.
 */
__global__ void wave(float* oldval_d, float* values_d, float* newval_d, int nsteps, int tpoints) {
    int idx = threadIdx.x;
    /* Normalized position of this point along the string, in [0, 1]. */
    float x = (float)idx / (float)(tpoints - 1);
    values_d[idx] = sinf(2.0f * (float)PI * x);
    const float dtime = 0.3f, c = 1.0f, dx = 1.0f;
    float tau = c * dtime / dx;
    float sqtau = tau * tau;
    oldval_d[idx] = values_d[idx];
    /* Explicit leapfrog update of the 1-D wave equation. */
    for (int i = 0; i < nsteps; i++) {
        newval_d[idx] = (2.0f * values_d[idx]) - oldval_d[idx] + (sqtau * (-2.0f) * values_d[idx]);
        oldval_d[idx] = values_d[idx];
        values_d[idx] = newval_d[idx];
    }
}
// Prints the final string values from the global values[] array,
// ten per line.
void printfinal() {
    int i = 0;
    while (i < tpoints) {
        printf("%6.4f ", values[i]);
        if ((i + 1) % 10 == 0)
            printf("\n");
        ++i;
    }
}
/*
 * Entry point: parses <tpoints> <nsteps> from the command line, runs the
 * wave simulation on the GPU, and prints the final string state.
 */
int main(int argc, char *argv[]) {
    float *oldval_d, *values_d, *newval_d;
    /* BUG FIX: validate the command line before dereferencing argv[1]/argv[2]. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <tpoints> <nsteps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1],"%d",&tpoints);
    sscanf(argv[2],"%d",&nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    printf("Updating all points for all time steps...\n");
    cudaMalloc((void**)&oldval_d, sizeof(float) * tpoints);
    cudaMalloc((void**)&values_d, sizeof(float) * tpoints);
    cudaMalloc((void**)&newval_d, sizeof(float) * tpoints);
    /* NOTE(review): a single block is capped at maxThreadsPerBlock (1024 on
       current GPUs), so this launch silently fails for tpoints > 1024 —
       confirm the intended input range. */
    wave<<<1, tpoints>>>(oldval_d, values_d, newval_d, nsteps, tpoints);
    cudaMemcpy(values, values_d, sizeof(float) * tpoints, cudaMemcpyDeviceToHost);
    cudaFree(oldval_d);
    cudaFree(values_d);
    cudaFree(newval_d);
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
|
23,834 | #include <math.h>
#include <iostream>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <cstdio>
using namespace std;
#define SIZE 1024 * 1024
const int N = 1024;
float h_A[SIZE];
float h_B[SIZE];
float h_C[SIZE];
// Computes one element of C = A * B for square N x N row-major matrices.
// BUG FIX: the store to C is now inside the bounds guard; the original
// wrote C[row * N + col] unconditionally, an out-of-bounds store (of an
// uninitialized 0) whenever the launch grid overhangs the matrix.
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N) {
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if (row < N && col < N) {
        // Each thread computes one element of the output matrix.
        float tmpSum = 0;
        for (int i = 0; i < N; i++)
            tmpSum += A[row * N + i] * B[i * N + col];
        C[row * N + col] = tmpSum;
    }
}
// Host wrapper: launches the matrix-multiplication kernel over an N x N output.
// NOTE(review): blocks of a single thread (dim3(1,1)) with a 1024x1024 grid
// waste nearly all per-block parallelism; the comment below suggests up to
// 1024 threads per block were intended — confirm before relying on timings.
void matrixMultiplication(float *A, float *B, float *C, int N){
    // declare the number of blocks per grid and the number of threads per block
    // use 1 to 1024 threads per block
    dim3 threadsPerBlock(1, 1);
    dim3 blocksPerGrid(1024, 1024);
    matrixMultiplicationKernel<<<blocksPerGrid,threadsPerBlock>>>(A, B, C, N);
}
/*
 * Builds diagonal matrices A (= I) and B (= 2I) in the global host buffers,
 * multiplies them on the GPU, prints the result matrix and the kernel time.
 * Device buffers and timing events are now released before exit — the
 * original leaked all three cudaMalloc allocations and both events.
 */
int main(void)
{
    float *d_A, *d_B, *d_C;
    for(int i = 0; i < N; i++) {
        h_A[i * N + i] = 1;
        h_B[i * N + i] = 2;
    }
    cudaMalloc((void **) &d_A, SIZE * sizeof(float));
    cudaMalloc((void **) &d_B, SIZE * sizeof(float));
    cudaMalloc((void **) &d_C, SIZE * sizeof(float));
    cudaMemcpy(d_A, h_A, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matrixMultiplication(d_A, d_B, d_C, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C, d_C, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++) {
        for(int j = 0; j < N; j++) {
            cout << h_C[i * N + j] << " ";
        }
        cout << endl;
    }
    printf("Time for the kernel: %fms\n", time);
    /* Release GPU resources (missing in the original). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
23,835 | #include <iostream>
#include <unistd.h>
#include <stdlib.h>
#include "cuda.h"
using namespace std;
// Spins incrementing *dptr until the host clears *dwait, then writes 999.
// BUG FIX: the flag is read through a volatile pointer so the compiler must
// re-load it from memory on every iteration; the original plain read could
// legally be cached in a register, making the loop never observe the host's
// update.
__global__ void infinitekernel(float *dptr, int *dwait)
{
    volatile int *wait_flag = dwait;
    while (*wait_flag) *dptr += 1;
    *dptr = 999;
}
/*
 * Launches a kernel that spins until the host clears a device flag
 * (stream[1]), samples the value it increments once a second via copies on
 * stream[0], then reports the kernel's event-measured elapsed time.
 */
int main(void)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaStream_t stream[2];
    for (int i=0; i < 2 ; i++)
        cudaStreamCreate(&stream[i]);
    float *hptr;
    float *dptr;
    int *hwait;
    int *dwait;
    hptr = (float*)malloc(sizeof(float));
    hwait = (int*)malloc(sizeof(int));
    cudaMalloc((void **)&dptr, sizeof(float));
    cudaMalloc((void **)&dwait, sizeof(int));
    *hptr = 9;
    *hwait = 1;
    cudaMemcpyAsync(dptr, hptr, sizeof(float), cudaMemcpyHostToDevice, stream[0]);
    /* BUG FIX: the flag is an int; the original copied sizeof(float) bytes. */
    cudaMemcpyAsync(dwait, hwait, sizeof(int), cudaMemcpyHostToDevice, stream[0]);
    /* BUG FIX: the init copies run on stream[0] but the kernel on stream[1];
       synchronize so the kernel cannot start before dptr/dwait are set. */
    cudaStreamSynchronize(stream[0]);
    cout << "Start to record kernel elapsed time" << endl;
    cudaEventRecord(start, stream[1]);
    infinitekernel<<<1, 1, 0, stream[1]>>>(dptr,dwait);
    cudaEventRecord(stop, stream[1]);
    for(int i=0; i<5; i++)
    {
        sleep(1);
        cudaMemcpyAsync(hptr, dptr, sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
        /* BUG FIX: wait for the async copy before reading hptr on the host. */
        cudaStreamSynchronize(stream[0]);
        cout << "["<< i << " seconds]" <<"value = " << *hptr << endl;
    }
    *hwait = 0;
    cudaMemcpyAsync(dwait, hwait, sizeof(int), cudaMemcpyHostToDevice, stream[0]);
    cudaMemcpyAsync(hptr, dptr, sizeof(float), cudaMemcpyDeviceToHost, stream[0]);
    cudaStreamSynchronize(stream[0]);
    cout <<"[Finally]" << "value = "<< *hptr << endl;
    cudaEventSynchronize(stop);
    float elapsTime;
    cudaEventElapsedTime(&elapsTime, start, stop);
    cout << "Elapsed Time: " << elapsTime << endl;
    /* Cleanup (missing in the original). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    for (int i = 0; i < 2; i++)
        cudaStreamDestroy(stream[i]);
    cudaFree(dptr);
    cudaFree(dwait);
    free(hptr);
    free(hwait);
}
|
23,836 | #include "includes.h"
// Scales the ROW x COLUMNS row-major matrix `a` by the scalar `b` into `c`.
// BUG FIX: with idx = iy * COLUMNS + ix, ix is the COLUMN index and iy the
// ROW index, so the bounds test must be ix < COLUMNS && iy < ROW. The
// original had the comparisons swapped, which for non-square matrices both
// skipped valid elements and permitted out-of-bounds writes.
__global__ void MatMulInt(int *a, int b, int *c,int ROW, int COLUMNS){
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * COLUMNS + ix;
    if (ix < COLUMNS && iy < ROW)
    {
        c[idx] = a[idx] * b ;
    }
}
// Forward deformation-vector-field kernel: for every voxel (ix, iy, iz),
// combines the alpha fields (scaled by `volume`) with the beta fields
// (scaled by `flow`), sampled at the voxel center, into mx/my/mz.
// Launch geometry: 16 x 16 x 4 threads per block.
__global__ void kernel_forwardDVF(float *mx, float *my, float *mz, cudaTextureObject_t alpha_x, cudaTextureObject_t alpha_y, cudaTextureObject_t alpha_z, cudaTextureObject_t beta_x, cudaTextureObject_t beta_y, cudaTextureObject_t beta_z, float volume, float flow, int nx, int ny, int nz)
{
    const int ix = 16 * blockIdx.x + threadIdx.x;
    const int iy = 16 * blockIdx.y + threadIdx.y;
    const int iz = 4 * blockIdx.z + threadIdx.z;
    if (ix >= nx || iy >= ny || iz >= nz)
        return;
    const int id = ix + iy * nx + iz * nx * ny;
    // Texel centers sit at half-integer coordinates.
    const float cx = ix + 0.5f;
    const float cy = iy + 0.5f;
    const float cz = iz + 0.5f;
    mx[id] = tex3D<float>(alpha_x, cx, cy, cz) * volume + tex3D<float>(beta_x, cx, cy, cz) * flow;
    my[id] = tex3D<float>(alpha_y, cx, cy, cz) * volume + tex3D<float>(beta_y, cx, cy, cz) * flow;
    mz[id] = tex3D<float>(alpha_z, cx, cy, cz) * volume + tex3D<float>(beta_z, cx, cy, cz) * flow;
}
23,838 | #include <stdio.h>
// Adds the integer offset b[i] to the character a[i], one thread per element.
__global__
void test(char* a, int* b)
{
    int i = threadIdx.x;
    a[i] += b[i];
}
/*
 * Shifts each of the first six characters of "Hello " up by one on the GPU
 * (the per-character offsets in dec[], with 0 for the terminating NUL) and
 * prints the string before and after.
 */
int main()
{
    int dec[7] = {1, 1, 1, 1, 1, 1, 0};
    char str[7] = "Hello ";
    printf("%s", str);
    int* cuda_mem_int;
    char* cuda_mem_str;
    cudaMalloc((void**)&cuda_mem_str, sizeof(str));
    cudaMalloc((void**)&cuda_mem_int, sizeof(dec));
    cudaMemcpy(cuda_mem_str, str, sizeof(str), cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_mem_int, dec, sizeof(dec), cudaMemcpyHostToDevice);
    dim3 dimBlock(7);   /* one thread per character, including the NUL */
    dim3 dimGrid(1);
    test<<<dimGrid, dimBlock>>>(cuda_mem_str, cuda_mem_int);
    cudaMemcpy(str, cuda_mem_str, sizeof(str), cudaMemcpyDeviceToHost);
    cudaFree(cuda_mem_str);
    cudaFree(cuda_mem_int);  /* BUG FIX: this allocation was leaked */
    printf("%s\n", str);
    return 0;                /* BUG FIX: returning 1 signalled failure to the shell */
}
|
23,839 | // solveBCs.cu
//
//This file contains the function used solve for the boundary conditions of a pendulum
//Included Files
#include <iostream>
//Function Prototypes
// Functions found in Functs.cu
void matmult61(double A[6][6], double B[6], double C[6]);
//solve_BCs:
// Function used to solve for the acceleration and constraint force at each
// handle of a pendulum. This function does assume that the first handle is
// pinned (no translational acceleration, no rotational constraint force) and
// the second is free (no constraint force at all), as is the case for this
// simulation.
// Zs: assembled-body zeta data, read row-major with stride 26.
// Xs: unused here. NOTE(review): Xs is never read in this function —
// confirm whether it is kept only for interface compatibility.
// AF: output; per row r the layout is [A1[r], Fc1[r], A2[r], Fc2[r]].
void solve_BCs(double Zs[], double Xs[], double AF[])
{
    //Variable declarations
    double Fc1[6];      //Constraint force on handle 1
    double Fc2[6];      //Constraint force on handle 2
    double A1[6];       //Acceleration of handle 1
    double A2[6];       //Acceleration of handle 2
    double temp[6];     //Temporary matrix used for matrix operations
    double temp2[6][6]; //Temporary 6x6 slice extracted from Zs
    double val;         //Temporary value
    double M[3][4];     //Augmented matrix used to solve a 3x3 linear system
    //This loop fills the M matrix with the correct values in z11 and z13 in order to solve
    //for the force at handle 1. This can be done because handle 1 of a pendulum is pinned,
    //therefore no translational acceleration is allowed in the joint, and no rotational forces
    //are allowed in the joint
    for(int c = 0; c<3; c++) //Loop through 3 columns
    {
        for(int r = 0; r<3; r++) //Loop through 3 rows
        {
            M[r][c] = Zs[(c+3)+(r+3)*26]; //Save the correct values in z11
            if (c==0) //If the column is 0
            {
                M[r][3] = -1*Zs[(r+3)*26+12]; //Save the correct values in z13
            }
        }
    }
    //This loop solves the system of linear equations created in the loop above
    //by using gaussian elimination to turn the leftmost 3x3 matrix in M into the
    //identity matrix
    for(int s=0; s<3;s++) //Loop through 3 rows
    {
        val = M[s][s]; //Set the temporary value so it is not overwritten during the loop
        for(int r=0; r<4;r++) //Loop through every column
        {
            M[s][r]=M[s][r]/val; //Divide every element in the row by the first non zero element
        }
        for(int j =0; j<3; j++) //Loop through 3 rows
        {
            val=M[j][s]; //Set the temporary value so it is not overwritten during the loop
            if( s!=j) //If the current iteration is not on the diagonal
            {
                for(int l=0; l<4; l++) //Loop through every column
                {
                    //Subtract the rows to produce zeros below the leading 1
                    M[j][l]=M[j][l]-(M[s][l]*val);
                }
            }
        }
    }
    //After completing the loop above, the last column of M holds the translational
    //constraint force on the first handle of the body
    for(int s=0; s<6;s++) //Loop through every row
    {
        //Set the constraint force on the second handle to 0 because this end is free
        Fc2[s] = 0;
        if (s>=3) //If the row index is in the translational part (rows 3..5)
        {
            Fc1[s] = M[s-3][3]; //Save the translational force from M into Fc1
        }
        else
        {
            Fc1[s] = 0; //Set the rest of Fc1 to 0 because there is no rotational force
        }
    }
    //Extract the 6x6 z11 block from Zs
    for(int r = 0; r<6; r++)
    {
        for(int c=0; c<6; c++)
        {
            temp2[r][c]=Zs[c+r*26]; //z11
        }
    }
    matmult61(temp2, Fc1, temp); //Perform z11*Fc1 and save the result in temp
    for(int s = 0; s<6; s++) //Loop through every row
    {
        A1[s]=temp[s]+Zs[s*26+12]; //Find and save A1
    }
    //Extract the 6x6 z21 block from Zs (offset 13 into each row)
    for(int r = 0; r<6; r++)
    {
        for(int c=0; c<6; c++)
        {
            temp2[r][c]=Zs[c+r*26+13]; //z21
        }
    }
    matmult61(temp2, Fc1, temp); //Perform z21*Fc1 and save the result in temp
    for(int s = 0; s<6; s++) //Loop through every row
    {
        A2[s]=temp[s]+Zs[s*26+25]; //Find and save A2
    }
    //Interleave the results into AF with stride 4 per row
    for(int r =0; r<6; r++) //Loop through every row
    {
        AF[r*4]=A1[r]; //Save A1 into AF
        AF[r*4+1]=Fc1[r]; //Save Fc1 into AF
        AF[r*4+2]=A2[r]; //Save A2 into AF
        AF[r*4+3]=Fc2[r]; //Save Fc2 into AF
    }
}
//The function below is used for debugging and printing purposes only and can be
//removed at any time
// Debug helper: prints the 3x4 augmented matrix M, tab-separated,
// one row per line.
void printM(double M[3][4])
{
    for (int row = 0; row < 3; row++)
    {
        for (int col = 0; col < 4; col++)
        {
            std::cout << M[row][col] << "\t";
        }
        std::cout << std::endl;
    }
}
|
23,840 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <unistd.h>
// One single-thread block per element: c[i] = a[i] + b[i] with i = block index.
__global__ void add(int *a, int *b, int *c){
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
#define N 512*512*1024
// Fills the first n entries of p with pseudo-random values from rand().
// Always returns 0.
int random_ints(int *p, int n){
    for (int i = 0; i < n; i++) {
        p[i] = rand();
    }
    return 0;
}
/*
 * Adds two very large (N-element) integer vectors on device 1, one
 * single-thread block per element, and prints the last result (100 + 200).
 */
int main(){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    /* N * sizeof(int) is ~1 GiB; compute the byte count in size_t so the
       multiplication cannot overflow a 32-bit int. */
    size_t size = (size_t)N * sizeof(int);
    cudaSetDevice(1);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    /* BUG FIX: at these sizes allocation failure is realistic; the original
       wrote through a NULL pointer on failure. */
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    random_ints(a,N);
    random_ints(b,N);
    /* Pin known values into the last slot so the result is checkable. */
    a[N-1] = 100;
    b[N-1] = 200;
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    add<<<N,1>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    printf("result = %d\n",c[N-1]);
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
23,841 |
// Per-point double-derivative coefficients:
//   dd_j[a] = (2/dy) * eta[a], dd_k[a] = (2/dz) * xi[a],
// plus the single scalar dd_i[0] = 2/dx, written by global thread 0 only.
// NOTE(review): there is no bounds guard — the launch must exactly cover
// the eta/xi arrays; confirm with the call site.
__global__
void calc_dd_coeff(
    const double dx,
    const double dy,
    const double dz,
    const double * __restrict__ eta,
    const double * __restrict__ xi,
    double * __restrict__ dd_i,
    double * __restrict__ dd_j,
    double * __restrict__ dd_k
)
{
    size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Only one dd_i value exists; the first global thread writes it.
    // (idx == 0 already implies blockIdx.x == 0, so the original's extra
    // blockIdx.x check was redundant.)
    if (idx == 0)
        dd_i[0] = 2.0 / dx;
    dd_j[idx] = (2.0 / dy) * eta[idx];
    dd_k[idx] = (2.0 / dz) * xi[idx];
}
|
23,842 | /* Program to compute Pi using Monte Carlo methods */
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <curand_kernel.h>
#define SEED 35791246
// Monte Carlo trial, one per thread: draw a uniform point in the unit
// square and bump this thread's counter if it lands inside the quarter
// circle of radius 1.
__global__ void getcount(int *count_dev) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    //init random number seed by taking clock() value
    curandState_t rng;
    curand_init(clock(), tid, 0, &rng);
    //get random x and y between [0,1]
    double px = (double)curand_uniform(&rng);
    double py = (double)curand_uniform(&rng);
    double dist2 = px * px + py * py;
    //check if the point is inside the circle; if yes, increment the count
    if (dist2 <= 1) {
        count_dev[tid] += 1;
    }
}
/*
 * Estimates pi by Monte Carlo on the GPU. argv[1] requests the number of
 * trials; only nb_blocks * block_size threads are actually launched, so pi
 * is computed over that many trials (the original divided the hit count by
 * niter, under-estimating pi whenever niter was not a multiple of 512, and
 * crashed with no argument or niter < 512).
 */
int main(int argc, char** argv) {
    //local variables
    int niter = 0;
    double pi;
    int *count_host;
    int final_count = 0;
    //device variables
    int *count_dev;
    if (argc < 2) {
        fprintf(stderr, "usage: %s <iterations>\n", argv[0]);
        return 1;
    }
    niter = atoi(argv[1]);
    int block_size = 512;
    int nb_blocks = niter/block_size;
    if (nb_blocks < 1) {
        fprintf(stderr, "iterations must be at least %d\n", block_size);
        return 1;
    }
    /* Number of trials actually performed by the launch. */
    int trials = nb_blocks * block_size;
    int size = sizeof(int) * niter;
    //allocate memory for count on host
    count_host = (int *)malloc(size);
    memset(count_host, 0, size);
    //allocate memory for count on device
    cudaMalloc((void **) &count_dev, size);
    //copy initial counters (all 0) from host to device
    cudaMemcpy(count_dev, count_host, size, cudaMemcpyHostToDevice);
    getcount<<<nb_blocks, block_size>>>(count_dev);
    cudaMemcpy(count_host, count_dev, size, cudaMemcpyDeviceToHost);
    for (int i=0; i<trials; i++) {
        final_count += count_host[i];
    }
    pi = (((double)final_count)/trials)*4;
    printf("# of trials= %d , estimate of pi is %.16f \t\n",trials,pi);
    free(count_host);
    cudaFree(count_dev);
    return 0;
}
|
23,843 | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
// Debug kernel: prints every in-range element of the size x size matrix,
// one element per (i, j) thread.
__global__ void printMatrix(float **d_matrix, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row >= 0 && row < size && col >= 0 && col < size) {
        printf("i is %d, j is %d, %f \n", row, col, d_matrix[row][col]);
    }
}
// Normalizes the pivot row: divides row `pivot` of both the working matrix
// and the inversion-in-progress by `firstElement` (the pivot value) so the
// pivot entry becomes 1.
__global__ void changeFirstElementToOne(float **d_matrix, float **d_inversion, int pivot, int size, float firstElement) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row != pivot) return;
    if (col >= 0 && col < size) {
        d_matrix[row][col] /= firstElement;
        d_inversion[row][col] /= firstElement;
    }
}
// Gauss-Jordan elimination step for pivot `pivot`: subtracts
// d_matrix[i][pivot] times the (already normalized) pivot row from every
// other row, in both the working matrix and the inversion-in-progress.
// The pivot column of d_matrix is skipped here (j != pivot) and cleared
// afterwards by setPivotColumnToZero.
// NOTE(review): the multiplier d_matrix[i][pivot] is never written by this
// kernel (only j != pivot entries of d_matrix change, and row `pivot` is
// excluded), so the concurrent reads appear safe — but correctness still
// depends on changeFirstElementToOne having run for this pivot first.
__global__ void GJKernel(float **d_matrix, float **d_inversion, int pivot, int size) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i >= 0 && i < size && i != pivot && j < size && j >= 0) {
        if (j != pivot) {
            d_matrix[i][j] = d_matrix[i][j] - d_matrix[i][pivot] * d_matrix[pivot][j];
        }
        d_inversion[i][j] = d_inversion[i][j] - d_matrix[i][pivot] * d_inversion[pivot][j];
    }
}
// Clears column `pivot` of the working matrix (all rows except the pivot
// row itself), completing the elimination step performed by GJKernel.
__global__ void setPivotColumnToZero(float **d_matrix, int pivot, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (col == pivot && row >= 0 && row < size && row != pivot) {
        d_matrix[row][col] = 0.0;
    }
}
// Read an N x N matrix from "test100.txt", invert it on the GPU with
// Gauss-Jordan elimination, and report the elapsed time.
// Fixed: the cleanup previously called free() on memory obtained with
// new[] (undefined behavior) and leaked every per-row host and device
// buffer; each allocation is now released with its matching deallocator.
int main(void) {
    // read in data
    std::ifstream file_("test100.txt");
    if (!file_) {
        std::cout << "Cannot open file.\n";
        return 0;
    }
    int size; // size of the matrix
    file_ >> size;
    // host copy of the matrix to invert (array of row pointers)
    float **matrix = new float*[size];
    for (int i = 0; i < size; i++) {
        matrix[i] = new float[size];
    }
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            file_ >> matrix[i][j];
        }
    }
    // d_matrix / d_inversion are device arrays of device row pointers;
    // the *_h arrays are host-side staging copies of those row pointers.
    float **inversion, **d_inversion;
    float **d_inversion_h = (float**)malloc(size * sizeof(float *));
    float **d_matrix;
    float **d_matrix_h = (float**)malloc(size * sizeof(float *));
    // alloc space for the device row-pointer tables
    cudaMalloc((void **)&d_inversion, size * sizeof(float*));
    cudaMalloc((void **)&d_matrix, size * sizeof(float*));
    // host result buffer, initialized to the identity matrix
    inversion = (float**)malloc(size * sizeof(float *));
    for (int i = 0; i < size; i++) {
        inversion[i] = (float*)malloc(size * sizeof(float));
        for (int j = 0; j < size; j++) {
            inversion[i][j] = (i == j) ? 1.0f : 0.0f;
        }
    }
    // copy each row to the device, then the row-pointer tables themselves
    for (int i = 0; i < size; i++) {
        cudaMalloc((void**)&(d_matrix_h[i]), size * sizeof(float));
        cudaMemcpy(d_matrix_h[i], matrix[i], size * sizeof(float), cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_matrix, d_matrix_h, size * sizeof(float*), cudaMemcpyHostToDevice);
    for (int i = 0; i < size; i++) {
        cudaMalloc((void**)&(d_inversion_h[i]), size * sizeof(float));
        cudaMemcpy(d_inversion_h[i], inversion[i], size * sizeof(float), cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_inversion, d_inversion_h, size * sizeof(float*), cudaMemcpyHostToDevice);
    // 8x8 thread blocks, enough blocks to cover a size x size grid
    dim3 threadsPerBlock(8, 8);
    dim3 numBlocks((size - 1 + threadsPerBlock.x)/threadsPerBlock.x, (size - 1 + threadsPerBlock.y)/threadsPerBlock.y);
    struct timespec cudalustart = {0,0}; //time of constructing GF
    struct timespec cudaluend = {0,0};
    clock_gettime(CLOCK_REALTIME,&cudalustart);
    // Gauss-Jordan elimination, one pivot per iteration.
    // NOTE(review): no pivoting — a zero on the diagonal divides by zero.
    for (int i = 0; i < size; i++) {
        float firstElement;
        cudaMemcpy(&firstElement, &d_matrix_h[i][i], sizeof(float), cudaMemcpyDeviceToHost);
        changeFirstElementToOne<<<numBlocks, threadsPerBlock>>>(d_matrix, d_inversion, i, size, firstElement);
        GJKernel<<<numBlocks, threadsPerBlock>>>(d_matrix, d_inversion, i, size);
        setPivotColumnToZero<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size);
    }
    // copy result rows back (blocking copies also synchronize with the kernels)
    for (int i = 0; i < size; i++) {
        cudaMemcpy(inversion[i], d_inversion_h[i], size * sizeof(float), cudaMemcpyDeviceToHost);
    }
    clock_gettime(CLOCK_REALTIME,&cudaluend);
    std::cout<<"The time is "<<(cudaluend.tv_sec-cudalustart.tv_sec)*1000+(cudaluend.tv_nsec-cudalustart.tv_nsec)/1000000<<"ms\n";
    // clean up: match each allocator with its deallocator
    // (new[] -> delete[], malloc -> free, cudaMalloc -> cudaFree)
    for (int i = 0; i < size; i++) {
        delete[] matrix[i];
        free(inversion[i]);
        cudaFree(d_matrix_h[i]);
        cudaFree(d_inversion_h[i]);
    }
    delete[] matrix;
    free(inversion);
    free(d_matrix_h); free(d_inversion_h);
    cudaFree(d_matrix); cudaFree(d_inversion);
    return 0;
} |
23,844 | #include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdlib.h>
#define N 5
// Accumulate each thread's product a[i]*b[i] into the single-int result *c.
// Fixed: the original `*c += ...` is an unsynchronized read-modify-write
// racing across all threads of the block, so the sum was nondeterministic;
// atomicAdd serializes the accumulation correctly.
__global__ void add(int *a, int *b, int *c) {
    atomicAdd(c, a[threadIdx.x] * b[threadIdx.x]);
}
// Print the first five entries of `array`, space separated, then a newline.
void print_five(int* array){
    int idx = 0;
    while (idx < 5) {
        printf("%d ", array[idx]);
        ++idx;
    }
    printf("\n");
}
// Fill a[0..n-1] with pseudo-random integers in [0, 9].
void random_ints(int *a, int n){
    for (int k = 0; k < n; ++k) {
        a[k] = rand() % 10;
    }
}
// Dot product of two random N-element vectors via the add() kernel.
// Fixed three defects: `int *c` was dereferenced while uninitialized (UB);
// the device->host result copy transferred `size` bytes (N ints) into a
// single int; and d_c was never freed.
int main(void) {
    int *a, *b;               // host input vectors
    int c = 0;                // host result (was an uninitialized pointer)
    int *d_a, *d_b, *d_c;     // device copies
    int size = N * sizeof(int);
    // Alloc space for device copies (d_c holds a single int)
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, sizeof(int));
    // Alloc space for host copies and set up input values
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, &c, sizeof(int), cudaMemcpyHostToDevice);
    // Launch add() kernel on GPU with one block of N threads
    add<<<1,N>>>(d_a, d_b, d_c);
    // Copy result back to host (sizeof(int), not N ints)
    cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    print_five(a);
    print_five(b);
    printf("c: %d\n", c);
    // Cleanup (also free d_c, previously leaked)
    free(a); free(b);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
23,845 | #include "includes.h"
// Zero both cell buffers over the whole size_x*size_y domain using a
// grid-stride loop, so any launch configuration covers every element.
__global__ void initialize_cells(CellT* dev_cells, CellT* dev_next_cells, int size_x, int size_y) {
    const int total = size_x * size_y;
    const int stride = blockDim.x * gridDim.x;
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    while (idx < total) {
        dev_cells[idx] = 0;
        dev_next_cells[idx] = 0;
        idx += stride;
    }
} |
23,846 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define THREADS_PER_BLOCK 512
float *CPU_big_dot(float *A, float *B, int N);
float *GPU_big_dot(float *A, float *B, int N);
__global__ void multiply(float *a, float *b, float *results, int *N);
/* Helper functions */
float *random(int N);
long long timestamp();
// Compare a CPU and a GPU dot product of two random vectors of 2^20 floats,
// reporting the values, relative error, wall times, and speedup.
int main(){
int n = 1 << 20; // 1024 * 1024
float *a = random(n);
float *b = random(n);
// Time the CPU version (timestamps are microseconds; /1e6 -> seconds).
long long start = timestamp();
float *rcpu = CPU_big_dot(a, b, n);
long long end = timestamp();
float tcpu = (end - start) / 1e6;
// Time the GPU version (includes device allocation/copy overhead).
start = timestamp();
float *rgpu = GPU_big_dot(a, b, n);
end = timestamp();
float tgpu = (end - start) / 1e6;
// Relative difference between the two results, in percent.
float error = fabs((*rgpu - *rcpu) / *rcpu) * 100;
printf("=========== Computed ===========\n");
printf(" CPU = %.4f\n", *rcpu);
printf(" GPU = %.4f\n", *rgpu);
printf(" Error = %.4f%%\n", error);
printf("================================\n\n");
// Clean up vectors and results
free(rgpu);
free(rcpu);
free(a);
free(b);
// Print out timing metrics
printf("============ Result ============\n");
printf(" Tcpu = %.4fs\n", tcpu);
printf(" Tgpu = %.4fs\n", tgpu);
printf(" Speedup = %.4f\n", tcpu / tgpu);
printf("================================\n");
return 0;
}
// Sequential dot product of A and B (length N).
// Returns a heap-allocated float the caller must free().
float *CPU_big_dot(float *A, float *B, int N){
    float *sum = (float *)calloc(1, sizeof(float));
    for (int k = 0; k < N; ++k) {
        sum[0] += A[k] * B[k];
    }
    return sum;
}
// GPU dot product: copy A and B to the device, compute elementwise products
// with the `multiply` kernel, copy them back, and reduce on the CPU.
// Returns a heap-allocated float the caller must free().
float *GPU_big_dot(float *A, float *B, int N){
// CPU variables
int size = sizeof(float) * N;
float *results = (float *)malloc(size);
float *result = (float *)calloc(1, sizeof(float));
// GPU variables (N is passed through device memory rather than by value)
int *d_N;
float *d_A, *d_B, *d_results;
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_results, size);
cudaMalloc((void **)&d_N, sizeof(int));
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, &N, sizeof(int), cudaMemcpyHostToDevice);
// One thread per element, ceil(N / THREADS_PER_BLOCK) blocks.
multiply<<<(N + THREADS_PER_BLOCK -
1)/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_A, d_B, d_results, d_N);
// Copy the results from GPU to CPU (this blocking copy also synchronizes
// with the kernel).
// NOTE(review): no cudaGetLastError() after the launch — kernel failures
// would go unnoticed.
cudaMemcpy(results, d_results, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_results);
cudaFree(d_N);
// Gather the per-element products and sum them up on the CPU
for(int i = 0; i < N; i++)
*result += results[i];
free(results);
return result;
}
// Elementwise product: results[i] = A[i] * B[i] for every i < *N.
__global__ void multiply(float *A, float *B, float *results, int *N){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= *N)
        return;
    results[tid] = A[tid] * B[tid];
}
// Return a heap-allocated array of N floats drawn from {1, ..., 100}.
// Caller owns the buffer and must free() it.
float *random(int N){
    float *out = (float *)malloc(sizeof(float) * N);
    for (int k = 0; k < N; ++k)
        out[k] = (float)((rand() % 100) + 1);
    return out;
}
// Current wall-clock time in microseconds since the Unix epoch.
long long timestamp(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
|
23,847 | // Codigo serial para resolver la ecuacion de difusion en 2D //
#include <iostream>
#include <fstream>
#include <cmath>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution), as a double.
double get_time()
{
    struct timeval tim;
    gettimeofday(&tim, NULL);
    return (double) tim.tv_sec + (tim.tv_usec/1000000.0);
}
// One explicit Euler step of the 2D heat equation on an N x N grid with
// spacing h, time step dt and diffusivity alpha. u is updated in place;
// u_prev receives a copy of the previous state. Boundary rows/columns are
// never written, which is how the boundary conditions are imposed.
void update (float *u, float *u_prev, int N, float h, float dt, float alpha)
{
    // Snapshot the current field.
    for (int idx = 0; idx < N * N; ++idx)
        u_prev[idx] = u[idx];
    // 5-point Laplacian stencil on interior points only.
    const float coef = alpha * dt / (h * h);
    for (int j = 1; j < N - 1; ++j)
    {
        for (int i = 1; i < N - 1; ++i)
        {
            const int I = j * N + i;
            u[I] = u_prev[I] + coef * (u_prev[I+1] + u_prev[I-1] + u_prev[I+N] + u_prev[I-N] - 4 * u_prev[I]);
        }
    }
}
// Driver: solve the 2D diffusion (heat) equation on an N x N mesh with
// explicit time stepping, then dump the temperature field to a text file.
// Usage: ./difusionCPU [N]   (default N = 512)
int main(int argc, char**argv)
{
int N;
if(argc == 1) {
N = 512;
} else if(argc == 2) {
N = atoi(argv[1]);
} else {
printf("\n Parametros no validos!"
"\n Uso: ./difusionCPU # malla"
"\n Uso: ./difusionCPU <N> # malla"
"\n");
exit(0);
}
// Allocate in CPU
// int N = 512;
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax-xmin)/(N-1);   // uniform mesh spacing
float dt = 0.00001f;           // time step
float alpha = 0.645f;          // diffusivity
float time = 0.4f;             // total simulated time
int steps = ceil(time/dt);
int I;
float *x = new float[N*N];
float *y = new float[N*N];
float *u = new float[N*N];
float *u_prev = new float[N*N];
std::cout<<"N = "<<N<<std::endl;
// Generate mesh and initial condition: u = 0 everywhere except the
// i == 0 and j == 0 edges, which start (and stay) at 200.
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
x[I] = xmin + h*i;
y[I] = ymin + h*j;
u[I] = 0.0f;
if ( (i==0) || (j==0))
{u[I] = 200.0f;}
}
}
// Time-stepping loop (timed).
double start = get_time();
for (int t=0; t<steps; t++)
{ update (u, u_prev, N, h, dt, alpha);
}
double stop = get_time();
double elapsed = stop - start;
std::cout<<"time = "<<elapsed<<std::endl;
// Write x, y, u triples, one mesh row per block, for plotting.
std::ofstream temperature("temperature_cpu.txt");
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
// std::cout<<u[I]<<"\t";
temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
}
temperature<<"\n";
//std::cout<<std::endl;
}
temperature.close();
// NOTE(review): x, y, u, u_prev (new[]) are never deleted; the OS reclaims
// them at exit, but delete[] would be cleaner.
} |
23,848 | #include <stdio.h>
// Kernel definition
//adds two vectors A and B of size N and stores the result into vector C:
// Elementwise vector addition C = A + B, one thread per element.
// NOTE(review): no bounds guard — the launch must supply exactly one
// thread per element (this file launches <<<1, N>>>).
__global__ void VecAdd(float* A, float* B, float *C)
{
    const int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// Add two 1024-element vectors on the GPU with a single thread block
// and print the sum of the result (expected: 3 * 1024).
int main()
{
    // 1024 is the per-block thread limit this launch relies on, since the
    // kernel runs as one block with one thread per element.
    const int N = 1024;
    const int bytes = N * sizeof(float);
    float *A = (float*)malloc(bytes);
    float *B = (float*)malloc(bytes);
    float *C = (float*)malloc(bytes);
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);
    for (int i = 0; i < N; ++i) {
        A[i] = 1.0f;
        B[i] = 2.0f;
        C[i] = 0.0f;
    }
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, bytes, cudaMemcpyHostToDevice);
    // One block of N threads.
    VecAdd<<<1, N>>>(d_A, d_B, d_C);
    cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
    float sum = 0.0f;
    for (int i = 0; i < N; ++i)
        sum += C[i];
    printf("Sum is: %f\n", sum);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
} |
23,849 | # include <stdio.h>
# include <math.h>
# include <sys/time.h>
# define N 1000000
# define RADIUS 100
# define THREADS 32
// Riemann-sum integration of sqrt(R^2 - x^2) over [0, R] (quarter-circle
// area). Each thread handles one x-subinterval; its partial area goes to
// area[threadIdx.x], with start[]/end[] holding per-thread diagnostics.
// Fixed: segmentArea was accumulated (+=) without ever being initialized,
// so every partial sum started from garbage; it now starts at 0.
__global__ void QuarterAreaOfCircle ( float *area , float *start, float *end){
    float segmentArea = 0.0f;   // was uninitialized
    float x, dx;
    // x starting value of each thread's subinterval
    float threadStartX = ((float)RADIUS/(float)blockDim.x);
    start[threadIdx.x] = (float)threadIdx.x * threadStartX;
    x = start[threadIdx.x];
    end[threadIdx.x] = x;
    // integration step
    dx = (float)RADIUS/(float)N;
    // accumulate rectangle areas over this thread's share of the N steps
    for(int i = 0; i < ((float)N/(float)blockDim.x); i++){
        x += dx;
        segmentArea += sqrt(fabs((float)RADIUS*(float)RADIUS-x*x)) * dx;
    }
    end[threadIdx.x] = dx;
    area[threadIdx.x] = segmentArea;
}
// Host driver: launch one block of THREADS threads to integrate a quarter
// circle of radius RADIUS, reduce the per-thread partial areas on the CPU,
// and print the full-circle estimate (quarter area * 4).
int main(int argc, char *argv[])
{
float *reduceArea_d, reduceArea[THREADS], Area = 0;
float *start_d, start[THREADS];
float *end_d, end[THREADS];
int i;
dim3 dimBlock(THREADS);
dim3 dimGrid(1);
// NOTE(review): this host-side zeroing is never copied to the device; the
// kernel overwrites every area[] slot anyway, so it is harmless but unused.
for( i = 0; i < dimBlock.x; i++){
reduceArea[i] = 0;
}
cudaMalloc( (void**) &reduceArea_d, sizeof(float) * THREADS );
cudaMalloc( (void**) &start_d, sizeof(float) * THREADS );
cudaMalloc( (void**) &end_d, sizeof(float) * THREADS );
QuarterAreaOfCircle<<<dimGrid, dimBlock>>>(reduceArea_d, start_d, end_d);
// Blocking copies: these also synchronize with the kernel launch above.
cudaMemcpy(reduceArea, reduceArea_d, sizeof(float)*dimBlock.x, cudaMemcpyDeviceToHost);
cudaMemcpy(start, start_d, sizeof(float)*dimBlock.x, cudaMemcpyDeviceToHost);
cudaMemcpy(end, end_d, sizeof(float)*dimBlock.x, cudaMemcpyDeviceToHost);
// Sequential reduction of the per-thread partial areas.
for(i = 0; i < dimBlock.x; i++){
Area += reduceArea[i];
printf("reduced area : %5.10f , grid : %d, area : %5.10f, sart : %5.10f, end : %5.10f \n", reduceArea[i], i, Area, start[i], end[i]);
}
// Quarter-circle area times 4 gives the full circle.
printf("area : %5.10f \n",Area*4);
cudaFree(reduceArea_d);
cudaFree(start_d);
cudaFree(end_d);
}
|
23,850 | #include "includes.h"
// Zero the strictly lower triangle (i > j) of the m x n row-major matrix
// `a`, leaving the diagonal and upper triangle intact.
// Fixed: removed the dead self-assignment `a[i*n + j] = a[i*n + j];` that
// followed the (brace-less) zeroing, and added braces for clarity.
// NOTE(review): there is no thread indexing here — every launched thread
// walks the entire matrix; with >1 thread all of them redundantly write 0
// to the same cells. Confirm the intended launch is <<<1, 1>>>.
__global__ void matrixTriUpper(float *a, int m, int n) {
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            if (i > j) {
                a[i*n + j] = 0;
            }
        }
    }
} |
23,851 | #include "includes.h"
// Tiled matrix multiply c = a * b for square n x n row-major matrices,
// staging tile_size x tile_size tiles of a and b in shared memory.
// Preconditions (not checked): blockDim.x == blockDim.y == tile_size,
// SHMEM_SIZE >= tile_size * tile_size, and n is a multiple of tile_size
// (the tile loop runs n / tile_size times with no edge handling).
// NOTE(review): there is no row/col bounds guard before the final store,
// so the grid must tile n exactly — confirm at the launch site.
__global__ void matrixMul(int *a, int *b, int *c, int n, int tile_size){
__shared__ int A[SHMEM_SIZE];
__shared__ int B[SHMEM_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
// global output row/column this thread computes
int row = by * tile_size + ty;
int col = bx * tile_size + tx;
int temp_sum = 0;
for (int i = 0; i < (n / tile_size); i++){
// stage one tile of a and the matching tile of b in shared memory
A[(ty * tile_size) + tx] = a[row * n + (i * tile_size + tx)];
B[(ty * tile_size) + tx] = b[(i * tile_size * n + ty * n) + col];
__syncthreads();
// partial dot product over this tile
for(int j = 0; j < tile_size; j++){
temp_sum += A[(ty * tile_size) + j] * B[(j * tile_size) + tx];
}
// barrier before the next iteration overwrites the tiles
__syncthreads();
}
c[(row * n) + col] = temp_sum;
} |
23,852 | /**
* Name : Veerakumar Natarajan
* Student Id: 200208042
*
* 2d convolution program
*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
 * CUDA Kernel Device code
 *
 * Computes the full 2D convolution of A and B into C, one thread per
 * output cell.
 * Fixed: C was zeroed *before* the bounds check, so threads outside the
 * row_c x col_c output (the launch grid is larger than C) wrote out of
 * bounds; the initializing store is now inside the guard.
 */
__global__ void
convol(float *A, float *B, float *C, int row_a, int row_b, int row_c, int col_a, int col_b, int col_c)
{
    int x = blockDim.y * blockIdx.y + threadIdx.y;   // output row
    int y = blockDim.x * blockIdx.x + threadIdx.x;   // output column
    if(x < row_c && y < col_c) {
        C[x * col_c + y] = 0.0;
        for(int i = 0; i < row_b; i++) {
            for(int j = 0; j < col_b; j++) {
                // accumulate B[i][j] * A[x-i][y-j] when the shifted index is inside A
                if(((x - i) < row_a && (x - i) >= 0) && ((y - j) < col_a && (y - j) >= 0))
                    C[x * col_c + y] += B[i * col_b + j] * A[(x - i) * col_a + (y - j)];
            }
        }
    }
}
/**
 * Host main routine: parse two matrices from the input file (separated by
 * a blank line), convolve them on the GPU, and print the result.
 */
int
main(int argc, char *argv[])
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    float *h_A, *h_B, *h_C, tmp;
    int row_a, row_b, row_c, col_a, col_b, col_c;
    int a_matrix = 1;   // 1 while scanning matrix A, 0 after the blank separator line
    int i, j;
    int size_a, size_b, size_c;
    std::ifstream file(argv[1]);
    std::string row;
    row_a=row_b=row_c=col_a=col_b=col_c=0;
    // First pass: count the rows and columns of matrix A and matrix B.
    // NOTE(review): the `while (iss.good())` token-count idiom can overcount
    // on lines with trailing whitespace — verify against the input format.
    while(std::getline(file, row)) {
        if(row.empty())
            a_matrix = 0;
        std::istringstream iss(row);
        if(a_matrix == 1) {
            col_a=0;
            while(iss.good()) {
                iss >> tmp;
                col_a++;
            }
            row_a++;
        } else {
            if(!row.empty()) {
                col_b=0;
                while(iss.good()) {
                    iss >> tmp;
                    col_b++;
                }
                row_b++;
            }
        }
    }
    // Output size of a full 2D convolution.
    row_c = row_a + row_b - 1;
    col_c = col_a + col_b - 1;
    // Element counts of matrix A, B and C
    size_a = row_a * col_a;
    size_b = row_b * col_b;
    size_c = row_c * col_c;
    // Allocate the host input vectors A, B and the host output vector C
    h_A = (float *)malloc(size_a * sizeof(float));
    h_B = (float *)malloc(size_b * sizeof(float));
    h_C = (float *)malloc(size_c * sizeof(float));
    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Second pass: read the values of matrix A and B from the input file
    std::ifstream file1(argv[1]);
    a_matrix = 1;
    i = j = 0;
    while(std::getline(file1, row)) {
        if(row.empty())
            a_matrix = 0;
        std::istringstream iss1(row);
        if(a_matrix == 1){
            while(iss1.good()) {
                iss1 >> tmp;
                h_A[i] = tmp;
                i++;
            }
        } else {
            if(!row.empty()) {
                while(iss1.good()) {
                    iss1 >> tmp;
                    h_B[j] = tmp;
                    j++;
                }
            }
        }
    }
    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size_a * sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size_b * sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size_c * sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the host input vectors A and B to device memory
    err = cudaMemcpy(d_A, h_A, size_a * sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size_b * sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Launch the 2D convolution kernel.
    // Fixed: the original used dim3 dimBlock(row_c, col_c, 1), which exceeds
    // the 1024-threads-per-block limit for all but tiny inputs (silent launch
    // failure). Use a fixed 16x16 block and a grid whose x tiles the col_c
    // columns and whose y tiles the row_c rows, matching the kernel's
    // indexing; the kernel bounds-checks the edges.
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid((col_c + dimBlock.x - 1) / dimBlock.x,
                 (row_c + dimBlock.y - 1) / dimBlock.y, 1);
    convol<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, row_a, row_b, row_c, col_a, col_b, col_c);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        // Fixed: this message previously named the wrong kernel ("vectorAdd").
        fprintf(stderr, "Failed to launch convol kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the device result vector back to the host (blocking; also
    // synchronizes with the kernel).
    err = cudaMemcpy(h_C, d_C, size_c * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free device global memory
    err = cudaFree(d_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_B);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_C);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Print the convolution result
    for(i = 0; i < row_c; i++) {
        for(j = 0; j < col_c; j++) {
            printf("%.3f ", h_C[i * col_c + j]);
        }
        printf("\n");
    }
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    // Reset the device and exit. cudaDeviceReset cleans up all state and
    // flushes profiling data before the application exits.
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
|
// Back-project a depth map into camera-space vertices, one thread per
// pixel. pp = principal point, fl = focal lengths; w is fixed at 1.
__global__ void gpu_KIDepthToVertices(const float *depthIn,
                                      float4 * vertOut, int *segMap,
                                      const int width,
                                      const int height,
                                      const float2 pp,
                                      const float2 fl) {
    const int px = blockIdx.x*blockDim.x + threadIdx.x;
    const int py = blockIdx.y*blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;
    const int idx = px + py*width;
    const float depth = depthIn[idx];// / 1000.0;
    vertOut[idx] = make_float4( (px - pp.x)*(depth/fl.x),
                                (py - pp.y)*(depth/fl.y),
                                depth,
                                //segMap[idx] == 20 ? 1.0f : 0.0f);
                                1.0f);
}
// Host wrapper: pick a 16x8 thread block, a grid that covers the whole
// width x height image, and launch the back-projection kernel.
void KIDepthToVertices(const float *depthIn, float4 *vertOut, int *segMap
, const int width, const int height, const float2 pp, const float2 fl) {
    const dim3 threads(16, 8, 1);
    const dim3 blocks( ceil( width / (float)threads.x), ceil( height / (float)threads.y ));
    gpu_KIDepthToVertices<<<blocks,threads>>>(depthIn, vertOut, segMap
    , width, height, pp, fl);
}
|
23,854 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: JIANG Yufan (email: jiangyufan2018@outlook.com) 2018-08-14
* $Updated by: LinYe (email: linye2015@outlook.com) 2019-07-30 float16 added
*/
#include "../../XDevice.h"
#include "../../XUtility.h"
#include "MultiplyDim.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
tensor multiplication of a tensor and a row vector
c = a * b + \alpha * c
where a is a tensor and b is a row vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c
>> colNum - number of columns of a and c (i.e., the size of b)
>> alpha - the scaling factor
*/
template <class T, bool alphaFired>
__global__
void KernelMultiplyWithRow(T * a, T * b, T * c, int rowNum, int colNum, T alpha)
{
    __shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;

    /* Fixed: the out-of-range early return used to come BEFORE
       __syncthreads(), so some threads of a block could exit without
       reaching the barrier while others waited on it — undefined behavior.
       Load the shared row vector first, let every thread hit the barrier,
       then bail out. */
    if (threadIdx.y == 0 && col < colNum)
        bv[threadIdx.x] = b[col];

    __syncthreads();

    if (col >= colNum || row >= rowNum)
        return;

    int offset = colNum * row + col;

    if (alphaFired)
        c[offset] = a[offset] * bv[threadIdx.x] + c[offset] * alpha;
    else
        c[offset] = a[offset] * bv[threadIdx.x];
}
/*
tensor multiplication of a tensor and a colum vector
c = a * b + \alpha * c
where a is a tensor and b is a colum vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c (i.e., the size of b)
>> colNum - number of columns of a and c
>> blockNum - size of a block (matrix), i.e., rowNum * colNum
>> blockNum - number of matrics
>> alpha - the scaling factor
*/
template <class T, bool alphaFired>
__global__
void KernelMultiplyWithCol(T * a, T * b, T * c, int rowNum, int colNum, int blockSize, int blockNum, T alpha)
{
    __shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];

    int colIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = colIndex % colNum;
    int block = colIndex / colNum;

    /* Fixed: the shared-memory load and barrier now happen before the
       out-of-range return, so every thread of the CUDA block reaches
       __syncthreads() (the original returned first — undefined behavior
       for a barrier). */
    if (threadIdx.x == 0 && row < rowNum)
        bv[threadIdx.y] = b[row];

    __syncthreads();

    if (row >= rowNum || block >= blockNum)
        return;

    int offset = block * blockSize + row * colNum + col;

    if (alphaFired)
        c[offset] = a[offset] * bv[threadIdx.y] + c[offset] * alpha;
    else
        c[offset] = a[offset] * bv[threadIdx.y];
}
/*
tensor multiplication
c = a * b + \alpha * c
where the size of b is equal to the n-th dimension of a,
i.e., a is multiplied with b by broadcasting
>> a - a tensor
>> b - another tensor whose size is equal to that of dimension n of a
>> c - where we put a * b + \alpha * c. we save it in a if c is NULL
>> n - the dimension index
>> alpha - the scaling factor
*/
void _CudaMultiplyDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE alpha)
{
CheckNTErrors(a && b && c, "Empty tensor input!");
CheckNTErrors(a->unitNum == c->unitNum, "Unmatched tensors in multiplication!");
CheckNTErrors(a->dataType == b->dataType && a->dataType == c->dataType,
"Unmatched data types in multiplication!");
CheckNTErrors(a->order == c->order, "The input tensors do not have the same order in multiplication!");
CheckNTErrors(!a->isSparse && !b->isSparse && !c->isSparse, "Dense tensors are required!");
CheckNTErrors(a->dimSize[n] == b->unitNum, "Wrong tensor size!");
// stride = product of dimensions after n, blockNum = product of dimensions
// before n. stride == 1 means b broadcasts along the last dimension (row
// kernel); otherwise b broadcasts down a column within each block.
int stride = 1;
int blockSize = a->dimSize[n];
int blockNum = 1;
for (int i = a->order - 1; i >= 0; i--) {
if (i > n)
stride *= a->dimSize[i];
else if (i < n)
blockNum *= a->dimSize[i];
}
int cudaGrids[3];
int cudaBlocks[3];
// switch to a's device for the launches; restored at the end
int devIDBackup = 0;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE) {
if (stride > 1) {
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
// alpha == 0 selects the template variant that skips the "+ alpha * c" term
if(alpha == (DTYPE)0.0F)
KernelMultiplyWithCol<DTYPE, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, alpha);
else
KernelMultiplyWithCol<DTYPE, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, alpha);
}
else if (stride == 1) {
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if(alpha == (DTYPE)0.0F)
KernelMultiplyWithRow<DTYPE, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, alpha);
else
KernelMultiplyWithRow<DTYPE, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, alpha);
}
else {
ShowNTErrors("Something is wrong!");
}
}
else if (a->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
// same dispatch as above, with alpha converted to half for the kernels
half alpha1 = __float2half(alpha);
if (stride > 1) {
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if (alpha == (DTYPE)0.0F)
KernelMultiplyWithCol<__half, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockSize, stride, blockSize * stride, blockNum, alpha1);
else
KernelMultiplyWithCol<__half, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockSize, stride, blockSize * stride, blockNum, alpha1);
}
else if (stride == 1) {
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if (alpha == 0.0F)
KernelMultiplyWithRow<__half, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockNum, blockSize, alpha1);
else
KernelMultiplyWithRow<__half, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockNum, blockSize, alpha1);
}
else {
ShowNTErrors("Something is wrong!");
}
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor)
|
23,855 | #include<stdio.h>
#define BLOCK_DIM 25
#define N 25
// Elementwise addition of two N x N row-major matrices: c = a + b.
__global__ void matadd(int *a, int *b, int *c)
{
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= N || row >= N)
        return;
    const int idx = col + row*N;
    c[idx] = a[idx] + b[idx];
}
// Build two N x N matrices (A[r][s] = r, B[r][s] = s), add them on the
// GPU, and print all three matrices.
int main(void)
{
    int a[N][N], b[N][N], c[N][N];
    int *d_a, *d_b, *d_c;
    const int bytes = sizeof(int) * N * N;
    printf("Elements of matA\n");
    for (int r = 0; r < N; ++r)
    {
        for (int s = 0; s < N; ++s)
        {
            a[r][s] = r;
            printf("%d\t", a[r][s]);
        }
        printf("\n");
    }
    printf("Elements of matB\n");
    for (int r = 0; r < N; ++r)
    {
        for (int s = 0; s < N; ++s)
        {
            b[r][s] = s;
            printf("%d\t", b[r][s]);
        }
        printf("\n");
    }
    printf("\n");
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
    // N == BLOCK_DIM here, so one BLOCK_DIM x BLOCK_DIM block covers the matrix.
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((int)(N / dimBlock.x), (int)(N / dimBlock.y));
    matadd<<<dimGrid, dimBlock>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, bytes, cudaMemcpyDeviceToHost);
    printf("Elements of MatC\n");
    for (int r = 0; r < N; ++r)
    {
        for (int s = 0; s < N; ++s)
        {
            printf("%d\t", c[r][s]);
        }
        printf("\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
23,856 | #include "includes.h"
// Re-project one sampled point through the basis d_N: for each output
// coordinate i (grid-stride loop over the first nRxns - istart entries),
// compute the dot product of row i of t(N) with the point and store it in
// row `index` of the scratch buffer d_umat (used here as d_tmp).
// NOTE(review): points is indexed as points[pointCount + pointsPerFile*j] —
// presumably column-major storage with one point per column selected by
// pointCount; confirm against the caller.
__global__ void reprojectPoint(double *d_N, int nRxns, int istart, double *d_umat, double *points, int pointsPerFile, int pointCount, int index){
int newindex = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// one output entry per loop iteration; the inner loop is the dot product
for(int i=newindex;i<nRxns-istart;i+=stride){
d_umat[nRxns*index+i]=0;//d_umat now is d_tmp
for(int j=0;j<nRxns;j++){
d_umat[nRxns*index+i]+=d_N[j+i*nRxns]*points[pointCount+pointsPerFile*j];//here t(N)*Pt
}
}
} |
extern "C"
// Logistic sigmoid. Fixed: use float literals and expf() so the whole
// expression stays in single precision — the original `1.0 / (1 + exp(-x))`
// silently promoted the division to double.
__device__ __forceinline__ float sigmoid_f(float x) {
    return 1.0f / (1.0f + expf(-x));
}
extern "C"
// Training-time gate probability: sigmoid of the shifted log_alpha.
// NOTE(review): the L0 hard-concrete formulation usually shifts by
// beta * log(-gamma / zeta); here the shift is beta * (-gamma / zeta) —
// verify this is intended.
__device__ __forceinline__ float training_q_fwd(const float log_alpha, const float beta, const float gamma, const float zeta) {
    const float shift = beta * (-gamma / zeta);
    return sigmoid_f(log_alpha - shift);
}
extern "C"
// Derivative of training_q_fwd with respect to log_alpha: s * (1 - s).
__device__ __forceinline__ float training_q_bwd(const float log_alpha, const float beta, const float gamma, const float zeta) {
    const float s = training_q_fwd(log_alpha, beta, gamma, zeta);
    return s * (1 - s);
}
extern "C"
// Deterministic test-time gate: stretch sigmoid(log_alpha) onto
// [gamma, zeta] and clamp the result to [0, 1].
__device__ __forceinline__ float test_q_fwd(const float log_alpha, const float beta, const float gamma, const float zeta) {
    const float stretched = sigmoid_f(log_alpha) * (zeta - gamma) + gamma;
    return min(1.0f, max(0.0f, stretched));
}
extern "C"
// Test-time forward: scale each weight by its channel's deterministic gate.
// 2D launch: x covers channels (cid), y covers elements within a channel
// group (gid); out-of-range threads do nothing.
__global__ void l0_weights_test_fwd(float *out_weights, const float *in_weights, const float *log_alpha,
const float beta, const float gamma, const float zeta, const int channel_size, const int group_size) {
    const int cid = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid = blockIdx.y * blockDim.y + threadIdx.y;
    if (cid < channel_size && gid < group_size) {
        const int idx = cid * group_size + gid;
        out_weights[idx] = test_q_fwd(log_alpha[cid], beta, gamma, zeta) * in_weights[idx];
    }
}
extern "C"
// Expected L0 (plus optional weighted L2) norm per channel.
// With weight_decay == 0 every (cid, gid) thread writes the identical value
// group_size * q to out_norm[cid] — a redundant same-value store; otherwise
// per-element contributions are accumulated with atomicAdd.
// NOTE(review): the atomicAdd branch assumes out_norm was zero-initialized
// by the caller — confirm.
__global__ void l0_norm_fwd(float *out_norm, const float *log_alpha, const float *weights, const float weight_decay, const float beta, const float gamma, const float zeta,
const int channel_size, const int group_size) {
int cid = blockIdx.x * blockDim.x + threadIdx.x;
int gid = blockIdx.y * blockDim.y + threadIdx.y;
if (cid >= channel_size || gid >= group_size)
return;
int idx = cid * group_size + gid;
if (weight_decay == 0.0f) {
out_norm[cid] = group_size * training_q_fwd(log_alpha[cid], beta, gamma, zeta);
} else {
// q * (0.5 * w^2 * weight_decay + 1), summed over the channel's group
float norm = training_q_fwd(log_alpha[cid], beta, gamma, zeta) * (0.5 * weights[idx] * weights[idx] * weight_decay + 1);
atomicAdd(out_norm + cid, norm);
}
}
extern "C"
// Backward pass of the L0 norm: accumulate the per-channel gate gradient
// atomically across group elements, and write the per-weight decay gradient.
__global__ void l0_norm_bwd(float *out_norm_grad, const float *in_norm_grad, const float *log_alpha, float *weight_grad, const float *weights, const float weight_decay,
const float beta, const float gamma, const float zeta, const int channel_size, const int group_size) {
    const int cid = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid = blockIdx.y * blockDim.y + threadIdx.y;
    if (cid >= channel_size || gid >= group_size)
        return;
    const int idx = cid * group_size + gid;
    const float upstream = in_norm_grad[0];
    // d norm / d log_alpha, scaled by the upstream gradient
    atomicAdd(out_norm_grad + cid, (0.5 * weights[idx] * weights[idx] * weight_decay + 1) * training_q_bwd(log_alpha[cid], beta, gamma, zeta) * upstream);
    // d norm / d weight: q * weight_decay * w, scaled by the upstream gradient
    weight_grad[idx] = training_q_fwd(log_alpha[cid], beta, gamma, zeta) * upstream * weights[idx] * weight_decay;
}
extern "C"
// Training-time forward: sample a hard-concrete gate per channel from the
// provided uniform noise and scale the weights by the clamped gate.
__global__ void l0_weights_fwd(float *out_weights, const float *in_weights, const float *log_alpha, const float *uniform_tensor,
const float beta, const float gamma, const float zeta, const int channel_size, const int group_size) {
    const int cid = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid = blockIdx.y * blockDim.y + threadIdx.y;
    if (cid >= channel_size || gid >= group_size)
        return;
    const int idx = cid * group_size + gid;
    const float u = uniform_tensor[cid];
    // concrete relaxation: sigmoid((logit(u) + log_alpha) / beta),
    // stretched onto [gamma, zeta] and clamped to [0, 1]
    float gate = sigmoid_f((log(u / (1 - u)) + log_alpha[cid]) / beta);
    gate = gate * (zeta - gamma) + gamma;
    gate = min(1.0f, max(0.0f, gate));
    out_weights[idx] = gate * in_weights[idx];
}
extern "C"
// Training-time backward: gradient w.r.t. the weights (gate * upstream) and
// w.r.t. log_alpha via the stretched-sigmoid reparameterization.
__global__ void l0_weights_bwd(float *out_weights_grad, float *out_log_alpha_grad, const float *in_weights_grad, const float *in_weights,
const float *log_alpha, const float *uniform_tensor, const float beta, const float gamma, const float zeta, const int channel_size, const int group_size) {
    const int cid = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid = blockIdx.y * blockDim.y + threadIdx.y;
    if (cid >= channel_size || gid >= group_size)
        return;
    const int idx = cid * group_size + gid;
    const float u = uniform_tensor[cid];
    const float s = sigmoid_f((log(u / (1 - u)) + log_alpha[cid]) / beta);
    const float stretched = s * (zeta - gamma) + gamma;
    const float w = in_weights[idx];
    const float gate = min(1.0f, max(0.0f, stretched));
    out_weights_grad[idx] = gate * in_weights_grad[idx];
    // d gate / d log_alpha (clamp ignored) = (zeta - gamma) * s * (1 - s) / beta
    out_log_alpha_grad[idx] = w * (zeta - gamma) * s * (1 - s) / beta * in_weights_grad[idx];
}
|
23,858 | #include "includes.h"
// Kind of lame, but just put static file-level variables here for now.
// Pointer to device results array.
float * dev_result = 0;
// Pointer to device data array.
float * dev_data = 0;
// Size of data/result sets (i.e. number of entries in array).
unsigned int testArraySize = 0;
// GPU function to converts the provided dBm value to mW.
// The power in milliwatts (P(mW)) is equal to 1mW times 10 raised by the
// power in decibel-milliwatts (P(dBm)) divided by 10:
// P(mW) = 1mW * 10 ^ (P(dBm) / 10)
// Device helper: convert a dBm value to milliwatts, P(mW) = 10^(P(dBm)/10).
__device__ float convertDbmToMw(const float dBm)
{
    const float exponent = dBm / 10.0f;
    return powf(10.0f, exponent);
}
// One thread per sample: result[i] = data[i] converted from dBm to mW.
// NOTE(review): there is no bounds check on i, so the launch configuration
// must cover exactly the array length (gridDim.x * blockDim.x == element
// count) or this reads/writes out of bounds -- confirm at the call site.
__global__ void convertDbmToMwKernal(float * result, const float * data)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
result[i] = convertDbmToMw(data[i]);
}
23,859 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// One thread per pixel: iterate z <- z^2 + c for max_iters steps and record
// whether |z|^2 stayed below upper_bound_squared (1 = bounded, 0 = escaped).
// lattice supplies the sample coordinates for both the real and imaginary axes.
extern "C" __global__ void mandelbrot_ker(float* lattice, float* mandelbrot_graph, int max_iters, float upper_bound_squared, int lattice_size)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= lattice_size * lattice_size)
        return;
    // Map the flat pixel id to lattice coordinates; rows are flipped so the
    // imaginary axis increases upward in the output image.
    const int col = tid % lattice_size;
    const int row = lattice_size - 1 - (tid / lattice_size);
    const float c_re = lattice[col];
    const float c_im = lattice[row];
    float z_re = 0.0f;
    float z_im = 0.0f;
    // Assume membership; clear it the moment the orbit escapes.
    mandelbrot_graph[tid] = 1;
    for (int it = 0; it < max_iters; ++it)
    {
        const float next_re = z_re * z_re - z_im * z_im + c_re;
        z_im = 2 * z_re * z_im + c_im;
        z_re = next_re;
        if ((z_re * z_re + z_im * z_im) > upper_bound_squared)
        {
            mandelbrot_graph[tid] = 0;
            break;
        }
    }
}
// Host wrapper: copy the lattice to the device, render the Mandelbrot
// membership map, and copy the result back into mandelbrot_graph
// (lattice_size^2 floats). The original version ignored every CUDA return
// code; all calls are now checked and reported to stderr.
extern "C" void launch_mandelbrot(float* lattice, float* mandelbrot_graph, int max_iters, float upper_bound, int lattice_size)
{
    // size_t sizing avoids int overflow for large lattices
    size_t num_bytes_lattice = sizeof(float) * (size_t)lattice_size;
    size_t num_bytes_graph = sizeof(float) * (size_t)lattice_size * (size_t)lattice_size;
    float *d_lattice = NULL;
    float *d_mandelbrot_graph = NULL;
    cudaError_t err;
    if ((err = cudaMalloc((void**)&d_lattice, num_bytes_lattice)) != cudaSuccess ||
        (err = cudaMalloc((void**)&d_mandelbrot_graph, num_bytes_graph)) != cudaSuccess ||
        (err = cudaMemcpy(d_lattice, lattice, num_bytes_lattice, cudaMemcpyHostToDevice)) != cudaSuccess)
    {
        fprintf(stderr, "launch_mandelbrot: CUDA error: %s\n", cudaGetErrorString(err));
        cudaFree(d_lattice);
        cudaFree(d_mandelbrot_graph);
        return;
    }
    // integer ceil-division replaces the original double round-trip
    int total = lattice_size * lattice_size;
    int grid_size = (total + 31) / 32;
    mandelbrot_ker <<<grid_size, 32>>> (d_lattice, d_mandelbrot_graph, max_iters, upper_bound * upper_bound, lattice_size);
    // cudaGetLastError catches launch-configuration failures; the blocking
    // memcpy surfaces any asynchronous execution error.
    if ((err = cudaGetLastError()) != cudaSuccess ||
        (err = cudaMemcpy(mandelbrot_graph, d_mandelbrot_graph, num_bytes_graph, cudaMemcpyDeviceToHost)) != cudaSuccess)
    {
        fprintf(stderr, "launch_mandelbrot: CUDA error: %s\n", cudaGetErrorString(err));
    }
    cudaFree(d_lattice);
    cudaFree(d_mandelbrot_graph);
}
|
23,860 | /*
* Copyright 2014 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
// Trivial kernel: each launched thread prints one line via device-side printf.
__global__ void mykernel(){
printf("Hello world from device!\n");
} /* end kernel */
int main(void)
{
// Launch a single device thread.
mykernel<<<1,1>>>();
// CUDA_CHECK inspects cudaPeekAtLastError: kernel launches do not return a
// status directly, so launch-configuration errors surface here.
CUDA_CHECK()
// The synchronize surfaces asynchronous execution errors and flushes the
// device printf buffer before the host message is printed.
CUDA_CALL( cudaDeviceSynchronize() );
printf("Hello World from Host\n");
return 0;
} /* end main */
|
23,861 | #define TPB2D 8
// D3Q15 lattice-Boltzmann LBGK time step for a lid-driven cavity.
// One thread per lattice node (X,Y,Z); fIn/fOut are speed-major SoA arrays
// with node index X + Y*Nx + Z*Nx*Ny. snl marks solid nodes (bounce-back),
// lnl marks lid nodes (velocity BC u_bc along +y), omega is the relaxation
// rate, ex/ey/ez/w are the 15 lattice velocities and weights.
// Assumes blockDim = (TPB2D, TPB2D, 1) -- the shared tile is sized for that.
// NOTE(review): in the staging loop below, EVERY thread of the block writes
// the entire TPB2D x TPB2D tile (all threads store the same values, and no
// __syncthreads() follows), and dof is not clamped to Nx/Ny for partial edge
// blocks -- verify intent; a one-entry-per-thread load plus __syncthreads()
// would be the conventional form.
__global__ void ldc_D3Q15_LBGK_ts(float * fOut, const float * fIn,
const int * snl,
const int * lnl, const float u_bc,
const float omega,const float * ex,
const float * ey, const float * ez,
const float * w, const int Nx,
const int Ny, const int Nz){
int X=threadIdx.x+blockIdx.x*blockDim.x;
int Y=threadIdx.y+blockIdx.y*blockDim.y;
int Z=threadIdx.z+blockIdx.z*blockDim.z;
if((X<Nx)&&(Y<Ny)&&(Z<Nz)){
int tid=X+Y*Nx+Z*Nx*Ny;
int dof;
//load fIn data into shared memory
__shared__ float fIns[TPB2D][TPB2D][15];
for(int spd=0;spd<15;spd++){
for(int y=0;y<TPB2D;y++){
for(int x=0;x<TPB2D;x++){
dof=(blockIdx.x*blockDim.x+x)+
(blockIdx.y*blockDim.y+y)*Nx+
(blockIdx.z*blockDim.z)*Nx*Ny;
fIns[y][x][spd]=fIn[spd*(Nx*Ny*Nz)+dof];
}
}
}
//compute density and velocity
// zeroth moment = density rho, first moments = momentum rho*u
float rho=0.; float ux=0.; float uy=0.; float uz=0.; float f_tmp;
for(int spd=0;spd<15;spd++){
f_tmp=fIns[threadIdx.y][threadIdx.x][spd];
rho+=f_tmp;
ux+=f_tmp*ex[spd];
uy+=f_tmp*ey[spd];
uz+=f_tmp*ez[spd];
}
ux/=rho; uy/=rho; uz/=rho;
//check for boundary condition and update
// lid node: add the forcing that drives (ux,uy,uz) toward (0,u_bc,0)
if(lnl[tid]==1){
for(int spd=1;spd<15;spd++){
f_tmp=3.0*(ex[spd]*(-ux)+ey[spd]*(u_bc-uy)+ez[spd]*(-uz));
fIns[threadIdx.y][threadIdx.x][spd]+=w[spd]*rho*f_tmp;
}
ux=0.; uy=u_bc;uz=0.;
}
// solid node: full bounce-back -- swap every opposite speed pair
if(snl[tid]==1){
ux=0.;uy=0.;uz=0.;
//-- bounce-back here as well..
// 1--2
f_tmp=fIns[threadIdx.y][threadIdx.x][2];
fIns[threadIdx.y][threadIdx.x][2]=fIns[threadIdx.y][threadIdx.x][1];
fIns[threadIdx.y][threadIdx.x][1]=f_tmp;
//3 -- 4
f_tmp=fIns[threadIdx.y][threadIdx.x][4];
fIns[threadIdx.y][threadIdx.x][4]=fIns[threadIdx.y][threadIdx.x][3];
fIns[threadIdx.y][threadIdx.x][3]=f_tmp;
//5 -- 6
f_tmp=fIns[threadIdx.y][threadIdx.x][6];
fIns[threadIdx.y][threadIdx.x][6]=fIns[threadIdx.y][threadIdx.x][5];
fIns[threadIdx.y][threadIdx.x][5]=f_tmp;
//7 -- 14
f_tmp=fIns[threadIdx.y][threadIdx.x][14];
fIns[threadIdx.y][threadIdx.x][14]=fIns[threadIdx.y][threadIdx.x][7];
fIns[threadIdx.y][threadIdx.x][7]=f_tmp;
//8--13
f_tmp=fIns[threadIdx.y][threadIdx.x][13];
fIns[threadIdx.y][threadIdx.x][13]=fIns[threadIdx.y][threadIdx.x][8];
fIns[threadIdx.y][threadIdx.x][8]=f_tmp;
//9--12
f_tmp=fIns[threadIdx.y][threadIdx.x][12];
fIns[threadIdx.y][threadIdx.x][12]=fIns[threadIdx.y][threadIdx.x][9];
fIns[threadIdx.y][threadIdx.x][9]=f_tmp;
//10--11
f_tmp=fIns[threadIdx.y][threadIdx.x][11];
fIns[threadIdx.y][threadIdx.x][11]=fIns[threadIdx.y][threadIdx.x][10];
fIns[threadIdx.y][threadIdx.x][10]=f_tmp;
//do not do relaxation on solid nodes since the result
//is annulled with the bounce-back.
}else{
//not a solid node, relaxation
// BGK collision: relax toward the second-order equilibrium fEq
float cu, fEq;
for(int spd=0;spd<15;spd++){
cu = 3.0*(ex[spd]*ux+ey[spd]*uy+ez[spd]*uz);
fEq=rho*w[spd]*(1.+cu+0.5*(cu*cu)-
(1.5)*(ux*ux+uy*uy+uz*uz));
fIns[threadIdx.y][threadIdx.x][spd]-=
omega*(fIns[threadIdx.y][threadIdx.x][spd]-fEq);
}
}
//now, everybody streams....
// destination node wraps periodically in all three directions;
// note the float lattice velocities are truncated to int offsets here
int X_t,Y_t,Z_t;
for(int spd=0;spd<15;spd++){
X_t=X+ex[spd];
Y_t=Y+ey[spd];
Z_t=Z+ez[spd];
if(X_t==Nx)
X_t=0;
if(Y_t==Ny)
Y_t=0;
if(Z_t==Nz)
Z_t=0;
if(X_t<0)
X_t=(Nx-1);
if(Y_t<0)
Y_t=(Ny-1);
if(Z_t<0)
Z_t=(Nz-1);
dof=X_t+Y_t*Nx+Z_t*Nx*Ny;
fOut[spd*Nx*Ny*Nz+dof]=fIns[threadIdx.y][threadIdx.x][spd];
}
}//if (X<Nx...
}
// Host launcher: one thread per lattice node, TPB2D x TPB2D threads per
// block in x/y and one z-slice of blocks per grid layer.
void ldc_D3Q15_LBGK_ts_cuda(float * fOut, const float * fIn, const int * snl,
const int * lnl, const float u_bc,
const float omega, const float * ex,
const float * ey, const float * ez,
const float * w, const int Nx,
const int Ny, const int Nz){
    const dim3 threads(TPB2D, TPB2D, 1);
    const dim3 blocks((Nx + TPB2D - 1) / TPB2D,
                      (Ny + TPB2D - 1) / TPB2D,
                      Nz);
    ldc_D3Q15_LBGK_ts<<<blocks, threads>>>(fOut, fIn, snl, lnl, u_bc,
                                           omega, ex, ey, ez, w, Nx, Ny, Nz);
}
|
23,862 | using Point = double3;
// Pointers to one particle's state (position, direction, remaining distance)
// inside the structure-of-arrays storage owned by View.
struct Ref {
Point* pos;
Point* dir;
double* distance;
};
// Structure-of-arrays view over `size` particles; operator[] yields a Ref
// that aliases element i of each component array (device code only).
struct View {
int size;
Point* pos;
Point* dir;
double* distance;
// Borrow pointers to particle i's components (no bounds check here).
__device__ Ref operator[](int i) const {
return {pos + i, dir + i, distance + i};
}
};
// Advance one particle: pos += dir * distance, component-wise.
__device__ inline void move_impl(const Ref& ref) {
    const double step = *ref.distance;
    Point& p = *ref.pos;
    const Point& d = *ref.dir;
    p.x += d.x * step;
    p.y += d.y * step;
    p.z += d.z * step;
}
// One thread per particle; guarded against the grid overshooting view.size.
__global__ void move(View view) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= view.size)
        return;
    move_impl(view[idx]);
}
|
23,863 | //
// main.c
// qr
//
// Created by Zia Ul-Huda on 21/11/2016.
// Copyright © 2016 TU Darmstadt. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
void showGPUMem();
// Abort with file/line context if a CUDA runtime call returned an error.
// Compiled out unless CUDA_ERROR_CHECK is defined (it is, above).
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
// Report the most recent asynchronous CUDA error (if any), dump GPU memory
// statistics, and abort. Enabled only when CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
showGPUMem();
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
/* err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
#endif
return;
}
// Dense row-major matrix: m rows, n columns, element (i,j) at v[i*n + j].
typedef struct {
int m, n;
double * v;
} mat_t, mat;
#define BLOCK_SIZE 16
#define MAX_INT 100
#define EPSILON 0.00000001
int numBlocks;
dim3 dimGrid, dimBlock;
int numBlocksSingle, numThreadsSingle;
//get current wall time
// Wall-clock time in seconds via gettimeofday; exits the process on failure.
double get_wall_time(){
    struct timeval now;
    if (gettimeofday(&now, NULL) != 0) {
        // could not read the clock -- abort, as the original did
        exit(-1);
    }
    // seconds plus microseconds folded into one double
    return (double)now.tv_sec + 1e-6 * (double)now.tv_usec;
}
//creates a new structure of mat type with m*n dimensions and
//returns its pointer
// Allocate an m x n host matrix with all elements zero-initialized.
// Note: calloc takes (element-count, element-size); the original call had
// the arguments swapped, which only worked because the byte total is a
// commutative product.
mat* matrix_new(int m, int n)
{
    mat *x = (mat*)malloc(sizeof(mat_t));
    x->v = (double*)calloc((size_t)m * n, sizeof(double));
    x->m = m;
    x->n = n;
    return x;
}
/**
* Creates a new structure of type mat
* on the device and initializes it. It returns
* the pointer to the structure in *x
*/
// Allocate an m x n matrix ON THE DEVICE: both the mat struct and its
// zero-initialized element array live in device memory; *x receives the
// device pointer to the struct (do not dereference it on the host).
void cuda_matrix_new(int m, int n, mat** x)
{
double* d_arr;
// host-side staging copy of the struct, filled in then memcpy'd down
mat temp;// =(mat_t*)malloc(sizeof(mat_t)) ;
temp.m = m;//temp->m = m;
temp.n = n;//temp->n = n;
//allocate mat struct on device
cudaMalloc((void**) x,sizeof(mat_t));
CudaCheckError();
//allocate array on device and set it to 0
cudaMalloc((void**) &d_arr, m*n*sizeof(double));
CudaCheckError();
cudaMemset(d_arr, 0, sizeof(double) * m * n);
CudaCheckError();
//store the device pointer in temp object
temp.v = d_arr; //temp->v = d_arr;
//copy the temp to device object
cudaMemcpy(*x, &temp, sizeof(mat_t), cudaMemcpyHostToDevice);
CudaCheckError();
// free(temp);
}
//delete a matrix
// Release a host matrix: the element array first, then the struct itself.
void matrix_delete(mat *m)
{
    free(m->v);
    free(m);
}
/**
* Free the memory of the structure pointed to by
* m on the device. Make sure to also free the memory
* of the elements of the matrix.
*/
// Free a device-resident matrix: copy the struct back to read its device
// array pointer, free the element array, then free the struct itself.
void cuda_matrix_delete(mat *m)
{
mat temp;
// Copy m to host
cudaMemcpy(&temp,m,sizeof(mat),cudaMemcpyDeviceToHost);
CudaCheckError();
// Free array in m
cudaFree(temp.v);
CudaCheckError();
// Free m
cudaFree(m);
CudaCheckError();
}
//calculate transpose of a matrix
// In-place transpose by swapping elements across the main diagonal.
// NOTE(review): each swap mixes strides (i*n+j vs j*m+i), which is only
// self-consistent when m->m == m->n -- treat this as square-only (here it
// is applied to Q, which is m x m); confirm before reusing elsewhere.
void matrix_transpose(mat *m)
{
int i,j;
for (i = 0; i < m->m; i++) {
for (j = 0; j < i; j++) {
double t = m->v[i*m->n+j];
m->v[i*m->n+j] = m->v[j*m->m+i];
m->v[j*m->m+i] = t;
}
}
}
/**
* Transpose the matrix on the device
*/
// Device in-place transpose: one thread per element; only threads strictly
// above the diagonal (row < col) swap, so each pair is handled exactly once.
// NOTE(review): same mixed-stride indexing as the host matrix_transpose --
// valid only for square matrices.
__global__
void cuda_matrix_transpose(mat* m){
//Calculate the row of current element
int row = blockIdx.y * blockDim.y + threadIdx.y;
//Calculate the column of current element
int col = blockIdx.x * blockDim.x + threadIdx.x;
//Only the thread of the upper element of each pair performs the swap
if(row<m->m && col<m->n && row<col){
double t = m->v[row*m->n+col];
m->v[row*m->n+col] = m->v[col*m->m+row];
// Finish swapping
m->v[col*m->m+row] = t;
}
}
//Create a new matrix and initialize its elements randomly
// Build an m x n host matrix filled with random integers in [0, MAX_INT).
mat* matrix_create(int m, int n)
{
    mat *x = matrix_new(m, n);
    // seed per creation; time(NULL) only has one-second resolution
    srand(time(NULL));
    for (int i = 0; i < m * n; i++) {
        x->v[i] = rand() % MAX_INT;
    }
    return x;
}
//multiplication of two matrixes
// Host reference multiply: returns a new matrix r = x * y, or NULL when the
// inner dimensions do not match.
mat* matrix_mul(mat *x, mat *y)
{
    if (x->n != y->m) return NULL;
    mat *r = matrix_new(x->m, y->n);
    for (int i = 0; i < x->m; i++) {
        for (int j = 0; j < y->n; j++) {
            // r starts zeroed (calloc), so accumulating in place is safe
            for (int k = 0; k < x->n; k++) {
                r->v[i*r->n+j] += x->v[i*x->n+k] * y->v[k*y->n+j];
            }
        }
    }
    return r;
}
/**
* Multiply matrices x and y on the device and store
* the result in r on the device. r contains already
* enough memory for the result matrix.
*/
// Device multiply r = x * y: one thread per output element, each thread
// running the full inner-product loop over x's columns.
// Caller must size r as x->m rows by y->n columns.
__global__
void cuda_matrix_mul(mat* x, mat* y, mat* r)
{
//calculate the row and column index of matrixes x and y respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < x->m && col < y->n){
double rValue=0;
//each thread computes one element of r
int k;
for(k=0; k < x->n; ++k)
rValue += x->v[row*x->n+k]*y->v[k*y->n+col];
r->v[row*r->n+col] = rValue;
}
}
//calculate minor of a matrix given int d. Set first d
//diagonal entries to 1 and and set the rest of elements of
//first d rows and columns to zero. Then copy rest of the
//elements from the given matrix and return the pointer to new
//object
// Householder "minor" of x at offset d: identity on the first d rows/cols
// (ones on the diagonal, zeros elsewhere -- the matrix starts zeroed) and a
// copy of x on the trailing (m-d) x (n-d) block. Returns a new matrix.
mat* matrix_minor(mat *x, int d)
{
    mat *m = matrix_new(x->m, x->n);
    for (int i = 0; i < d; i++) {
        m->v[i*m->n + i] = 1;
    }
    for (int i = d; i < x->m; i++)
        for (int j = d; j < x->n; j++)
            m->v[i*m->n + j] = x->v[i*x->n + j];
    return m;
}
/**
* Calculate minor of a matrix given int d on device
*/
// Device version of matrix_minor: one thread per element; writes 1 on the
// leading diagonal below d and copies x on the trailing block. m is assumed
// freshly zeroed (cuda_matrix_new memsets it).
__global__
void cuda_matrix_minor(mat* x, int d, mat* m){
//calculate the row and column index of matrixes x and y
//respectively
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < x->m && col < x->n){
if (row == col && row < d)
m->v[row*m->n+col]=1;
if(row >= d && row < x->m && col >= d && col < x->n)
m->v[row*m->n+col]=x->v[row*x->n+col];
}
}
// c = a + b * s
// c = a + s * b, element-wise over n entries; returns c for chaining.
double *vmadd(double a[], double b[], double s, double c[], int n)
{
    for (int i = 0; i < n; i++) {
        c[i] = a[i] + s * b[i];
    }
    return c;
}
/**
* c = a + b * s on device
*/
// Device version of vmadd: c = a + b * (*s), one thread per element.
__global__
void cuda_vmadd(double a[], double b[], double *s, double c[], int n){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i] * (*s);
}
// m = I - 2vv^T
// Householder reflector: returns the new n x n matrix I - 2 v v^T.
mat* vmul(double v[], int n)
{
    mat *x = matrix_new(n, n);
    // outer-product part: -2 * v[i] * v[j]
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            x->v[i*x->n + j] = -2 * v[i] * v[j];
    // then add the identity along the diagonal
    for (int i = 0; i < n; i++)
        x->v[i*x->n + i] += 1;
    return x;
}
/**
* m = I - 2vv^T on device
*/
// Device reflector build: m = I - 2 v v^T, one thread per element.
__global__
void cuda_vmul(double v[], int n, mat* m)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n)
        return;
    double value = -2 * v[row] * v[col];
    if (row == col)
        value += 1;
    m->v[row*m->n+col] = value;
}
// ||x||
// Euclidean norm ||x|| of an n-vector (0 for n == 0).
double vnorm(double x[], int n)
{
    double sum = 0;
    for (int i = 0; i < n; i++) {
        sum += x[i] * x[i];
    }
    return sqrt(sum);
}
/**
* Call with <<1,1>>
* ||x|| on device and result is given in *a.
* If flag is true (!= 0) a is multiplied with -1
*/
// Single-thread device norm: *a = ||x||, negated when flag is nonzero.
// Intended launch is <<<1,1>>>; only thread (block 0, thread 0) does work.
__global__
void cuda_vnorm(double x[], int n, double *a, int flag)
{
    if (blockIdx.x != 0 || threadIdx.x != 0)
        return;
    double sum = 0;
    for (int i = 0; i < n; i++)
        sum += x[i] * x[i];
    const double nrm = sqrt(sum);
    *a = flag ? -nrm : nrm;
}
// y = x / d
// y = x / d element-wise over n entries; returns y for chaining.
double* vdiv(double x[], double d, double y[], int n)
{
    for (int i = 0; i < n; i++) {
        y[i] = x[i] / d;
    }
    return y;
}
/**
* y = x / d on device
*/
// Device element-wise divide: y = x / (*d), one thread per entry.
__global__
void cuda_vdiv(double x[], double *d, double y[], int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    y[i] = x[i] / (*d);
}
// take c-th column of m, put in v
// Copy column c of m into the dense vector v; returns v for chaining.
double* mcol(mat *m, double *v, int c)
{
    for (int i = 0; i < m->m; i++) {
        v[i] = m->v[i*m->n + c];
    }
    return v;
}
/**
* Take c-th column of m, put in v on device
*/
// Device column extract: v = m[:, c], one thread per row.
__global__
void cuda_mcol(mat *m, double *v, int c)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= m->m)
        return;
    v[i] = m->v[i*m->n+c];
}
/**
* Initialize vector e where k-th element is set to 1
* and all other are 0 on device
*/
// e = k-th standard basis vector: e[k] = 1, every other entry 0.
__global__
void cuda_initialize_e(double* e, int n, int k){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    e[i] = (i == k) ? 1 : 0;
}
//visualize a matrix
// Print the matrix to stdout, one row per line, cells formatted "%8.3f",
// followed by a trailing blank line.
void matrix_show(mat *m)
{
int i,j;
for(i = 0; i < m->m; i++) {
for (j = 0; j < m->n; j++) {
printf(" %8.3f", m->v[i*m->n+j]);
}
printf("\n");
}
printf("\n");
}
//householder calculations
// Host Householder QR: builds one reflector q[k] per column that zeroes the
// subdiagonal of that column; on return *R = q[last]*...*q[0]*m (upper
// triangular) and *Q is the transpose of that reflector product, so m = Q*R.
void householder(mat *m, mat **R, mat **Q)
{
mat *q[m->m];
mat *z = m, *z1;
int i,k;
for (k = 0; k < m->n && k < m->m - 1; k++) {
double e[m->m], x[m->m], a;
// work on the minor that leaves the already-reduced rows/cols fixed
z1 = matrix_minor(z, k);
if (z != m) matrix_delete(z);
z = z1;
// x = k-th column of the minor; a = -sign(m[k][k]) * ||x||
mcol(z, x, k);
a = vnorm(x, m->m);
if (m->v[k*m->n+k] > 0) a = -a;
// e = normalized Householder vector (x + a*e_k) / ||x + a*e_k||
for (i = 0; i < m->m; i++)
e[i] = (i == k) ? 1 : 0;
vmadd(x, e, a, e, m->m);
vdiv(e, vnorm(e, m->m), e, m->m);
// q[k] = I - 2 e e^T, then advance the working matrix: z = q[k] * z
q[k] = vmul(e, m->m);
z1 = matrix_mul(q[k], z);
if (z != m) matrix_delete(z);
z = z1;
}
matrix_delete(z);
// accumulate Q = q[last] * ... * q[0], freeing intermediates as we go
*Q = q[0];
*R = matrix_mul(q[0], m);
for (i = 1; i < m->n && i < m->m - 1; i++) {
z1 = matrix_mul(q[i], *Q);
if (i > 1) matrix_delete(*Q);
*Q = z1;
matrix_delete(q[i]);
}
matrix_delete(q[0]);
// recompute R from the full accumulated product
z = matrix_mul(*Q, m);
matrix_delete(*R);
*R = z;
// Q currently holds the reflector product; transpose so that m = Q * R
matrix_transpose(*Q);
}
/**
* Householder calculations with calls to device kernels
*/
// Device Householder QR mirroring the host householder(): m, *R, *Q are
// device-resident matrices; `original` is the host copy of the input, used
// only for its dimensions and the sign test on the diagonal element.
// Relies on the file-global launch geometry (dimGrid/dimBlock,
// numBlocksSingle/numThreadsSingle) set up in main().
void cuda_householder(mat *m, mat **R, mat **Q, mat *original)
{
mat *q;
mat *z = m, *z1;
int k;
double *e, *x, *a;
// Alloc vector e
cudaMalloc((void**)&e, sizeof(double) * original->m);
CudaCheckError();
// Alloc vector x
cudaMalloc((void**)&x, sizeof(double) * original->m);
CudaCheckError();
// Alloc scalar a
cudaMalloc((void**)&a, sizeof(double));
CudaCheckError();
//showGPUMem();
for (k = 0; k < original->n && k < original->m - 1; k++) {
// Allocate and init matrix z1
cuda_matrix_new(original->m,original->n, &z1);
// One thread calculates one element of matrix z1
cuda_matrix_minor<<<dimGrid, dimBlock>>>(z, k, z1 ); // Attempt; alternative idea: (original, k, z1) if original->v == m->v
CudaCheckError();
if (z != m) cuda_matrix_delete(z);
z = z1;
// One thread calculates one element of vector x
cuda_mcol<<<numBlocksSingle,numThreadsSingle>>>(z, x, k); // Adapted from the sequential function
// z should provide a way to access z->v or m->v
CudaCheckError();
// sign decided on the HOST copy of the input (original), not the device data
int f = (original->v[k*original->n+k] > 0) ? 1 : 0;
// Call cuda_vnorm with only one thread
cuda_vnorm<<<1,1>>>(x, original->m, a, f); // Filled in: 1,1
CudaCheckError();
// One thread calculates one element of vector e
cuda_initialize_e<<<numBlocksSingle,numThreadsSingle>>>(e, original->m, k); // Filled in: (e, original->m, k)
CudaCheckError();
// One thread calculates one element of vector e
cuda_vmadd<<<numBlocksSingle, numThreadsSingle>>>(x, e, a, e, original->m); // Filled in: 1,1 and original->m
CudaCheckError();
// Call cuda_vnorm with only one thread
cuda_vnorm<<<1,1>>>(e, original->m, a, 0);
CudaCheckError();
// One thread calculates one element of vector e with cuda_vdiv
cuda_vdiv<<<numBlocksSingle,numThreadsSingle>>>(e, a, e, original->m); // Filled in: cuda_vdiv and 1,1
CudaCheckError();
// Allocate matrix q
cuda_matrix_new(original->m, original->m, &q);
// One thread calculates one element of matrix q
CudaCheckError();
cuda_vmul<<<dimGrid, dimBlock>>>(e, original->m, q);
CudaCheckError();
// Allocate matrix z1
cuda_matrix_new(original->m,original->n,&z1);
// One thread calculates one element of matrix z1
// Calculate matrix product z1 = q*z with cuda_matrix_mul
cuda_matrix_mul<<<dimGrid,dimBlock>>>(q,z,z1); // Written entirely from scratch
CudaCheckError();
if (z != m) cuda_matrix_delete(z);
z = z1;
// accumulate the reflector product into *Q (q itself becomes *Q at k==0)
if(k==0){
*Q = q;
}
else if(k>0){
cuda_matrix_new(original->m, original->m, &z1);
cuda_matrix_mul<<<dimGrid, dimBlock>>>(q, *Q, z1);
CudaCheckError();
cuda_matrix_delete(*Q);
*Q = z1;
cuda_matrix_delete(q);
}
}
// Free temporary storage on device
cudaFree(e);
CudaCheckError();
cudaFree(x);
CudaCheckError();
cudaFree(a);
CudaCheckError();
cuda_matrix_delete(z);
cuda_matrix_new(original->m, original->n, R);
// Result matrix R = (reflector product) * m
cuda_matrix_mul<<<dimGrid, dimBlock>>>(*Q, m, *R);
CudaCheckError();
// Result matrix Q: transpose the reflector product so m = Q * R
cuda_matrix_transpose<<<dimGrid, dimBlock>>>(*Q);
CudaCheckError();
}
/** Task2
* Deep copy of matrix x to the device.
* Return pointer to new structure on device in *dX
*/
// Deep copy of host matrix x to the device: allocates a device struct and a
// device element array, copies the elements, and returns the device struct
// pointer in *dX.
void copyToDevice(mat** dX, mat* x){
// host staging copy of the struct, memcpy'd down at the end
mat temp;
temp.m = x->m;
temp.n = x->n;
double* d_arr;
//allocate device matrix
cudaMalloc((void**)dX, sizeof(mat));
CudaCheckError();
//allocate device array
cudaMalloc((void**)&d_arr, x->m*x->n*sizeof(double));
CudaCheckError();
//copy contents of x array
cudaMemcpy(d_arr, x->v, x->m*x->n*sizeof(double), cudaMemcpyHostToDevice);
CudaCheckError();
//save d_arr in temp
temp.v = d_arr;
//copy the temp to device object
cudaMemcpy(*dX, &temp, sizeof(mat_t), cudaMemcpyHostToDevice);
CudaCheckError();
}
/**
* Deep copy of matrix dX to the host.
* Return pointer to new structure on host in *x
*/
// Deep copy of device matrix dX back to the host: allocates a fresh host
// struct and element array and returns them in *x. The struct is copied
// first so the device array pointer (and dimensions) can be read.
void copyToHost(mat** x, mat* dX){
*x = (mat*)malloc(sizeof(mat_t));
cudaMemcpy(*x, dX, sizeof(mat_t), cudaMemcpyDeviceToHost);
CudaCheckError();
double* temp = (double*)malloc(sizeof(double) * (*x)->m * (*x)->n);
// Copy array of dX to temp ((*x)->v still holds the DEVICE pointer here)
cudaMemcpy(temp, (*x)->v, sizeof(double) * (*x)->m * (*x)->n, cudaMemcpyDeviceToHost);
CudaCheckError();
// replace the device pointer with the freshly filled host array
(*x)->v = temp;
}
//check if two matrixes are equal with their corrsponding element's values being within an epsilon
// Compare two matrices element-wise: 1 when dimensions match and every
// difference is within EPSILON, 0 otherwise.
// Uses fabs: the original called abs(), which can resolve to the integer
// overload and truncate the difference toward zero, making any difference
// below 1.0 compare as equal regardless of EPSILON.
int is_equal(mat *m, mat *x){
    if (m->m != x->m || m->n != x->n) return 0;
    int i;
    for (i = 0; i < (m->m * m->n); ++i)
        if (fabs(m->v[i] - x->v[i]) > EPSILON) return 0;
    return 1;
}
// Query and print current GPU memory usage (used/free/total, in MB);
// exits on a cudaMemGetInfo failure.
void showGPUMem(){
// show memory usage of GPU
size_t free_byte ;
size_t total_byte ;
cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
if ( cudaSuccess != cuda_status ){
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(1);
}
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
printf("GPU memory usage: used = %f MB, free = %f MB, total = %f MB\n",
used_db/1024.0/1024.0, free_db/1024.0/1024.0,
total_db/1024.0/1024.0);
}
// Driver: create a random row x col matrix, factor it as Q*R on the GPU via
// Householder reflections, then verify that Q*R reproduces the input within
// EPSILON and report the elapsed wall time.
int main(int argc, char *argv[])
{
    if(argc != 3){
        puts("Usage: qr #rows #cols\n //#rows > 2 and #cols > 1\n");
        exit(0);
    }
    int row = atoi(argv[1]), col = atoi(argv[2]);
    if(row < 3 || col < 2){
        puts("Error: invalid number of rows or columns\n");
        exit(0);
    }
    // grid/block geometry for the 2D kernels, sized by the larger dimension
    int maxDim = (row > col) ? row : col;
    numBlocks = maxDim / BLOCK_SIZE;
    if(maxDim % BLOCK_SIZE) numBlocks++;
    dimGrid.x = numBlocks; dimGrid.y = numBlocks;
    // Every CUDA block is of size (x,y,z) = (BLOCK_SIZE,BLOCK_SIZE,1) threads
    dimBlock.x = BLOCK_SIZE; dimBlock.y = BLOCK_SIZE;
    // 1D geometry for the vector kernels: BLOCK_SIZE^2 threads per block
    numThreadsSingle = BLOCK_SIZE * BLOCK_SIZE;
    numBlocksSingle = maxDim/numThreadsSingle;
    if(maxDim % numThreadsSingle) ++numBlocksSingle;
    mat *R = NULL, *Q = NULL, *dX = NULL, *dQ = NULL, *dR = NULL;
    // create a random row*col matrix
    mat *x = matrix_create(row, col);
    fprintf(stderr,"matix x %d,%d,%p\n",x->m,x->n,x->v);
    double time_start = get_wall_time();
    // copy x to device
    copyToDevice(&dX, x);
    fprintf(stderr,"adress &X: %p\n",dX);
    // householder calculations on device
    cuda_householder(dX, &dR, &dQ, x);
    // copy the calculated dR and dQ to host
    copyToHost(&R, dR);
    copyToHost(&Q, dQ);
    double time_end = get_wall_time();
    // verification: Q * R should reproduce the input matrix
    mat* dM = NULL;
    cuda_matrix_new(x->m, x->n, &dM);
    cuda_matrix_mul<<<dimGrid, dimBlock>>>(dQ, dR, dM);
    // The original left this launch unchecked; keep the file's convention of
    // checking after every kernel launch.
    CudaCheckError();
    // copy resultant matrix to host
    mat* m = NULL;
    copyToHost(&m, dM);
    printf("Verification: ");
    if(is_equal(m, x))
        printf("Successful\n");
    else
        printf("Unsuccessful\n");
    printf("Time taken: %8.3f seconds\n",time_end - time_start);
    // release host and device resources
    matrix_delete(x);
    matrix_delete(R);
    matrix_delete(Q);
    matrix_delete(m);
    cuda_matrix_delete(dX);
    cuda_matrix_delete(dQ);
    cuda_matrix_delete(dR);
    cuda_matrix_delete(dM);
    fflush(stdout);
    return 0;
}
|
23,864 | #pragma once
#include <cuda_runtime.h>
#include <cuda.h>
#include <iostream>
#include <vector>
#include <string>
#include <stdexcept>
// Throw std::runtime_error with file/line, error name/description, and the
// offending call text when a CUDA API call does not return cudaSuccess.
// Used via the checkCudaErrors(val) macro below.
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess)
{
throw std::runtime_error(
std::string("ERROR: ")
+ file
+ ":"
+ std::to_string(line)
+ "\n"
+ cudaGetErrorName(err)
+ " "
+ "\n"
+ cudaGetErrorString(err)
+ " "
+ func
);
}
}
#define checkCudaErrors(val) check((val), #val, __FILE__, __LINE__)
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError(msg, __FILE__, __LINE__)
// Report the last pending CUDA error (with caller-supplied context) to
// stderr and terminate the process. Invoked via getLastCudaError(msg).
inline void __getLastCudaError(const char *errorMessage, const char *file,
const int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr,
"ERROR: %s(%i) : getLastCudaError() CUDA error :"
" %s : (%d) %s.\n",
file, line, errorMessage, static_cast<int>(err),
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// This will only print the proper error string when calling cudaGetLastError
// but not exit program incase error detected.
#define printLastCudaError(msg) __printLastCudaError(msg, __FILE__, __LINE__)
// Same report as __getLastCudaError but non-fatal: logs to stderr and
// returns. Invoked via printLastCudaError(msg). Note cudaGetLastError()
// also clears the sticky error state.
inline void __printLastCudaError(const char *errorMessage, const char *file,
const int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr,
"ERROR: %s(%i) : getLastCudaError() CUDA error :"
" %s : (%d) %s.\n",
file, line, errorMessage, static_cast<int>(err),
cudaGetErrorString(err));
}
}
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
// Read n whitespace-separated values of type T from std::cin.
template <class T>
std::vector<T> read_vector1d(size_t n)
{
    std::vector<T> values;
    values.reserve(n);
    for (size_t i = 0; i < n; ++i)
    {
        T x;
        std::cin >> x;
        values.push_back(x);
    }
    return values;
}
// Print v to stdout, elements separated by single spaces, terminated by
// std::endl; prints nothing for an empty vector.
template <class T>
void print_vector1d(std::vector<T> &v)
{
    if (v.empty())
        return;
    for (size_t i = 0; i + 1 < v.size(); ++i)
        std::cout << v[i] << ' ';
    std::cout << v.back() << std::endl;
}
// Non-owning device-buffer handle: a raw device pointer plus element count,
// with thin error-checked wrappers around cudaMalloc/cudaMemcpy/cudaFree.
// Ownership policy is left to subclasses (see CudaMemory below).
template <class T>
class CudaMemoryLogic
{
protected:
    T *ptr = nullptr;   // device pointer; nullptr when unallocated
public:
    size_t count = 0;   // number of T elements allocated
    // Exchange the buffers (pointer and count) of two handles.
    static void Swap(CudaMemoryLogic &left, CudaMemoryLogic &right)
    {
        size_t saveCount = left.count;
        left.count = right.count;
        right.count = saveCount;
        T *savePtr = left.ptr;
        left.ptr = right.ptr;
        right.ptr = savePtr;
    }
    // Shallow copy: both handles alias the same device memory afterwards.
    // The original version fell off the end without returning a value,
    // which is undefined behavior; it must return *this.
    CudaMemoryLogic& operator=(const CudaMemoryLogic &right)
    {
        ptr = right.ptr;
        count = right.count;
        return *this;
    }
    // Forget the buffer without freeing it.
    void Reset()
    {
        ptr = nullptr;
        count = 0;
    }
    // Allocate count elements on the device (no-op when count == 0).
    // NOTE(review): does not free a previously held buffer -- confirm
    // callers alloc at most once per handle.
    void alloc(size_t count)
    {
        if (count == 0)
            return;
        this->count = count;
        checkCudaErrors(
            cudaMalloc(&ptr, count * sizeof(T))
        );
    }
    __host__ __device__
    T*& get()
    {
        return ptr;
    }
    __host__ __device__
    T* get() const
    {
        return ptr;
    }
    // Allocation size in bytes.
    size_t bytes_size()
    {
        return count * sizeof(T);
    }
    // Copy `count` elements (default: the whole buffer) between host memory
    // `ptr` and this device buffer at element offset deviceOffset; the
    // direction is selected by `kind`. No-op when ptr is null.
    void memcpy(void *ptr, cudaMemcpyKind kind, size_t deviceOffset=0, size_t count=0)
    {
        if (!ptr)
            return;
        void *dst, *src;
        if (kind == cudaMemcpyHostToDevice)
        {
            src = ptr;
            dst = reinterpret_cast<T*>(this->ptr) + deviceOffset;
        }
        else
        {
            src = reinterpret_cast<T*>(this->ptr) + deviceOffset;
            dst = ptr;
        }
        size_t count_to_cpy = (count ? count : this->count);
        checkCudaErrors(
            cudaMemcpy(dst, src, count_to_cpy * sizeof(T), kind)
        );
    }
    // Free the device buffer; safe to call on an empty handle.
    void dealloc()
    {
        if (!ptr)
            return;
        checkCudaErrors(
            cudaFree(ptr)
        );
        ptr = nullptr;
    }
};
// Owning RAII variant: frees the device buffer in its destructor.
// NOTE(review): the copy constructor makes a shallow copy of the device
// pointer while the destructor calls dealloc(); copying therefore leads to
// a double cudaFree unless one of the copies is Reset() first -- confirm
// how instances are passed around before relying on copies.
template<typename T>
class CudaMemory : public CudaMemoryLogic<T>
{
public:
// Empty handle, no allocation.
CudaMemory()
{
this->count = 0;
}
// Allocate count elements on the device immediately.
CudaMemory(size_t count)
{
this->alloc(count);
}
// Shallow copy: aliases right's device buffer (see NOTE above).
CudaMemory(const CudaMemory &right)
{
this->ptr = right.ptr;
this->count = right.count;
}
~CudaMemory()
{
this->dealloc();
}
};
// Helper for post-launch error checking: call check("kernelName") right
// after a kernel launch; throws std::runtime_error when cudaGetLastError
// reports a failure (kernel launches do not return a status directly).
class CudaKernelChecker
{
private:
cudaError_t err;
public:
CudaKernelChecker()
{
err = cudaSuccess;
}
// Check and clear the last CUDA error; name is only used in the message.
void check(const std::string &name)
{
err = cudaGetLastError();
if (err != cudaSuccess)
{
throw std::runtime_error(
std::string("ERROR: Failed to launch kernel ")
+ name
+ " (error "
+ cudaGetErrorString(err)
+ ")!"
);
}
}
};
// RAII pair of cudaEvents for timing GPU work on the default stream.
// Usage: start(); ...launch work...; stop(); get_time() / print_time().
class CudaTimer
{
private:
    cudaEvent_t event_start, event_stop;
public:
    CudaTimer()
    {
        checkCudaErrors(cudaEventCreate(&event_start));
        checkCudaErrors(cudaEventCreate(&event_stop));
    }
    // The original leaked both events; destroy them on destruction.
    // (No checkCudaErrors here: destructors must not throw.)
    ~CudaTimer()
    {
        cudaEventDestroy(event_start);
        cudaEventDestroy(event_stop);
    }
    // Record the start event.
    void start()
    {
        checkCudaErrors(cudaEventRecord(event_start));
    }
    // Record the stop event and block until it has completed.
    void stop()
    {
        checkCudaErrors(cudaEventRecord(event_stop));
        checkCudaErrors(cudaEventSynchronize(event_stop));
    }
    // Elapsed milliseconds between start() and stop().
    float get_time()
    {
        float ms;
        checkCudaErrors(cudaEventElapsedTime(&ms, event_start, event_stop));
        return ms;
    }
    void print_time()
    {
        float ms_time = get_time();
        printf("time: %f\n", ms_time);
    }
};
|
23,865 | #include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/generate.h>
#include<thrust/sort.h>
#include<thrust/copy.h>
#include<cstdlib>
int main()
{
// Sort 22 random ints on the GPU with Thrust and print them on the host.
thrust::host_vector<int> H(22);
// fill on the host with rand() (unseeded: same sequence every run)
thrust::generate(H.begin(), H.end(), rand);
// copy to the device, sort there, copy the sorted data back
thrust::device_vector<int> D = H;
thrust::sort(D.begin(), D.end());
thrust::copy(D.begin(), D.end(), H.begin());
// NOTE(review): printf is used but <cstdio> is not included directly in
// this file; it currently compiles only via transitive includes -- confirm.
for(int i=0; i<H.size(); i++)
printf("%d ",H[i]);
return 0;
}
23,866 | /*
Solution of the Laplace equation for heat conduction in a square plate
*/
#include <iostream>
// global variables
const int NX = 1024; // mesh size (number of node points along X)
const int NY = 1024; // mesh size (number of node points along Y)
const int MAX_ITER=1000; // number of Jacobi iterations
// device function to update the array T_new based on the values in array T_old
// note that all locations are updated simultaneously on the GPU
// One Jacobi sweep of the Laplace equation: T_new = average of the four
// neighbors of T_old, using a 16x16 shared-memory tile with a one-point halo.
// Preconditions: blockDim == (16,16) (the tile size and the tx==15/ty==15
// tests hard-code it) and a grid covering the full NX x NY mesh.
__global__ void Laplace(double *T_old, double *T_new)
{
    // (i,j) node handled by this thread; row-major flat index P = i + j*NX
    int tx = threadIdx.x, ty = threadIdx.y;
    int i = blockIdx.x * blockDim.x + tx;
    int j = blockIdx.y * blockDim.y + ty;
    int P = i + j*NX;
#define smem(x,y) s_old[(x)+1][(y)+1]
    __shared__ double s_old[16+2][16+2];
    // each thread stages its own node into the tile interior
    smem(tx,ty) = T_old[P];
    bool is_x_top = tx==0, is_x_bot = tx==15;
    bool is_y_top = ty==0, is_y_bot = ty==15;
    // Threads on the tile faces also load the halo nodes. The original code
    // used a transposed index for the halo (e.g. T_old[(i-1)*NX+j]), which
    // fetched the wrong node and read out of bounds at the domain edge; in
    // row-major layout the neighbors of P are P+-1 (x) and P+-NX (y), and
    // the domain-edge guards below keep every access in bounds (edge halo
    // values are never consumed because only interior nodes are updated).
    if (is_x_top)      { if (i > 0)    smem(tx-1,ty) = T_old[P-1]; }
    else if (is_x_bot) { if (i < NX-1) smem(tx+1,ty) = T_old[P+1]; }
    if (is_y_top)      { if (j > 0)    smem(tx,ty-1) = T_old[P-NX]; }
    else if (is_y_bot) { if (j < NY-1) smem(tx,ty+1) = T_old[P+NX]; }
    // all threads reach this barrier (no divergence around it)
    __syncthreads();
    // update "interior" node points; boundary values stay fixed (Dirichlet)
    if(i>0 && i<NX-1 && j>0 && j<NY-1) {
        T_new[P] = 0.25*( smem(tx,ty+1) + smem(tx,ty-1) +
                          smem(tx-1,ty) + smem(tx+1,ty) );
    }
#undef smem
}
// initialization
// Set the whole NX x NY field to zero, then fix the left wall (i == 0,
// flat index j*NX) at temperature 1 as the boundary condition.
void Initialize(double *TEMPERATURE)
{
    for (int i = 0; i < NX; i++) {
        for (int j = 0; j < NY; j++) {
            TEMPERATURE[i + j*NX] = 0.0;
        }
    }
    // left wall held at 1
    for (int j = 0; j < NY; j++) {
        TEMPERATURE[j*NX] = 1.0;
    }
}
// Driver: initialize the temperature field on the host, run MAX_ITER Jacobi
// sweeps on the GPU (ping-ponging between two device buffers), and copy the
// final field back to the host.
int main(int argc,char **argv)
{
    double *_T1, *_T2; // pointers to device (GPU) memory
    // allocate a "pre-computation" T array on the host
    double *T = new double [NX*NY];
    // initialize array on the host
    Initialize(T);
    // allocate storage space on the GPU
    cudaMalloc((void **)&_T1,NX*NY*sizeof(double));
    cudaMalloc((void **)&_T2,NX*NY*sizeof(double));
    // copy (initialized) host arrays to the GPU memory from CPU memory
    cudaMemcpy(_T1,T,NX*NY*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(_T2,T,NX*NY*sizeof(double),cudaMemcpyHostToDevice);
    // 16x16 threads per CUDA block
    int ThreadsPerBlock=16;
    dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
    // grid sized to cover the whole mesh
    dim3 dimGrid( ceil(double(NX)/double(dimBlock.x)), ceil(double(NY)/double(dimBlock.y)), 1 );
    // Jacobi iteration, two half-steps per loop pass. Laplace(old, new)
    // READS its first argument and WRITES its second (the original comments
    // had this backwards), so after the loop _T1 holds the newest iterate
    // and _T2 is one sweep behind.
    int k = 0;
    while(k<MAX_ITER) {
        Laplace<<<dimGrid, dimBlock>>>(_T1,_T2); // read _T1, write _T2
        Laplace<<<dimGrid, dimBlock>>>(_T2,_T1); // read _T2, write _T1
        k+=2;
    }
    // wait for all kernels before reading back (cudaThreadSynchronize is
    // deprecated; the blocking memcpy below would also synchronize)
    cudaDeviceSynchronize();
    // copy the final (newest) array to the CPU: _T1, not the stale _T2 the
    // original copied, which discarded the last sweep
    cudaMemcpy(T,_T1,NX*NY*sizeof(double),cudaMemcpyDeviceToHost);
    // release memory on the host (array form; the original used scalar
    // delete on a new[] allocation, which is undefined behavior)
    delete [] T;
    // release memory on the device
    cudaFree(_T1);
    cudaFree(_T2);
    return 0;
}
|
23,867 | //xfail:BUGLE_ERROR
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is racy. However, variable-length memcpys are not supported.
//Expect error at Bugle stage.
// Route memcpy through the compiler builtin so the verifier sees a
// variable-length copy (which Bugle is expected to reject — see xfail above).
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
short y;
char z;
} s_t; //< sizeof(s_t) == 6
// Each thread copies `len` bytes starting at its own element. When
// len > sizeof(s_t), adjacent threads' copies overlap: that overlap is the
// intentional data race this verification testcase exercises.
__global__ void overstep(s_t *in, s_t *out, size_t len) {
memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
23,868 | #include "includes.h"
// One thread per hub node: moves a fraction (batch_fraction, weighted by the
// destination's node_weight) of each rat category from the hub to each of
// its neighbors. Counters on the destination side are updated atomically;
// the source-side decrements must be atomic as well, because `nid` can
// simultaneously be the `remote_node` of another thread's atomicAdd — a
// plain read-modify-write there would silently lose updates.
__global__ void next_move_hub_kernel(int* hub, int nhub, int *rat_count, int *healthy_rat_count, int *exposed_rat_count, int *infectious_rat_count, double *node_weight, double *sum_weight_result,int *neighbor, int *neighbor_start, int width, int height, double batch_fraction){
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < nhub) {
int nid = hub[x];
for (int eid = neighbor_start[nid]; eid < neighbor_start[nid+1]; eid++) {
int remote_node = neighbor[eid];
// NOTE(review): assumes sum_weight_result[nid] != 0 — verify upstream. // check 0
double move_prob = batch_fraction * node_weight[remote_node] / sum_weight_result[nid];
int move_rat = rat_count[nid] * move_prob;
int move_healthy = healthy_rat_count[nid] * move_prob;
int move_exposed = exposed_rat_count[nid] * move_prob;
int move_infectious = infectious_rat_count[nid] * move_prob;
atomicAdd(&rat_count[remote_node], move_rat);
atomicAdd(&healthy_rat_count[remote_node], move_healthy);
atomicAdd(&exposed_rat_count[remote_node], move_exposed);
atomicAdd(&infectious_rat_count[remote_node], move_infectious);
// atomicSub fixes the lost-update race of the original `-=` stores
atomicSub(&rat_count[nid], move_rat);
atomicSub(&healthy_rat_count[nid], move_healthy);
atomicSub(&exposed_rat_count[nid], move_exposed);
atomicSub(&infectious_rat_count[nid], move_infectious);
}
}
} |
23,869 | #include <stdio.h>
#include <math.h>
#include <iostream>
// Print a readable CUDA error with its source location and abort.
// `file` receives the __FILE__ string literal, so it must be const char*:
// binding a string literal to a non-const char* is ill-formed in C++11+.
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Thin device-side wrapper around pow() for two double arguments.
__device__ double power(double a, double b){
double result = pow(a, b);
return result;
}
// Midpoint-rule integration of sqrt(1 - x^2) over [-1, 1]: each thread owns
// `batch_size` consecutive slices (starting at batch_size * global index) and
// atomically accumulates its partial half-disc area into *res; the caller
// doubles the final sum to approximate pi.
// NOTE(review): atomicAdd on double requires compute capability 6.0+.
__global__ void pi_approx_kernel(double n, double *res, double batch_size)
{
const double index = threadIdx.x + blockDim.x * blockIdx.x;
// long int threads = blockDim.x * gridDim.x * blockDim.y * gridDim.y;
const double a = -1; // lower integration bound
const double b = 1; // upper integration bound
const double width = (b-a) / n; // width of one slice
double x = 0;
double num = 0;
double batch_index = batch_size * index; // first slice handled by this thread
for (double i = 0; i < batch_size; i++){
x = width * (batch_index + i) + a + width/2; // slice midpoint
num += sqrt(1-power(x, 2)) * width;
}
atomicAdd(res, num);
}
// CPU reference: midpoint-rule integration of sqrt(1 - x^2) over [-1, 1]
// in n slices (the area of a half disc), doubled to approximate pi.
double pi_approx_cpu(double n){
const double a = -1;
const double b = 1;
const double width = 1/n * (b-a);
double half_area = 0;
for (int i = 0; i < n; i++){
// midpoint of the i-th slice
double x = (b-a) * i/n + a + width/2;
half_area += sqrt(1-pow(x, 2)) * width;
}
return 2 * half_area;
}
// Single-thread GPU variant (intended for a <<<1,1>>> launch — see the
// commented-out call in main): same midpoint sum as pi_approx_cpu, writing
// the half-disc area into pi[0]. Not safe for multi-thread launches: the
// unsynchronized `pi[0] += num` would race.
__global__ void pi_approx_gpu_single(double n, double *pi){
double a = -1;
double b = 1;
double width = 1/n * (b-a);
double x;
double num;
for (int i = 0; i < n; i++){
x = (b-a) * i/n + a + width/2;
num = sqrt(1-power(x, 2)) * width;
pi[0] += num;
}
}
// GPU driver: picks a launch configuration when none is supplied, times the
// kernel with CUDA events, and returns 2x the accumulated half-disc area.
// Returns the pi approximation; prints timing/diagnostics along the way.
double pi_approx_gpu(double iters, int block_count=-1, int thread_count=-1){
double batch_size;
if (block_count == -1 || thread_count == -1){
if (iters / (1024 * 1024) >= 1024){
block_count = 1024;
thread_count = 1024;
}
else {
block_count = ceil(iters / (1024 * 1024));
thread_count = iters / block_count / 1024;
// Small iteration counts truncate to 0 threads, which would make the
// launch fail with an invalid configuration — clamp to at least 1.
if (thread_count < 1) thread_count = 1;
}
}
batch_size = ceil(iters / (block_count * thread_count));
double *d_res = 0;
double res = 0;
printf("Batch size: %lf\n\n", batch_size);
printf("Estimated GPU time in seconds (less accurate for less than 2^30): %lf\n\n", 0.72 * iters / pow(2, 30));
// check allocations/copies instead of silently continuing with d_res == 0
HANDLE_ERROR(cudaMalloc((void**) &d_res, sizeof(double)));
HANDLE_ERROR(cudaMemcpy(d_res, &res, sizeof(double), cudaMemcpyHostToDevice));
float elapsed = 0;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
pi_approx_kernel<<<block_count, thread_count>>>(iters, d_res, batch_size);
HANDLE_ERROR(cudaGetLastError()); // surface bad launch configurations
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&elapsed, start, stop) );
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
printf("With GPU: %f seconds\n", elapsed / 1000);
HANDLE_ERROR(cudaMemcpy(&res, d_res, sizeof(double), cudaMemcpyDeviceToHost));
printf("GPU approx: %.50lf\n\n", res * 2);
cudaFree(d_res);
return res * 2;
}
// Interactive driver: reads an exponent, runs 2^x iterations on GPU and CPU,
// and reports both approximations with wall-clock timings.
int main()
{
double x;
std::cout << "2^() iterations: "; // Type a number and press enter
std::cin >> x; // Get user input from the keyboard
const double iters = pow(2, x);
pi_approx_gpu(iters);
// double *d_pi = 0;
// double pi = 0;
// cudaMalloc(&d_pi, sizeof(double));
// HANDLE_ERROR(cudaEventCreate(&start));
// HANDLE_ERROR(cudaEventCreate(&stop));
// HANDLE_ERROR(cudaEventRecord(start, 0));
// pi_approx_gpu_single<<<1, 1>>>(iters, d_pi);
// cudaMemcpy(&pi, d_pi, sizeof(double), cudaMemcpyDeviceToHost);
// HANDLE_ERROR(cudaEventRecord(stop, 0));
// HANDLE_ERROR(cudaEventSynchronize(stop));
// HANDLE_ERROR(cudaEventElapsedTime(&elapsed, start, stop) );
// HANDLE_ERROR(cudaEventDestroy(start));
// HANDLE_ERROR(cudaEventDestroy(stop));
// printf("With GPU 2: %f seconds\n", elapsed / 1000);
// printf("GPU approx 2: %.50lf\n\n", 2 * pi);
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime=0;
cpu_startTime = clock();
double cpu_approx = pi_approx_cpu(iters);
cpu_endTime = clock();
// clock_t / CLOCKS_PER_SEC is integer division and truncates the elapsed
// time to whole seconds; cast to double first to keep the fraction.
cpu_ElapseTime = ((double)(cpu_endTime - cpu_startTime))/CLOCKS_PER_SEC;
printf("With CPU: %f seconds\n", cpu_ElapseTime);
printf("CPU approx: %.50lf\n", cpu_approx);
return 0;
} |
23,870 | // tdfc-cuda backend autocompiled body file
// tdfc version 1.160
// Thu May 26 16:38:16 2011
#include <stdio.h>
// Rotate 2-D points (cc_x, cc_y): given cosine cc_c and sine cc_s of the
// rotation angle, write the rotated coordinates to cc_x_out / cc_y_out.
// One thread per point, guarded against the grid tail.
__global__ void tdfc_rot(float cc_c,float cc_s,float* cc_x,float* cc_y,float* cc_x_out,float* cc_y_out,int N )
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
float x = cc_x[idx];
float y = cc_y[idx];
cc_x_out[idx] = (x*cc_c)+(y*cc_s);
cc_y_out[idx] = (y*cc_c)-(x*cc_s);
} //tdfc_rot
|
23,871 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
// Block until all queued device work finishes, then abort on any pending
// CUDA error.
void synchronizeAndCheckReturnStatus()
{
// cudaThreadSynchronize is deprecated (removed in CUDA 10+);
// cudaDeviceSynchronize is its direct replacement.
cudaDeviceSynchronize();
cudaError_t status = cudaGetLastError();
if (status != cudaSuccess)
{
printf("return status: %s\n", cudaGetErrorString(status));
// exit(0) signalled success on failure — report a nonzero status
exit(1);
}
}
// Query and print the properties of the currently selected CUDA device.
int main()
{
cudaDeviceProp properties;
int device;
// `device` was read uninitialized: the cudaGetDevice call that should set
// it had been commented out. Query the current device explicitly.
cudaGetDevice(&device);
synchronizeAndCheckReturnStatus();
cudaGetDeviceProperties(&properties, device);
synchronizeAndCheckReturnStatus();
printf("Properties of device number %d:\n\n", device);
printf("name %s\n", properties.name);
printf("warpSize %d\n", properties.warpSize);
// size_t fields must use %zu — %d on a size_t is undefined behavior
printf("totalGlobalMem %zu\n", properties.totalGlobalMem);
printf("sharedMemPerBlock %zu\n", properties.sharedMemPerBlock);
printf("regsPerBlock %d\n", properties.regsPerBlock);
printf("memPitch %zu\n", properties.memPitch);
printf("maxThreadsPerBlock %d\n", properties.maxThreadsPerBlock);
printf("maxThreadsDim[0] %d\n", properties.maxThreadsDim[0]);
printf("maxThreadsDim[1] %d\n", properties.maxThreadsDim[1]);
printf("maxThreadsDim[2] %d\n", properties.maxThreadsDim[2]);
printf("maxGridSize[0] %d\n", properties.maxGridSize[0]);
printf("maxGridSize[1] %d\n", properties.maxGridSize[1]);
printf("maxGridSize[2] %d\n", properties.maxGridSize[2]);
printf("totalConstMem %zu\n", properties.totalConstMem);
printf("major %d\n", properties.major);
printf("minor %d\n", properties.minor);
printf("clockRate %d\n", properties.clockRate);
printf("textureAlignment %zu\n", properties.textureAlignment);
printf("deviceOverlap %d\n", properties.deviceOverlap);
printf("multiProcessorCount %d\n", properties.multiProcessorCount);
printf("kernelExecTimeoutEnabled %d\n", properties.kernelExecTimeoutEnabled);
printf("integrated %d\n", properties.integrated);
printf("canMapHostMemory %d\n", properties.canMapHostMemory);
printf("computeMode %d\n", properties.computeMode);
}
|
23,872 | /* hello CUDA kernels
* Arguments:
* char *a - an array of characters
* Purpose:
* Each CUDA thread calculates an index value and increments
* its portion of the array by the value of its index.
*/
/* hello_block
* This kernel works when called with
* multiple thread blocks, each using
* a single thread
*/
/* hello_block
 * Adds each block's index to its element of `a`.
 * Launch with N blocks of a single thread each.
 */
__global__ void hello_block(char *a, int N)
{
int idx = blockIdx.x;
if(idx >= N)
return;
a[idx] += idx;
}
/* hello_thread
* This kernel works when called with
* a single thread block using
* multiple threads
*/
/* hello_thread
 * Adds each thread's index to its element of `a`.
 * Launch with a single block of multiple threads.
 */
__global__ void hello_thread(char *a, int N)
{
int idx = threadIdx.x;
if(idx >= N)
return;
a[idx] += idx;
}
/* hello_both
* This kernel works when called with
* multiple thread blocks each with
* multiple threads
*/
/* hello_both
 * Adds each thread's flat global index to its element of `a`.
 * Works with any combination of blocks and threads.
 */
__global__ void hello_both(char *a, int N)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx >= N)
return;
a[idx] += idx;
}
|
23,873 |
// CUDA sample
// simple grid-stride
#include <stdio.h>
#include <cstdlib>
/* Fill a[0..N-1] with its own index values (a[i] = i). */
void init(int *a, int N)
{
for (int idx = 0; idx < N; ++idx)
a[idx] = idx;
}
__global__
void doubleElementsStride(int *a, int N)
{
// Grid-stride loop: each thread processes every `stride`-th element, so
// correctness does not depend on the launch configuration covering N.
int stride = gridDim.x * blockDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
a[i] *= 2;
}
__global__
void doubleElementsMismatch(int *a, int N)
{
// Same grid-stride doubling as doubleElementsStride; kept as a separate
// kernel so main() can launch it with a deliberately mismatched grid.
int stride = gridDim.x * blockDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
a[i] *= 2;
}
// True iff a[i] == 2*i for every i in [0, N); vacuously true for N == 0.
bool checkElementsAreDoubled(int *a, int N)
{
for (int i = 0; i < N; ++i)
if (a[i] != i * 2)
return false;
return true;
}
// Driver: doubles an array with a grid-stride kernel and/or a deliberately
// oversized-grid kernel, then verifies the result on the host.
// argv: [N] [threads] [blocks] [flag: 0=both, 1=stride, 2=mismatch]
int main(int argc, char** argv)
{
int N = 10000;
int *a;
int blocks = 32;
int threads = 256;
int flag = 0;
if(argc > 3) {
N = atoi(argv[1]);
threads = atoi(argv[2]);
blocks = atoi(argv[3]);
if(argc > 4) {
flag = atoi(argv[4]);
}
}
size_t size = N * sizeof(int);
// an unchecked failure here would make init() dereference a null pointer
if (cudaMallocManaged(&a, size) != cudaSuccess) {
printf("cudaMallocManaged failed\n");
return 1;
}
size_t threads_per_block = threads;
size_t number_of_blocks = blocks;
bool areDoubled;
if(flag == 0 || flag == 1) {
init(a, N);
doubleElementsStride<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled(stride-grid)? %s\n", areDoubled ? "TRUE" : "FALSE");
}
if(flag == 0 || flag == 2) {
init(a, N);
size_t threads_per_block_mismatch = 256;
// note: intentionally one block more than a ceil-div would produce
size_t number_of_blocks_mismatch = (N + threads_per_block_mismatch) / threads_per_block_mismatch;
doubleElementsMismatch<<<number_of_blocks_mismatch, threads_per_block_mismatch>>>(a, N);
cudaDeviceSynchronize();
areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled(mismatch)? %s\n", areDoubled ? "TRUE" : "FALSE");
}
cudaFree(a);
}
|
23,874 | #include <stdio.h>
/* Fill *prop with the properties of device 0; abort with status 2 on
   failure. */
void GetDeviceProperties(struct cudaDeviceProp *prop) {
const int device = 0;
cudaError_t e = cudaGetDeviceProperties (prop, device);
if (e != cudaSuccess) {
fprintf(stderr, "GetDeviceProperties failed\n");
exit(2);
}
}
// Print two selected properties of device 0.
int main() {
cudaDeviceProp p;
GetDeviceProperties(&p);
// totalGlobalMem is size_t: cast to long to match %ld (passing a size_t
// straight to %ld is undefined on LLP64 platforms such as Windows).
printf("totalGlobalMem: % 09ld\n", (long)p.totalGlobalMem);
printf("maxTexture1D : % 09d\n", p.maxTexture1D);
}
|
23,875 | #include <iostream>
// Intentionally empty kernel: exists only so main() can exercise a launch
// with two device pointers.
static __global__ void kernel(const float *A, const float *b) {
}
// Allocate two single-float device buffers, launch the no-op kernel on
// them, and clean up. Frees whatever succeeded before bailing out.
int main(int argc, char** argv) {
float *d_a;
float *d_b;
if(cudaMalloc(&d_a, sizeof(float)) != cudaSuccess) {
std::cout << "cudaMalloc d_a failed" << std::endl;
return 1;
}
if(cudaMalloc(&d_b, sizeof(float)) != cudaSuccess) {
std::cout << "cudaMalloc d_b failed" << std::endl;
cudaFree(d_a);
return 1;
}
kernel<<<1, 1>>>(d_a, d_b);
cudaFree(d_a);
cudaFree(d_b);
std::cout << "done." << std::endl;
}
|
23,876 | #include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/random.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/equal.h>
using namespace thrust::placeholders;
/*************************************/
/* CONVERT LINEAR INDEX TO ROW INDEX */
/*************************************/
// Functor mapping a flat (row-major) linear index to its row index: i / Ncols.
// NOTE(review): thrust::unary_function is deprecated/removed in recent Thrust
// releases; the inheritance is kept because callers may rely on its typedefs —
// confirm before removing.
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T> {
T Ncols; // --- Number of columns
__host__ __device__ linear_index_to_row_index(T Ncols) : Ncols(Ncols) {}
__host__ __device__ T operator()(T i) { return i / Ncols; }
};
/********/
/* MAIN */
/********/
// Demo: broadcast-add a column vector to every column of a matrix, and a row
// vector to every row, using Thrust fancy iterators (no hand-written kernels).
int main()
{
/**************************/
/* SETTING UP THE PROBLEM */
/**************************/
const int Nrows = 10; // --- Number of rows
const int Ncols = 3; // --- Number of columns
// --- Random uniform integer distribution between 0 and 100
thrust::default_random_engine rng;
thrust::uniform_int_distribution<int> dist1(0, 100);
// --- Random uniform integer distribution between 1 and 4
thrust::uniform_int_distribution<int> dist2(1, 4);
// --- Matrix allocation and initialization (row-major, Nrows x Ncols)
thrust::device_vector<float> d_matrix(Nrows * Ncols);
for (size_t i = 0; i < d_matrix.size(); i++) d_matrix[i] = (float)dist1(rng);
// --- Column vector allocation and initialization
thrust::device_vector<float> d_column(Nrows);
for (size_t i = 0; i < d_column.size(); i++) d_column[i] = (float)dist2(rng);
// --- Row vector allocation and initialization
thrust::device_vector<float> d_row(Ncols);
for (size_t i = 0; i < d_row.size(); i++) d_row[i] = (float)dist2(rng);
printf("\n\nOriginal matrix\n");
for(int i = 0; i < Nrows; i++) {
std::cout << "[ ";
for(int j = 0; j < Ncols; j++)
std::cout << d_matrix[i * Ncols + j] << " ";
std::cout << "]\n";
}
printf("\n\nColumn vector\n");
for(int i = 0; i < Nrows; i++) std::cout << d_column[i] << "\n";
printf("\n\nRow vector\n");
for(int i = 0; i < Ncols; i++) std::cout << d_row[i] << " ";
/*******************************************************/
/* ADDING THE SAME COLUMN VECTOR TO ALL MATRIX COLUMNS */
/*******************************************************/
// Element i of the matrix is paired with d_column[i / Ncols]: the
// permutation iterator re-reads the same column entry across a whole row.
thrust::device_vector<float> d_matrix2(d_matrix);
thrust::transform(d_matrix.begin(), d_matrix.end(),
thrust::make_permutation_iterator(
d_column.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Ncols))),
d_matrix2.begin(),
thrust::plus<float>());
printf("\n\nColumn + Matrix -> Result matrix\n");
for(int i = 0; i < Nrows; i++) {
std::cout << "[ ";
for(int j = 0; j < Ncols; j++)
std::cout << d_matrix2[i * Ncols + j] << " ";
std::cout << "]\n";
}
/*************************************************/
/* ADDING THE SAME ROW VECTOR TO ALL MATRIX ROWS */
/*************************************************/
// The placeholder expression (_1 % Nrows) * Ncols + _1 / Nrows walks the
// row-major matrix in column-major order, so consecutive counting indices
// pair all entries of one column with a single d_row element (selected by
// linear_index_to_row_index<int>(Nrows), i.e. counting index / Nrows).
thrust::device_vector<float> d_matrix3(d_matrix);
thrust::transform(thrust::make_permutation_iterator(
d_matrix.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
thrust::make_permutation_iterator(
d_matrix.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)) + Nrows * Ncols,
thrust::make_permutation_iterator(
d_row.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0), linear_index_to_row_index<int>(Nrows))),
thrust::make_permutation_iterator(
d_matrix3.begin(),
thrust::make_transform_iterator(thrust::make_counting_iterator(0),(_1 % Nrows) * Ncols + _1 / Nrows)),
thrust::plus<float>());
printf("\n\nRow + Matrix -> Result matrix\n");
for(int i = 0; i < Nrows; i++) {
std::cout << "[ ";
for(int j = 0; j < Ncols; j++)
std::cout << d_matrix3[i * Ncols + j] << " ";
std::cout << "]\n";
}
return 0;
}
|
23,877 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define Mask_Width 101
#define TILE_WIDTH 1000
__constant__ int M[Mask_Width];
// 1-D convolution of N (length n) with the constant-memory mask M, writing
// the result to P. One thread per output element; out-of-range neighbor
// reads are treated as ghost zeros.
__global__ void Convolution1D_kernel(int *N, int *P, int n) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
// The grid is sized with ceil(n / blockDim), so trailing threads can have
// i >= n; without this guard they would write P out of bounds.
if (i >= n)
return;
int j;
int PValue = 0;
int N_start_point = i - ((Mask_Width - 1)/2);
for(j = 0; j < Mask_Width; j++) {
if( ((N_start_point + j) >= 0) && ((N_start_point + j) < n) ) {
PValue += N[N_start_point + j]*M[j];
}
}
P[i] = PValue;
}
// Tiled 1-D convolution: each block stages its TILE_WIDTH central elements
// plus left/right halos in shared memory, then convolves against the
// constant-memory mask M. Out-of-range (ghost) cells are loaded as zeros.
__global__ void Convolution1D_SM_kernel(int *N, int *P, int width) {
// shared tile: central elements plus both halos
__shared__ int N_ds[TILE_WIDTH + Mask_Width - 1];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int n = (Mask_Width - 1)/2;
// index of this thread's element in the previous tile
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
// only the last n threads load the left halo
if (threadIdx.x >= blockDim.x - n) {
N_ds[threadIdx.x - (blockDim.x - n)] = (halo_index_left < 0) ? 0 : N[halo_index_left];
}
// central elements: the last block can run past `width`, so guard the
// load (the original read N out of bounds here) and store a ghost zero
N_ds[n + threadIdx.x] = (i < width) ? N[i] : 0;
// index of this thread's element in the next tile
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
// only the first n threads load the right halo
if (threadIdx.x < n) {
N_ds[n + blockDim.x + threadIdx.x] = (halo_index_right >= width) ? 0 : N[halo_index_right];
}
// all loads must complete before any thread reads the tile
__syncthreads();
int PValue = 0;
for(int j = 0; j < Mask_Width; j++) {
PValue += N_ds[threadIdx.x + j]*M[j];
}
// guard the store as well: threads past the end must not write P
if (i < width)
P[i] = PValue;
}
#ifdef __DEBUG__
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
#endif // __DEBUG__
// Reads the vector length and elements from stdin, runs the shared-memory
// convolution kernel against the index-valued mask, and prints the result.
int main(int argc, char *argv[]) {
// device buffers
int *d_N, *d_P;
/* input, output and mask buffers */
int *N , *P, *h_M;
int n, i;
#ifdef __DEBUG__
// timing variables
double time_start;
double time_end;
#endif // __DEBUG__
/* vector length */
scanf("%d",&n);
// launch dimensions: one tile-sized block per ceil(n / TILE_WIDTH)
dim3 dimGrid(ceil((float)n / TILE_WIDTH), 1, 1);
dim3 dimBlock(TILE_WIDTH, 1, 1);
/* allocate the required host buffers */
P = (int *)malloc(n*sizeof(int));
N = (int *)malloc(n*sizeof(int));
h_M = (int *)malloc(sizeof(int)*Mask_Width);
/* read the input values */
for(i = 0; i < n ; i++)
scanf("%d",&N[i]);
// mask values are simply their own indices
for(i = 0; i < Mask_Width; i++) h_M[i] = i;
#ifdef __DEBUG__
time_start = rtclock();
#endif // __DEBUG__
// allocate the device buffers
cudaMalloc((void **) &d_N, sizeof(int)*n);
cudaMalloc((void **) &d_P, sizeof(int)*n);
// copy the input vector to the device
cudaMemcpy(d_N, N, sizeof(int)*n, cudaMemcpyHostToDevice);
// copy the mask into constant memory
cudaMemcpyToSymbol(M, h_M, sizeof(int)*Mask_Width);
// run the shared-memory kernel
Convolution1D_SM_kernel<<<dimGrid, dimBlock>>>(d_N, d_P, n);
// copy the result back from the GPU (blocking copy doubles as a sync)
cudaMemcpy(P, d_P, sizeof(int)*n, cudaMemcpyDeviceToHost);
#ifdef __DEBUG__
time_end = rtclock();
#endif // __DEBUG__
for(i = 0; i < n; i++) printf("%d ", P[i]);
printf("\n");
#ifdef __DEBUG__
fprintf(stdout, "\n%0.6lf\n", time_end - time_start);
#endif // __DEBUG__
cudaFree(d_N);
cudaFree(d_P);
free(P);
free(N);
free(h_M);
}
// ------------------------------------------------------------------------------------
// input | cpu_serial | gpu_nosharedmemory | gpu_sharedmemory | speedup (cpu/gpusm)
// ------------------------------------------------------------------------------------
// arq1.in | 0.057762 | 0.670367 | 0.621001 | 0.09301434297
// arq2.in | 0.578741 | 0.641411 | 0.636617 | 0.9090881959
// arq3.in | 5.779576 | 0.837708 | 0.820906 | 7.040484538
// ------------------------------------------------------------------------------------
//
// taf!
|
23,878 | #include <cuda_runtime.h>
#include <stdio.h>
// Device-side greeting: every launched thread prints one line.
__global__ void HelloFromGPU(void) {
printf("hello from GPU\n");
}
// Host-side greeting.
void HelloFromCPU(void) {
printf("hello from CPU\n");
}
// Prints a CPU greeting, launches 10 GPU greetings (2 blocks x 5 threads),
// waits for them, then prints a final CPU greeting.
int main (void) {
HelloFromCPU();
HelloFromGPU<<<2, 5>>>();
// Device printf output is buffered and the launch is asynchronous: without
// a synchronize the process can exit before the kernel runs, silently
// dropping all GPU output.
cudaDeviceSynchronize();
HelloFromCPU();
return 0;
}
|
23,879 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand_kernel.h>
#include <cstdio>
#include <cassert>
#define check_cuda_call(ans) { _check((ans), __FILE__, __LINE__); }
// Abort with a readable message when a CUDA API call fails.
// `file` must be const char*: the macro passes the __FILE__ string literal,
// and binding a literal to a non-const char* is ill-formed in C++11+.
inline void _check(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,"CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
// Wraps a pair of CUDA events for timing work on a stream.
// Usage: start(stream); <enqueue work>; stop(stream); elapsed() -> ms.
// NOTE(review): copying an EventTimer would double-destroy the events in the
// destructor — confirm no caller copies instances.
class EventTimer {
public:
EventTimer() : mStarted(false), mStopped(false) {
cudaEventCreate(&mStart);
cudaEventCreate(&mStop);
}
~EventTimer() {
cudaEventDestroy(mStart);
cudaEventDestroy(mStop);
}
// Record the start event on stream s (default stream if omitted).
void start(cudaStream_t s = 0) {
cudaEventRecord(mStart, s);
mStarted = true;
mStopped = false;
}
// Record the stop event; must be preceded by start().
void stop(cudaStream_t s = 0) {
assert(mStarted);
cudaEventRecord(mStop, s);
mStarted = false;
mStopped = true;
}
// Milliseconds between start and stop; blocks until the stop event has
// actually occurred. Returns 0 if stop() was never called (release builds
// where the assert is compiled out).
float elapsed() {
assert(mStopped);
if (!mStopped) return 0;
cudaEventSynchronize(mStop);
float elapsed = 0;
cudaEventElapsedTime(&elapsed, mStart, mStop);
return elapsed;
}
private:
bool mStarted, mStopped; // track the start/stop protocol state
cudaEvent_t mStart, mStop;
};
// Seed one XORWOW RNG state per cell of a w x h grid (2-D launch).
__global__ void state_setup(curandStateXORWOW_t* states, int w, int h)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < w && y < h) {
// clock64() as seed, x as the sequence number
curand_init(clock64(), x, 0, states + (x + y * w));
}
}
// Fill buf with per-pixel random values: the low bit of each cell's next
// random draw selects black (0) or white (0xffffffff).
__global__ void write(int *buf, int w, int h, float k, curandStateXORWOW_t* states)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < w && y < h) {
int i = x + y * w;
buf[i] = curand(states + i) & 1 ? 0x00000000 : 0xffffffff;
// (x << 24 | y << 16 | x << 8 | y) * k;
}
}
curandStateXORWOW_t* states;
// Allocate and seed the global RNG state array for a w x h image.
void state_setup(int w, int h)
{
const size_t bytes = w * h * sizeof(curandStateXORWOW_t);
check_cuda_call(cudaMalloc(&states, bytes));
check_cuda_call(cudaMemset(states, 0, bytes));
dim3 dim_block(32, 16); // 32 * 16 = 512 threads per block
dim3 dim_grid((w + dim_block.x - 1) / dim_block.x,
              (h + dim_block.y - 1) / dim_block.y);
state_setup<<<dim_grid, dim_block>>>(states, w, h);
}
// Launch the `write` kernel over a w x h buffer, timing it with events.
void cuda_write(int* buf, int w, int h, float k)
{
dim3 dim_block(32, 16); // 32 * 16 = 512 threads per block
dim3 dim_grid((w + dim_block.x - 1) / dim_block.x,
              (h + dim_block.y - 1) / dim_block.y);
EventTimer t;
t.start();
write<<<dim_grid, dim_block>>>(buf, w, h, k, states);
t.stop();
//printf("kernel time: %f\n", t.elapsed());
}
// Release the global RNG state array allocated by state_setup().
void state_destroy()
{
check_cuda_call(cudaFree(states));
}
|
23,880 | #include<iostream>
#include<cuda.h>
using namespace std;
#define N 10
// Element-wise vector addition c = a + b, one thread per element.
__global__ void add(int *a,int *b,int *c){
int tid=threadIdx.x;
if(tid>=N)
return;
c[tid]=a[tid]+b[tid];
}
// Build a[i] = i and b[i] = i*i on the host, add them on the GPU with one
// block of N threads, and print the sums.
int main(){
int a[N],b[N],c[N];
int *dev_a,*dev_b,*dev_c;
// device buffers
cudaMalloc(&dev_a,N*sizeof(int));
cudaMalloc(&dev_b,N*sizeof(int));
cudaMalloc(&dev_c,N*sizeof(int));
// host inputs
for(int i=0;i<N;i++){
a[i]=i;
b[i]=i*i;
}
cudaMemcpy(dev_a,a,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,N*sizeof(int),cudaMemcpyHostToDevice);
// <<<blocks, threads-per-block>>>: a single block of N threads
add<<<1,N>>>(dev_a,dev_b,dev_c);
// blocking copy: also waits for the kernel to finish
cudaMemcpy(c,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++){
cout<<c[i]<<endl;
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} |
23,881 | #include <stdio.h>
#include <stdlib.h>
// Measure host-to-device copy time for ~3.6 GB of doubles using CUDA events.
int main() {
unsigned int N = 450000000;
// 450M * 8 bytes = 3.6e9: use size_t for byte counts — the product sits
// within ~16% of UINT_MAX and any growth would silently overflow a
// 32-bit `unsigned int`.
size_t bytes = (size_t)N*sizeof(double);
// Host Initialization
double *h_a;
h_a = (double*)malloc(bytes);
// a 3.6 GB allocation can legitimately fail — don't dereference NULL
if (h_a == NULL) {
printf("host malloc failed\n");
return 1;
}
for (unsigned int i=0; i<N; i++)
h_a[i] = 2.0f;
// Device Initialization
double *d_a;
cudaMalloc(&d_a, bytes);
// Event Initialization
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milli = 0;
cudaEventRecord(start,0);
cudaMemcpy(d_a,h_a,bytes,cudaMemcpyHostToDevice);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli,start,stop);
printf("%f ms\n",milli);
// Cleanup
free(h_a);
cudaFree(d_a);
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
|
23,882 | /* Ray-Triangle Intersection Test Routines */
/* Different optimizations of my and Ben Trumbore's */
/* code from journals of graphics tools (JGT) */
/* http://www.acm.org/jgt/ */
/* by Tomas Moller, May 2000 */
#include <math.h>
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "RayTriangleIntersect.cuh"
#define EPSILON 0.000001
#define CROSS(dest,v1,v2) \
dest[0]=v1[1]*v2[2]-v1[2]*v2[1]; \
dest[1]=v1[2]*v2[0]-v1[0]*v2[2]; \
dest[2]=v1[0]*v2[1]-v1[1]*v2[0];
#define DOT(v1,v2) (v1[0]*v2[0]+v1[1]*v2[1]+v1[2]*v2[2])
#define SUB(dest,v1,v2) \
dest[0]=v1[0]-v2[0]; \
dest[1]=v1[1]-v2[1]; \
dest[2]=v1[2]-v2[2];
/* code rewritten to do tests on the sign of the determinant */
/* the division is before the test of the sign of the det */
/* and one CROSS has been moved out from the if-else if-else */
// Moller-Trumbore ray/triangle intersection (device version). Returns 1 on a
// hit with *t = ray parameter and (*u, *v) = barycentric coordinates, else 0.
// Division by det happens before the sign-of-det tests; the bounds checks are
// done against det (scaled) and the final results are rescaled by inv_det.
// NOTE(review): inv_det = 1/det is computed before det is known to be
// non-near-zero, so it can be huge/inf for degenerate rays — harmless only
// because those paths return 0 before *t is used; confirm if refactoring.
__device__ int intersect_triangle3(float orig[3], float dir[3],
float vert0[3], float vert1[3], float vert2[3],
float* t, float* u, float* v)
{
//std::cout << "vert0 = " << vert0[0] << ", " << vert0[1] << ", " << vert0[2] << std::endl;
//std::cout << "vert1 = " << vert1[0] << ", " << vert1[1] << ", " << vert1[2] << std::endl;
//std::cout << "vert2 = " << vert2[0] << ", " << vert2[1] << ", " << vert2[2] << std::endl;
//std::cout << "orig = " << orig[0] << ", " << orig[1] << ", " << orig[2] << std::endl;
float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3];
float det, inv_det;
/* find vectors for two edges sharing vert0 */
SUB(edge1, vert1, vert0);
SUB(edge2, vert2, vert0);
/* begin calculating determinant - also used to calculate U parameter */
CROSS(pvec, dir, edge2);
/* if determinant is near zero, ray lies in plane of triangle */
det = DOT(edge1, pvec);
/* calculate distance from vert0 to ray origin */
SUB(tvec, orig, vert0);
inv_det = 1.0 / det;
CROSS(qvec, tvec, edge1);
if (det > EPSILON)
{
/* front-facing triangle: u, v tested against positive det */
*u = DOT(tvec, pvec);
if (*u < 0.0 || *u > det)
return 0;
/* calculate V parameter and test bounds */
*v = DOT(dir, qvec);
if (*v < 0.0 || *u + *v > det)
return 0;
}
else if (det < -EPSILON)
{
/* back-facing triangle: bounds are mirrored (det negative) */
/* calculate U parameter and test bounds */
*u = DOT(tvec, pvec);
if (*u > 0.0 || *u < det)
return 0;
/* calculate V parameter and test bounds */
*v = DOT(dir, qvec);
if (*v > 0.0 || *u + *v < det)
return 0;
}
else return 0; /* ray is parallell to the plane of the triangle */
*t = DOT(edge2, qvec) * inv_det;
(*u) *= inv_det;
(*v) *= inv_det;
/* only intersections in front of the ray origin count */
if (*t > 0)
{
return 1;
}
else
{
return 0;
}
}
/* code rewritten to do tests on the sign of the determinant */
/* the division is before the test of the sign of the det */
/* and one CROSS has been moved out from the if-else if-else */
// CPU twin of intersect_triangle3 (identical Moller-Trumbore logic), kept for
// host-side reference/validation. Returns 1 on a hit with *t = ray parameter
// and (*u, *v) = barycentric coordinates, else 0.
// NOTE(review): inv_det = 1/det is computed before det is validated — see the
// matching note on the device version; keep the two implementations in sync.
int intersect_triangleCPU(float orig[3], float dir[3],
float vert0[3], float vert1[3], float vert2[3],
float* t, float* u, float* v)
{
//std::cout << "vert0 = " << vert0[0] << ", " << vert0[1] << ", " << vert0[2] << std::endl;
//std::cout << "vert1 = " << vert1[0] << ", " << vert1[1] << ", " << vert1[2] << std::endl;
//std::cout << "vert2 = " << vert2[0] << ", " << vert2[1] << ", " << vert2[2] << std::endl;
//std::cout << "orig = " << orig[0] << ", " << orig[1] << ", " << orig[2] << std::endl;
float edge1[3], edge2[3], tvec[3], pvec[3], qvec[3];
float det, inv_det;
/* find vectors for two edges sharing vert0 */
SUB(edge1, vert1, vert0);
SUB(edge2, vert2, vert0);
/* begin calculating determinant - also used to calculate U parameter */
CROSS(pvec, dir, edge2);
/* if determinant is near zero, ray lies in plane of triangle */
det = DOT(edge1, pvec);
/* calculate distance from vert0 to ray origin */
SUB(tvec, orig, vert0);
inv_det = 1.0 / det;
CROSS(qvec, tvec, edge1);
if (det > EPSILON)
{
/* front-facing triangle: u, v tested against positive det */
*u = DOT(tvec, pvec);
if (*u < 0.0 || *u > det)
return 0;
/* calculate V parameter and test bounds */
*v = DOT(dir, qvec);
if (*v < 0.0 || *u + *v > det)
return 0;
}
else if (det < -EPSILON)
{
/* back-facing triangle: bounds are mirrored (det negative) */
/* calculate U parameter and test bounds */
*u = DOT(tvec, pvec);
if (*u > 0.0 || *u < det)
return 0;
/* calculate V parameter and test bounds */
*v = DOT(dir, qvec);
if (*v > 0.0 || *u + *v < det)
return 0;
}
else return 0; /* ray is parallell to the plane of the triangle */
*t = DOT(edge2, qvec) * inv_det;
(*u) *= inv_det;
(*v) *= inv_det;
/* only intersections in front of the ray origin count */
if (*t > 0)
{
return 1;
}
else
{
return 0;
}
}
//block per origin
// One block per origin: threads stride over the triangle list counting ray
// intersections; an even per-origin total marks the origin as outside
// (parity test), clearing *inside.
__global__ void intersect_triangleGPU_BlockPerOrigin(float3* origins, float dir[3],
int3* triangles, float3* vertices, int numberOfTriangles, bool* inside) // , int* intersectionsPerOrigin, float3* outsideVertices
{
int threadidx = threadIdx.x;
float orig[3] = { origins[blockIdx.x].x, origins[blockIdx.x].y, origins[blockIdx.x].z };
__shared__ int intersectionsPerBlock[1];
// Only one thread may zero the shared counter, and every thread must pass
// a barrier before the atomicAdds begin: in the original code all threads
// wrote the 0, so a late zero-write could wipe counts a faster thread had
// already accumulated.
if (threadIdx.x == 0) {
intersectionsPerBlock[0] = 0;
}
__syncthreads();
int numberOfIntersections = 0;
while (threadidx < numberOfTriangles) {
float vert0[3] = { vertices[triangles[threadidx].x].x, vertices[triangles[threadidx].x].y, vertices[triangles[threadidx].x].z };
float vert1[3] = { vertices[triangles[threadidx].y].x, vertices[triangles[threadidx].y].y, vertices[triangles[threadidx].y].z };
float vert2[3] = { vertices[triangles[threadidx].z].x, vertices[triangles[threadidx].z].y, vertices[triangles[threadidx].z].z };
float t, u, v;
if (intersect_triangle3(orig, dir, vert0, vert1, vert2, &t, &u, &v) == 1)
{
numberOfIntersections += 1;
}
threadidx += blockDim.x;
}
atomicAdd(&intersectionsPerBlock[0], numberOfIntersections);
// wait until every thread has contributed before reading the total
__syncthreads();
if (intersectionsPerBlock[0] % 2 == 0)
{
*inside = false;
}
}
//thread per origin
// One thread per origin: each thread tests its origin's ray against every
// triangle; an even intersection count (parity test) marks the origin as
// outside by clearing the shared *inside flag. Note `dir` decays to a
// device pointer (float[3] parameter), shared by all threads.
__global__ void intersect_triangleGPU_ThreadPerOrigin(float3* origins, float dir[3],
int3* triangles, float3* vertices, int numberOfOrigins, int numberOfTriangles, bool* inside)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < numberOfOrigins)
{
float orig[3] = { origins[tid].x, origins[tid].y, origins[tid].z };
int numberOfIntersections = 0;
for (int i = 0; i < numberOfTriangles; i++)
{
//if (*inside) {
float vert0[3] = { vertices[triangles[i].x].x, vertices[triangles[i].x].y, vertices[triangles[i].x].z };
float vert1[3] = { vertices[triangles[i].y].x, vertices[triangles[i].y].y, vertices[triangles[i].y].z };
float vert2[3] = { vertices[triangles[i].z].x, vertices[triangles[i].z].y, vertices[triangles[i].z].z };
float t, u, v;
if (intersect_triangle3(orig, dir, vert0, vert1, vert2, &t, &u, &v) == 1)
{
numberOfIntersections++;
}
/*}
else {
return;
}*/
}
//intersectionsPerOrigin[tid] = numberOfIntersections;
// even hit count => this origin is outside; only writes false, so
// concurrent writes from many threads are benign
if (numberOfIntersections % 2 == 0)
{
*inside = false;
//return;
/*outsideVertices[tid].x = orig[0];
outsideVertices[tid].y = orig[1];
outsideVertices[tid].z = orig[2];*/
}
}
}
//thread per triangle
// One thread per triangle: test this thread's triangle against every
// origin's ray and atomically bump the per-origin intersection counter on
// each hit.
__global__ void intersect_triangleGPU_ThreadPerTriangle(float3* origins, float dir[3],
int3* triangles, float3* vertices, int numberOfOrigins, int numberOfTriangles, int* intersectionsPerOrigin)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= numberOfTriangles)
return;
// load this triangle's three vertices once
float vert0[3] = { vertices[triangles[tid].x].x, vertices[triangles[tid].x].y, vertices[triangles[tid].x].z };
float vert1[3] = { vertices[triangles[tid].y].x, vertices[triangles[tid].y].y, vertices[triangles[tid].y].z };
float vert2[3] = { vertices[triangles[tid].z].x, vertices[triangles[tid].z].y, vertices[triangles[tid].z].z };
for (int i = 0; i < numberOfOrigins; i++)
{
float orig[3] = { origins[i].x, origins[i].y, origins[i].z };
float t, u, v;
if (intersect_triangle3(orig, dir, vert0, vert1, vert2, &t, &u, &v) == 1)
{
atomicAdd(&intersectionsPerOrigin[i], 1);
}
}
}
//block per triangle
// One block per triangle: the block's threads stride over the origin list,
// atomically bumping each origin's intersection counter on a hit.
__global__ void intersect_triangleGPU_BlockPerTriangle(float3* origins, float dir[3],
int3* triangles, float3* vertices, int numberOfOrigins, int* intersectionsPerOrigin) // , int* intersectionsPerOrigin, float3* outsideVertices
{
// load this block's triangle once
float vert0[3] = { vertices[triangles[blockIdx.x].x].x, vertices[triangles[blockIdx.x].x].y, vertices[triangles[blockIdx.x].x].z };
float vert1[3] = { vertices[triangles[blockIdx.x].y].x, vertices[triangles[blockIdx.x].y].y, vertices[triangles[blockIdx.x].y].z };
float vert2[3] = { vertices[triangles[blockIdx.x].z].x, vertices[triangles[blockIdx.x].z].y, vertices[triangles[blockIdx.x].z].z };
for (int oi = threadIdx.x; oi < numberOfOrigins; oi += blockDim.x) {
float orig[3] = { origins[oi].x, origins[oi].y, origins[oi].z };
float t, u, v;
if (intersect_triangle3(orig, dir, vert0, vert1, vert2, &t, &u, &v) == 1)
{
atomicAdd(&intersectionsPerOrigin[oi], 1);
}
}
} |
23,883 | ////12163291 ˰ HW1
//#pragma warning(disable: 4819) //
//
//#include<stdio.h>
//#include<iostream>
//#include <cuda_runtime.h>
//#include <cuda.h>
//#include <time.h> //for
//#include <math.h>
//
//using namespace std;
//
//#define DATASIZE 1048576 //2048 131072 262144 1048576 ȵƴµ ÷ο ִ
//#define BLOCK_SIZE 2048 //1024 2048 2048 2048
////524288 1024 ɷ DZ 2097152 //80,,
////1048576 1024 ƹٷ..
////1048576 2048 4194304 0.21 0.204
////1048576 4089 0.211ʵ 켱 2048̶ ̴
//
//void print_elapsed(clock_t start, clock_t stop) //ð¿
//{
// double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
// printf("CUDA ODD EVEN MERGE\nҿð : %.3fs\n", elapsed);
//}
//
//__global__ void oddevensort(int* input, unsigned int len, int i)
//{
// // ǥ
// unsigned int tx = threadIdx.x;
//
// //ü ̹ ǥ
// unsigned int x = tx + blockDim.x * blockIdx.x;
// //̵ ӽ
// int temp;
//
// //ڷ ̸ŭ µ, ε(i) ¦̸ ¦ڸ ڸ .
// //ε Ȧ̸ Ȧڸ ڸ ؼ Ѵ.
// if (i % 2 == 0)
// {
// // ̸ ָ ִ ڸ ִ ڷᰡ Ƣ .
// if (input[x] > input[x + 1] && x < len && x % 2 == 0)
// {
// temp = input[x + 1];
// input[x + 1] = input[x];
// input[x] = temp;
// }
// }
// else
// {
// if (input[x] > input[x + 1] && x < len && x % 2 != 0)
// {
// temp = input[x + 1];
// input[x + 1] = input[x];
// input[x] = temp;
// }
// }
// __syncthreads();
//}
//
//void array_print(int* arr, int length)
//{
// int i;
// for (i = 0; i < length; ++i) {
// printf("%d ", arr[i]);
// }
// printf("\n");
//}
//
//void array_fill(int* arr, int length)
//{
// srand((int)time(NULL));
//
// for (int i = 0; i < length; ++i) {
// arr[i] = rand() % 100;
// }
//}
//
//__global__ void bitonic_sort_step(int* dev_values, int j, int k)
//{
// unsigned int i, ixj; /* Sorting partners: i and ixj */
// i = threadIdx.x + blockDim.x * blockIdx.x;
// ixj = i ^ j;
//
// /* The threads with the lowest ids sort the array. */
// if ((ixj) > i) {
// if ((i & k) == 0) {
// /* Sort ascending */
// if (dev_values[i] > dev_values[ixj]) {
// /* exchange(i,ixj); */
// int temp = dev_values[i];
// dev_values[i] = dev_values[ixj];
// dev_values[ixj] = temp;
// }
// }
// if ((i & k) != 0) {
// /* Sort descending */
// if (dev_values[i] < dev_values[ixj]) {
// /* exchange(i,ixj); */
// int temp = dev_values[i];
// dev_values[i] = dev_values[ixj];
// dev_values[ixj] = temp;
// }
// }
// }
//}
//
//clock_t start, stop = 0;
//int TestInput[DATASIZE], TestOutput[DATASIZE];
//
//int main(){
// cout << "12163291\n\n";
//
// srand(time(NULL));
//
// for (int i = 0; i < DATASIZE; i++) {
// TestInput[i] = rand() % 100; //ݴϴ.
// }
//
// //device
// int* devInput, * devOutput;
// //ϴ ũ ƴϱ
// unsigned int MemDataSize = DATASIZE * sizeof(int);
//
// // device ڸ ݴϴ.
// cudaMalloc((void**)&devInput, MemDataSize);
// cudaMalloc((void**)&devOutput, MemDataSize);
// cudaMemset(devOutput, 0, MemDataSize);
//
// // ڸ 縦 ݴϴ.00000
// cudaMemcpy(devInput, TestInput, MemDataSize, cudaMemcpyHostToDevice);
//
// // block ũ մϴ.
// dim3 dimBlocksize(BLOCK_SIZE);
// dim3 dimGridsize(ceil((DATASIZE - 1) / (float)BLOCK_SIZE) + 1);
// // ϴ Max min ˾ƾ մϴ.
// // ϴ cpu ðݴϴ.
//
// start = clock();
// for (int i = 0; i < DATASIZE; i++) {
// oddevensort << < dimGridsize, dimBlocksize >> > (devInput, DATASIZE, i);
// }
// stop = clock();
//
// // testoutput մϴ.
// cudaMemcpy(TestOutput, devInput, MemDataSize, cudaMemcpyDeviceToHost);
//
// /* for (int i = 0; i < DATASIZE; i++) {
// cout << TestOutput[i] << ", "; // Ȯο
// if ((i + 1) % 16 == 0) {
// cout << endl;
// }
// }*/
// cout << endl << "迭 ũ : " << sizeof(TestOutput) << endl << endl;
// print_elapsed(start, stop); //ɸð ݴϴ.
//
// // GPU ڸ free.
// //malloc - free ¦.
// // մϴ.
// cudaFree(devInput);
// cudaFree(devOutput);
//
// return 0;
//}
//
////https://github.com/ptjoker95/OddEvenSort-with-CUDA/blob/master/oddevensort.cu
////ó ϴ. |
23,884 | #include <stdio.h>
#include <vector>
// CUDA device kernel: element-wise C[i] = A[i] + B[i] over array_size floats.
// Written as a grid-stride loop so any grid/block configuration is correct.
__global__ void vector_add(const float *A, const float *B, float *C,
                           size_t array_size) {
  // BUG FIX: the original used only threadIdx.x as the starting index, so
  // with more than one block every block processed identical indices and any
  // element with (i mod total_threads) >= blockDim.x was never computed.
  // Start from the *global* thread id instead.
  size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
  size_t total_threads = (size_t)gridDim.x * blockDim.x;
  for (size_t i = id; i < array_size; i += total_threads) {
    C[i] = A[i] + B[i];
  }
}
int main() {
  // Host driver: add two 256-element vectors on the device and copy the
  // result back. Launch geometry is derived from the device's block limit.
  const size_t array_size = 256;
  std::vector<float> A(array_size, 1.0f);
  std::vector<float> B(array_size, 1.0f);
  std::vector<float> C(array_size);
  // allocating device memory
  float *A_dev;
  float *B_dev;
  float *C_dev;
  cudaMalloc((void **)&A_dev, array_size * sizeof(float));
  cudaMalloc((void **)&B_dev, array_size * sizeof(float));
  cudaMalloc((void **)&C_dev, array_size * sizeof(float));
  // explicitly copying data from host to device
  cudaMemcpy(A_dev, A.data(), array_size * sizeof(float),
             cudaMemcpyHostToDevice);
  cudaMemcpy(B_dev, B.data(), array_size * sizeof(float),
             cudaMemcpyHostToDevice);
  // getting device property in order to query device parameters
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  const size_t max_thread_per_block = prop.maxThreadsPerBlock;
  const size_t num_thread_per_block =
      std::min(max_thread_per_block, array_size);
  const size_t num_block_per_grid =
      (size_t)std::ceil(((float)array_size) / num_thread_per_block);
  // constructing block size
  dim3 block_size(num_thread_per_block, 1, 1);
  // constructing number of blocks (grid size)
  dim3 num_blocks(num_block_per_grid, 1, 1);
  // launching and executing cuda kernel
  vector_add<<<num_blocks, block_size>>>(A_dev, B_dev, C_dev, array_size);
  // IMPROVEMENT: a kernel launch returns no status; query the runtime
  // explicitly so a bad launch configuration is not silently ignored.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "vector_add launch failed: %s\n", cudaGetErrorString(err));
    return EXIT_FAILURE;
  }
  // returning result to the host vector (blocking copy also synchronizes
  // with the kernel)
  cudaMemcpy(C.data(), C_dev, array_size * sizeof(float),
             cudaMemcpyDeviceToHost);
  // releasing the cuda memory objects
  cudaFree(A_dev);
  cudaFree(B_dev);
  cudaFree(C_dev);
  return EXIT_SUCCESS;
}
|
23,885 | #include <stdio.h>
#include <iostream>
#include <unistd.h>
#include <sys/time.h>
// Shorthand for formatting and printing usage options to stderr
#define fpe(msg) fprintf(stderr, "\t%s\n", msg);
// Shorthand for handling CUDA errors.
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )
using namespace std;
/*****************
* CUDA Utilites *
*****************/
void HandleError(cudaError_t err, const char *file, int line) {
  // Report a CUDA runtime error together with its source location, then
  // terminate the process. No-op on cudaSuccess.
  if (cudaSuccess == err) {
    return;
  }
  printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
  exit(EXIT_FAILURE);
}
void checkCUDAError(const char *msg, bool exitOnError) {
  // Fetch (and clear) the runtime's last error; report it to stderr with the
  // caller-supplied context message, optionally aborting the process.
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess == err) {
    return;
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
  if (exitOnError) {
    exit(-1);
  }
}
void cleanupCuda(void) {
  //
  // Clean up CUDA resources.
  //
  // IMPROVEMENT: cudaThreadExit() has been deprecated for years;
  // cudaDeviceReset() is its documented replacement and explicitly destroys
  // all allocations and state on the current device for the calling process.
  //
  HANDLE_ERROR(
    cudaDeviceReset()
  );
}
/*********************
* End CUDA Utilites *
*********************/
// Parsed command-line configuration for the Jacobi runs.
struct Args {
    bool debug;        // -D: dump resulting data after the run
    bool sequential;   // -S: run the sequential CPU version
    bool blocked;      // -B: run the blocked sequential CPU version
    bool overlapped;   // -O: run the overlapped-tiling CPU version
    // Data attributes
    int size, dimensions, alloc_size;  // edge length, 1/2/3-D, total element count
    int xSize, ySize, zSize;
    int xBlockSize, yBlockSize, zBlockSize, tBlockSize;
    // Run attributes
    int grid_size, block_count, thread_count, iterations;
};
// Print an optional error message plus the option summary to stderr, then
// terminate with a failure status. Used for -h and for argument errors.
void usage(char *prog_name, string msg) {
    if (msg.size() > 0) {
        fputs(msg.c_str(), stderr);
    }
    fprintf(stderr, "%s\n", prog_name);
    fprintf(stderr, "Options are:\n");
    fpe("-n<size> Set data size (default: 1024)");
    fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 2)");
    fpe("-g<size> Set grid size");
    fpe("-b<num> Set block count");
    fpe("-t<num> Set thread count");
    fpe("-i<iter> Number of iterations to perform (default: 1000)");
    fpe("-x<size> X Dimension");
    fpe("-y<size> Y Dimension");
    fpe("-z<size> Z Dimension");
    fpe("-T<size> T Dimension");
    fpe("-S Execute sequential, CPU version");
    fpe("-B Execute blocked sequential, CPU version");
    fpe("-O Execute sequential overlapped tiling, CPU version");
    fpe("-D Print debug info");
    fpe("-h Print usage info (this message)");
    exit(EXIT_FAILURE);
}
// Parse command-line flags into an Args record, apply defaults, and derive
// alloc_size plus a consistent block/thread decomposition.
Args parse_arguments(int argc, char *argv[]) {
    Args args = Args();
    args.debug = false;
    args.sequential = false;
    args.blocked = false;
    args.overlapped = false;
    args.size = 1024;
    args.dimensions = 2;
    args.xSize = args.ySize = args.zSize = 1;
    args.xBlockSize = args.yBlockSize = args.zBlockSize = 1;
    // BUG FIX: tBlockSize was never initialized; if -T is not given it held
    // indeterminate garbage.
    args.tBlockSize = 1;
    args.grid_size = 1;
    args.block_count = -1;
    args.thread_count = -1;
    args.iterations = 1000;
    int opt;
    // Parse args.
    // BUG FIX: 'X', 'Y', 'Z' cases existed in the switch below, but the
    // optstring lacked "X:Y:Z:", so getopt could never produce them.
    while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:X:y:Y:z:Z:T:hSBOD")) != -1) {
        switch (opt) {
            case 'D':
                args.debug = true;
                break;
            case 'S':
                args.sequential = true;
                break;
            case 'B':
                args.blocked = true;
                break;
            case 'O':
                args.overlapped = true;
                break;
            case 'n':
                args.size = atoi(optarg);
                break;
            case 'd':
                args.dimensions = atoi(optarg);
                break;
            case 'g':
                args.grid_size = atoi(optarg);
                break;
            case 'b':
                args.block_count = atoi(optarg);
                break;
            case 't':
                args.thread_count = atoi(optarg);
                break;
            case 'i':
                args.iterations = atoi(optarg);
                break;
            case 'x':
                args.xBlockSize = atoi(optarg);
                break;
            case 'X':
                args.xSize = atoi(optarg);
                break;
            case 'y':
                args.yBlockSize = atoi(optarg);
                break;
            case 'Y':
                args.ySize = atoi(optarg);
                break;
            case 'z':
                args.zBlockSize = atoi(optarg);
                break;
            case 'Z':
                args.zSize = atoi(optarg);
                break;
            case 'T':
                args.tBlockSize = atoi(optarg);
                break;
            case 'h':
                usage(argv[0], "");
                break;
            default:
                usage(argv[0], "Unrecognized option\n");
        }
    }
    // check sizes
    if (args.size <= 0) {
        cout << "Data size must be larger than 0" << endl;
        exit(EXIT_FAILURE);
    }
    if (args.dimensions <= 0 || args.dimensions >= 4) {
        cerr << "Data must be 1, 2, or 3 dimensions" << endl;
        exit(EXIT_FAILURE);
    }
    // Total element count for the chosen dimensionality.
    if (args.dimensions == 1) {
        args.alloc_size = args.size;
    } else if (args.dimensions == 2) {
        args.alloc_size = args.size * args.size;
    } else {
        args.alloc_size = args.size * args.size * args.size;
    }
    // Derive whichever of thread_count / block_count was not given.
    if (args.thread_count > 0) {
        args.block_count = args.alloc_size / args.thread_count;
    } else if (args.block_count > 0) {
        args.thread_count = args.alloc_size / args.block_count;
    } else {
        args.thread_count = 16;
        args.block_count = args.alloc_size / args.thread_count;
    }
    return args;
}
// Dense matrix holder for 1/2/3-D data; elements are laid out x-fastest,
// then y, then z. The same struct is used for host- and device-resident
// buffers — only `elements` points into the respective address space.
typedef struct {
    int dimensions;   // number of meaningful axes (1, 2, or 3)
    int height;       // y extent (1 when dimensions == 1)
    int width;        // x extent
    int depth;        // z extent (1 unless dimensions == 3)
    float* elements;  // width * height * depth floats
} Matrix;
// Build a host matrix whose boundary cells are 1.0 and interior cells 0.0
// (the standard Dirichlet setup for the Jacobi kernels below).
//
// BUG FIXES vs. the original:
//  - `elements` was malloc'd and only the boundary was written, leaving every
//    interior cell uninitialized (garbage input => nondeterministic results);
//    use calloc so the interior starts at 0.0.
//  - `data.dimensions` was never assigned even though initialize_device()
//    copies it; set it from the argument.
Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth = 1) {
    Matrix data;
    data.dimensions = dimensions;
    if (dimensions == 1 && width > 1) {
        data.width = width;
        data.height = 1;
        data.depth = 1;
        data.elements = (float *) calloc(width, sizeof(float));
        data.elements[0] = 1.0;
        data.elements[width - 1] = 1.0;
    } else if (dimensions == 2 && width > 1 && height > 1) {
        data.width = width;
        data.height = height;
        data.depth = 1;
        data.elements = (float *) calloc(width * height, sizeof(float));
        // Top and bottom rows.
        for (int y = 0; y < height; y += height - 1) {
            for (int x = 0; x < width; x++) {
                data.elements[y * width + x] = 1.0;
            }
        }
        // Left and right columns.
        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x += width - 1) {
                data.elements[y * width + x] = 1.0;
            }
        }
    } else if (dimensions == 3 && width > 1 && height > 1 && depth > 1) {
        data.width = width;
        data.height = height;
        data.depth = depth;
        data.elements = (float *) calloc(width * height * depth, sizeof(float));
        for (int z = 0; z < depth; z++) {
            // X = 0 & N planes
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x += width - 1) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
            // Y = 0 & N planes
            for (int y = 0; y < height; y += height - 1) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
        // Z = 0 & N planes
        for (int z = 0; z < depth; z += depth - 1) {
            for (int y = 0; y < height; y++) {
                for (int x = 0; x < width; x++) {
                    data.elements[z * width * height + y * width + x] = 1.0;
                }
            }
        }
    } else {
        fprintf(stderr, "Improper dimension or size.");
        exit(1);
    }
    return data;
}
/****************
* CUDA KERNELS *
****************/
// One Jacobi relaxation step in 1D: each interior cell becomes the mean of
// itself and its two neighbours; boundary cells are left untouched.
// data and result must be distinct buffers (the caller ping-pongs them).
__global__ void jacobi1d(Matrix data, Matrix result) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    bool interior = (id > 0 && id < data.width - 1);
    float newValue = 0.0f;
    if (interior) {
        newValue = (data.elements[id - 1] + data.elements[id] + data.elements[id + 1]) / 3;
    }
    // BUG FIX: the original called __syncthreads() separately inside both
    // sides of a divergent if/else — a barrier in divergent control flow is
    // undefined behaviour even when every path contains one. Hoist a single
    // barrier out of the branch. (data/result are separate buffers, so the
    // barrier is conservative rather than strictly required.)
    __syncthreads();
    if (interior) {
        result.elements[id] = newValue;
    }
}
// One Jacobi relaxation step in 2D: each interior cell becomes the average of
// itself and its 4 neighbours; boundary cells are left untouched.
// data and result must be distinct buffers (the caller ping-pongs them).
__global__ void jacobi2d(Matrix data, Matrix result) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int index = x + y * data.width;
    bool interior = (x > 0 && x < data.width - 1 && y > 0 && y < data.height - 1);
    float newValue = 0.0f;
    if (interior) {
        newValue =
            (
                data.elements[index] +
                data.elements[index - 1] +            // x - 1
                data.elements[index + 1] +            // x + 1
                data.elements[index - data.width] +   // y - 1
                data.elements[index + data.width]     // y + 1
            ) * 0.2;
    }
    // BUG FIX: the barrier was previously called inside both sides of a
    // divergent if/else; __syncthreads() in divergent control flow is
    // undefined behaviour. Hoist a single barrier out of the branch.
    __syncthreads();
    if (interior) {
        result.elements[index] = newValue;
    }
}
// One Jacobi relaxation step in 3D: each interior cell becomes the average of
// itself and its 6 neighbours; boundary cells are left untouched.
// data and result must be distinct buffers (the caller ping-pongs them).
__global__ void jacobi3d(Matrix data, Matrix result) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    int xySurface = data.width * data.height;
    int index = x + y * data.width + z * xySurface;
    bool interior = (x > 0 && x < data.width - 1 &&
                     y > 0 && y < data.height - 1 &&
                     z > 0 && z < data.depth - 1);
    float newValue = 0.0f;
    if (interior) {
        newValue =
            (
                data.elements[index] +
                data.elements[index - 1] +            // x - 1
                data.elements[index + 1] +            // x + 1
                data.elements[index - data.width] +   // y - 1
                data.elements[index + data.width] +   // y + 1
                data.elements[index - xySurface] +    // z - 1
                data.elements[index + xySurface]      // z + 1
            ) / 7;
    }
    // BUG FIX: the barrier was previously called inside both sides of a
    // divergent if/else; __syncthreads() in divergent control flow is
    // undefined behaviour. Hoist a single barrier out of the branch.
    __syncthreads();
    if (interior) {
        result.elements[index] = newValue;
    }
}
/********************
* END CUDA KERNELS *
********************/
Matrix initialize_device(Matrix A) {
    // Mirror a host matrix on the device: same metadata, device-resident
    // element buffer filled with a copy of the host data.
    Matrix dev;
    dev.width = A.width;
    dev.height = A.height;
    dev.depth = A.depth;
    dev.dimensions = A.dimensions;
    size_t bytes = A.width * A.height * A.depth * sizeof(float);
    HANDLE_ERROR(cudaMalloc((void **) &dev.elements, bytes));
    HANDLE_ERROR(cudaMemcpy(dev.elements, A.elements, bytes, cudaMemcpyHostToDevice));
    return dev;
}
// Run args.iterations Jacobi steps on the device, ping-ponging between two
// device buffers, then copy the final state back into B.elements.
// Launch geometry: 32/16/8 threads per axis for 1/2/3-D respectively,
// clamped to the data size.
void callKernel(Args args, Matrix A, Matrix B) {
    Matrix deviceA, deviceB;
    deviceA = initialize_device(A);
    deviceB = initialize_device(B);
    if (args.dimensions == 1) {
        dim3 blocks(max(args.size/32, 1));
        dim3 threads(min(args.size, 32));
        for (int t = 0; t < args.iterations; t++) {
            jacobi1d<<<blocks, threads>>>(deviceA, deviceB);
            swap(deviceA, deviceB);
        }
    } else if (args.dimensions == 2) {
        dim3 blocks(max(args.size/16, 1), max(args.size/16, 1));
        dim3 threads(min(args.size, 16), min(args.size, 16));
        for (int t = 0; t < args.iterations; t++) {
            jacobi2d<<<blocks, threads>>>(deviceA, deviceB);
            swap(deviceA, deviceB);
        }
    } else {
        dim3 blocks(max(args.size/8, 1), max(args.size/8, 1), max(args.size/8, 1));
        dim3 threads(min(args.size, 8), min(args.size, 8), min(args.size, 8));
        for (int t = 0; t < args.iterations; t++) {
            jacobi3d<<<blocks, threads>>>(deviceA, deviceB);
            swap(deviceA, deviceB);
        }
    }
    // After the final swap, deviceA holds the most recent result.
    cudaMemcpy(B.elements, deviceA.elements, A.width * A.height * A.depth * sizeof(float), cudaMemcpyDeviceToHost);
    // BUG FIX: the device buffers were never released — a leak on every call.
    cudaFree(deviceA.elements);
    cudaFree(deviceB.elements);
}
void print_data(float *data, int size, int dimensions) {
    // Dump the matrix to stdout: one row per line, and (in 3D) a blank line
    // between z-slices. `size` is the edge length along every axis.
    if (dimensions == 1) {
        for (int i = 0; i < size; ++i) {
            printf("%.3f ", data[i]);
        }
    } else if (dimensions == 2) {
        for (int r = 0; r < size; ++r) {
            for (int c = 0; c < size; ++c) {
                printf("%.3f ", data[r * size + c]);
            }
            cout << endl;
        }
    } else if (dimensions == 3) {
        for (int s = 0; s < size; ++s) {
            for (int r = 0; r < size; ++r) {
                for (int c = 0; c < size; ++c) {
                    printf("%.3f ", data[s * size * size + r * size + c]);
                }
                cout << endl;
            }
            cout << endl;
        }
    }
    cout << endl << endl;
}
int main(int argc, char *argv[]) {
    // Parse options, run the requested Jacobi solve on the GPU, and report
    // the wall-clock time (and optionally the resulting data).
    Args args = parse_arguments(argc, argv);
    Matrix A, B;
    A = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    B = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    float runtime;
    struct timeval start, end;
    gettimeofday(&start, NULL);
    callKernel(args, A, B);
    gettimeofday(&end, NULL);
    runtime = ((end.tv_sec - start.tv_sec) * 1000.0) + ((end.tv_usec - start.tv_usec) / 1000.0);
    printf("Processing Time: %4.4f milliseconds\n", runtime);
    if (args.debug) { print_data(B.elements, args.size, args.dimensions); }
    // IMPROVEMENT: release the host buffers (previously leaked until exit).
    free(A.elements);
    free(B.elements);
    return 0;
}
|
23,886 | #include "includes.h"
// Backward pass of 3D max pooling: scatter each pooled gradient back to the
// input point that produced the max. Grid-strides over the B batches and
// block-strides over the M*C (point, channel) output slots.
__global__ void max_pool3d_backward(int B, int N, int M, int C, const int* maxIndex, const float* gradOutput, float* gradInput)
{
	for (int b = blockIdx.x; b < B; b += gridDim.x)
	{
		for (int mc = threadIdx.x; mc < M * C; mc += blockDim.x)
		{
			int channel = mc % C;
			int srcPoint = maxIndex[b * M * C + mc];
			// Several output slots may map to the same input point, so the
			// accumulation must be atomic.
			atomicAdd(&gradInput[b * N * C + srcPoint * C + channel], gradOutput[b * M * C + mc]);
		}
	}
}
23,887 | #include <stdio.h>
#include <cuda.h>
#define TILE_DIM 16
__global__
void multMats(float * A, float * B, float * C, int m, int n, int k)
{
    // Tiled matrix multiply: C(m x k) = A(m x n) * B(n x k).
    // Each block computes one TILE_DIM x TILE_DIM tile of C, streaming the
    // matching tiles of A and B through shared memory.
    __shared__ float sA[TILE_DIM][TILE_DIM];
    __shared__ float sB[TILE_DIM][TILE_DIM];

    int ty = threadIdx.y;
    int tx = threadIdx.x;
    int outRow = blockIdx.y * blockDim.y + ty;
    int outCol = blockIdx.x * blockDim.x + tx;
    float acc = 0;   // partial dot product for this thread's C element

    int numTiles = (n - 1) / TILE_DIM + 1;
    for (int tile = 0; tile < numTiles; ++tile)
    {
        int aCol = tile * TILE_DIM + tx;
        int bRow = tile * TILE_DIM + ty;
        // Stage one tile of A and one tile of B, zero-padding reads that
        // fall outside the matrices.
        sA[ty][tx] = (outRow < m && aCol < n) ? A[outRow * n + aCol] : 0.0f;
        sB[ty][tx] = (bRow < n && outCol < k) ? B[bRow * k + outCol] : 0.0f;
        __syncthreads();
        for (int e = 0; e < TILE_DIM; ++e)
            acc += sA[ty][e] * sB[e][tx];
        // Protect the shared tiles from being overwritten before everyone
        // has finished reading them.
        __syncthreads();
    }
    if (outRow < m && outCol < k)
        C[outRow * k + outCol] = acc;
}
int main(int argc, char ** argv)
{
	// Multiply two 1024x1024 all-ones matrices with the tiled kernel and
	// report the kernel time measured with CUDA events.
	float *hostA;
	float *hostB;
	float *hostC;
	float *deviceA;
	float *deviceB;
	float *deviceC;
	int m = 1024; // number of A rows
	int n = 1024; // number of A columns (or B rows)
	int k = 1024; // number of B columns
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	//allocate data in host
	hostA = (float *) malloc(m * n * sizeof(float));
	hostB = (float *) malloc(n * k * sizeof(float));
	hostC = (float *) malloc(m * k * sizeof(float));
	for (int i = 0; i < m*n; i++) //Matrix Initialization
		hostA[i]=1.0;
	for (int i = 0; i < n*k; i++)
		hostB[i]=1.0;
	//allocate data in device
	cudaMalloc((void **) &deviceA, m * n * sizeof(float));
	cudaMalloc((void **) &deviceB, n * k * sizeof(float));
	cudaMalloc((void **) &deviceC, m * k * sizeof(float));
	//copy inputs to device
	cudaMemcpy(deviceA, hostA, m * n * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(deviceB, hostB, n * k * sizeof(float), cudaMemcpyHostToDevice);
	//device kernel
	dim3 DimGrid((k-1)/TILE_DIM+1, (m-1)/TILE_DIM+1, 1);
	dim3 DimBlock(TILE_DIM, TILE_DIM, 1);
	cudaEventRecord(start);
	multMats<<<DimGrid,DimBlock>>>(deviceA, deviceB, deviceC, m, n, k);
	// FIX: record `stop` directly in the stream instead of first calling the
	// deprecated cudaThreadSynchronize() — the host-side sync inserted idle
	// time between the kernel and the stop event, inflating the measurement.
	cudaEventRecord(stop);
	//copy result back to host (stream-ordered after the kernel)
	cudaMemcpy(hostC, deviceC, m * k * sizeof(float), cudaMemcpyDeviceToHost);
	// Blocks CPU execution until stop has been recorded
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("Elapsed Time: %f milliseconds\n", milliseconds);
	// Destroying events
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	//deallocate device
	cudaFree(deviceA);
	cudaFree(deviceB);
	cudaFree(deviceC);
	//deallocate host
	free(hostA);
	free(hostB);
	free(hostC);
	return 0;
}
|
23,888 | //pass
//--gridDim=1 --blockDim=2 --no-inline
//This kernel is race-free.
//
//It uses uses memcpy and copies fewer bytes than the struct size so we have to
//handle the arrays in and out at the byte-level.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
// POD used to exercise byte-level memcpy reasoning in the verifier: two
// shorts followed by a char. The size comment below is part of the original
// test fixture.
typedef struct {
  short x;
  short y;
  char z;
} s_t; //< sizeof(s_t) == 6
// GPUVerify test kernel (expected to verify as race-free): each thread copies
// only the first 4 bytes of its own element — deliberately fewer than
// sizeof(s_t) — so the tool must reason about the arrays at byte granularity.
// Threads touch disjoint elements, hence no race.
__global__ void k(s_t *in, s_t *out) {
  size_t len = 4;  // intentionally < sizeof(s_t)
  memcpy(&out[threadIdx.x], &in[threadIdx.x], len);
}
|
23,889 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
unsigned char *pdata; // pointer to data content
// Read BMP header fields (pixel-data offset, dimensions, bits-per-pixel)
// from fixed offsets in "lena_color.bmp". pixLen is converted to bytes.
void getInfo(int *width, int *height, int *dataOffset, int *pixLen) {
	FILE *f;
	if (NULL == (f = fopen("lena_color.bmp", "rb"))) {
		printf("Fail to open the file1");
		exit(EXIT_FAILURE);
	}
	fseek(f, 0x00A, SEEK_SET);
	fread(dataOffset, sizeof(char) * 4, 1, f);   // bfOffBits: start of pixel data
	fseek(f, 0x012, SEEK_SET);
	fread(width, sizeof(char) * 4, 1, f);        // biWidth
	fseek(f, 0x016, SEEK_SET);
	fread(height, sizeof(char) * 4, 1, f);       // biHeight
	fseek(f, 0x01C, SEEK_SET);
	// BUG FIX: only 2 bytes of the int are read from the file; the caller
	// passes an uninitialized int, so the upper bytes held garbage. Zero the
	// destination before the partial read.
	*pixLen = 0;
	fread(pixLen, sizeof(char) * 2, 1, f);       // biBitCount
	*pixLen /= 8; // bits -> bytes
	printf("width = %d, height = %d, dataOffset = %d, pixLen = %d\n", *width, *height, *dataOffset, *pixLen);
	fclose(f);
}
void getData(int width, int height, int dataOffset, int pixLen) {
	// Load the raw pixel payload of "lena_color.bmp" into the global pdata
	// buffer (which the caller has already sized to width*height*pixLen).
	FILE *f = fopen("lena_color.bmp", "rb");
	if (NULL == f) {
		printf("Fail to open the file2");
		exit(EXIT_FAILURE);
	}
	fseek(f, dataOffset, SEEK_SET);
	int size = fread(pdata, sizeof(unsigned char), width * height * pixLen, f);
	printf("Data size = %d byte \n", size);
	fclose(f);
}
// Duplicate "lena_color.bmp" byte-for-byte into "result.bmp".
void copy() {
	FILE *r, *w;
	unsigned char buf[1024];
	if (NULL == (r = fopen("lena_color.bmp", "rb"))) {
		printf("Fail to open the file3");
		exit(EXIT_FAILURE);
	}
	if (NULL == (w = fopen("result.bmp", "wb"))) {
		printf("Fail to open the file4");
		exit(EXIT_FAILURE);
	}
	// BUG FIX: the original always wrote a full 1024-byte chunk, so when the
	// file size is not a multiple of 1024 the copy gained trailing garbage.
	// Write only the number of bytes actually read.
	size_t n;
	while ((n = fread(buf, sizeof(char), 1024, r)) > 0)
		fwrite(buf, sizeof(char), n, w);
	fclose(r);
	fclose(w);
}
void writeDataToImg(int width, int height, int dataOffset, int pixLen) {
	// Overwrite the pixel payload of "result.bmp" (in place) with the
	// processed global pdata buffer; the header is left untouched.
	FILE *f = fopen("result.bmp", "r+b");
	if (NULL == f) {
		printf("Fail to open the file5");
		exit(EXIT_FAILURE);
	}
	fseek(f, dataOffset, SEEK_SET);
	fwrite(pdata, sizeof(unsigned char), width * height * pixLen, f);
	fclose(f);
}
// In-place 3x3 convolution of a 512x512 RGB image held in Da (B-G-R or R-G-B
// triplets; the code treats the three bytes per pixel uniformly).
// Launch contract: grid = (512,1,1), block = (512,1,1) — one block per image
// row, one thread per pixel of that row. filter[0..8] is the 3x3 kernel,
// filter[9] the divisor applied to each sum.
//
// NOTE(review): the kernel reads neighbouring rows from Da and finally
// writes its own row back to Da with no grid-wide ordering, so a block that
// finishes early can overwrite a row another block has yet to read as its
// "row above/below" — confirm whether a separate output buffer is needed.
__global__ void processData(unsigned char *Da, int* filter)
{
	int tx = threadIdx.x; // thread x-id = column within the row
	int bx = blockIdx.x; // block x-id = image row
	int bn = blockDim.x;
	int gid = bx * bn + tx; // linear pixel index
	__shared__ int sfilter[3][3];
	__shared__ int sR[3][512]; // per block: row above / this row / row below
	__shared__ int sG[3][512];
	__shared__ int sB[3][512];
	__shared__ int sRsum[512]; // per block: the 512 final sums
	__shared__ int sGsum[512];
	__shared__ int sBsum[512];
	if (tx < 9) // the first 9 threads stage the filter into shared memory
	{
		sfilter[tx / 3][tx % 3] = filter[tx];
	}
	__syncthreads();
	if (bx == 0 || bx == 511 || tx == 0 || tx == 511)
	{
		// Border handling --> keep the original pixel value unchanged.
		sRsum[tx] = Da[gid * 3];
		sGsum[tx] = Da[gid * 3 + 1];
		sBsum[tx] = Da[gid * 3 + 2];
	}
	// Border handling: first and last row (block) do no filtering.
	// bx is uniform within a block, so this branch does not diverge and the
	// __syncthreads() inside it is reached by all threads of the block.
	if (bx != 0 && bx != 511)
	{
		// Stage the three rows (above/current/below) of R, G and B that this
		// row's computation needs into shared memory.
		sR[0][tx] = Da[gid * 3 - 512 * 3];
		sR[1][tx] = Da[gid * 3];
		sR[2][tx] = Da[gid * 3 + 512 * 3];
		sG[0][tx] = Da[gid * 3 - 512 * 3 + 1];
		sG[1][tx] = Da[gid * 3 + 1];
		sG[2][tx] = Da[gid * 3 + 512 * 3 + 1];
		sB[0][tx] = Da[gid * 3 - 512 * 3 + 2];
		sB[1][tx] = Da[gid * 3 + 2];
		sB[2][tx] = Da[gid * 3 + 512 * 3 + 2];
		__syncthreads();
		// Border handling: first and last pixel of each row are skipped.
		if (tx != 0 && tx != 511)
		{
			// R
			sRsum[tx] = sR[0][tx - 1] * sfilter[0][0];
			sRsum[tx] += sR[0][tx] * sfilter[0][1];
			sRsum[tx] += sR[0][tx + 1] * sfilter[0][2];
			sRsum[tx] += sR[1][tx - 1] * sfilter[1][0];
			sRsum[tx] += sR[1][tx] * sfilter[1][1];
			sRsum[tx] += sR[1][tx + 1] * sfilter[1][2];
			sRsum[tx] += sR[2][tx - 1] * sfilter[2][0];
			sRsum[tx] += sR[2][tx] * sfilter[2][1];
			sRsum[tx] += sR[2][tx + 1] * sfilter[2][2];
			// G
			sGsum[tx] = sG[0][tx - 1] * sfilter[0][0];
			sGsum[tx] += sG[0][tx] * sfilter[0][1];
			sGsum[tx] += sG[0][tx + 1] * sfilter[0][2];
			sGsum[tx] += sG[1][tx - 1] * sfilter[1][0];
			sGsum[tx] += sG[1][tx] * sfilter[1][1];
			sGsum[tx] += sG[1][tx + 1] * sfilter[1][2];
			sGsum[tx] += sG[2][tx - 1] * sfilter[2][0];
			sGsum[tx] += sG[2][tx] * sfilter[2][1];
			sGsum[tx] += sG[2][tx + 1] * sfilter[2][2];
			// B
			sBsum[tx] = sB[0][tx - 1] * sfilter[0][0];
			sBsum[tx] += sB[0][tx] * sfilter[0][1];
			sBsum[tx] += sB[0][tx + 1] * sfilter[0][2];
			sBsum[tx] += sB[1][tx - 1] * sfilter[1][0];
			sBsum[tx] += sB[1][tx] * sfilter[1][1];
			sBsum[tx] += sB[1][tx + 1] * sfilter[1][2];
			sBsum[tx] += sB[2][tx - 1] * sfilter[2][0];
			sBsum[tx] += sB[2][tx] * sfilter[2][1];
			sBsum[tx] += sB[2][tx + 1] * sfilter[2][2];
			// Apply the divisor (9 for blur, 1 for sharpen).
			sRsum[tx] /= filter[9];
			sGsum[tx] /= filter[9];
			sBsum[tx] /= filter[9];
			// Clamp results into the valid byte range [0, 255].
			if (sRsum[tx] > 255)
				sRsum[tx] = 255;
			else if (sRsum[tx] < 0)
				sRsum[tx] = 0;
			if (sGsum[tx] > 255)
				sGsum[tx] = 255;
			else if (sGsum[tx] < 0)
				sGsum[tx] = 0;
			if (sBsum[tx] > 255)
				sBsum[tx] = 255;
			else if (sBsum[tx] < 0)
				sBsum[tx] = 0;
		}
	}
	__syncthreads();
	// Merge the R, G, B results back into the interleaved byte array so the
	// host can write it straight to the output file.
	Da[gid * 3] = sRsum[tx];
	Da[gid * 3 + 1] = sGsum[tx];
	Da[gid * 3 + 2] = sBsum[tx];
}
// Apply a user-selected 3x3 convolution (1 = blur, 2 = sharpen) to the
// global pdata image buffer on the GPU, in place, and print the kernel time.
// filter layout: f[0..8] = 3x3 kernel, f[9] = post-sum divisor.
void ImgDataProcess(int width, int height, int pixLen){
	int DataSize = width * height * pixLen; // 512 * 512 * 3
	/* GPU config */
	unsigned char *Da;   // device copy of the image
	int f[10];
	int choose;
	// Ask the user which transform to run.
	printf("請選擇您要的圖片轉換:\n");
	printf("1.模糊化\n");
	printf("2.銳利化\n");
	printf("選擇:");
	scanf("%d", &choose);
	if (choose == 1)
	{
		// Box blur: all ones, divide by 9.
		for (int i = 0;i < 9;i++)
			f[i] = 1;
		f[9] = 9;
	}
	else if (choose == 2)
	{
		// Sharpen kernel; divisor 1.
		f[0] = 0; f[1] = -1; f[2] = 0;
		f[3] = -1; f[4] = 5; f[5] = -1;
		f[6] = 0; f[7] = -1; f[8] = 0;
		f[9] = 1;
	}
	else
	{
		printf("沒這選項88");
		exit(1);
	}
	int *filter;
	cudaMalloc((void**)&Da, DataSize); // device buffer for the image
	cudaMalloc((void**)&filter, 10 * sizeof(int));
	cudaMemcpy(Da, pdata, DataSize, cudaMemcpyHostToDevice);
	cudaMemcpy(filter, f, 10 * sizeof(int), cudaMemcpyHostToDevice);
	// Time the kernel with CUDA events.
	cudaEvent_t start,stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	//-------------------
	// One 512-thread block per image row (the kernel assumes a 512x512 image).
	dim3 block(512, 1, 1);
	dim3 grid(512, 1, 1);
	processData <<< grid, block >>> (Da, filter);
	// IMPROVEMENT: cudaThreadSynchronize() is deprecated; use the modern
	// cudaDeviceSynchronize() equivalent.
	cudaDeviceSynchronize();
	//-------------------
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elaspedTime;
	cudaEventElapsedTime(&elaspedTime, start, stop);
	printf("Exe time: %f\n", elaspedTime);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// Copy the filtered image back over pdata and release device memory.
	cudaMemcpy(pdata, Da, DataSize, cudaMemcpyDeviceToHost);
	cudaFree(Da);cudaFree(filter);
}
// CPU
void transfer(int width, int height, int pixLen) {
int i, j;
int R[512][512];
int G[512][512];
int B[512][512];
for(i = 0;i < height;i++) {
for (j = 0;j < width;j++)
{
R[i][j] = pdata[(i * width * 3) + (j * 3)];
G[i][j] = pdata[(i * width * 3) + (j * 3 + 1)];
B[i][j] = pdata[(i * width * 3) + (j * 3 + 2)];
}
}
int Rsum;
int Gsum;
int Bsum;
for (i = 0;i < height;i++)
{
for (j = 0;j < width;j++)
{
Rsum = 0;
Gsum = 0;
Bsum = 0;
if (i == 0 || j == 0 || i == height - 1 || j == width - 1) // 邊緣不處理
{
// pdata[(i * width * 3) + (j * 3)] = R[i][j];
// pdata[(i * width * 3) + (j * 3 + 1)] = G[i][j];
// pdata[(i * width * 3) + (j * 3 + 2)] = B[i][j];
continue;
}
Rsum += R[i - 1][j - 1] + R[i - 1][j] + R[i - 1][j + 1];
Rsum += R[i][j - 1] + R[i][j] + R[i][j + 1];
Rsum += R[i + 1][j - 1] + R[i + 1][j] + R[i + 1][j + 1];
Gsum += G[i - 1][j - 1] + G[i - 1][j] + G[i - 1][j + 1];
Gsum += G[i][j - 1] + G[i][j] + G[i][j + 1];
Gsum += G[i + 1][j - 1] + G[i + 1][j] + G[i + 1][j + 1];
Bsum += B[i - 1][j - 1] + B[i - 1][j] + B[i - 1][j + 1];
Bsum += B[i][j - 1] + B[i][j] + B[i][j + 1];
Bsum += B[i + 1][j - 1] + B[i + 1][j] + B[i + 1][j + 1];
Rsum /= 9;
Gsum /= 9;
Bsum /= 9;
if (Rsum > 255)
Rsum = 255;
else if (Rsum < 0)
Rsum = 0;
if (Gsum > 255)
Gsum = 255;
else if (Gsum < 0)
Gsum = 0;
if (Bsum > 255)
Bsum = 255;
else if (Bsum < 0)
Bsum = 0;
}
}
}
int main() {
	// Read the BMP header and pixel data, blur/sharpen the image on the GPU,
	// then write the processed pixels into a copy of the original file.
	int height, width;
	int dataOffset, pixLen;
	getInfo(&width, &height, &dataOffset, &pixLen);
	pdata = (unsigned char *)malloc(sizeof(unsigned char) * height * width * pixLen);
	getData(width, height, dataOffset, pixLen);
	// CPU reference path (unused):
	// transfer(width, height, pixLen);
	ImgDataProcess(width, height, pixLen);             // transforms pdata in place
	copy();                                            // clone lena_color.bmp -> result.bmp
	writeDataToImg(width, height, dataOffset, pixLen); // overwrite result.bmp's pixel payload
	free(pdata);
}
23,890 | #include <stdio.h>
#include <string>
#include <vector>
#include <algorithm>
#include <numeric>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
// Shift the array one position to the left in place: a[i] = a[i+1] for
// i in [0, N-2]; a[N-1] is left unchanged.
__global__ void left_shift_kernel(int *a, const int N){
    // BUG FIX: the original used idx = threadIdx.x (a block-local id), so
    // with <<<4, 256>>> every block shifted only the first 256 elements and
    // the rest of the array was untouched. Use the global thread id.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original placed __syncthreads() inside the guarded branch;
    // a barrier in divergent control flow is undefined behaviour. Read first,
    // barrier unconditionally, then write.
    bool active = (idx < N - 1);
    int temp = 0;
    if (active){
        temp = a[idx + 1];
    }
    // NOTE(review): this barrier only orders threads within one block; at
    // block boundaries the read of a[idx+1] and the neighbouring block's
    // write remain unordered — a fully safe in-place shift across blocks
    // needs double-buffering or a grid-wide barrier. Confirm requirements.
    __syncthreads();
    if (active){
        a[idx] = temp;
    }
}
int main(){
    // Fill a 1024-element array with 0..1023, left-shift it on the GPU,
    // printing the array before and after.
    const int N = 1 << 10;
    const size_t bytes = N * sizeof(int);

    int *host = new int[N];
    std::iota(host, host + N, 0);
    for (int i = 0; i < N; i++){
        printf("%d ", host[i]);
    }
    printf("\n");

    int *dev = nullptr;
    cudaMalloc(&dev, bytes);
    cudaMemcpy(dev, host, bytes, cudaMemcpyHostToDevice);
    left_shift_kernel<<<4, 256>>> (dev, N);
    cudaMemcpy(host, dev, bytes, cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++){
        printf("%d ", host[i]);
    }
    printf("\n");

    cudaFree(dev);
    delete[] host;
}
23,891 | #include "includes.h"
// Compute per-chunk start offsets: d_cs[0] = 0 and, for i > 0,
// d_cs[i] = d_cl[i-1] * chunk. One thread per entry of d_cs.
__global__ void init_cs(int *d_cl, int *d_cs, int c_size, int chunk)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= c_size) {
		return;
	}
	d_cs[i] = (i == 0) ? 0 : d_cl[i - 1] * chunk;
}
23,892 | // numInterior: (NfIn)
// intrplWgts: (NfIn*maxK, 3)
// input: (NfIn*maxK, C)
// filter: (3, C, r)
// output: (NfIn, C*r)
// Forward facet-to-facet convolution. One thread per (face, output-channel)
// pair: interpolate the 3-vertex filter at each interior point of the face,
// accumulate the weighted inputs, then average over the face's K points.
// numInterior is cumulative: face f's points span [numInterior[f-1], numInterior[f]).
__global__ void facet2facet_conv3d_forward(int NfIn, int C, int r, const int* numInterior,
                                           const float* intrplWgts, const float* input,
                                           const float* filter, float* output)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
    int fcIdx = idx/(C*r); // global face index in the batch
    int cout = idx%(C*r); // output channel ID
    int cin = cout/r; // input channel ID
    if(fcIdx<NfIn)
    {
        int kStart = 0;
        int kEnd = numInterior[fcIdx];
        if (fcIdx>0) kStart = numInterior[fcIdx-1];
        // convolution over the face's interior points
        int K = kEnd - kStart;
        for(int k=kStart;k<kEnd;k++)
        {
            // interpolation weights (w1,w2,w3) related to (v1->v2->v3) of the face
            float w1=intrplWgts[k*3], w2=intrplWgts[k*3+1], w3=intrplWgts[k*3+2];
            float weight = w1*filter[cout] + w2*filter[cout+C*r] + w3*filter[cout+2*C*r];
            output[fcIdx*C*r+cout] += weight*input[k*C+cin];
        }
        // BUG FIX: a face with no interior points (K == 0) previously divided
        // by zero, turning its output into NaN/Inf; skip the average instead
        // (the accumulation loop did not run, so output is unchanged).
        if (K > 0) output[fcIdx*C*r+cout] /= K;
    }
}
// numInterior: (NfIn)
// intrplWgts: (NfIn*maxK, 3)
// filter: (3, C, r)
// gradOutput: (NfIn, C*r)
// gradInput: (NfIn*maxK, C)
// Backprop to the per-point inputs: each (face, output-channel) thread
// redistributes its output gradient over the face's interior points using
// the same interpolated filter weights as the forward pass.
__global__ void facet2facet_input_backward(int NfIn, int C, int r, const int* numInterior,
                                           const float* intrplWgts, const float* filter,
                                           const float* gradOutput, float* gradInput)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    int face = tid/(C*r);   // global face index in the batch
    int chOut = tid%(C*r);  // output channel ID
    int chIn = chOut/r;     // input channel ID
    if (face >= NfIn) return;

    // numInterior is cumulative: this face's points span [kBegin, kEnd).
    int kBegin = (face > 0) ? numInterior[face-1] : 0;
    int kEnd = numInterior[face];
    int K = kEnd - kBegin;
    for (int k = kBegin; k < kEnd; k++)
    {
        // Interpolation weights (w1,w2,w3) for the face's vertices (v1,v2,v3).
        float w1 = intrplWgts[k*3];
        float w2 = intrplWgts[k*3+1];
        float w3 = intrplWgts[k*3+2];
        float weight = w1*filter[chOut] + w2*filter[chOut+C*r] + w3*filter[chOut+2*C*r];
        // Several (face, channel) threads may target the same input slot.
        atomicAdd(&gradInput[k*C+chIn], gradOutput[face*C*r+chOut]*weight/K);
    }
}
// numInterior: (NfIn)
// intrplWgts: (NfIn*maxK, 3)
// input: (NfIn*maxK, C)
// gradOutput: (NfIn, C*r)
// gradFilter: (3, C, r)
// Backward pass (gradient w.r.t. filter) of the facet-to-facet convolution.
// The (3, C, r) filter gradient is accumulated in dynamic shared memory to
// reduce global atomics: each launch covers the filter slice
// [startIdx, startIdx+sharedMemSize); the host launcher iterates over slices
// when 3*C*r exceeds the shared-memory budget.
// Grid/block: 1D, one thread per (face, output-channel) pair; the dynamic
// shared allocation must be sharedMemSize floats.
__global__ void facet2facet_filter_backward(int NfIn, int C, int r, const int* numInterior,
const float* intrplWgts, const float* input, const float* gradOutput,
float* gradFilter, int sharedMemSize, int startIdx)
{
extern __shared__ float gradPerBlock[]; // the gradient on each block
// cooperative zero-fill of the shared accumulator
for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
{
gradPerBlock[i] = 0; // for 1D block
}
__syncthreads();
int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
int fcIdx = idx/(C*r); // global face index in the batch
int cout = idx%(C*r); // output channel ID
int cin = cout/r; // input channel ID
int endIdx = sharedMemSize+startIdx; // exclusive end of this filter slice
if(fcIdx<NfIn)
{
int kStart = 0;
int kEnd = numInterior[fcIdx];
if (fcIdx>0) kStart = numInterior[fcIdx-1];
// convolution
int K = kEnd - kStart;
float derFilt[3] = {0,0,0}; // per-thread partials for the 3 filter banks
for(int k=kStart;k<kEnd;k++)
{
// get interpolation weights (w1,w2,w3) related to (v1->v2->v3) of the face
float w1=intrplWgts[k*3], w2=intrplWgts[k*3+1], w3=intrplWgts[k*3+2];
float temp = gradOutput[fcIdx*C*r+cout]*input[k*C+cin]/K;
derFilt[0] += temp*w1; derFilt[1] += temp*w2; derFilt[2] += temp*w3;
}
// flush partials for whichever banks fall in this slice
for(int m=0;m<3;m++)
{
int currIdx = m*C*r+cout;
if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
{
atomicAdd(&gradPerBlock[currIdx-startIdx],derFilt[m]);
}
}
}
__syncthreads();
// one global atomic per shared slot per block
for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
{
atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
}
}
// Host launcher for the facet-to-facet forward convolution.
// Uses one thread per (face, output-channel) pair, 1024 threads per block.
void facet2facetConv3dLauncher(int NfIn, int C, int r, const int* numInterior, const float* intrplWgts,
const float* input, const float* filter, float* output)
{
    const int threadsPerBlock = 1024;
    const int numBlocks = NfIn*C*r/threadsPerBlock + 1;
    facet2facet_conv3d_forward<<<numBlocks,threadsPerBlock>>>(NfIn, C, r, numInterior, intrplWgts,
                                                              input, filter, output);
}
// Host launcher for the facet-to-facet backward pass.
// Launches the input-gradient kernel once, then tiles the (3*C*r)-float
// filter gradient across multiple filter-backward launches so each launch's
// dynamic shared-memory request stays within the 48 KB budget.
void facet2facetConv3dGradLauncher(int NfIn, int C, int r, const int* numInterior, const float* intrplWgts,
const float* input, const float* filter, const float* gradOutput,
float* gradInput, float* gradFilter)
{
int numGrid = NfIn*C*r/1024 + 1; // one thread per (face, out-channel) pair
facet2facet_input_backward<<<numGrid,1024>>>(NfIn, C, r, numInterior, intrplWgts, filter,
gradOutput, gradInput);
// titan xp has shared memory of 49152 bytes, each float value takes 4 bytes in the memory
int maxSharedMemSize = int(49152/sizeof(float));
int maxIter = (3*C*r)/maxSharedMemSize;   // number of full-size slices
int remainder = (3*C*r)%maxSharedMemSize; // size of the trailing partial slice
for(int iter=0;iter<maxIter;iter++)
{
facet2facet_filter_backward<<<numGrid,1024,sizeof(float)*maxSharedMemSize>>>(NfIn, C, r, numInterior,
intrplWgts, input, gradOutput, gradFilter,
maxSharedMemSize, maxSharedMemSize*iter);
}
if(remainder>0) // fill the remainder
{
facet2facet_filter_backward<<<numGrid,1024,sizeof(float)*remainder>>>(NfIn, C, r, numInterior,
intrplWgts, input, gradOutput, gradFilter,
remainder, maxSharedMemSize*maxIter);
}
}
// numInterval: (NfIn)
// face: (NfIn, 3)
// input: (NvIn, C)
// filter: (3, C, r)
// output: (NfIn, C*r)
// Forward pass of the vertex-to-facet convolution.
// One thread per (face, output-channel) pair. Interior points are generated
// on the fly from a barycentric grid with numInterval[f] subdivisions per
// edge, giving (n+1)(n+2)/2 sample points; both the filter banks and the
// three vertex features are blended with the same barycentric weights.
// NOTE(review): `output` is accumulated with += and must be pre-zeroed (per
// the in-code comment); numInterval[f] == 0 would make `step` infinite —
// confirm the caller guarantees numInterval >= 1.
__global__ void vertex2facet_conv3d_forward(int NfIn, int C, int r, const int* numInterval, const int* face,
const float* input, const float* filter, float* output)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
int fcIdx = idx/(C*r); // global face index in the batch
int cout = idx%(C*r); // output channel ID
int cin = cout/r; // input channel ID
if(fcIdx<NfIn)
{
int v1=face[3*fcIdx], v2=face[3*fcIdx+1], v3=face[3*fcIdx+2];
// convolution, ensure that output has been all initialized to zeros
int intervalSize = numInterval[fcIdx];
float step = 1.0/float(intervalSize);
int numInterior = (intervalSize+1)*(intervalSize+2)/2; // number of interior points to interpolate
// enumerate the barycentric grid (w1, w2, w3 = 1 - w1 - w2)
for(int k1=0;k1<=intervalSize;k1++)
{
for(int k2=0;k2<=intervalSize-k1;k2++)
{
float w1 = k1*step;
float w2 = k2*step;
float w3 = 1 - w1 - w2;
// use vertex features, (x,y,z), (nx,ny,nz) can be already concatenated into
float weight = w1*filter[cout] + w2*filter[cout+C*r] + w3*filter[cout+2*C*r];
float feat = w1*input[v1*C+cin] + w2*input[v2*C+cin] + w3*input[v3*C+cin];
output[fcIdx*C*r+cout] += weight*feat;
}
}
// average over the sampled interior points
output[fcIdx*C*r+cout] /= numInterior;
}
}
// numInterval: (NfIn)
// face: (NfIn, 3)
// filter: (3, C, r)
// gradOutput: (NfIn, C*r)
// gradInput: (NvIn, C)
// Backward pass (gradient w.r.t. the vertex features) of the
// vertex-to-facet convolution. One thread per (face, output-channel) pair;
// derIn[m] accumulates d(output)/d(input at vertex m) over the barycentric
// sample grid, then the gradient is scattered to the three vertices.
// atomicAdd is required: a vertex is shared by several faces, and threads
// with different cout but the same cin hit the same gradInput slot.
__global__ void vertex2facet_input_backward(int NfIn, int C, int r, const int* numInterval, const int* face,
const float* filter, const float* gradOutput, float* gradInput)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
int fcIdx = idx/(C*r); // global face index in the batch
int cout = idx%(C*r); // output channel ID
int cin = cout/r; // input channel ID
if(fcIdx<NfIn)
{
int v1=face[3*fcIdx], v2=face[3*fcIdx+1], v3=face[3*fcIdx+2];
float derIn[3] = {0,0,0}; // d(out)/d(feature of v1, v2, v3)
int intervalSize = numInterval[fcIdx];
float step = 1.0/float(intervalSize);
int numInterior = (intervalSize+1)*(intervalSize+2)/2; // number of interior points to interpolate
for(int k1=0;k1<=intervalSize;k1++)
{
for(int k2=0;k2<=intervalSize-k1;k2++)
{
float w1=k1*step, w2=k2*step, w3=1-w1-w2;
// use vertex features, (x,y,z), (nx,ny,nz) can be already concatenated into
float weight = w1*filter[cout] + w2*filter[cout+C*r] + w3*filter[cout+2*C*r];
derIn[0] += weight*w1; derIn[1] += weight*w2; derIn[2] += weight*w3;
}
}
// gradient accumulation from all adjacent faces
atomicAdd(&gradInput[v1*C+cin], gradOutput[fcIdx*C*r+cout]*derIn[0]/numInterior);
atomicAdd(&gradInput[v2*C+cin], gradOutput[fcIdx*C*r+cout]*derIn[1]/numInterior);
atomicAdd(&gradInput[v3*C+cin], gradOutput[fcIdx*C*r+cout]*derIn[2]/numInterior);
}
}
// numInterval: (NfIn)
// face: (NfIn, 3)
// input: (NvIn, C)
// gradOutput: (NfIn, C*r)
// gradFilter: (3, C, r)
// Backward pass (gradient w.r.t. filter) of the vertex-to-facet convolution.
// Same shared-memory tiling scheme as facet2facet_filter_backward: each
// launch accumulates the filter-gradient slice [startIdx, startIdx+
// sharedMemSize) in dynamic shared memory before one global atomicAdd per
// slot per block. Dynamic shared allocation must be sharedMemSize floats.
__global__ void vertex2facet_filter_backward(int NfIn, int C, int r, const int* numInterval, const int* face,
const float* input, const float* gradOutput, float* gradFilter,
int sharedMemSize, int startIdx)
{
extern __shared__ float gradPerBlock[]; // the gradient on each block
// cooperative zero-fill of the shared accumulator
for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
{
gradPerBlock[i] = 0; // for 1D block
}
__syncthreads();
int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
int fcIdx = idx/(C*r); // global face index in the batch
int cout = idx%(C*r); // output channel ID
int cin = cout/r; // input channel ID
int endIdx = sharedMemSize+startIdx; // exclusive end of this filter slice
if(fcIdx<NfIn)
{
int v1=face[3*fcIdx], v2=face[3*fcIdx+1], v3=face[3*fcIdx+2];
// convolution, ensure that output has been all initialized to zeros
int intervalSize = numInterval[fcIdx]; // number of interior points to sample
float step = 1.0/float(intervalSize);
int numInterior = (intervalSize+1)*(intervalSize+2)/2;
float derFilt[3] = {0,0,0}; // per-thread partials for the 3 filter banks
for(int k1=0;k1<=intervalSize;k1++)
{
for(int k2=0;k2<=intervalSize-k1;k2++)
{
float w1 = k1*step;
float w2 = k2*step;
float w3 = 1 - w1 - w2;
// use vertex features, (x,y,z), (nx,ny,nz) can be already concatenated into
float feat = w1*input[v1*C+cin] + w2*input[v2*C+cin] + w3*input[v3*C+cin];
derFilt[0] += w1*feat; derFilt[1] += w2*feat; derFilt[2] += w3*feat;
}
}
// flush partials for whichever banks fall in this slice
for(int m=0;m<3;m++)
{
int currIdx = m*C*r+cout;
if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
{
atomicAdd(&gradPerBlock[currIdx-startIdx],gradOutput[fcIdx*C*r+cout]*derFilt[m]/numInterior);
}
}
}
__syncthreads();
// one global atomic per shared slot per block
for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
{
atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
}
}
// Host launcher for the vertex-to-facet forward convolution.
// Uses one thread per (face, output-channel) pair, 1024 threads per block.
void vertex2facetConv3dLauncher(int NfIn, int C, int r, const int* numInterval, const int* face,
const float* input, const float* filter, float* output)
{
    const int threadsPerBlock = 1024;
    const int numBlocks = NfIn*C*r/threadsPerBlock + 1;
    vertex2facet_conv3d_forward<<<numBlocks,threadsPerBlock>>>(NfIn, C, r, numInterval, face,
                                                               input, filter, output);
}
// Host launcher for the vertex-to-facet backward pass.
// Launches the input-gradient kernel once, then tiles the (3*C*r)-float
// filter gradient across filter-backward launches sized to the 48 KB
// shared-memory budget.
void vertex2facetConv3dGradLauncher(int NfIn, int C, int r, const int* numInterval, const int* face,
const float* input, const float* filter, const float* gradOutput,
float* gradInput, float* gradFilter)
{
int numGrid = NfIn*C*r/1024 + 1; // one thread per (face, out-channel) pair
vertex2facet_input_backward<<<numGrid,1024>>>(NfIn, C, r, numInterval, face, filter, gradOutput, gradInput);
// titan xp has shared memory of 49152 bytes, each float value takes 4 bytes in the memory
int maxSharedMemSize = int(49152/sizeof(float));
int maxIter = (3*C*r)/maxSharedMemSize;   // number of full-size slices
int remainder = (3*C*r)%maxSharedMemSize; // size of the trailing partial slice
for(int iter=0;iter<maxIter;iter++)
{
vertex2facet_filter_backward<<<numGrid,1024,sizeof(float)*maxSharedMemSize>>>(NfIn, C, r, numInterval, face,
input, gradOutput, gradFilter,
maxSharedMemSize, maxSharedMemSize*iter);
}
if(remainder>0) // fill the remainder
{
vertex2facet_filter_backward<<<numGrid,1024,sizeof(float)*remainder>>>(NfIn, C, r, numInterval, face,
input, gradOutput, gradFilter,
remainder, maxSharedMemSize*maxIter);
}
}
// vtMap: NvIn; // only non-negative mapid got output features
// nfCount: NvIn;
// face: NfIn*3;
// coeff: NfIn*K;
// input: NfIn*C;
// filter: K*C*r;
// output: NvOut*(C*r);
// Forward pass of the facet-to-vertex convolution.
// One thread per (face, output-channel) pair: it blends the K filter banks
// with the face's fuzzy coefficients, multiplies by the face feature, and
// scatters the result to the face's three vertices (averaged over each
// vertex's adjacent-face count). vtMap[vi] < 0 marks a vertex with no output
// feature (strided convolution); such vertices are skipped.
// NOTE(review): assumes `output` is pre-zeroed by the caller (accumulation
// is atomicAdd-only) — confirm.
__global__ void facet2vertex_conv3d_forward(int NfIn, int C, int r, const int K, const int* vtMap,
                                            const int* nfCount, const int* face, const float* coeff,
                                            const float* input, const float* filter, float* output)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
    int fcIdx = idx/(C*r);                         // global face index in the batch
    int cout = idx%(C*r);                          // output channel ID
    int cin = cout/r;                              // input channel ID
    if (fcIdx<NfIn) // index must be in the legal range
    {
        // fuzzy-combined filter weight for this face/channel
        float weight = 0;
        for(int k=0;k<K;k++)
        {
            float xi_k = coeff[fcIdx*K+k];
            weight += xi_k*filter[k*C*r+cout];
        }
        float out_feat = weight*input[fcIdx*C+cin];
        int v[3] = {face[3*fcIdx], face[3*fcIdx+1], face[3*fcIdx+2]};
        for(int k=0;k<3;k++) // aggregate context of vertex from adjacent faces
        {
            int vi = v[k];
            int vo = vtMap[vi]; // for non-strided convolution, vtMap[vi]==vi
            // Fix: check vo >= 0 BEFORE indexing nfCount with it; the original
            // read nfCount[vo] with a negative index for masked vertices.
            if (vo>=0)
            {
                int nfSize = nfCount[vo]; // #faces adjacent to this output vertex
                atomicAdd(&output[vo*C*r+cout], out_feat/nfSize);
            }
        }
    }
}
// vtMap: NvIn; // only non-negative mapid got output features
// nfCount: NvIn;
// face: NfIn*3;
// coeff: NfIn*K;
// filter: K*C*r;
// gradOutput: NvOut*(C*r)
// gradInput: NfIn*C;
// Backward pass (gradient w.r.t. the face features) of the facet-to-vertex
// convolution. Each thread handles one (face, output-channel) pair and pulls
// gradient back from the face's three output vertices. atomicAdd is needed
// because threads with different cout but the same cin (cout/r) target the
// same gradInput slot. vtMap[vi] < 0 marks a masked vertex (no output).
__global__ void facet2vertex_input_backward(int NfIn, int C, int r, const int K, const int* vtMap,
                                            const int* nfCount, const int* face, const float* coeff,
                                            const float* filter, const float* gradOutput, float* gradInput)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
    int fcIdx = idx/(C*r);                         // global face index in the batch
    int cout = idx%(C*r);                          // output channel ID
    int cin = cout/r;                              // input channel ID
    if (fcIdx<NfIn) // index must be in the legal range
    {
        // fuzzy-combined filter weight for this face/channel
        float weight = 0;
        for(int k=0;k<K;k++)
        {
            float xi_k = coeff[fcIdx*K+k];
            weight += xi_k*filter[k*C*r+cout];
        }
        // gradInput is on faces; each face collects gradients from its vertices
        int v[3] = {face[3*fcIdx], face[3*fcIdx+1], face[3*fcIdx+2]};
        for(int k=0;k<3;k++)
        {
            int vi = v[k];
            int vo = vtMap[vi];
            // Fix: check vo >= 0 BEFORE indexing nfCount with it; the original
            // read nfCount[vo] with a negative index for masked vertices.
            if (vo>=0)
            {
                int nfSize = nfCount[vo]; // #faces adjacent to this output vertex
                float derIn = gradOutput[vo*C*r+cout]*weight/nfSize;
                atomicAdd(&gradInput[fcIdx*C+cin], derIn);
            }
        }
    }
}
// vtMap: NvIn; // only non-negative mapid got output features
// nfCount: NvIn;
// face: NfIn*3;
// coeff: NfIn*K;
// input: NfIn*C;
// gradOutput: NvOut*(C*r)
// gradFilter: K*C*r;
// Backward pass (gradient w.r.t. filter) of the facet-to-vertex convolution.
// Uses the same shared-memory tiling as the other filter-backward kernels:
// each launch accumulates the slice [startIdx, startIdx+sharedMemSize) of
// the (K, C, r) filter gradient in dynamic shared memory, then flushes it
// with one global atomicAdd per slot per block. Dynamic shared allocation
// must be sharedMemSize floats.
__global__ void facet2vertex_filter_backward(int NfIn, int C, int r, const int K, const int* vtMap,
                                             const int* nfCount, const int* face, const float* coeff,
                                             const float* input, const float* gradOutput,
                                             float* gradFilter, int sharedMemSize, int startIdx)
{
    extern __shared__ float gradPerBlock[]; // per-block gradient accumulator
    // cooperative zero-fill of the shared accumulator
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        gradPerBlock[i] = 0; // for 1D block
    }
    __syncthreads();
    int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
    int fcIdx = idx/(C*r);                         // global face index in the batch
    int cout = idx%(C*r);                          // output channel ID
    int cin = cout/r;                              // input channel ID
    int endIdx = sharedMemSize+startIdx;           // exclusive end of this slice
    if (fcIdx<NfIn) // index must be in the legal range
    {
        int v[3] = {face[3*fcIdx], face[3*fcIdx+1], face[3*fcIdx+2]};
        for(int k=0;k<K;k++)
        {
            int currIdx = k*C*r+cout;
            if((currIdx>=startIdx) && (currIdx<endIdx)) // within the shared memory
            {
                float derFilt = coeff[fcIdx*K+k]*input[fcIdx*C+cin];
                for(int m=0;m<3;m++)
                {
                    int vi = v[m];
                    int vo = vtMap[vi];
                    // Fix: check vo >= 0 BEFORE indexing nfCount with it; the
                    // original read nfCount[vo] with a negative index for
                    // masked vertices.
                    if (vo>=0)
                    {
                        int nfSize = nfCount[vo];
                        atomicAdd(&gradPerBlock[currIdx-startIdx], gradOutput[vo*C*r+cout]*derFilt/nfSize);
                    }
                }
            }
        }
    }
    __syncthreads();
    // one global atomic per shared slot per block
    for (int i=threadIdx.x;i<sharedMemSize;i+=blockDim.x)
    {
        atomicAdd(&gradFilter[i+startIdx],gradPerBlock[i]); // for 1D block
    }
}
// vtMap: NvIn;
// nfCount: NvIn;
// face: NfIn*3;
// input: NfIn*C;
// filter: K*C*r;
// gradOutput: NvOut*(C*r)
// gradCoeff: NfIn*K;
// Backward pass (gradient w.r.t. the fuzzy coefficients) of the
// facet-to-vertex convolution. One thread per (face, output-channel) pair;
// for each of the K clusters it accumulates gradient from the face's three
// output vertices into gradCoeff[fcIdx*K+k]. atomicAdd is required because
// all C*r channel threads of a face write the same K coefficient slots.
__global__ void facet2vertex_coeff_backward(int NfIn, int C, int r, const int K, const int* vtMap,
                                            const int* nfCount, const int* face, const float* input,
                                            const float* filter, const float* gradOutput, float* gradCoeff)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x; // thread index
    int fcIdx = idx/(C*r);                         // global face index in the batch
    int cout = idx%(C*r);                          // output channel ID
    int cin = cout/r;                              // input channel ID
    if (fcIdx<NfIn) // index must be in the legal range
    {
        int v[3] = {face[3*fcIdx], face[3*fcIdx+1], face[3*fcIdx+2]};
        for(int k=0;k<K;k++)
        {
            float derCoeff = filter[k*C*r+cout]*input[fcIdx*C+cin];
            for(int m=0;m<3;m++)
            {
                int vi = v[m];
                int vo = vtMap[vi];
                // Fix: check vo >= 0 BEFORE indexing nfCount with it; the
                // original read nfCount[vo] with a negative index for masked
                // vertices.
                if (vo>=0)
                {
                    int nfSize = nfCount[vo];
                    atomicAdd(&gradCoeff[fcIdx*K+k], gradOutput[vo*C*r+cout]*derCoeff/nfSize);
                }
            }
        }
    }
}
// Host launcher for the facet-to-vertex forward convolution.
// Uses one thread per (face, output-channel) pair, 1024 threads per block.
void facet2vertexConv3dLauncher(int NfIn, int C, int r, int K, const int* vtMap, const int* nfCount, const int* face,
const float* coeff, const float* input, const float* filter, float* output)
{
    const int threadsPerBlock = 1024;
    const int numBlocks = NfIn*C*r/threadsPerBlock + 1;
    facet2vertex_conv3d_forward<<<numBlocks,threadsPerBlock>>>(NfIn, C, r, K, vtMap, nfCount, face,
                                                               coeff, input, filter, output);
}
// Host launcher for the facet-to-vertex backward pass: input gradient,
// tiled filter gradient (slices sized to the 48 KB shared-memory budget,
// K*C*r floats total), then the coefficient gradient.
void facet2vertexConv3dGradLauncher(int NfIn, int C, int r, int K, const int* vtMap, const int* nfCount, const int* face,
const float* coeff, const float* input, const float* filter, const float* gradOutput,
float* gradInput, float* gradFilter, float* gradCoeff)
{
int numGrid = NfIn*C*r/1024 + 1; // one thread per (face, out-channel) pair
facet2vertex_input_backward<<<numGrid,1024>>>(NfIn, C, r, K, vtMap, nfCount, face, coeff, filter, gradOutput, gradInput);
// titan xp has shared memory of 49152 bytes, each float value takes 4 bytes in the memory
int maxSharedMemSize = int(49152/sizeof(float));
int maxIter = (K*C*r)/maxSharedMemSize;   // number of full-size slices
int remainder = (K*C*r)%maxSharedMemSize; // size of the trailing partial slice
for(int iter=0;iter<maxIter;iter++)
{
facet2vertex_filter_backward<<<numGrid,1024,sizeof(float)*maxSharedMemSize>>>(NfIn, C, r, K, vtMap, nfCount, face,
coeff, input, gradOutput, gradFilter,
maxSharedMemSize, maxSharedMemSize*iter);
}
if(remainder>0) // fill the remainder
{
facet2vertex_filter_backward<<<numGrid,1024,sizeof(float)*remainder>>>(NfIn, C, r, K, vtMap, nfCount, face,
coeff, input, gradOutput, gradFilter,
remainder, maxSharedMemSize*maxIter);
}
facet2vertex_coeff_backward<<<numGrid,1024>>>(NfIn, C, r, K, vtMap, nfCount, face, input, filter, gradOutput, gradCoeff);
}
|
23,893 | #include "includes.h"
// Element-wise sigmoid forward pass: out[i] = 1 / (1 + e^{-in[i]}).
// Flattens a 2D grid of 1D blocks into a linear index; one thread per
// element, guarded against threads past `elements`.
__global__ void calcSigmoidForwardGPU(float *in, float *out, int elements)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        // expf keeps the whole computation in single precision; the original
        // unsuffixed exp() risks resolving to the double-precision path.
        out[id] = 1.0f / (1.0f + expf(-in[id]));
    }
}
23,894 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/functional.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <chrono>
using namespace std;
using sys_clock = std::chrono::system_clock;
/// used to fill a host vector
/// Functor used to fill a host vector with pseudo-random values.
/// Construction seeds the C library RNG with the current time; each call
/// assigns rand() (modulo `mod` when mod > 0) to the element.
struct rand_functor
{
    int mod = 0;  // 0 means "use the raw rand() value"
    rand_functor(int _mod = 0) : mod(_mod) { std::srand(std::time(0)); }

    template<typename T>
    void operator()(T &var)
    {
        var = (mod > 0) ? (std::rand() % mod) : std::rand();
    }
};
/// Thrust zip-iterator functor: given tuple t = (a, b, idx), overwrites the
/// third slot with a*b + data[idx]. `data` must be device-accessible when
/// used inside a device algorithm.
/// NOTE(review): despite the name, this is an element-wise transform, not a
/// matrix multiplication — confirm intent with the caller.
struct matrix_mult
{
/// Fill the structure
int *data; // addend lookup table, indexed by the third tuple element
matrix_mult(int* _data) : data(_data){}
template<typename Tuple>
__host__ __device__
void operator()(Tuple t){
thrust::get<2>(t) = thrust::get<0>(t) * thrust::get<1>(t) + data[thrust::get<2>(t)];
}
};
/// Naive triple-loop matrix multiply, accumulated into C (C += A*B).
/// C must be zero-initialized by the caller before the first call.
/// NOTE(review): the index arithmetic mixes row_size and col_size
/// (B is indexed as B[k*row_size + j]); it is only self-consistent when
/// row_size == col_size — confirm square-matrix usage with the caller.
void cpu_matrix_mult(float *A, float *B, float *C, int row_size, int col_size)
{
    for (int row = 0; row < row_size; ++row) {
        for (int col = 0; col < col_size; ++col) {
            for (int k = 0; k < col_size; ++k) {
                C[row * col_size + col] += A[row * col_size + k] * B[k * row_size + col];
            }
        }
    }
}
/// Print a row-major row_size x col_size matrix to stdout, one row per line,
/// preceded by a blank line.
void print_matrix(float *A, int row_size, int col_size)
{
    std::cout << "\n";
    for (int r = 0; r < row_size; ++r)
    {
        for (int c = 0; c < col_size; ++c)
            std::cout << A[r * col_size + c] << " ";
        std::cout << "\n";
    }
}
// Times a CPU triple-loop matrix multiply against a thrust transform over the
// same random operands, then compares the first col_size entries.
// NOTE(review): the thrust pass is an element-wise a*b + data[id] transform,
// not a true matrix multiply, so the comparison is expected to report "NOT
// equal" for most inputs — behavior kept as-is; only memory defects fixed.
void thrust_matrix_mult(const int row_size, const int col_size)
{
    const int matrix_size = col_size * row_size;
    std::chrono::time_point<sys_clock> t1, t2;
    std::chrono::duration<double, std::milli> exec_time_ms;
    /// These are for the CPU matrix mult
    float *A = (float*)malloc(sizeof(float) * matrix_size);
    float *B = (float*)malloc(sizeof(float) * matrix_size);
    // Fix: cpu_matrix_mult accumulates into C with '+=', so C must start
    // zeroed; the original malloc left it holding garbage.
    float *C = (float*)calloc(matrix_size, sizeof(float));
    /// Vectors for the thrust matrix mult
    thrust::host_vector<float> result(matrix_size);
    thrust::host_vector<float> matrix_hA(matrix_size), matrix_hB(matrix_size);
    thrust::device_vector<float> matrix_A(matrix_size), matrix_B(matrix_size), matrix_C(matrix_size, 0.0f);
    thrust::device_vector<int> ids(matrix_size);
    thrust::device_vector<int> data(matrix_size);
    /// Additional variables you may need
    thrust::sequence(data.begin(),data.end());
    thrust::sequence(ids.begin(),ids.end());
    thrust::for_each(matrix_hA.begin(), matrix_hA.end(), rand_functor(10));
    thrust::for_each(matrix_hB.begin(), matrix_hB.end(), rand_functor(10));
    matrix_A = matrix_hA;
    matrix_B = matrix_hB;
    // mirror the operands into the plain host arrays for the CPU reference
    thrust::copy(matrix_A.begin(), matrix_A.end(), A);
    thrust::copy(matrix_B.begin(), matrix_B.end(), B);
    t1 = sys_clock::now();
    cpu_matrix_mult(A, B, C, row_size, col_size);
    t2 = sys_clock::now();
    exec_time_ms = t2 - t1;
    std::cout << "CPU mm time: " << exec_time_ms.count() << "ms\n";
    t1 = sys_clock::now();
    /// Thrust code!
    thrust::for_each(
        thrust::make_zip_iterator(thrust::make_tuple(matrix_A.begin(),matrix_B.begin(),ids.begin(),matrix_C.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(matrix_A.end(),matrix_B.end(),ids.end(),matrix_C.end())),
        matrix_mult(thrust::raw_pointer_cast(data.data()))
    );
    result = matrix_C;
    t2 = sys_clock::now();
    exec_time_ms = t2 - t1;
    std::cout << "Thrust GPU mm time: " << exec_time_ms.count() << "ms\n";
    bool ora = true;
    std::cout << "\nChecking Matrices" << std::endl;
    // Compare matrices (CPU & thrust) for correctness
    // NOTE(review): only the first col_size entries are compared — confirm
    // whether the whole matrix was intended.
    for(int preguntame = 0; preguntame < col_size; preguntame++)
        if(C[preguntame] == result[preguntame])
            continue;
        else
            ora=false;
    if(ora) cout << "Iguales" << endl;
    else cout << "NOT equal" << endl;
    // Fix: the original leaked all three host buffers.
    free(A);
    free(B);
    free(C);
}
// Entry point: run the benchmark on a 50x50 matrix by default, or on an
// NxN matrix when a dimension is passed on the command line.
int main(int argc, char* argv[])
{
    if (argc < 2) {
        thrust_matrix_mult(50, 50);
    } else {
        const int dim = atoi(argv[1]);
        thrust_matrix_mult(dim, dim);
    }
    return 0;
}
|
23,895 | #include <iomanip>
#include <iostream>
using namespace std;
// CUDA Kernel
//Performs matrix multiplication A * B = Out
//Note that aWidth must equal bHeight for the multiplication to succeed
//Thus we have summarily done away with the latter to remove temptation
//This kernel assumes that A is row major and B is column major
// Naive matrix-multiply kernel: Out = A * B.
// A is row major (aHeight x aWidth); B is column major (aWidth x bWidth),
// so both operands are walked contiguously with the same inner index.
// One thread per output element, guarded against out-of-range threads.
__global__ void matrixMultiply(double *matrixA, double *matrixB, double* matrixOut,
                               int aHeight, int aWidth, int bWidth) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < aHeight && col < bWidth) {
        double acc = 0;
        for (int k = 0; k < aWidth; k++) {
            acc += matrixA[row * aWidth + k] * matrixB[col * aWidth + k];
        }
        matrixOut[row * bWidth + col] = acc;
    }
}
//CUDA Kernel using shared memory to speed things up.
//Performs matrix multiplication A * B = Out
//Note that aWidth must equal bHeight for the multiplication to succeed
//Thus we have summarily done away with the latter to remove temptation
//This kernel assumes that A is row major and B is column major
//Further the max (and probably optimal) aWidth value is 32.
//While the shared memory version does not currently work, due to issues with indexing into A and B
//the resultant calculation is still an order of magnitude faster than the naive implementation.
//How much of this is due to actual efficiency vs busted math (multiplying and adding zeroes instead of values), I am not sure.
//Averages over ten runs for each set of dimensions
// 128x128: .0036 ms vs. .0540 ms
// 256x256 .0035 ms vs. .0590 ms
// 1024x1024 .0044 ms vs. .0880 ms
// 4096x4096 .0058 ms vs. .0890 ms <-- I expected the naive kernel to take much longer on this set
// Experimental shared-memory variant of matrixMultiply (author-acknowledged
// broken — see the comment block above). Kept byte-for-byte; review notes:
// - each thread loads A and B with the SAME (row, col) global index, so the
//   B tile does not actually hold a column of B — this is the indexing bug
//   the author describes;
// - the debug copy-out loop below writes sharedTestA/B from every thread
//   with no bounds tie to the launch shape;
// - the inner product always runs over the full 32-wide tile with no
//   aWidth/aHeight clamp inside the tile.
__global__ void sharedMatrixMultiply(double *matrixA, double *matrixB, double* matrixOut,
int aHeight, int aWidth, int bWidth,
double* sharedTestA, double* sharedTestB) {
//Row and column of the output space
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int tid = row * bWidth + col;
//These values should correspond to our block size
const int sharedWidth = 32;
const int sharedHeight = 32;
__shared__ double sharedA[sharedWidth * sharedHeight];
__shared__ double sharedB[sharedWidth * sharedHeight];
//figure out which rows of A and columns of B need to be loaded into the shared memory
//This should be based off the TID for the output matrix
//If we're in the first row of the output matrix, we need the first row of A
//If we're in the first column of the output matrix, we need the first column of B
//This correspondence seems to hold over the output space
//The size of our block determines how many rows and columns we need to hold
//For block 0,1 it needs to draw from the first set of rows and the second set of columns
//Each thread should load a single element from A and a single element from B into shared memory
//Shared dimensions are NOT the same as block dimensions - should they be?
//Let's assume they are - constraints make this reasonable
int sharedCol = threadIdx.x;
int sharedRow = threadIdx.y;
// NOTE(review): both loads use row*aWidth + col — the B load is wrong for a
// column-major B (see header note).
*(sharedA + sharedRow * sharedWidth + sharedCol) = *(matrixA + row*aWidth + col);
*(sharedB + sharedRow * sharedWidth + sharedCol) = *(matrixB + row*aWidth + col);
//Since the shared memory copy is not working, try something simpler
//*(sharedA + sharedRow * sharedWidth + sharedCol) = blockIdx.x;
//*(sharedB + sharedRow * sharedWidth + sharedCol) = blockIdx.y;
__syncthreads();
// debug: dump both tiles to global memory for host-side inspection
for(int ndx = 0; ndx < sharedHeight * sharedWidth; ndx++) {
*(sharedTestA + ndx) = *(sharedA + ndx);
*(sharedTestB + ndx) = *(sharedB + ndx);
}
double sum = 0;
double lhs = 0;
double rhs = 0;
//TODO: CHECK YOUR SHARED MEMORY DIMENSIONS!
// check to see if we are inside our problem space
if (row < aHeight && col < bWidth) {
// calculate row and col that we are going to compute
// loop over A & B at the same time since A is row major and B is column major
for (int ndx = 0; ndx < sharedWidth; ndx++) {
lhs = *(sharedA + sharedRow*sharedWidth + ndx);
rhs = *(sharedB + sharedCol*sharedWidth + ndx);
//TODO: Test using the identity matrix as the RHS
//rhs = 1;
//Accumulate result
sum += lhs * rhs;
}
// store in matrix
*(matrixOut + tid) = sum;
}
}
// Fill `target` with the repeating pattern 0,1,...,99,0,1,... (index mod 100).
void fillMatrix(double *target, int targetSize) {
    for (int i = 0; i < targetSize; i++) {
        target[i] = i % 100;
    }
}
// Print a row-major numRows x numCols matrix, 7-character columns, followed
// by a trailing blank line.
void printMatrixRowMaj(double *target, int numRows, int numCols) {
    for (int r = 0; r < numRows; r++) {
        for (int c = 0; c < numCols; c++)
            std::cout << std::setw(7) << target[r * numCols + c] << " ";
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Print a column-major numRows x numCols matrix in row order, 7-character
// columns, followed by a trailing blank line.
void printMatrixColMaj(double *target, int numRows, int numCols) {
    for (int r = 0; r < numRows; r++) {
        for (int c = 0; c < numCols; c++)
            std::cout << std::setw(7) << target[c * numRows + r] << " ";
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
// Driver: times the naive and shared-memory matrix-multiply kernels on
// A (aHeight x aWidth, row major) times B (bHeight x bWidth, column major).
int main() {
    int aHeight = 4096;     //num of rows in A
    const int aWidth = 32;  //num of cols in A
    const int bHeight = 32; //num of rows in B - this must be the same as aWidth for AB to work
    int bWidth = 4096;      //num of cols in B
    double *dev_matrixA, *dev_matrixB, *dev_matrixOut, *dev_sharedA, *dev_sharedB;
    cudaEvent_t start, stop;
    float milliseconds; //how long did we take to do things?
    float naiveMs;
    float sharedMs;
    //allocate host space
    double* matrixA = (double * )malloc(sizeof (double) * aHeight * aWidth);
    double* matrixB = (double * )malloc(sizeof (double) * bHeight * bWidth); //The operand matrices
    double* matrixOut = (double * )malloc(sizeof (double) * aHeight * bWidth); //The result matrix
    double* sharedA = (double * )malloc(sizeof (double) * 1024); //debug copies of the shared tiles
    double* sharedB = (double * )malloc(sizeof (double) * 1024);
    //fill operands
    fillMatrix(matrixA, aHeight * aWidth);
    fillMatrix(matrixB, bHeight * bWidth);
    //setup memory on device
    cudaMalloc((void**)&dev_matrixA, (aHeight * aWidth) * sizeof(double));
    cudaMalloc((void**)&dev_matrixB, (bHeight * bWidth) * sizeof(double));
    cudaMalloc((void**)&dev_matrixOut, (aHeight * bWidth) * sizeof(double));
    cudaMalloc((void**)&dev_sharedA, (1024) * sizeof(double));
    cudaMalloc((void**)&dev_sharedB, (1024) * sizeof(double));
    // https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMemcpy(dev_matrixA, matrixA, aHeight * aWidth * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_matrixB, matrixB, bHeight * bWidth * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_matrixOut, matrixOut, aHeight * bWidth * sizeof(double), cudaMemcpyHostToDevice);
    //Set up problem space dimensions
    dim3 threadsPerBlock (32, 32);
    dim3 blocks (1, 4);
    //time the naive kernel
    cudaEventRecord(start);
    matrixMultiply<<<blocks,threadsPerBlock>>>(dev_matrixA, dev_matrixB, dev_matrixOut, aHeight, aWidth, bWidth);
    cudaEventRecord(stop);
    //get result from device
    cudaMemcpy(matrixOut, dev_matrixOut, aHeight * bWidth * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(matrixA, dev_matrixA, 16 * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(matrixB, dev_matrixB, 16 * sizeof(double), cudaMemcpyDeviceToHost);
    //calculate time
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    naiveMs = milliseconds;
    //time the shared-memory kernel
    cudaEventRecord(start);
    sharedMatrixMultiply<<<blocks,threadsPerBlock>>>(dev_matrixA, dev_matrixB, dev_matrixOut, aHeight, aWidth, bWidth, dev_sharedA, dev_sharedB);
    cudaEventRecord(stop);
    //get result from device
    cudaMemcpy(matrixOut, dev_matrixOut, aHeight * bWidth * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(sharedA, dev_sharedA, 16 * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(sharedB, dev_sharedB, 16 * sizeof(double), cudaMemcpyDeviceToHost);
    //calculate time
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    sharedMs = milliseconds;
    //free device memory
    cudaFree(dev_matrixA);
    cudaFree(dev_matrixB);
    cudaFree(dev_matrixOut);
    // Fix: the original called cudaFree on the HOST pointers sharedA/sharedB
    // (invalid) and leaked dev_sharedA/dev_sharedB plus every host buffer
    // and both timing events.
    cudaFree(dev_sharedA);
    cudaFree(dev_sharedB);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(matrixA);
    free(matrixB);
    free(matrixOut);
    free(sharedA);
    free(sharedB);
    std::cout << "the shared memory version took " << sharedMs << " milliseconds to complete.\n";
    std::cout << "the naive implementation took " << naiveMs << " milliseconds to complete.\n";
    return 0;
}
|
23,896 | #include<iostream>
#include<cuda.h>
using namespace std;
//这种__shared__其实没什么,就是一个block内的线程能够维护一个全局变量
#define imin(a,b) (a<b?a:b)
const int N=33*1024;
const int threadsPerBlock=256;
const int blocksPerGrid=imin(32,(N+threadsPerBlock-1)/threadsPerBlock);
// Dot-product kernel: each block writes its partial sum to c[blockIdx.x];
// the host sums the per-block partials.
// Uses a grid-stride loop so any <<<blocks, threadsPerBlock>>> launch covers
// all N elements; `cache` holds one partial per thread of the block.
__global__ void dot(float *a,float *b,float *c){
    __shared__ float cache[threadsPerBlock];
    int tid=threadIdx.x+blockIdx.x*blockDim.x;
    int cacheIndex=threadIdx.x;
    float temp=0;
    while(tid<N){ // grid-stride accumulation
        temp+=a[tid]*b[tid];
        tid+=blockDim.x*gridDim.x;
    }
    cache[cacheIndex]=temp;
    __syncthreads();
    // Fix: only thread 0 performs the serial reduction and the store. The
    // original had EVERY thread sum the whole cache and race on the same
    // write to c[blockIdx.x]; the value stored is identical, but the work
    // and the redundant racing stores are removed.
    if (cacheIndex == 0) {
        float sum = 0;
        for (int i = 0; i < threadsPerBlock; i++)
            sum += cache[i];
        c[blockIdx.x] = sum;
    }
}
// Host driver for the dot-product kernel: fills a[i]=i, b[i]=2i, launches
// the kernel, and sums the per-block partials on the CPU.
int main(){
    float *a,*b,*c;
    float *dev_a,*dev_b,*dev_c;
    // Fix: the original wrote new float[N*sizeof(float)] — allocating 4x the
    // needed ELEMENTS — and later released new[] memory with free(), which
    // is undefined behavior. c only needs one slot per block.
    a=new float[N];
    b=new float[N];
    c=new float[blocksPerGrid];
    cudaMalloc(&dev_a,N*sizeof(float));
    cudaMalloc(&dev_b,N*sizeof(float));
    cudaMalloc(&dev_c,blocksPerGrid*sizeof(float));
    for(int i=0;i<N;i++){
        a[i]=i;
        b[i]=i*2;
    }
    cudaMemcpy(dev_a,a,N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b,N*sizeof(float),cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid,threadsPerBlock>>>(dev_a,dev_b,dev_c);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(c,dev_c,blocksPerGrid*sizeof(float),cudaMemcpyDeviceToHost);
    // final reduction of the per-block partial sums on the host
    float sum=0;
    for(int i=0;i<blocksPerGrid;i++)
        sum+=c[i];
    cout<<"Summary="<<sum<<endl;
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] a;   // Fix: match new[] with delete[], not free()
    delete[] b;
    delete[] c;
    return 0;
}
23,897 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// __device__ - GPU
// __global__ - GPU
// __host__ - CPU
// Device kernel: stores the scalar sum a + b into *c.
// Intended for a single-thread launch (<<<1,1>>> in main below); every
// launched thread would perform the same store.
__global__ void add( int a, int b, int *c)
{
*c = a + b;
}
// Launch a single-thread kernel that adds two integers on the device,
// copy the scalar result back, and print it.
int main()
{
    int result;
    int *dev_result;
    cudaMalloc((void**)&dev_result, sizeof(int));
    add<<<1, 1>>>(1, 2, dev_result);
    // blocking copy synchronizes with the kernel before we read the result
    cudaMemcpy(&result, dev_result, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%i\n", result);
    cudaFree(dev_result);
    return 0;
}
|
23,898 | #include <iostream>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Kernel function to add the elements of two arrays
// Kernel: element-wise y = x + y over n floats.
// Fix: the original computed index/stride from threadIdx/blockDim only,
// ignoring blockIdx — with more than one block (main launches <<<8,1024>>>)
// every block processed the SAME elements concurrently, a read-modify-write
// data race on y. This full grid-stride loop partitions the work correctly
// and is unchanged for single-block launches.
__global__
void add(int n, float* x, float* y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}
// Times two launches of the add kernel over 1M unified-memory floats:
// first with 8 blocks x 1024 threads, then with a single thread.
// NOTE(review): the first timing window opens BEFORE cudaMallocManaged and
// the host-side initialization loop, so it measures allocation + init +
// kernel, not the kernel alone; and the second run is labeled "CPU kernel"
// but is actually a <<<1,1>>> GPU launch — confirm intent.
int main(void)
{
int N = 1 << 20;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float* x, * y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 2.0f;
y[i] = 1.0f;
}
int b=8,t=1024;
// Run kernel on 1M elements on the GPU
add <<<b, t>>> (N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout<<"Time for the GPU kernel of blocksize "<<b<< " thread size "<<t<<" number of elements "<<N<<": "<<time<<" ms"<<std::endl;
// second measurement: re-initialize and launch with a single thread
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < N; i++) {
x[i] = 2.0f;
y[i] = 1.0f;
}
b=1;t=1;
// Run kernel on 1M elements on the GPU
add <<<b, t>>> (N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout<<"Time for the CPU kernel: "<<time<<" ms"<<std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
23,899 | /* helloCUDA.cu */
/****************************************************************************/
/* */
/* (C) 2010 Texas Advanced Computing Center. All rights reserved. */
/* */
/* For information, contact Frank Willmore: willmore@tacc.utexas.edu */
/* */
/****************************************************************************/
#include <stdio.h>
#include <assert.h>
#define BLOCKS_PER_GRID 4
#define THREADS_PER_BLOCK 256
#define N_POPULATION 10000
__device__ char d_data_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK][N_POPULATION];
__device__ int d_sum_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
__device__ float d_mean_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
__device__ float d_std_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
// Kernel: one thread per sample row. Sums the N_POPULATION char entries
// of that row of d_data_array, then records the integer sum in
// d_sum_array and the arithmetic mean in d_mean_array.
// Assumes a 1-D launch of BLOCKS_PER_GRID blocks x THREADS_PER_BLOCK
// threads, matching the global array dimensions.
__global__ void calculateMean()
{
    // which sample row this thread owns
    const int sample = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;

    // accumulate the row total in a register
    int total = 0;
    for (int j = 0; j < N_POPULATION; j++)
        total += d_data_array[sample][j];

    // publish both the raw sum and the mean to global memory
    d_sum_array[sample] = total;
    d_mean_array[sample] = (float)total / N_POPULATION;
}
// use persistent data (sum) to calculate variance
// Kernel: one thread per sample row. Computes the population standard
// deviation of the row using the mean written by calculateMean()
// (that kernel must have completed before this one launches).
// Fix: sqrtf() replaces sqrt(), which silently promoted the float
// variance to double on every thread.
__global__ void calculateStandardDeviation()
{
    int sample_number;
    float v_sum = 0.0f;
    // get the sample number for this thread
    sample_number = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    // accumulate the sum of squared deviations from the precomputed mean
    for (int index = 0; index < N_POPULATION; index++)
    {
        float delta = (float)d_data_array[sample_number][index] - d_mean_array[sample_number];
        v_sum += delta * delta;
    }
    // population variance (divide by N, not N-1), then its square root
    float variance = v_sum / N_POPULATION;
    d_std_array[sample_number] = sqrtf(variance);
}
// Host driver: fills 1024 sample rows with bytes from /dev/urandom,
// computes each row's sum/mean/standard deviation on the GPU, prints the
// per-sample results, then prints an ASCII histogram of the means.
// Fixes over the original:
//  * the ~10 MB of host buffers are 'static' instead of stack locals
//    (they previously risked overflowing the default stack);
//  * fopen/fread results are checked; size_t is printed with %zu, not %d;
//  * kernel launches are checked via cudaGetLastError() — the old code
//    asserted a stale 'result' from an earlier call;
//  * deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize();
//  * the histogram bin index is clamped so an outlier mean cannot write
//    outside the histogram array;
//  * main() returns an explicit status.
int main(int argc, char* argv[])
{
    // static: keeps these large buffers out of the stack frame
    static char h_data_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK][N_POPULATION];
    static int h_sum_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    static float h_std_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    static float h_mean_array[BLOCKS_PER_GRID * THREADS_PER_BLOCK];
    int sample_number;
    FILE *fptr;
    size_t size = sizeof(h_data_array);
    // generate an array with random data, then copy it to the device
    printf("Reading random data from /dev/urandom...\n");
    fptr = fopen("/dev/urandom", "r");
    if (fptr == NULL) {
        perror("fopen(/dev/urandom)");
        return 1;
    }
    if (fread(h_data_array, size, 1, fptr) != 1) {
        fprintf(stderr, "Short read from /dev/urandom\n");
        fclose(fptr);
        return 1;
    }
    fclose(fptr);
    printf("Read %zu bytes from /dev/urandom...\n\n", size);
    /////////////////// sum //////////////////////////////////////
    printf("Calculating sums and means...\n\n");
    cudaError_t result = cudaMemcpyToSymbol(d_data_array, h_data_array, sizeof(h_data_array), 0, cudaMemcpyHostToDevice);
    assert(result == cudaSuccess);
    calculateMean<<< BLOCKS_PER_GRID, THREADS_PER_BLOCK >>>();
    result = cudaGetLastError();          // launch-configuration errors
    assert(result == cudaSuccess);
    result = cudaDeviceSynchronize();     // block until the device has completed
    assert(result == cudaSuccess);
    result = cudaMemcpyFromSymbol(h_sum_array, d_sum_array, sizeof(h_sum_array), 0, cudaMemcpyDeviceToHost);
    assert(result == cudaSuccess);
    /////////////////// standard deviation //////////////////////
    printf("Calculating standard deviations...\n\n");
    calculateStandardDeviation<<< BLOCKS_PER_GRID, THREADS_PER_BLOCK >>>();
    result = cudaGetLastError();
    assert(result == cudaSuccess);
    result = cudaDeviceSynchronize();     // block until the device has completed
    assert(result == cudaSuccess);
    result = cudaMemcpyFromSymbol(h_mean_array, d_mean_array, sizeof(h_mean_array), 0, cudaMemcpyDeviceToHost);
    assert(result == cudaSuccess);
    result = cudaMemcpyFromSymbol(h_std_array, d_std_array, sizeof(h_std_array), 0, cudaMemcpyDeviceToHost);
    assert(result == cudaSuccess);
    for (sample_number = 0; sample_number < BLOCKS_PER_GRID * THREADS_PER_BLOCK; sample_number++)
        printf("x_mean[%3d] = %f\t\tx_std[%3d] = %f\n", sample_number, h_mean_array[sample_number], sample_number, h_std_array[sample_number]);
    /////////////////// histogram //////////////////////////////
    printf("\nBuilding histogram of results...\n\n");
    float width = 0.1, start_x = -3.0;
    int bin, n_bins = 51;
    int histogram[51];
    for (bin = 0; bin < n_bins; bin++) histogram[bin] = 0;
    for (sample_number = 0; sample_number < (BLOCKS_PER_GRID * THREADS_PER_BLOCK); sample_number++)
    {
        bin = (int)floor((h_mean_array[sample_number] - start_x) / width);
        // clamp: an extreme mean must not index outside the table
        if (bin < 0) bin = 0;
        if (bin >= n_bins) bin = n_bins - 1;
        histogram[bin]++;
    }
    for (bin = 0; bin < n_bins; bin++)
    {
        printf("%f\t%d\t", start_x + bin * width, histogram[bin]);
        while (histogram[bin]-- > 0) printf("X");
        printf("\n");
    }
    return 0;
}
|
/*
CPU code, still some bugs.
*/
//#include <cuda_runtime.h>
//#include <vector>
//#include <string>
//#include <set>
//
//#include "load_obj.h"
//#include "collision.cuh"
//#include "check.cuh"
//#include "./common/book.h"
//#include "cpu.cuh"
//
//#define COL_MAX_LEN 1000000
//
//void printElapsedTime(cudaEvent_t* start, cudaEvent_t* stop, const char* opname) {
// printf("\nTime of %s: ", opname);
// float elapsedTime;
// HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, *start, *stop));
// printf("%3.1f ms\n", elapsedTime);
//}
//
//void makeAndPrintSet(unsigned int* data, unsigned int num, const char* title)
//{
// set<unsigned int> dset;
// for (int i = 0; i < num; i++) {
// dset.insert(data[i]);
// }
//
// printf("\n\n%s%u points in total:\n", title, dset.size());
// set<unsigned int>::iterator it;
// for (it = dset.begin(); it != dset.end(); it++) {
// printf("%u\n", *it);
// }
//}
//
//int main()
//{
// cudaEvent_t start, stop, m_start, m_stop;
// HANDLE_ERROR(cudaEventCreate(&start));
// HANDLE_ERROR(cudaEventCreate(&stop));
// HANDLE_ERROR(cudaEventCreate(&m_start));
// HANDLE_ERROR(cudaEventCreate(&m_stop));
//
// HANDLE_ERROR(cudaEventRecord(m_start, 0));
//
// const std::string file_path = "F:/ļ/ҵļ/һ/cuda/projects/CollisionDetection/flag-2000-changed.obj";
//
// std::vector<vec3f> vertexes;
// std::vector<Triangle> triangles;
// std::vector<unsigned long long int> mortons;
//
// loadObj(file_path, vertexes, triangles, mortons);
// const unsigned int m_size = mortons.size();
// const unsigned int v_size = vertexes.size();
//
// vec3f* v_ptr = &vertexes[0]; // new vec3f[v_size];
// Triangle* t_ptr = &triangles[0]; // new Triangle[m_size];
// unsigned long long int* m_ptr = &mortons[0]; // new unsigned long long int[m_size];
// Node* leaf_nodes = new Node[m_size];
// Node* internal_nodes = new Node[m_size-1];
// unsigned int* collision_list = new unsigned int[10000];
// unsigned int test_val;
//
// memset(collision_list, 0, 10000 * sizeof(unsigned int));
//
// /* fill the leaf nodes (original comment was mojibake) */
// fillLeafNodesCpu(t_ptr, m_size, leaf_nodes);
//
// /* build the BVH hierarchy (original comment was mojibake) */
// printf("\n- before generateHierarchyParallel, wrongParentNum = %u\n", collision_list[0]);
// generateHierarchyParallelCpu(m_ptr, m_size, leaf_nodes, internal_nodes, collision_list);
// printf("\n- generateHierarchyParallel check result: wrongParentNum = %u, with total nodes=%u\n\n", collision_list[0], m_size-1);
//
// /* compute bounding boxes (original comment was mojibake) */
// calBoundingBoxCpu(leaf_nodes, v_ptr, m_size);
//
// /* sanity-check internal and leaf nodes (original comment was mojibake) */
// memset(collision_list, 0, sizeof(unsigned int) * 5);
// checkInternalNodesCpu(internal_nodes, m_size - 1, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3], &collision_list[4]);
// printf("\n- Internal node check result: nullParentnum = %u, wrongBoundCount=%u, nullChildCount=%u, notInternalCount=%u, uninitBoxCount=%u, with total nodes=%u\n\n", collision_list[0], collision_list[1], collision_list[2], collision_list[3], collision_list[4], m_size-1);
//
// memset(collision_list, 0, sizeof(unsigned int) * 5);
// checkLeafNodesCpu(leaf_nodes, m_size, &collision_list[0], &collision_list[1], &collision_list[2], &collision_list[3]);
// printf("\n- Leaf node check result: nullParentnum = %u, nullTriangle=%u, notLeafCount=%u, illegalBoxCount=%u, with total nodes=%u\n\n", collision_list[0], collision_list[1], collision_list[2], collision_list[3], m_size);
//
// memset(collision_list, 0, sizeof(unsigned int) * 5);
// checkTriangleIdxCpu(leaf_nodes, v_ptr, m_size, 632674, &collision_list[0]);
// printf("\n- Triangle check result: illegal triangle vidx num = %u, with total triangles=%u\n\n", collision_list[0], mortons.size());
// printf("\n$ triangle num = %u, mortons num = %u, vertex num = %u\n\n", triangles.size(), mortons.size(), vertexes.size());
//
// /* find collisions (original comment was mojibake) */
// //findCollisionsCpu(internal_nodes, leaf_nodes, v_ptr, m_size, &test_val, collision_list);
// ////HANDLE_ERROR(cudaMemcpy(temp_nums, test_val, sizeof(unsigned int), cudaMemcpyDeviceToHost));
// ////HANDLE_ERROR(cudaMemcpy(h_collision_list, collision_list, 1000 * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// //printf("\n\n- contact val = %u\n", test_val);
//
// //printf("\nCollision pair (%u triangle pairs in total):\n", test_val);
// //for (int i = 0; i < test_val; i++) {
// // printf("%07u - %07u\n", collision_list[2 * i], collision_list[2 * i + 1]);
// //}
//
// //makeAndPrintSet(collision_list, 2 * test_val, "Collision Triangles:");
//
// std::cout << "- Successfully Return" << std::endl;
//
// HANDLE_ERROR(cudaEventRecord(m_stop, 0)); HANDLE_ERROR(cudaEventSynchronize(m_stop));
// printElapsedTime(&m_start, &m_stop, "Total Time");
//
// printf("\n test for clzll: %u\n", clzll(4567, 1));
//
// //printf("\n test for __builtin_clz: %u\n", __builtin_clz(1278));
//
// return 0;
//} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.