hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
7a3342743a059573734cc255c1170affe6d4a6ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cutilDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceeding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check: printfBufferPtr is only non-NULL after the host has
// run cudaPrintfInit(), so a NULL here means "not set up - silently ignore".
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check - cuPrintfRestrict() may have limited
// output to a single linearised block id and/or thread id.
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
// Masking works because CUPRINTF_MAX_LEN is a power of two (256).
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be.
// Slot 0 of the per-thread region holds the header, so the wrap target
// is CUPRINTF_MAX_LEN (the first data slot), not 0.
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
// NOTE(review): the atomicAdd operates on the low 32 bits of the 64-bit
// printfBufferPtr pointer - this assumes the increment never carries past
// bit 31 of the address; confirm on 64-bit device address spaces.
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
// Build the record header in a local struct, then store it in one aligned
// write. Writing the header last (with a valid magic) is what marks the
// record as complete for the host-side reader. A NULL ptr (uninitialised
// or restricted-out caller) is silently ignored.
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
    if(ptr == NULL)
        return;
    cuPrintfHeader hdr;
    hdr.magic = CUPRINTF_SM11_MAGIC;
    hdr.fmtoffset = (unsigned short)(fmtptr - ptr);    // fmt string position within the record
    hdr.blockid = blockIdx.x + gridDim.x*blockIdx.y;   // linearised block id of the author
    hdr.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
    *(cuPrintfHeader *)(void *)ptr = hdr;
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
// Copy a string into the output record, prefixed with an aligned length
// field and zero-padded up to the next CUPRINTF_ALIGN_SIZE boundary (the
// stored length *includes* that padding). Returns a pointer just past the
// padded string, or NULL on bad arguments / buffer overflow.
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
    // Reject uninitialised pointers and writes already past the record end.
    if(dest == NULL || src == NULL || dest >= end)
        return NULL;

    // Reserve the length slot up front; it is filled in once we know how
    // many bytes (string + padding) were emitted. We are guaranteed room
    // for it because all writes happen in CUPRINTF_ALIGN_SIZE chunks and
    // CUPRINTF_MAX_LEN is a multiple of CUPRINTF_ALIGN_SIZE.
    int *lenField = (int *)(void *)dest;
    dest += CUPRINTF_ALIGN_SIZE;

    // Copy up to n characters, stopping at the terminator or the record end.
    int written = 0;
    while(n--)
    {
        if(dest >= end)     // truncate rather than overflow
            break;
        written++;
        *dest++ = *src;
        if(*src++ == '\0')
            break;
    }

    // Zero-fill to the next alignment boundary; padding counts toward the length.
    while(dest < end && ((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)
    {
        written++;
        *dest++ = 0;
    }
    *lenField = written;

    // Hitting the end means the copy was truncated - signal with NULL.
    return (dest >= end) ? NULL : dest;
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuimg all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
// String-argument specialisation: a (const char *) is treated as a string
// and copied wholesale into the record (length-prefixed, via
// cuPrintfStrncpy), then zero-terminated. Returns the advanced write
// pointer, or NULL on bad input / overflow.
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
    if(ptr == NULL || arg == NULL)
        return NULL;
    // cuPrintfStrncpy does the length prefix, copy and padding for us.
    ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end);
    if(ptr != NULL)
        *ptr = 0;    // terminate the record so far
    return ptr;
}
// Generic-argument case: store sizeof(arg) (so the host can distinguish
// e.g. float from double at display time), then the raw value, each in its
// own CUPRINTF_ALIGN_SIZE slot. Returns the advanced write pointer, or
// NULL on bad input / overflow.
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
    // Alignment guarantees we sit CUPRINTF_ALIGN_SIZE short of "end" at
    // worst, so a single bound check covers both slots written below.
    if(ptr == NULL || (ptr + CUPRINTF_ALIGN_SIZE) >= end)
        return NULL;

    *(int *)(void *)ptr = sizeof(arg);   // length prefix
    ptr += CUPRINTF_ALIGN_SIZE;
    *(T *)(void *)ptr = arg;             // the value itself
    ptr += CUPRINTF_ALIGN_SIZE;
    *ptr = 0;                            // terminate the record so far
    return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
// NOTE: these three macros expect a "fmt" parameter (and, for CUPRINTF_ARG,
// an argument name) to be in scope at the expansion site. They are #undef'd
// immediately after the last cuPrintf overload below.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
// Zero-argument variant: just the format string.
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
// One to ten arguments: identical structure, one CUPRINTF_ARG per argument.
// Templates let each argument keep its own type (size recorded by copyArg).
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
// The macros are only meaningful inside the overloads above.
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
// Restrict subsequent cuPrintf output to one linearised thread id and/or
// block id. Each id is only stored if it is valid for the current launch
// configuration, or is the CUPRINTF_UNRESTRICTED sentinel; out-of-range
// values are silently ignored, leaving the previous rule in place.
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
    int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    bool thread_ok = (threadid >= 0 && threadid < threads_per_block)
                     || (threadid == CUPRINTF_UNRESTRICTED);
    if(thread_ok)
        restrictRules.threadid = threadid;

    int blocks_per_grid = gridDim.x * gridDim.y;
    bool block_ok = (blockid >= 0 && blockid < blocks_per_grid)
                    || (blockid == CUPRINTF_UNRESTRICTED);
    if(block_ok)
        restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the formate string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
// Host-side record decoder: walks the format string, temporarily splicing
// '\0' into it to isolate each "%...X" specifier, and re-dispatches each
// specifier plus its length-prefixed argument through libc fprintf.
// Both "fmt" and "data" are modified/advanced in place.
static int outputPrintfData(char *fmt, char *data)
{
// Format string is prefixed by a length that we don't need
fmt += CUPRINTF_ALIGN_SIZE;
// Now run through it, printing everything we can. We must
// run to every % character, extract only that, and use printf
// to format it.
char *p = strchr(fmt, '%');
while(p != NULL)
{
// Print up to the % character
*p = '\0';
fputs(fmt, printf_fp);
*p = '%'; // Put back the %
// Now handle the format specifier
char *format = p++; // Points to the '%'
// Scan forward to the conversion character (flags/width/precision skipped).
p += strcspn(p, "%cdiouxXeEfgGaAnps");
if(*p == '\0') // If no format specifier, print the whole thing
{
fmt = format;
break;
}
// Cut out the format bit and use printf to print it. It's prefixed
// by its length.
int arglen = *(int *)data;
// A length larger than a whole record can only mean corruption.
if(arglen > CUPRINTF_MAX_LEN)
{
fputs("Corrupt printf buffer data - aborting\n", printf_fp);
return 0;
}
data += CUPRINTF_ALIGN_SIZE;
char specifier = *p++;
char c = *p; // Store for later
*p = '\0'; // Terminate just past the specifier so fprintf sees only it
switch(specifier)
{
// These all take integer arguments
// NOTE(review): 'p' is printed via an int load - if device pointers are
// 64-bit this shows only the low 32 bits; confirm intended behaviour.
case 'c':
case 'd':
case 'i':
case 'o':
case 'u':
case 'x':
case 'X':
case 'p':
fprintf(printf_fp, format, *((int *)data));
break;
// These all take double arguments
case 'e':
case 'E':
case 'f':
case 'g':
case 'G':
case 'a':
case 'A':
if(arglen == 4) // Float vs. Double thing
fprintf(printf_fp, format, *((float *)data));
else
fprintf(printf_fp, format, *((double *)data));
break;
// Strings are handled in a special way
case 's':
fprintf(printf_fp, format, (char *)data);
break;
// % is special
case '%':
fprintf(printf_fp, "%%");
break;
// Everything else is just printed out as-is
default:
fprintf(printf_fp, "%s", format);
break;
}
// NOTE(review): the cursor always advances by one slot here, even though
// a %s argument occupies arglen (padded) bytes - verify printfs mixing
// %s with later arguments decode as intended.
data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
*p = c; // Restore what we removed
fmt = p; // Adjust fmt string to be past the specifier
p = strchr(fmt, '%'); // and get the next specifier
}
// Print out the last of the string
fputs(fmt, printf_fp);
return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
// Walks the device buffer record-by-record (each CUPRINTF_MAX_LEN bytes),
// copying each to the host and handing it to outputPrintfData().
// bufstart/bufend bound the circular region; bufptr/endptr are the read
// cursor and stop position. Returns the number of records printed.
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
// +1 byte guarantees termination even for a full, unterminated record.
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
// fmtoffset==0 marks a record whose format string did not fit.
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
// Host-side initialisation: allocates and zeroes the device print buffer,
// clears any output restrictions, and publishes buffer pointer/length to
// the device-side symbols. Returns hipSuccess, or
// hipErrorInitializationError if the allocation fails.
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
    // Round the requested length up to a whole number of CUPRINTF_MAX_LEN
    // records (at least one) - getNextPrintfBufPtr() requires the total
    // length to be an exact multiple of the record size.
    if(bufferLen < (size_t)CUPRINTF_MAX_LEN)
        bufferLen = CUPRINTF_MAX_LEN;
    size_t tail = bufferLen % CUPRINTF_MAX_LEN;
    if(tail > 0)
        bufferLen += CUPRINTF_MAX_LEN - tail;
    printfbuf_len = (int)bufferLen;

    // Allocate a print buffer on the device and zero it.
    if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
        return hipErrorInitializationError;
    hipMemset(printfbuf_device, 0, printfbuf_len);
    printfbuf_start = printfbuf_device; // host-side read cursor

    // Begin with output unrestricted for every thread and block.
    cuPrintfRestriction rules;
    rules.threadid = CUPRINTF_UNRESTRICTED;
    rules.blockid = CUPRINTF_UNRESTRICTED;
    hipMemcpyToSymbol(restrictRules, &rules, sizeof(rules));

    // Publish the buffer start, the write pointer, and the length.
    hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
    hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
    hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
    return hipSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
// Releases the device print buffer allocated by cudaPrintfInit().
// Safe to call when initialisation never happened (or already ended).
extern "C" void cudaPrintfEnd()
{
    if(printfbuf_start == NULL || printfbuf_device == NULL)
        return;
    hipFree(printfbuf_device);
    printfbuf_device = NULL;
    printfbuf_start = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
// Dumps everything currently in the device print buffer to outputFP
// (stdout when NULL), advancing the host read cursor so repeated calls
// print only new records. Dispatches on the magic number found at the
// buffer start to decode either the per-thread sm_10 layout or the single
// circular sm_11+ layout.
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return hipErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
// Walk each per-thread sub-buffer in turn.
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
// (data slots begin one record past the per-thread header).
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
// (the device-side pointer is never wrapped - see getNextPrintfBufPtr).
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
hipMemset(printfbuf_device, 0, printfbuf_len);
return hipSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
| 7a3342743a059573734cc255c1170affe6d4a6ff.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cutilDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_11_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceeding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
// Reserves one CUPRINTF_MAX_LEN record for the calling thread and returns
// a pointer to it, or NULL when the system is uninitialised or the thread
// is filtered out by restrictRules.
__device__ static char *getNextPrintfBufPtr()
{
    // Initialisation check: the host sets printfBufferPtr via
    // cudaMemcpyToSymbol in cudaPrintfInit(); NULL means "not set up".
    if(!printfBufferPtr)
        return NULL;
    // Thread/block restriction check
    if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
        return NULL;
    if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
        return NULL;
    // Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
    // For sm_10 architectures, we have no atomic add - this means we must split the
    // entire available buffer into per-thread blocks. Inefficient, but what can you do.
    int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
    int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
                       (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
    // Find our own block of data and go to it. Make sure the per-thread length
    // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
    // alignment issues! We must round down, of course.
    unsigned int thread_buf_len = printfBufferLength / thread_count;
    thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
    // We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
    if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
        return NULL;
    // Now address our section of the buffer. The first item is a header.
    char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
    cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
    if(hdr.magic != CUPRINTF_SM10_MAGIC)
    {
        // If our header is not set up, initialise it
        hdr.magic = CUPRINTF_SM10_MAGIC;
        hdr.thread_index = thread_index;
        hdr.thread_buf_len = thread_buf_len;
        hdr.offset = 0; // Note we start at 0! We pre-increment below.
        *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
        // For initial setup purposes, we might need to init thread0's header too
        // (so that cudaPrintfDisplay() below will work). This is only run once.
        cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
        tophdr->thread_buf_len = thread_buf_len;
    }
    // Adjust the offset by the right amount, and wrap it if need be.
    // Offset 0 is the per-thread header itself, so the first record lives
    // at CUPRINTF_MAX_LEN and wrapping also returns there (never to 0).
    unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
    if(offset >= hdr.thread_buf_len)
        offset = CUPRINTF_MAX_LEN;
    // Write back the new offset for next time and return a pointer to it
    ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
    return myPrintfBuffer + offset;
#else
    // Much easier with an atomic operation!
    // NOTE(review): this atomically bumps only the low 32 bits of the
    // 64-bit printfBufferPtr, then subtracts the full base address. It
    // relies on the buffer base + cumulative increments staying within
    // 32-bit arithmetic -- confirm behaviour for large buffers and 64-bit
    // address spaces before relying on it.
    size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
    offset %= printfBufferLength;
    return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Stamps a completed printf record at "ptr" with its header: the magic
// marker, the offset of the format string within the record, and the
// block/thread coordinates of the author. Building the header on the fly
// avoids any pre-initialisation requirement; writing it last is what
// publishes the record to the host reader. A NULL "ptr" (uninitialised
// system or restricted-out thread) is a no-op.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
    if(!ptr)
        return;
    cuPrintfHeader hdr;
    hdr.magic = CUPRINTF_SM11_MAGIC;
    hdr.fmtoffset = (unsigned short)(fmtptr - ptr);
    hdr.blockid = blockIdx.x + gridDim.x*blockIdx.y;
    hdr.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
    *(cuPrintfHeader *)(void *)ptr = hdr;
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
// Copies "src" into the record as a length-prefixed, alignment-padded
// string. The stored length INCLUDES the zero padding. Returns a pointer
// just past the padded string, or NULL on overflow of the record.
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
    // Initialisation and overflow check
    if(!dest || !src || (dest >= end))
        return NULL;
    // Prepare to write the length specifier. We're guaranteed to have
    // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
    // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
    int *lenptr = (int *)(void *)dest;
    int len = 0;
    dest += CUPRINTF_ALIGN_SIZE;
    // Now copy the string. Note that if "n" runs out or we hit "end"
    // before seeing the terminator, no '\0' is written -- the host-side
    // reader tolerates this because the record itself is bounded.
    while(n--)
    {
        if(dest >= end) // Overflow check
            break;
        len++;
        *dest++ = *src;
        if(*src++ == '\0')
            break;
    }
    // Now write out the padding bytes, and we have our length.
    while((dest < end) && (((size_t)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
    {
        len++;
        *dest++ = 0;
    }
    *lenptr = len;
    return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuimg all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
// String specialisation: a (const char *) argument is copied in full as a
// length-prefixed, padded string rather than as a pointer value.
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
    // Bail out on an uninitialised record or a NULL string argument.
    if(ptr == NULL || arg == NULL)
        return NULL;
    // cuPrintfStrncpy does the length-prefix, copy and padding for us;
    // we only need to terminate the entry if the copy fitted.
    char *next = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end);
    if(next != NULL)
        *next = 0;
    return next;
}
// Generic argument copy: writes sizeof(arg) (so the host can tell, e.g.,
// float from double) followed by the value, each on a CUPRINTF_ALIGN_SIZE
// boundary. Returns the advanced cursor, or NULL on failure/overflow.
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
    // A NULL cursor means an earlier copy already failed.
    if(ptr == NULL)
        return NULL;
    // Alignment rules keep us at least CUPRINTF_ALIGN_SIZE below "end",
    // so this single offset test is the only overflow check we need.
    if((ptr + CUPRINTF_ALIGN_SIZE) >= end)
        return NULL;
    // Size prefix first...
    *(int *)(void *)ptr = sizeof(arg);
    ptr += CUPRINTF_ALIGN_SIZE;
    // ...then the argument itself, in its own aligned slot.
    *(T *)(void *)ptr = arg;
    ptr += CUPRINTF_ALIGN_SIZE;
    *ptr = 0;
    return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt and write the fmt and header
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
// Zero-argument variant: just the format string, no data entries.
__device__ int cuPrintf(const char *fmt)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_POSTAMBLE;
}
// One- to ten-argument variants. Each simply copies its arguments in
// order (via CUPRINTF_ARG) and then the format string; templates let the
// compiler pick the right copyArg() per argument type.
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_ARG(arg5);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_ARG(arg5);
    CUPRINTF_ARG(arg6);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_ARG(arg5);
    CUPRINTF_ARG(arg6);
    CUPRINTF_ARG(arg7);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_ARG(arg5);
    CUPRINTF_ARG(arg6);
    CUPRINTF_ARG(arg7);
    CUPRINTF_ARG(arg8);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_ARG(arg5);
    CUPRINTF_ARG(arg6);
    CUPRINTF_ARG(arg7);
    CUPRINTF_ARG(arg8);
    CUPRINTF_ARG(arg9);
    CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
    CUPRINTF_PREAMBLE;
    CUPRINTF_ARG(arg1);
    CUPRINTF_ARG(arg2);
    CUPRINTF_ARG(arg3);
    CUPRINTF_ARG(arg4);
    CUPRINTF_ARG(arg5);
    CUPRINTF_ARG(arg6);
    CUPRINTF_ARG(arg7);
    CUPRINTF_ARG(arg8);
    CUPRINTF_ARG(arg9);
    CUPRINTF_ARG(arg10);
    CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
    // Accept the thread filter only if it names a real thread of this
    // block, or is the explicit "no restriction" sentinel.
    int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    bool threadOk = (threadid == CUPRINTF_UNRESTRICTED) ||
                    ((threadid >= 0) && (threadid < threadsPerBlock));
    if(threadOk)
        restrictRules.threadid = threadid;
    // Same validation for the block filter, against the launch grid.
    int blocksPerGrid = gridDim.x * gridDim.y;
    bool blockOk = (blockid == CUPRINTF_UNRESTRICTED) ||
                   ((blockid >= 0) && (blockid < blocksPerGrid));
    if(blockOk)
        restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;
static char *printfbuf_start=NULL;
static char *printfbuf_device=NULL;
static int printfbuf_len=0;
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the formate string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
// Renders one device-side printf record through libc's printf.
// "fmt" points at the length-prefixed format string within the record;
// "data" points at the first length-prefixed argument slot.
// Returns 0 on failure, 1 on success.
static int outputPrintfData(char *fmt, char *data)
{
    // Format string is prefixed by a length that we don't need
    fmt += CUPRINTF_ALIGN_SIZE;
    // Now run through it, printing everything we can. We must
    // run to every % character, extract only that, and use printf
    // to format it.
    char *p = strchr(fmt, '%');
    while(p != NULL)
    {
        // Print up to the % character
        *p = '\0';
        fputs(fmt, printf_fp);
        *p = '%'; // Put back the %
        // Now handle the format specifier
        char *format = p++; // Points to the '%'
        p += strcspn(p, "%cdiouxXeEfgGaAnps");
        if(*p == '\0') // If no format specifier, print the whole thing
        {
            fmt = format;
            break;
        }
        // Cut out the format bit and use printf to print it. It's prefixed
        // by its length.
        int arglen = *(int *)data;
        if(arglen > CUPRINTF_MAX_LEN)
        {
            fputs("Corrupt printf buffer data - aborting\n", printf_fp);
            return 0;
        }
        data += CUPRINTF_ALIGN_SIZE;
        char specifier = *p++;
        char c = *p; // Store for later
        *p = '\0';
        switch(specifier)
        {
        // These all take integer arguments
        case 'c':
        case 'd':
        case 'i':
        case 'o':
        case 'u':
        case 'x':
        case 'X':
        case 'p':
            // NOTE(review): %p is fed from a 32-bit int slot here, so
            // pointer values are truncated on 64-bit hosts -- confirm
            // whether full-pointer display was ever intended.
            fprintf(printf_fp, format, *((int *)data));
            break;
        // These all take double arguments
        case 'e':
        case 'E':
        case 'f':
        case 'g':
        case 'G':
        case 'a':
        case 'A':
            if(arglen == 4) // Float vs. Double thing
                fprintf(printf_fp, format, *((float *)data));
            else
                fprintf(printf_fp, format, *((double *)data));
            break;
        // Strings are handled in a special way
        case 's':
            // NOTE(review): the cursor below advances a fixed
            // CUPRINTF_ALIGN_SIZE rather than the stored "arglen", so a
            // %s argument longer than CUPRINTF_ALIGN_SIZE bytes will
            // desynchronise any following arguments -- verify against
            // copyArg(const char *)'s layout.
            fprintf(printf_fp, format, (char *)data);
            break;
        // % is special
        case '%':
            // NOTE(review): "%%" consumes no argument slot on the device
            // side, yet this path still advances "data" below -- a literal
            // %% before other specifiers likely desynchronises them; confirm.
            fprintf(printf_fp, "%%");
            break;
        // Everything else is just printed out as-is
        default:
            fprintf(printf_fp, "%s", format);
            break;
        }
        data += CUPRINTF_ALIGN_SIZE; // Move on to next argument
        *p = c; // Restore what we removed
        fmt = p; // Adjust fmt string to be past the specifier
        p = strchr(fmt, '%'); // and get the next specifier
    }
    // Print out the last of the string
    fputs(fmt, printf_fp);
    return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
// Walks the circular buffer record-by-record (each CUPRINTF_MAX_LEN bytes)
// from "bufptr" until it catches up with "endptr", wrapping from "bufend"
// back to "bufstart", and prints each completed record.
//   headings - non-zero to prefix each line with [block, thread]
//   clear    - non-zero to zero each record after printing (async mode)
// Returns the number of printfs displayed.
// Fix: this hipify-translated file was still calling cudaMemcpy/cudaMemset,
// which do not exist under the HIP runtime headers this file includes;
// converted to hipMemcpy/hipMemset (identical semantics).
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
    int printf_count = 0;
    // Local staging copy of one record, with a forced terminator so a
    // corrupt record cannot run fputs off the end.
    char printfbuf_local[CUPRINTF_MAX_LEN+1];
    printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
    while(bufptr != endptr)
    {
        // Wrap ourselves at the end-of-buffer
        if(bufptr == bufend)
            bufptr = bufstart;
        // Copy one record down from the device.
        hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);
        // If the magic number isn't valid, then this write hasn't gone through
        // yet and we'll wait until it does (or we're past the end for non-async printfs).
        cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
        if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
        {
            //fprintf(printf_fp, "Bad magic number in printf header\n");
            break;
        }
        // Extract all the info and get this printf done
        if(headings)
            fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
        if(hdr->fmtoffset == 0)
            fprintf(printf_fp, "printf buffer overflow\n");
        else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
            break;
        printf_count++;
        // Clear if asked
        if(clear)
            hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);
        // Now advance our start location, because we're done, and keep copying
        bufptr += CUPRINTF_MAX_LEN;
    }
    return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
// Host-side initialisation: allocates and zeroes the device-side circular
// buffer (rounded up to a multiple of CUPRINTF_MAX_LEN), clears any
// thread/block restriction, and publishes buffer pointer/length to the
// device symbols. Caller must later free via cudaPrintfEnd().
// NOTE(review): this hipify-translated file still calls the cuda* host API
// (cudaMalloc/cudaMemset/cudaMemcpyToSymbol) and returns cudaError_t; it
// will not compile against HIP headers as-is -- confirm and port to the
// hip* equivalents (and hipError_t) together with the callers.
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
    // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN
    bufferLen = (bufferLen < (size_t)CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
    if((bufferLen % CUPRINTF_MAX_LEN) > 0)
        bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
    printfbuf_len = (int)bufferLen;
    // Allocate a print buffer on the device and zero it
    if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
        return cudaErrorInitializationError;
    cudaMemset(printfbuf_device, 0, printfbuf_len);
    printfbuf_start = printfbuf_device; // Where we start reading from
    // No restrictions to begin with
    // NOTE(review): "restrict" is a C99 keyword (reserved in some C++
    // toolchains too) -- consider renaming; verify the target compiler
    // accepts it as an identifier.
    cuPrintfRestriction restrict;
    restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED;
    cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict));
    // Initialise the buffer and the respective lengths/pointers.
    cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
    cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
    cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
    return cudaSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
// Releases the device-side print buffer allocated by cudaPrintfInit()
// and resets the host-side bookkeeping pointers. Safe to call when
// initialisation never happened (no-op).
// Fix: this hipify-translated file was still calling cudaFree, which does
// not exist under the HIP headers it includes; converted to hipFree.
extern "C" void cudaPrintfEnd()
{
    if(!printfbuf_start || !printfbuf_device)
        return;
    hipFree(printfbuf_device);
    printfbuf_start = printfbuf_device = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
// Dumps the current contents of the device printf buffer to outputFP
// (stdout when NULL), dispatching on the magic number to either the
// per-thread sm_10 layout or the shared circular sm_11+ layout.
// NOTE(review): like the other host functions in this hipified file, this
// still uses the cuda* API and cudaError_t -- port to hip* with callers.
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
    printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
    // For now, we force "synchronous" mode which means we're not concurrent
    // with kernel execution. This also means we don't need clearOnPrint.
    // If you're patching it for async operation, here's where you want it.
    bool sync_printfs = true;
    bool clearOnPrint = false;
    // Initialisation check
    if(!printfbuf_start || !printfbuf_device || !printf_fp)
        return cudaErrorMissingConfiguration;
    // To determine which architecture we're using, we read the
    // first short from the buffer - it'll be the magic number
    // relating to the version.
    unsigned short magic;
    cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
    // For SM_10 architecture, we've split our buffer into one-per-thread.
    // That means we must do each thread block separately. It'll require
    // extra reading. We also, for now, don't support async printfs because
    // that requires tracking one start pointer per thread.
    if(magic == CUPRINTF_SM10_MAGIC)
    {
        sync_printfs = true;
        clearOnPrint = false;
        int blocklen = 0;
        char *blockptr = printfbuf_device;
        // Walk the per-thread partitions; each begins with a cuPrintfHeaderSM10.
        while(blockptr < (printfbuf_device + printfbuf_len))
        {
            cuPrintfHeaderSM10 hdr;
            cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
            // We get our block-size-step from the very first header
            if(hdr.thread_buf_len != 0)
                blocklen = hdr.thread_buf_len;
            // No magic number means no printfs from this thread
            if(hdr.magic != CUPRINTF_SM10_MAGIC)
            {
                if(blocklen == 0)
                {
                    fprintf(printf_fp, "No printf headers found at all!\n");
                    break; // No valid headers!
                }
                blockptr += blocklen;
                continue;
            }
            // "offset" is non-zero then we can print the block contents
            if(hdr.offset > 0)
            {
                // For synchronous printfs, we must print from endptr->bufend, then from start->end
                if(sync_printfs)
                    doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
                doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
            }
            // Move on to the next block and loop again
            blockptr += hdr.thread_buf_len;
        }
    }
    // For SM_11 and up, everything is a single buffer and it's simple
    else if(magic == CUPRINTF_SM11_MAGIC)
    {
        // Grab the current "end of circular buffer" pointer.
        char *printfbuf_end = NULL;
        cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
        // Adjust our starting and ending pointers to within the block
        // (printfBufferPtr increases monotonically on the device; the
        // modulo maps it back into the circular buffer).
        char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
        char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
        // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
        // buffer wrap carefully because we could miss those past "end".
        if(sync_printfs)
            doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
        doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
        printfbuf_start = printfbuf_end;
    }
    else
        ;//printf("Bad magic number in cuPrintf buffer header\n");
    // If we were synchronous, then we must ensure that the memory is cleared on exit
    // otherwise another kernel launch with a different grid size could conflict.
    if(sync_printfs)
        cudaMemset(printfbuf_device, 0, printfbuf_len);
    return cudaSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
7858956ecf1ea459fc3c4f615c00c31699394f62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaAcc_GPS_kernel_mod3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver: for each requested matrix size and each
// of 20 block shapes, launches cudaAcc_GPS_kernel_mod3 once to warm the
// context, 10 more times to warm caches, then 1000 timed launches, and
// prints [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].
// argv[1] = number of entries of matrices_ to sweep.
int main(int argc, char **argv) {
hipSetDevice(0);
// Robustness: the original dereferenced argv[1] unconditionally.
if (argc < 2) {
	fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
	return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int NumDataPoints = 1;
// Fix: allocate in BYTES. The original passed XSIZE*YSIZE as the byte
// count, under-allocating by sizeof(float2)/sizeof(float) per element.
float2 *FreqData = NULL;
hipMalloc(&FreqData, (size_t)XSIZE*YSIZE*sizeof(float2));
float *PowerSpectrum = NULL;
hipMalloc(&PowerSpectrum, (size_t)XSIZE*YSIZE*sizeof(float));
// Round the matrix extents up to multiples of the block shape so the
// grid fully covers the data.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
	iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
	iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0); // forces lazy context creation before we start timing
hipLaunchKernelGGL((cudaAcc_GPS_kernel_mod3), dim3(gridBlock),dim3(threadBlock), 0, 0, NumDataPoints,FreqData,PowerSpectrum);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cudaAcc_GPS_kernel_mod3), dim3(gridBlock),dim3(threadBlock), 0, 0, NumDataPoints,FreqData,PowerSpectrum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cudaAcc_GPS_kernel_mod3), dim3(gridBlock),dim3(threadBlock), 0, 0, NumDataPoints,FreqData,PowerSpectrum);
}
auto end = steady_clock::now();
// NOTE: launches are asynchronous; with no sync before "end" this mostly
// measures enqueue cost. Kept as-is to preserve the benchmark's semantics.
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Fix: release the per-configuration buffers; the original leaked both
// allocations on every iteration of the sweep.
hipFree(FreqData);
hipFree(PowerSpectrum);
}
}} | 7858956ecf1ea459fc3c4f615c00c31699394f62.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaAcc_GPS_kernel_mod3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver (CUDA twin of the HIP version): sweeps
// matrix sizes x block shapes and times 1000 launches of
// cudaAcc_GPS_kernel_mod3 per configuration, printing
// [usecs,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]. argv[1] = matrices_ entries to sweep.
int main(int argc, char **argv) {
cudaSetDevice(0);
// Robustness: the original dereferenced argv[1] unconditionally.
if (argc < 2) {
	fprintf(stderr, "usage: %s <matrix_count>\n", argv[0]);
	return 1;
}
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int NumDataPoints = 1;
// Fix: allocate in BYTES. The original passed XSIZE*YSIZE as the byte
// count, under-allocating by sizeof(float2)/sizeof(float) per element.
float2 *FreqData = NULL;
cudaMalloc(&FreqData, (size_t)XSIZE*YSIZE*sizeof(float2));
float *PowerSpectrum = NULL;
cudaMalloc(&PowerSpectrum, (size_t)XSIZE*YSIZE*sizeof(float));
// Round the matrix extents up to multiples of the block shape so the
// grid fully covers the data.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
	iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
	iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0); // forces lazy context creation before we start timing
cudaAcc_GPS_kernel_mod3<<<gridBlock,threadBlock>>>(NumDataPoints,FreqData,PowerSpectrum);
cudaDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaAcc_GPS_kernel_mod3<<<gridBlock,threadBlock>>>(NumDataPoints,FreqData,PowerSpectrum);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaAcc_GPS_kernel_mod3<<<gridBlock,threadBlock>>>(NumDataPoints,FreqData,PowerSpectrum);
}
auto end = steady_clock::now();
// NOTE: launches are asynchronous; with no sync before "end" this mostly
// measures enqueue cost. Kept as-is to preserve the benchmark's semantics.
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// Fix: release the per-configuration buffers; the original leaked both
// allocations on every iteration of the sweep.
cudaFree(FreqData);
cudaFree(PowerSpectrum);
}
}
}
405bd82902d2a394ae27eafebac735e3d65c5553.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include<iostream>
#include "caffe/layers/cudnn_conv_layer.hpp"
using namespace std;
namespace caffe {
__global__ void sync_conv_groups() { }
// Forward pass: for each bottom/top blob pair, runs cudnnConvolutionForward
// once per filter group (each group on its own cuDNN handle/stream), adds
// the bias if present, then joins the per-group streams via an empty
// null-stream kernel launch.
// NOTE(review): this hipify-translated file still calls the cudnn* API
// directly -- confirm the build maps these (hipDNN/MIOpen) or targets CUDA.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* weight = this->blobs_[0]->gpu_data();
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[i]->mutable_gpu_data();
    // Forward through cuDNN in parallel over groups.
    for (int g = 0; g < this->group_; g++) {
      // Filters. beta = zero, so the group's output region is overwritten.
      CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
            cudnn::dataType<Dtype>::one,
            bottom_descs_[i], bottom_data + bottom_offset_ * g,
            filter_desc_, weight + this->weight_offset_ * g,
            conv_descs_[i],
            fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
            cudnn::dataType<Dtype>::zero,
            top_descs_[i], top_data + top_offset_ * g));
      // Bias. beta = one accumulates the bias into the conv result above.
      if (this->bias_term_) {
        const Dtype* bias_data = this->blobs_[1]->gpu_data();
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_data + bias_offset_ * g,
              cudnn::dataType<Dtype>::one,
              top_descs_[i], top_data + top_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
  }
}
// Backward pass: per top blob and per filter group, computes (as enabled)
// the bias gradient, the weight gradient (both with beta = one, i.e.
// accumulated into the existing diffs), and the bottom-data gradient
// (beta = zero, overwritten). Each gradient kind uses its own bank of
// handles/streams; an empty null-stream kernel launch joins them.
// NOTE(review): hipified file still calling cudnn* directly -- see Forward_gpu.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = NULL;
  Dtype* weight_diff = NULL;
  if (this->param_propagate_down_[0]) {
    weight = this->blobs_[0]->gpu_data();
    weight_diff = this->blobs_[0]->mutable_gpu_diff();
  }
  Dtype* bias_diff = NULL;
  if (this->bias_term_ && this->param_propagate_down_[1]) {
    bias_diff = this->blobs_[1]->mutable_gpu_diff();
  }
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    // Backward through cuDNN in parallel over groups and gradients.
    for (int g = 0; g < this->group_; g++) {
      // Gradient w.r.t. bias (handles bank 0).
      if (this->bias_term_ && this->param_propagate_down_[1]) {
        CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              top_descs_[i],  top_diff + top_offset_ * g,
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_diff + bias_offset_ * g));
      }
      // Gradient w.r.t. weights (handles bank 1).
      if (this->param_propagate_down_[0]) {
        const Dtype* bottom_data = bottom[i]->gpu_data();
        CUDNN_CHECK(cudnnConvolutionBackwardFilter(
              handle_[1*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              bottom_descs_[i], bottom_data + bottom_offset_ * g,
              top_descs_[i],    top_diff + top_offset_ * g,
              conv_descs_[i],
              bwd_filter_algo_[i], workspace[1*this->group_ + g],
              workspace_bwd_filter_sizes_[i],
              cudnn::dataType<Dtype>::one,
              filter_desc_, weight_diff + this->weight_offset_ * g));
      }
      // Gradient w.r.t. bottom data (handles bank 2).
      if (propagate_down[i]) {
        if (weight == NULL) {
          weight = this->blobs_[0]->gpu_data();
        }
        Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
        CUDNN_CHECK(cudnnConvolutionBackwardData(
              handle_[2*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              filter_desc_, weight + this->weight_offset_ * g,
              top_descs_[i], top_diff + top_offset_ * g,
              conv_descs_[i],
              bwd_data_algo_[i], workspace[2*this->group_ + g],
              workspace_bwd_data_sizes_[i],
              cudnn::dataType<Dtype>::zero,
              bottom_descs_[i], bottom_diff + bottom_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 405bd82902d2a394ae27eafebac735e3d65c5553.cu | #ifdef USE_CUDNN
#include <vector>
#include<iostream>
#include "caffe/layers/cudnn_conv_layer.hpp"
using namespace std;
namespace caffe {
__global__ void sync_conv_groups() { }
// Forward pass: for each bottom/top blob pair, runs cudnnConvolutionForward
// once per filter group (each group on its own cuDNN handle/stream), adds
// the bias if present, then joins the per-group streams via an empty
// null-stream kernel launch.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const Dtype* weight = this->blobs_[0]->gpu_data();
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    Dtype* top_data = top[i]->mutable_gpu_data();
    // Forward through cuDNN in parallel over groups.
    for (int g = 0; g < this->group_; g++) {
      // Filters. beta = zero, so the group's output region is overwritten.
      CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
            cudnn::dataType<Dtype>::one,
            bottom_descs_[i], bottom_data + bottom_offset_ * g,
            filter_desc_, weight + this->weight_offset_ * g,
            conv_descs_[i],
            fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i],
            cudnn::dataType<Dtype>::zero,
            top_descs_[i], top_data + top_offset_ * g));
      // Bias. beta = one accumulates the bias into the conv result above.
      if (this->bias_term_) {
        const Dtype* bias_data = this->blobs_[1]->gpu_data();
        CUDNN_CHECK(cudnnAddTensor(handle_[g],
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_data + bias_offset_ * g,
              cudnn::dataType<Dtype>::one,
              top_descs_[i], top_data + top_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    sync_conv_groups<<<1, 1>>>();
  }
}
// Backward pass: per top blob and per filter group, computes (as enabled)
// the bias gradient, the weight gradient (both with beta = one, i.e.
// accumulated into the existing diffs), and the bottom-data gradient
// (beta = zero, overwritten). Each gradient kind uses its own bank of
// handles/streams; an empty null-stream kernel launch joins them.
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* weight = NULL;
  Dtype* weight_diff = NULL;
  if (this->param_propagate_down_[0]) {
    weight = this->blobs_[0]->gpu_data();
    weight_diff = this->blobs_[0]->mutable_gpu_diff();
  }
  Dtype* bias_diff = NULL;
  if (this->bias_term_ && this->param_propagate_down_[1]) {
    bias_diff = this->blobs_[1]->mutable_gpu_diff();
  }
  for (int i = 0; i < top.size(); ++i) {
    const Dtype* top_diff = top[i]->gpu_diff();
    // Backward through cuDNN in parallel over groups and gradients.
    for (int g = 0; g < this->group_; g++) {
      // Gradient w.r.t. bias (handles bank 0).
      if (this->bias_term_ && this->param_propagate_down_[1]) {
        CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              top_descs_[i],  top_diff + top_offset_ * g,
              cudnn::dataType<Dtype>::one,
              bias_desc_, bias_diff + bias_offset_ * g));
      }
      // Gradient w.r.t. weights (handles bank 1).
      if (this->param_propagate_down_[0]) {
        const Dtype* bottom_data = bottom[i]->gpu_data();
        CUDNN_CHECK(cudnnConvolutionBackwardFilter(
              handle_[1*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              bottom_descs_[i], bottom_data + bottom_offset_ * g,
              top_descs_[i],    top_diff + top_offset_ * g,
              conv_descs_[i],
              bwd_filter_algo_[i], workspace[1*this->group_ + g],
              workspace_bwd_filter_sizes_[i],
              cudnn::dataType<Dtype>::one,
              filter_desc_, weight_diff + this->weight_offset_ * g));
      }
      // Gradient w.r.t. bottom data (handles bank 2).
      if (propagate_down[i]) {
        if (weight == NULL) {
          weight = this->blobs_[0]->gpu_data();
        }
        Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
        CUDNN_CHECK(cudnnConvolutionBackwardData(
              handle_[2*this->group_ + g],
              cudnn::dataType<Dtype>::one,
              filter_desc_, weight + this->weight_offset_ * g,
              top_descs_[i], top_diff + top_offset_ * g,
              conv_descs_[i],
              bwd_data_algo_[i], workspace[2*this->group_ + g],
              workspace_bwd_data_sizes_[i],
              cudnn::dataType<Dtype>::zero,
              bottom_descs_[i], bottom_diff + bottom_offset_ * g));
      }
    }
    // Synchronize the work across groups, each of which went into its own
    // stream, by launching an empty kernel into the default (null) stream.
    // NOLINT_NEXT_LINE(whitespace/operators)
    sync_conv_groups<<<1, 1>>>();
  }
}
#endif
|
e7a368ed9d07cf2372c5bef45d308a473afb069f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#define GLFW_INCLUDE_VULKAN
#ifdef _WIN64
#include <aclapi.h>
#include <dxgi1_2.h>
#include <windows.h>
#include <VersionHelpers.h>
#define _USE_MATH_DEFINES
#endif
#include <GLFW/glfw3.h>
#include <vulkan/vulkan.h>
#ifdef _WIN64
#include <vulkan/vulkan_win32.h>
#endif
#include <algorithm>
#include <array>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <set>
#include <stdexcept>
#include <thread>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include <helper_math.h>
#include "linmath.h"
#define WIDTH 800
#define HEIGHT 600
const int MAX_FRAMES = 4;
const std::vector<const char*> validationLayers = {
"VK_LAYER_KHRONOS_validation"};
#ifdef NDEBUG
const bool enableValidationLayers = false;
#else
const bool enableValidationLayers = false;
#endif
std::string execution_path;
// Loader shim: vkCreateDebugUtilsMessengerEXT is an extension entry point,
// so it must be resolved at runtime through vkGetInstanceProcAddr.
// Returns VK_ERROR_EXTENSION_NOT_PRESENT when the extension is unavailable.
VkResult CreateDebugUtilsMessengerEXT(
    VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
    const VkAllocationCallbacks* pAllocator,
    VkDebugUtilsMessengerEXT* pDebugMessenger) {
  PFN_vkCreateDebugUtilsMessengerEXT createFn =
      (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
          instance, "vkCreateDebugUtilsMessengerEXT");
  if (createFn == nullptr) {
    return VK_ERROR_EXTENSION_NOT_PRESENT;
  }
  return createFn(instance, pCreateInfo, pAllocator, pDebugMessenger);
}
// Device extensions this sample requires: swap chain for presentation, plus
// external memory / external semaphore (core interfaces and the
// platform-specific handle-type extension) for Vulkan<->CUDA interop.
const std::vector<const char*> deviceExtensions = {
    VK_KHR_SWAPCHAIN_EXTENSION_NAME,
    VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
    VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
#ifdef _WIN64
    VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
    VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
#else
    VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
    VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
#endif
};
#ifdef _WIN64
// Builds a SECURITY_ATTRIBUTES whose DACL grants "Everyone" full access;
// used when creating Win32 handles that must be shareable/exportable.
// The descriptor, SID, and ACL are owned by this object and released in the
// destructor.
class WindowsSecurityAttributes {
 protected:
  SECURITY_ATTRIBUTES m_winSecurityAttributes;
  PSECURITY_DESCRIPTOR m_winPSecurityDescriptor;

 public:
  WindowsSecurityAttributes();
  // Returns the populated SECURITY_ATTRIBUTES for handle-creation calls.
  SECURITY_ATTRIBUTES* operator&();
  ~WindowsSecurityAttributes();
};
WindowsSecurityAttributes::WindowsSecurityAttributes() {
  // Single allocation: the security descriptor followed by storage for the
  // SID and ACL pointers (all freed together in the destructor).
  m_winPSecurityDescriptor = (PSECURITY_DESCRIPTOR)calloc(
      1, SECURITY_DESCRIPTOR_MIN_LENGTH + 2 * sizeof(void**));
  PSID* ppSID =
      (PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
  PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
  InitializeSecurityDescriptor(m_winPSecurityDescriptor,
                               SECURITY_DESCRIPTOR_REVISION);
  // SID for the well-known "World" (Everyone) group.
  SID_IDENTIFIER_AUTHORITY sidIdentifierAuthority =
      SECURITY_WORLD_SID_AUTHORITY;
  AllocateAndInitializeSid(&sidIdentifierAuthority, 1, SECURITY_WORLD_RID, 0, 0,
                           0, 0, 0, 0, 0, ppSID);
  // Grant that SID all standard and specific rights.
  EXPLICIT_ACCESS explicitAccess;
  ZeroMemory(&explicitAccess, sizeof(EXPLICIT_ACCESS));
  explicitAccess.grfAccessPermissions =
      STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL;
  explicitAccess.grfAccessMode = SET_ACCESS;
  explicitAccess.grfInheritance = INHERIT_ONLY;
  explicitAccess.Trustee.TrusteeForm = TRUSTEE_IS_SID;
  explicitAccess.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
  explicitAccess.Trustee.ptstrName = (LPTSTR)*ppSID;
  SetEntriesInAcl(1, &explicitAccess, NULL, ppACL);
  SetSecurityDescriptorDacl(m_winPSecurityDescriptor, TRUE, *ppACL, FALSE);
  m_winSecurityAttributes.nLength = sizeof(m_winSecurityAttributes);
  m_winSecurityAttributes.lpSecurityDescriptor = m_winPSecurityDescriptor;
  m_winSecurityAttributes.bInheritHandle = TRUE;
}
SECURITY_ATTRIBUTES* WindowsSecurityAttributes::operator&() {
  return &m_winSecurityAttributes;
}
WindowsSecurityAttributes::~WindowsSecurityAttributes() {
  // Recover the SID/ACL pointers stashed after the descriptor and free them
  // with the matching Win32 deallocators.
  PSID* ppSID =
      (PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
  PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
  if (*ppSID) {
    FreeSid(*ppSID);
  }
  if (*ppACL) {
    LocalFree(*ppACL);
  }
  free(m_winPSecurityDescriptor);
}
#endif
// Loader shim for the vkDestroyDebugUtilsMessengerEXT extension entry
// point; silently does nothing if the extension is unavailable.
void DestroyDebugUtilsMessengerEXT(VkInstance instance,
                                   VkDebugUtilsMessengerEXT debugMessenger,
                                   const VkAllocationCallbacks* pAllocator) {
  PFN_vkDestroyDebugUtilsMessengerEXT destroyFn =
      (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(
          instance, "vkDestroyDebugUtilsMessengerEXT");
  if (destroyFn == nullptr) return;
  destroyFn(instance, debugMessenger, pAllocator);
}
// Queue-family indices required by this sample; -1 means "not found yet".
struct QueueFamilyIndices {
  int graphicsFamily = -1;
  int presentFamily = -1;
  // True once both required queue families have been located.
  bool isComplete() { return !(graphicsFamily < 0 || presentFamily < 0); }
};
// Swap-chain support info queried from a physical device/surface pair;
// consumed by createSwapChain() to choose format, present mode, and extent.
struct SwapChainSupportDetails {
  VkSurfaceCapabilitiesKHR capabilities;
  std::vector<VkSurfaceFormatKHR> formats;
  std::vector<VkPresentModeKHR> presentModes;
};
typedef float vec2[2];  // 2-component UV type (vec3/vec4 presumably from linmath.h)

// One quad vertex: position, RGB color, and texture coordinates, stored
// interleaved. The layout must match the attribute descriptions below and
// the vertex shader's location bindings.
struct Vertex {
  vec4 pos;
  vec3 color;
  vec2 texCoord;

  // Single interleaved vertex buffer at binding 0, advancing per vertex.
  static VkVertexInputBindingDescription getBindingDescription() {
    VkVertexInputBindingDescription bindingDescription = {};
    bindingDescription.binding = 0;
    bindingDescription.stride = sizeof(Vertex);
    bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
    return bindingDescription;
  }

  // location 0 = vec4 pos, location 1 = vec3 color, location 2 = vec2 UV.
  static std::array<VkVertexInputAttributeDescription, 3>
  getAttributeDescriptions() {
    std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
    attributeDescriptions[0].binding = 0;
    attributeDescriptions[0].location = 0;
    attributeDescriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
    attributeDescriptions[0].offset = offsetof(Vertex, pos);
    attributeDescriptions[1].binding = 0;
    attributeDescriptions[1].location = 1;
    attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
    attributeDescriptions[1].offset = offsetof(Vertex, color);
    attributeDescriptions[2].binding = 0;
    attributeDescriptions[2].location = 2;
    attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
    attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
    return attributeDescriptions;
  }
};
// CPU-side mirror of the vertex shader's uniform block (MVP matrices).
// alignas(16) keeps each mat4x4 on a 16-byte boundary to match the
// GPU-side block layout.
struct UniformBufferObject {
  alignas(16) mat4x4 model;
  alignas(16) mat4x4 view;
  alignas(16) mat4x4 proj;
};
// Screen-filling quad: XY spans [-1, 1], with per-corner color and UVs;
// drawn as two indexed triangles (see `indices` below).
const std::vector<Vertex> vertices = {
    {{-1.0f, -1.0f, 0.0f, 1.0f}, {1.0f, 0.0f, 0.0f}, {0.0f, 0.0f}},
    {{1.0f, -1.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {1.0f, 0.0f}},
    {{1.0f, 1.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 1.0f}, {1.0f, 1.0f}},
    {{-1.0f, 1.0f, 0.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {0.0f, 1.0f}}};
// Two triangles, (0,1,2) and (2,3,0), covering the quad.
const std::vector<uint16_t> indices = {0, 1, 2, 2, 3, 0};
// convert floating point rgba color to 32-bit integer
// Each channel is clamped to [0,1] and scaled to a byte; the result packs
// R into bits 0-7, G into 8-15, B into 16-23, and A into 24-31.
__device__ unsigned int rgbaFloatToInt(float4 rgba) {
  unsigned int r = (unsigned int)(__saturatef(rgba.x) * 255.0f);
  unsigned int g = (unsigned int)(__saturatef(rgba.y) * 255.0f);
  unsigned int b = (unsigned int)(__saturatef(rgba.z) * 255.0f);
  unsigned int a = (unsigned int)(__saturatef(rgba.w) * 255.0f);
  return (a << 24) | (b << 16) | (g << 8) | r;
}
// Unpack a 32-bit color (R in bits 0-7 ... A in bits 24-31) into a float4
// with each channel scaled to [0, 1].
__device__ float4 rgbaIntToFloat(unsigned int c) {
  const float kInv255 = 0.003921568627f;  // == 1.0f / 255.0f
  return make_float4((c & 0xff) * kInv255, ((c >> 8) & 0xff) * kInv255,
                     ((c >> 16) & 0xff) * kInv255,
                     ((c >> 24) & 0xff) * kInv255);
}
// Box-filter radius used by the kernels (animated each frame).
int filter_radius = 14;
// Direction of the radius animation: +1 growing, -1 shrinking.
int g_nFilterSign = 1;

// This varies the filter radius, so we can see automatic animation
void varySigma() {
  filter_radius += g_nFilterSign;
  if (filter_radius > 64) {
    // Upper bound reached: clamp and start shrinking.
    filter_radius = 64;
    g_nFilterSign = -1;
    return;
  }
  if (filter_radius < 0) {
    // Lower bound reached: clamp and start growing again.
    filter_radius = 0;
    g_nFilterSign = 1;
  }
}
// row pass using texture lookups
// Horizontal box-filter pass over every mip level. One thread per image
// row (y); the launch must cover baseHeight threads. Input is read through
// a mipmapped texture object (normalized coordinates), output goes to the
// per-level destination surface. A running-sum sliding window makes each
// output pixel O(1) after the initial window is primed.
__global__ void d_boxfilter_rgba_x(hipSurfaceObject_t* dstSurfMipMapArray,
                                   hipTextureObject_t textureMipMapInput,
                                   size_t baseWidth, size_t baseHeight,
                                   size_t mipLevels, int filter_radius) {
  float scale = 1.0f / (float)((filter_radius << 1) + 1);  // 1 / window size
  unsigned int y = blockIdx.x * blockDim.x + threadIdx.x;
  if (y < baseHeight) {
    for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
      // Mip dimensions halve per level, clamped to a minimum of 1 texel.
      uint32_t width =
          (baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
      uint32_t height =
          (baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
      if (y < height && filter_radius < width) {
        // Texture coordinates are normalized; px/py convert texels -> [0,1).
        float px = 1.0 / width;
        float py = 1.0 / height;
        float4 t = make_float4(0.0f);
        // Prime the window around column 0. Negative x samples rely on the
        // texture's addressing mode (set at object creation — not visible
        // here; TODO confirm clamp vs. wrap).
        for (int x = -filter_radius; x <= filter_radius; x++) {
          t += tex2DLod<float4>(textureMipMapInput, x * px, y * py,
                                (float)mipLevelIdx);
        }
        unsigned int dataB = rgbaFloatToInt(t * scale);
        surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], 0, y);
        // Slide the window right: add the entering texel, drop the leaving
        // one, write the averaged pixel.
        for (int x = 1; x < width; x++) {
          t += tex2DLod<float4>(textureMipMapInput, (x + filter_radius) * px,
                                y * py, (float)mipLevelIdx);
          t -=
              tex2DLod<float4>(textureMipMapInput, (x - filter_radius - 1) * px,
                               y * py, (float)mipLevelIdx);
          unsigned int dataB = rgbaFloatToInt(t * scale);
          // Surface x-coordinates are byte-addressed, hence sizeof(uchar4).
          surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx],
                      x * sizeof(uchar4), y);
        }
      }
    }
  }
}
// column pass using coalesced global memory reads
// Vertical box-filter pass over every mip level. One thread per image
// column (x). Surface-to-surface: reads the row-pass result from
// srcSurfMipMapArray and writes into dstSurfMipMapArray, using the same
// O(1) sliding-window sum; the first/last pixel of the column is replicated
// for samples that would fall outside the image.
__global__ void d_boxfilter_rgba_y(hipSurfaceObject_t* dstSurfMipMapArray,
                                   hipSurfaceObject_t* srcSurfMipMapArray,
                                   size_t baseWidth, size_t baseHeight,
                                   size_t mipLevels, int filter_radius) {
  unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
  float scale = 1.0f / (float)((filter_radius << 1) + 1);  // 1 / window size
  for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
    uint32_t width =
        (baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
    uint32_t height =
        (baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
    if (x < width && height > filter_radius) {
      float4 t;
      // do left edge
      // Surface x-coordinates are byte-addressed.
      int colInBytes = x * sizeof(uchar4);
      unsigned int pixFirst = surf2Dread<unsigned int>(
          srcSurfMipMapArray[mipLevelIdx], colInBytes, 0);
      // Seed the window as if the first pixel extended filter_radius rows
      // above the image.
      t = rgbaIntToFloat(pixFirst) * filter_radius;
      for (int y = 0; (y < (filter_radius + 1)) && (y < height); y++) {
        unsigned int pix = surf2Dread<unsigned int>(
            srcSurfMipMapArray[mipLevelIdx], colInBytes, y);
        t += rgbaIntToFloat(pix);
      }
      unsigned int dataB = rgbaFloatToInt(t * scale);
      surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, 0);
      // Top edge: entering texel is real, leaving texel is the replicated
      // first pixel.
      for (int y = 1; (y < filter_radius + 1) && ((y + filter_radius) < height);
           y++) {
        unsigned int pix = surf2Dread<unsigned int>(
            srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
        t += rgbaIntToFloat(pix);
        t -= rgbaIntToFloat(pixFirst);
        dataB = rgbaFloatToInt(t * scale);
        surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
      }
      // main loop
      // Interior: both entering and leaving texels are inside the image.
      for (int y = (filter_radius + 1); y < (height - filter_radius); y++) {
        unsigned int pix = surf2Dread<unsigned int>(
            srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
        t += rgbaIntToFloat(pix);
        pix = surf2Dread<unsigned int>(srcSurfMipMapArray[mipLevelIdx],
                                       colInBytes, y - filter_radius - 1);
        t -= rgbaIntToFloat(pix);
        dataB = rgbaFloatToInt(t * scale);
        surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
      }
      // do right edge
      // Bottom edge: entering texel is the replicated last pixel.
      unsigned int pixLast = surf2Dread<unsigned int>(
          srcSurfMipMapArray[mipLevelIdx], colInBytes, height - 1);
      for (int y = height - filter_radius;
           (y < height) && ((y - filter_radius - 1) > 1); y++) {
        t += rgbaIntToFloat(pixLast);
        unsigned int pix = surf2Dread<unsigned int>(
            srcSurfMipMapArray[mipLevelIdx], colInBytes, y - filter_radius - 1);
        t -= rgbaIntToFloat(pix);
        dataB = rgbaFloatToInt(t * scale);
        surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
      }
    }
  }
}
class vulkanImageCUDA {
public:
  // Loads a 4-channel PPM image into host memory (image_data) and records
  // its dimensions in imageWidth/imageHeight. Exits the process on failure.
  void loadImageData(const std::string& filename) {
    // load image (needed so we can get the width and height before we create
    // the window
    char* image_path =
        sdkFindFilePath(filename.c_str(), execution_path.c_str());
    if (image_path == 0) {
      printf("Error finding image file '%s'\n", filename.c_str());
      exit(EXIT_FAILURE);
    }
    sdkLoadPPM4(image_path, (unsigned char**)&image_data, &imageWidth,
                &imageHeight);
    if (!image_data) {
      printf("Error opening file '%s'\n", image_path);
      exit(EXIT_FAILURE);
    }
    printf("Loaded '%s', %d x %d pixels\n", image_path, imageWidth,
           imageHeight);
  }
  // Top-level driver: window + Vulkan + CUDA setup, render loop, teardown.
  void run() {
    initWindow();
    initVulkan();
    initCuda();
    mainLoop();
    cleanup();
  }
 private:
  // --- Windowing / core Vulkan objects ---
  GLFWwindow* window;
  VkInstance instance;
  VkDebugUtilsMessengerEXT debugMessenger;
  VkSurfaceKHR surface;
  VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
  VkDevice device;
  // Driver-reported device UUID, used to match the Vulkan GPU to a CUDA
  // device (see pickPhysicalDevice / setCudaVkDevice).
  uint8_t vkDeviceUUID[VK_UUID_SIZE];
  VkQueue graphicsQueue;
  VkQueue presentQueue;
  // --- Swap chain and per-swap-image resources ---
  VkSwapchainKHR swapChain;
  std::vector<VkImage> swapChainImages;
  VkFormat swapChainImageFormat;
  VkExtent2D swapChainExtent;
  std::vector<VkImageView> swapChainImageViews;
  std::vector<VkFramebuffer> swapChainFramebuffers;
  // --- Pipeline / descriptors / commands ---
  VkRenderPass renderPass;
  VkDescriptorSetLayout descriptorSetLayout;
  VkPipelineLayout pipelineLayout;
  VkPipeline graphicsPipeline;
  VkCommandPool commandPool;
  // --- Texture shared with CUDA and geometry/uniform buffers ---
  VkImage textureImage;
  VkDeviceMemory textureImageMemory;
  VkImageView textureImageView;
  VkSampler textureSampler;
  VkBuffer vertexBuffer;
  VkDeviceMemory vertexBufferMemory;
  VkBuffer indexBuffer;
  VkDeviceMemory indexBufferMemory;
  std::vector<VkBuffer> uniformBuffers;
  std::vector<VkDeviceMemory> uniformBuffersMemory;
  VkDescriptorPool descriptorPool;
  std::vector<VkDescriptorSet> descriptorSets;
  std::vector<VkCommandBuffer> commandBuffers;
  // --- Per-frame synchronization (MAX_FRAMES in flight) ---
  std::vector<VkSemaphore> imageAvailableSemaphores;
  std::vector<VkSemaphore> renderFinishedSemaphores;
  // Cross-API semaphores: CUDA->Vulkan and Vulkan->CUDA handoff.
  VkSemaphore cudaUpdateVkSemaphore, vkUpdateCudaSemaphore;
  std::vector<VkFence> inFlightFences;
  size_t currentFrame = 0;
  bool framebufferResized = false;
  // Extension entry points resolved at runtime (platform-dependent).
#ifdef _WIN64
  PFN_vkGetMemoryWin32HandleKHR fpGetMemoryWin32HandleKHR;
  PFN_vkGetSemaphoreWin32HandleKHR fpGetSemaphoreWin32HandleKHR;
#else
  PFN_vkGetMemoryFdKHR fpGetMemoryFdKHR = NULL;
  PFN_vkGetSemaphoreFdKHR fpGetSemaphoreFdKHR = NULL;
#endif
  PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2;
  // --- Source image loaded from disk ---
  unsigned int* image_data = NULL;
  unsigned int imageWidth, imageHeight;
  unsigned int mipLevels;
  size_t totalImageMemSize;
  // CUDA objects
  cudaExternalMemory_t cudaExtMemImageBuffer;
  hipMipmappedArray_t cudaMipmappedImageArray, cudaMipmappedImageArrayTemp,
      cudaMipmappedImageArrayOrig;
  std::vector<hipSurfaceObject_t> surfaceObjectList, surfaceObjectListTemp;
  hipSurfaceObject_t *d_surfaceObjectList, *d_surfaceObjectListTemp;
  hipTextureObject_t textureObjMipMapInput;
  cudaExternalSemaphore_t cudaExtCudaUpdateVkSemaphore;
  cudaExternalSemaphore_t cudaExtVkUpdateCudaSemaphore;
  hipStream_t streamToRun;
  // Creates the fixed-size GLFW window and hooks up the resize callback.
  void initWindow() {
    glfwInit();
    // GLFW_NO_API: no OpenGL context — rendering/presentation is via Vulkan.
    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
    window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Image CUDA Box Filter",
                              nullptr, nullptr);
    // Stash `this` so the static resize callback can reach the instance.
    glfwSetWindowUserPointer(window, this);
    glfwSetFramebufferSizeCallback(window, framebufferResizeCallback);
  }
static void framebufferResizeCallback(GLFWwindow* window, int width,
int height) {
auto app =
reinterpret_cast<vulkanImageCUDA*>(glfwGetWindowUserPointer(window));
app->framebufferResized = true;
}
  // Builds the entire Vulkan stack in dependency order: instance -> surface
  // -> devices -> swap chain -> pipeline -> resources -> command buffers ->
  // synchronization objects (including the CUDA-interop semaphores).
  void initVulkan() {
    createInstance();
    setupDebugMessenger();
    createSurface();
    pickPhysicalDevice();
    createLogicalDevice();
    getKhrExtensionsFn();
    createSwapChain();
    createImageViews();
    createRenderPass();
    createDescriptorSetLayout();
    createGraphicsPipeline();
    createFramebuffers();
    createCommandPool();
    createTextureImage();
    createTextureImageView();
    createTextureSampler();
    createVertexBuffer();
    createIndexBuffer();
    createUniformBuffers();
    createDescriptorPool();
    createDescriptorSets();
    createCommandBuffers();
    createSyncObjects();
    // Exportable semaphores shared with CUDA.
    createSyncObjectsExt();
  }
  // CUDA-side setup: bind to the same GPU Vulkan selected, create the work
  // stream, then import the Vulkan image memory and semaphores into CUDA.
  void initCuda() {
    setCudaVkDevice();
    checkCudaErrors(hipStreamCreate(&streamToRun));
    cudaVkImportImageMem();
    cudaVkImportSemaphore();
  }
  // Render loop. The uniform buffer (MVP matrices) is filled once up front;
  // all per-frame work happens inside drawFrame().
  void mainLoop() {
    updateUniformBuffer();
    while (!glfwWindowShouldClose(window)) {
      glfwPollEvents();
      drawFrame();
    }
    // Let all queued GPU work drain before teardown begins.
    vkDeviceWaitIdle(device);
  }
  // Destroys everything that must be rebuilt on a window resize (see
  // recreateSwapChain): framebuffers, command buffers, pipeline, render
  // pass, image views, the swap chain, per-swap-image uniform buffers, and
  // the descriptor pool.
  void cleanupSwapChain() {
    for (auto framebuffer : swapChainFramebuffers) {
      vkDestroyFramebuffer(device, framebuffer, nullptr);
    }
    vkFreeCommandBuffers(device, commandPool,
                         static_cast<uint32_t>(commandBuffers.size()),
                         commandBuffers.data());
    vkDestroyPipeline(device, graphicsPipeline, nullptr);
    vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
    vkDestroyRenderPass(device, renderPass, nullptr);
    for (auto imageView : swapChainImageViews) {
      vkDestroyImageView(device, imageView, nullptr);
    }
    vkDestroySwapchainKHR(device, swapChain, nullptr);
    // One uniform buffer was allocated per swap-chain image.
    for (size_t i = 0; i < swapChainImages.size(); i++) {
      vkDestroyBuffer(device, uniformBuffers[i], nullptr);
      vkFreeMemory(device, uniformBuffersMemory[i], nullptr);
    }
    vkDestroyDescriptorPool(device, descriptorPool, nullptr);
  }
void cleanup() {
cleanupSwapChain();
vkDestroySampler(device, textureSampler, nullptr);
vkDestroyImageView(device, textureImageView, nullptr);
for (int i = 0; i < mipLevels; i++) {
checkCudaErrors(hipDestroySurfaceObject(surfaceObjectList[i]));
checkCudaErrors(hipDestroySurfaceObject(surfaceObjectListTemp[i]));
}
checkCudaErrors(hipFree(d_surfaceObjectList));
checkCudaErrors(hipFree(d_surfaceObjectListTemp));
checkCudaErrors(hipFreeMipmappedArray(cudaMipmappedImageArrayTemp));
checkCudaErrors(hipFreeMipmappedArray(cudaMipmappedImageArrayOrig));
checkCudaErrors(hipFreeMipmappedArray(cudaMipmappedImageArray));
checkCudaErrors(hipDestroyTextureObject(textureObjMipMapInput));
checkCudaErrors(cudaDestroyExternalMemory(cudaExtMemImageBuffer));
checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtCudaUpdateVkSemaphore));
checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtVkUpdateCudaSemaphore));
vkDestroyImage(device, textureImage, nullptr);
vkFreeMemory(device, textureImageMemory, nullptr);
vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
vkDestroyBuffer(device, indexBuffer, nullptr);
vkFreeMemory(device, indexBufferMemory, nullptr);
vkDestroyBuffer(device, vertexBuffer, nullptr);
vkFreeMemory(device, vertexBufferMemory, nullptr);
for (size_t i = 0; i < MAX_FRAMES; i++) {
vkDestroySemaphore(device, renderFinishedSemaphores[i], nullptr);
vkDestroySemaphore(device, imageAvailableSemaphores[i], nullptr);
vkDestroyFence(device, inFlightFences[i], nullptr);
}
vkDestroyCommandPool(device, commandPool, nullptr);
vkDestroyDevice(device, nullptr);
if (enableValidationLayers) {
DestroyDebugUtilsMessengerEXT(instance, debugMessenger, nullptr);
}
vkDestroySurfaceKHR(instance, surface, nullptr);
vkDestroyInstance(instance, nullptr);
glfwDestroyWindow(window);
glfwTerminate();
}
  // Tears down and rebuilds all size-dependent objects after a window
  // resize. Blocks while the window is minimized (0x0 framebuffer).
  void recreateSwapChain() {
    int width = 0, height = 0;
    while (width == 0 || height == 0) {
      glfwGetFramebufferSize(window, &width, &height);
      glfwWaitEvents();
    }
    // Don't destroy resources the GPU may still be using.
    vkDeviceWaitIdle(device);
    cleanupSwapChain();
    createSwapChain();
    createImageViews();
    createRenderPass();
    createGraphicsPipeline();
    createFramebuffers();
    createUniformBuffers();
    createDescriptorPool();
    createDescriptorSets();
    createCommandBuffers();
  }
  // Creates the VkInstance (optionally with validation layers and a debug
  // messenger chained via pNext) and resolves the instance-level extension
  // entry points needed for device-UUID queries and memory-handle export.
  void createInstance() {
    if (enableValidationLayers && !checkValidationLayerSupport()) {
      throw std::runtime_error(
          "validation layers requested, but not available!");
    }
    VkApplicationInfo appInfo = {};
    appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
    appInfo.pApplicationName = "Vulkan Image CUDA Interop";
    appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
    appInfo.pEngineName = "No Engine";
    appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
    appInfo.apiVersion = VK_API_VERSION_1_0;
    VkInstanceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    createInfo.pApplicationInfo = &appInfo;
    auto extensions = getRequiredExtensions();
    createInfo.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
    createInfo.ppEnabledExtensionNames = extensions.data();
    // Declared in the outer scope so it outlives the vkCreateInstance call
    // when chained via pNext.
    VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo;
    if (enableValidationLayers) {
      createInfo.enabledLayerCount =
          static_cast<uint32_t>(validationLayers.size());
      createInfo.ppEnabledLayerNames = validationLayers.data();
      populateDebugMessengerCreateInfo(debugCreateInfo);
      createInfo.pNext = (VkDebugUtilsMessengerCreateInfoEXT*)&debugCreateInfo;
    } else {
      createInfo.enabledLayerCount = 0;
      createInfo.pNext = nullptr;
    }
    if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
      throw std::runtime_error("failed to create instance!");
    }
    // Needed later to read the physical-device UUID for CUDA matching.
    fpGetPhysicalDeviceProperties2 =
        (PFN_vkGetPhysicalDeviceProperties2)vkGetInstanceProcAddr(
            instance, "vkGetPhysicalDeviceProperties2");
    if (fpGetPhysicalDeviceProperties2 == NULL) {
      throw std::runtime_error(
          "Vulkan: Proc address for \"vkGetPhysicalDeviceProperties2KHR\" not "
          "found.\n");
    }
#ifdef _WIN64
    fpGetMemoryWin32HandleKHR =
        (PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(
            instance, "vkGetMemoryWin32HandleKHR");
    if (fpGetMemoryWin32HandleKHR == NULL) {
      throw std::runtime_error(
          "Vulkan: Proc address for \"vkGetMemoryWin32HandleKHR\" not "
          "found.\n");
    }
#else
    fpGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
        instance, "vkGetMemoryFdKHR");
    if (fpGetMemoryFdKHR == NULL) {
      throw std::runtime_error(
          "Vulkan: Proc address for \"vkGetMemoryFdKHR\" not found.\n");
    } else {
      std::cout << "Vulkan proc address for vkGetMemoryFdKHR - "
                << fpGetMemoryFdKHR << std::endl;
    }
#endif
  }
  // Fills a debug-messenger create-info that reports verbose/warning/error
  // severities across all message types, routed to debugCallback.
  void populateDebugMessengerCreateInfo(
      VkDebugUtilsMessengerCreateInfoEXT& createInfo) {
    createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
    createInfo.messageSeverity =
        VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT |
        VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
        VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
    createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
                             VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
                             VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
    createInfo.pfnUserCallback = debugCallback;
  }
  // Installs the validation-layer debug messenger; no-op unless validation
  // layers are enabled.
  void setupDebugMessenger() {
    if (!enableValidationLayers) return;
    VkDebugUtilsMessengerCreateInfoEXT createInfo;
    populateDebugMessengerCreateInfo(createInfo);
    if (CreateDebugUtilsMessengerEXT(instance, &createInfo, nullptr,
                                     &debugMessenger) != VK_SUCCESS) {
      throw std::runtime_error("failed to set up debug messenger!");
    }
  }
void createSurface() {
if (glfwCreateWindowSurface(instance, window, nullptr, &surface) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create window surface!");
}
}
  // Picks the first suitable GPU and captures its driver-reported UUID
  // (vkDeviceUUID), which setCudaVkDevice() later matches against CUDA
  // devices so both APIs use the same physical GPU.
  void pickPhysicalDevice() {
    uint32_t deviceCount = 0;
    vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
    if (deviceCount == 0) {
      throw std::runtime_error("failed to find GPUs with Vulkan support!");
    }
    std::vector<VkPhysicalDevice> devices(deviceCount);
    vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
    for (const auto& device : devices) {
      if (isDeviceSuitable(device)) {
        physicalDevice = device;
        break;
      }
    }
    if (physicalDevice == VK_NULL_HANDLE) {
      throw std::runtime_error("failed to find a suitable GPU!");
    }
    std::cout << "Selected physical device = " << physicalDevice << std::endl;
    // Chain VkPhysicalDeviceIDProperties to retrieve the device UUID.
    VkPhysicalDeviceIDProperties vkPhysicalDeviceIDProperties = {};
    vkPhysicalDeviceIDProperties.sType =
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
    vkPhysicalDeviceIDProperties.pNext = NULL;
    VkPhysicalDeviceProperties2 vkPhysicalDeviceProperties2 = {};
    vkPhysicalDeviceProperties2.sType =
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
    vkPhysicalDeviceProperties2.pNext = &vkPhysicalDeviceIDProperties;
    fpGetPhysicalDeviceProperties2(physicalDevice,
                                   &vkPhysicalDeviceProperties2);
    memcpy(vkDeviceUUID, vkPhysicalDeviceIDProperties.deviceUUID,
           sizeof(vkDeviceUUID));
  }
  // Resolves the device-level entry points used to export semaphore handles
  // (Win32 handle or POSIX fd) for CUDA import. Must run after
  // createLogicalDevice() since these are device procs.
  void getKhrExtensionsFn() {
#ifdef _WIN64
    fpGetSemaphoreWin32HandleKHR =
        (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(
            device, "vkGetSemaphoreWin32HandleKHR");
    if (fpGetSemaphoreWin32HandleKHR == NULL) {
      throw std::runtime_error(
          "Vulkan: Proc address for \"vkGetSemaphoreWin32HandleKHR\" not "
          "found.\n");
    }
#else
    fpGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(
        device, "vkGetSemaphoreFdKHR");
    if (fpGetSemaphoreFdKHR == NULL) {
      throw std::runtime_error(
          "Vulkan: Proc address for \"vkGetSemaphoreFdKHR\" not found.\n");
    }
#endif
  }
int setCudaVkDevice() {
int current_device = 0;
int device_count = 0;
int devices_prohibited = 0;
hipDeviceProp_t deviceProp;
checkCudaErrors(hipGetDeviceCount(&device_count));
if (device_count == 0) {
fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
// Find the GPU which is selected by Vulkan
while (current_device < device_count) {
hipGetDeviceProperties(&deviceProp, current_device);
if ((deviceProp.computeMode != hipComputeModeProhibited)) {
// Compare the cuda device UUID with vulkan UUID
int ret = memcmp(&deviceProp.uuid, &vkDeviceUUID, VK_UUID_SIZE);
if (ret == 0) {
checkCudaErrors(hipSetDevice(current_device));
checkCudaErrors(hipGetDeviceProperties(&deviceProp, current_device));
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
current_device, deviceProp.name, deviceProp.major,
deviceProp.minor);
return current_device;
}
} else {
devices_prohibited++;
}
current_device++;
}
if (devices_prohibited == device_count) {
fprintf(stderr,
"CUDA error:"
" No Vulkan-CUDA Interop capable GPU found.\n");
exit(EXIT_FAILURE);
}
return -1;
}
  // Creates the logical device with one queue per unique queue family
  // (graphics and present may coincide) and enables the swap-chain and
  // external memory/semaphore extensions listed in deviceExtensions.
  void createLogicalDevice() {
    QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
    std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
    // std::set deduplicates when graphics and present share a family.
    std::set<int> uniqueQueueFamilies = {indices.graphicsFamily,
                                         indices.presentFamily};
    float queuePriority = 1.0f;
    for (int queueFamily : uniqueQueueFamilies) {
      VkDeviceQueueCreateInfo queueCreateInfo = {};
      queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
      queueCreateInfo.queueFamilyIndex = queueFamily;
      queueCreateInfo.queueCount = 1;
      queueCreateInfo.pQueuePriorities = &queuePriority;
      queueCreateInfos.push_back(queueCreateInfo);
    }
    VkPhysicalDeviceFeatures deviceFeatures = {};
    VkDeviceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    createInfo.pQueueCreateInfos = queueCreateInfos.data();
    createInfo.queueCreateInfoCount = queueCreateInfos.size();
    createInfo.pEnabledFeatures = &deviceFeatures;
    std::vector<const char*> enabledExtensionNameList;
    for (int i = 0; i < deviceExtensions.size(); i++) {
      enabledExtensionNameList.push_back(deviceExtensions[i]);
    }
    if (enableValidationLayers) {
      createInfo.enabledLayerCount =
          static_cast<uint32_t>(validationLayers.size());
      createInfo.ppEnabledLayerNames = validationLayers.data();
    } else {
      createInfo.enabledLayerCount = 0;
    }
    createInfo.enabledExtensionCount =
        static_cast<uint32_t>(enabledExtensionNameList.size());
    createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
    if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) !=
        VK_SUCCESS) {
      throw std::runtime_error("failed to create logical device!");
    }
    vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
    vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
  }
  // Creates the swap chain: picks surface format / present mode / extent
  // from the queried capabilities, requests one image more than the minimum
  // (bounded by the maximum), and retrieves the resulting image handles.
  void createSwapChain() {
    SwapChainSupportDetails swapChainSupport =
        querySwapChainSupport(physicalDevice);
    VkSurfaceFormatKHR surfaceFormat =
        chooseSwapSurfaceFormat(swapChainSupport.formats);
    VkPresentModeKHR presentMode =
        chooseSwapPresentMode(swapChainSupport.presentModes);
    VkExtent2D extent = chooseSwapExtent(swapChainSupport.capabilities);
    // minImageCount + 1 reduces waiting on the driver; 0 maxImageCount
    // means "no upper limit".
    uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1;
    if (swapChainSupport.capabilities.maxImageCount > 0 &&
        imageCount > swapChainSupport.capabilities.maxImageCount) {
      imageCount = swapChainSupport.capabilities.maxImageCount;
    }
    VkSwapchainCreateInfoKHR createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
    createInfo.surface = surface;
    createInfo.minImageCount = imageCount;
    createInfo.imageFormat = surfaceFormat.format;
    createInfo.imageColorSpace = surfaceFormat.colorSpace;
    createInfo.imageExtent = extent;
    createInfo.imageArrayLayers = 1;
    createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
    uint32_t queueFamilyIndices[] = {(uint32_t)indices.graphicsFamily,
                                     (uint32_t)indices.presentFamily};
    // Concurrent sharing avoids explicit ownership transfers when graphics
    // and present live in different queue families.
    if (indices.graphicsFamily != indices.presentFamily) {
      createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
      createInfo.queueFamilyIndexCount = 2;
      createInfo.pQueueFamilyIndices = queueFamilyIndices;
    } else {
      createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
    }
    createInfo.preTransform = swapChainSupport.capabilities.currentTransform;
    createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
    createInfo.presentMode = presentMode;
    createInfo.clipped = VK_TRUE;
    if (vkCreateSwapchainKHR(device, &createInfo, nullptr, &swapChain) !=
        VK_SUCCESS) {
      throw std::runtime_error("failed to create swap chain!");
    }
    // The implementation may create more images than requested; query the
    // actual count before fetching handles.
    vkGetSwapchainImagesKHR(device, swapChain, &imageCount, nullptr);
    swapChainImages.resize(imageCount);
    vkGetSwapchainImagesKHR(device, swapChain, &imageCount,
                            swapChainImages.data());
    swapChainImageFormat = surfaceFormat.format;
    swapChainExtent = extent;
  }
void createImageViews() {
swapChainImageViews.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
swapChainImageViews[i] =
createImageView(swapChainImages[i], swapChainImageFormat);
}
}
  // Single-subpass render pass with one color attachment: cleared on load,
  // stored on completion, and transitioned to present layout at the end.
  void createRenderPass() {
    VkAttachmentDescription colorAttachment = {};
    colorAttachment.format = swapChainImageFormat;
    colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
    colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    VkAttachmentReference colorAttachmentRef = {};
    colorAttachmentRef.attachment = 0;
    colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    VkSubpassDescription subpass = {};
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &colorAttachmentRef;
    // External dependency: wait for the color-attachment-output stage so
    // rendering doesn't start before the image is available.
    VkSubpassDependency dependency = {};
    dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
    dependency.dstSubpass = 0;
    dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependency.srcAccessMask = 0;
    dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                               VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    VkRenderPassCreateInfo renderPassInfo = {};
    renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    renderPassInfo.attachmentCount = 1;
    renderPassInfo.pAttachments = &colorAttachment;
    renderPassInfo.subpassCount = 1;
    renderPassInfo.pSubpasses = &subpass;
    renderPassInfo.dependencyCount = 1;
    renderPassInfo.pDependencies = &dependency;
    if (vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass) !=
        VK_SUCCESS) {
      throw std::runtime_error("failed to create render pass!");
    }
  }
void createDescriptorSetLayout() {
VkDescriptorSetLayoutBinding uboLayoutBinding = {};
uboLayoutBinding.binding = 0;
uboLayoutBinding.descriptorCount = 1;
uboLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
uboLayoutBinding.pImmutableSamplers = nullptr;
uboLayoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
VkDescriptorSetLayoutBinding samplerLayoutBinding = {};
samplerLayoutBinding.binding = 1;
samplerLayoutBinding.descriptorCount = 1;
samplerLayoutBinding.descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
samplerLayoutBinding.pImmutableSamplers = nullptr;
samplerLayoutBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
std::array<VkDescriptorSetLayoutBinding, 2> bindings = {
uboLayoutBinding, samplerLayoutBinding};
VkDescriptorSetLayoutCreateInfo layoutInfo = {};
layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
layoutInfo.bindingCount = static_cast<uint32_t>(bindings.size());
layoutInfo.pBindings = bindings.data();
if (vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr,
&descriptorSetLayout) != VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor set layout!");
}
}
  // Builds the single graphics pipeline: vertex + fragment shader stages,
  // fixed-function state for a back-face-culled triangle list covering the
  // full swap-chain extent, no blending and no depth/stencil testing. Also
  // creates pipelineLayout (one descriptor set layout, no push constants).
  // The temporary shader modules are destroyed once the pipeline exists.
  void createGraphicsPipeline() {
    // NOTE(review): these byte blobs are passed straight to
    // createShaderModule(), which implies "shader.vert"/"shader.frag" contain
    // compiled SPIR-V rather than GLSL source — confirm the build step
    // produces them that way.
    auto vertShaderCode = readFile("shader.vert");
    auto fragShaderCode = readFile("shader.frag");
    VkShaderModule vertShaderModule = createShaderModule(vertShaderCode);
    VkShaderModule fragShaderModule = createShaderModule(fragShaderCode);
    // Shader stages (entry point "main" in both modules).
    VkPipelineShaderStageCreateInfo vertShaderStageInfo = {};
    vertShaderStageInfo.sType =
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
    vertShaderStageInfo.module = vertShaderModule;
    vertShaderStageInfo.pName = "main";
    VkPipelineShaderStageCreateInfo fragShaderStageInfo = {};
    fragShaderStageInfo.sType =
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
    fragShaderStageInfo.module = fragShaderModule;
    fragShaderStageInfo.pName = "main";
    VkPipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo,
                                                      fragShaderStageInfo};
    // Vertex input layout comes from the Vertex struct's static descriptions.
    VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
    vertexInputInfo.sType =
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
    auto bindingDescription = Vertex::getBindingDescription();
    auto attributeDescriptions = Vertex::getAttributeDescriptions();
    vertexInputInfo.vertexBindingDescriptionCount = 1;
    vertexInputInfo.vertexAttributeDescriptionCount =
        static_cast<uint32_t>(attributeDescriptions.size());
    vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
    vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
    VkPipelineInputAssemblyStateCreateInfo inputAssembly = {};
    inputAssembly.sType =
        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
    inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
    inputAssembly.primitiveRestartEnable = VK_FALSE;
    // Static viewport/scissor covering the whole swap-chain extent; a window
    // resize therefore requires pipeline recreation.
    VkViewport viewport = {};
    viewport.x = 0.0f;
    viewport.y = 0.0f;
    viewport.width = (float)swapChainExtent.width;
    viewport.height = (float)swapChainExtent.height;
    viewport.minDepth = 0.0f;
    viewport.maxDepth = 1.0f;
    VkRect2D scissor = {};
    scissor.offset = {0, 0};
    scissor.extent = swapChainExtent;
    VkPipelineViewportStateCreateInfo viewportState = {};
    viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
    viewportState.viewportCount = 1;
    viewportState.pViewports = &viewport;
    viewportState.scissorCount = 1;
    viewportState.pScissors = &scissor;
    // Filled, back-face-culled rasterization; counter-clockwise front faces.
    VkPipelineRasterizationStateCreateInfo rasterizer = {};
    rasterizer.sType =
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rasterizer.depthClampEnable = VK_FALSE;
    rasterizer.rasterizerDiscardEnable = VK_FALSE;
    rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
    rasterizer.lineWidth = 1.0f;
    rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
    rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
    rasterizer.depthBiasEnable = VK_FALSE;
    // No multisampling.
    VkPipelineMultisampleStateCreateInfo multisampling = {};
    multisampling.sType =
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    multisampling.sampleShadingEnable = VK_FALSE;
    multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    // Blending disabled; fragment output overwrites all four channels.
    VkPipelineColorBlendAttachmentState colorBlendAttachment = {};
    colorBlendAttachment.colorWriteMask =
        VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
        VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
    colorBlendAttachment.blendEnable = VK_FALSE;
    VkPipelineColorBlendStateCreateInfo colorBlending = {};
    colorBlending.sType =
        VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
    colorBlending.logicOpEnable = VK_FALSE;
    colorBlending.logicOp = VK_LOGIC_OP_COPY;
    colorBlending.attachmentCount = 1;
    colorBlending.pAttachments = &colorBlendAttachment;
    colorBlending.blendConstants[0] = 0.0f;
    colorBlending.blendConstants[1] = 0.0f;
    colorBlending.blendConstants[2] = 0.0f;
    colorBlending.blendConstants[3] = 0.0f;
    // Pipeline layout: the one descriptor set layout, no push constants.
    VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
    pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pipelineLayoutInfo.setLayoutCount = 1;
    pipelineLayoutInfo.pSetLayouts = &descriptorSetLayout;
    if (vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr,
                               &pipelineLayout) != VK_SUCCESS) {
      throw std::runtime_error("failed to create pipeline layout!");
    }
    // Assemble the pipeline from the state blocks above (all referenced by
    // pointer, so the locals must stay alive until this call returns).
    VkGraphicsPipelineCreateInfo pipelineInfo = {};
    pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    pipelineInfo.stageCount = 2;
    pipelineInfo.pStages = shaderStages;
    pipelineInfo.pVertexInputState = &vertexInputInfo;
    pipelineInfo.pInputAssemblyState = &inputAssembly;
    pipelineInfo.pViewportState = &viewportState;
    pipelineInfo.pRasterizationState = &rasterizer;
    pipelineInfo.pMultisampleState = &multisampling;
    pipelineInfo.pColorBlendState = &colorBlending;
    pipelineInfo.layout = pipelineLayout;
    pipelineInfo.renderPass = renderPass;
    pipelineInfo.subpass = 0;
    pipelineInfo.basePipelineHandle = VK_NULL_HANDLE;
    if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipelineInfo,
                                  nullptr, &graphicsPipeline) != VK_SUCCESS) {
      throw std::runtime_error("failed to create graphics pipeline!");
    }
    // Modules are compiled into the pipeline; safe to destroy now.
    vkDestroyShaderModule(device, fragShaderModule, nullptr);
    vkDestroyShaderModule(device, vertShaderModule, nullptr);
  }
void createFramebuffers() {
swapChainFramebuffers.resize(swapChainImageViews.size());
for (size_t i = 0; i < swapChainImageViews.size(); i++) {
VkImageView attachments[] = {swapChainImageViews[i]};
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.renderPass = renderPass;
framebufferInfo.attachmentCount = 1;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = swapChainExtent.width;
framebufferInfo.height = swapChainExtent.height;
framebufferInfo.layers = 1;
if (vkCreateFramebuffer(device, &framebufferInfo, nullptr,
&swapChainFramebuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to create framebuffer!");
}
}
}
void createCommandPool() {
QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice);
VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
if (vkCreateCommandPool(device, &poolInfo, nullptr, &commandPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create graphics command pool!");
}
}
void createTextureImage() {
VkDeviceSize imageSize = imageWidth * imageHeight * 4;
mipLevels = static_cast<uint32_t>(
::floor(std::log2(::max(imageWidth, imageHeight)))) +
1;
printf("mipLevels = %d\n", mipLevels);
if (!image_data) {
throw std::runtime_error("failed to load texture image!");
}
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(imageSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, imageSize, 0, &data);
memcpy(data, image_data, static_cast<size_t>(imageSize));
vkUnmapMemory(device, stagingBufferMemory);
// VK_FORMAT_R8G8B8A8_UNORM changed to VK_FORMAT_R8G8B8A8_UINT
createImage(
imageWidth, imageHeight, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, textureImage, textureImageMemory);
transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
copyBufferToImage(stagingBuffer, textureImage,
static_cast<uint32_t>(imageWidth),
static_cast<uint32_t>(imageHeight));
transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
generateMipmaps(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
  // Generates the full mip chain of `image` on the GPU by repeatedly blitting
  // each level into the next (half-sized) level, leaving every level in
  // SHADER_READ_ONLY_OPTIMAL. Expects all mip levels to be in
  // TRANSFER_DST_OPTIMAL on entry. Throws if `imageFormat` does not support
  // linear-filtered sampling with optimal tiling.
  void generateMipmaps(VkImage image, VkFormat imageFormat) {
    VkFormatProperties formatProperties;
    vkGetPhysicalDeviceFormatProperties(physicalDevice, imageFormat,
                                        &formatProperties);
    if (!(formatProperties.optimalTilingFeatures &
          VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) {
      throw std::runtime_error(
          "texture image format does not support linear blitting!");
    }
    VkCommandBuffer commandBuffer = beginSingleTimeCommands();
    // One reusable barrier; only the per-level fields change below.
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.image = image;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount = 1;
    barrier.subresourceRange.levelCount = 1;
    int32_t mipWidth = imageWidth;
    int32_t mipHeight = imageHeight;
    for (uint32_t i = 1; i < mipLevels; i++) {
      // Make level i-1 (just written) readable as the blit source.
      barrier.subresourceRange.baseMipLevel = i - 1;
      barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
      barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
      barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
      barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
      vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
                           nullptr, 1, &barrier);
      // Downsample level i-1 into level i with a linear-filtered blit.
      VkImageBlit blit = {};
      blit.srcOffsets[0] = {0, 0, 0};
      blit.srcOffsets[1] = {mipWidth, mipHeight, 1};
      blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
      blit.srcSubresource.mipLevel = i - 1;
      blit.srcSubresource.baseArrayLayer = 0;
      blit.srcSubresource.layerCount = 1;
      blit.dstOffsets[0] = {0, 0, 0};
      // Clamp to 1 so narrow levels of non-square images stay valid.
      blit.dstOffsets[1] = {mipWidth > 1 ? mipWidth / 2 : 1,
                            mipHeight > 1 ? mipHeight / 2 : 1, 1};
      blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
      blit.dstSubresource.mipLevel = i;
      blit.dstSubresource.baseArrayLayer = 0;
      blit.dstSubresource.layerCount = 1;
      vkCmdBlitImage(commandBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                     image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit,
                     VK_FILTER_LINEAR);
      // Level i-1 is final; hand it to the fragment shader stage.
      barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
      barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
      barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
      barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
      vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
                           0, nullptr, 1, &barrier);
      if (mipWidth > 1) mipWidth /= 2;
      if (mipHeight > 1) mipHeight /= 2;
    }
    // The last level was only ever a blit destination; transition it too.
    barrier.subresourceRange.baseMipLevel = mipLevels - 1;
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
                         0, nullptr, 1, &barrier);
    endSingleTimeCommands(commandBuffer);
  }
#ifdef _WIN64 // For windows
  // Exports a Win32 handle to textureImageMemory so CUDA can import it.
  // NOTE(review): fpGetMemoryWin32HandleKHR's result is not checked; on
  // failure `handle` is returned uninitialized — consider checking.
  HANDLE getVkImageMemHandle(
      VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
    HANDLE handle;
    VkMemoryGetWin32HandleInfoKHR vkMemoryGetWin32HandleInfoKHR = {};
    vkMemoryGetWin32HandleInfoKHR.sType =
        VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
    vkMemoryGetWin32HandleInfoKHR.pNext = NULL;
    vkMemoryGetWin32HandleInfoKHR.memory = textureImageMemory;
    vkMemoryGetWin32HandleInfoKHR.handleType =
        (VkExternalMemoryHandleTypeFlagBitsKHR)externalMemoryHandleType;
    fpGetMemoryWin32HandleKHR(device, &vkMemoryGetWin32HandleInfoKHR, &handle);
    return handle;
  }
  // Exports a Win32 handle for the given Vulkan semaphore so CUDA can import
  // it. Same caveat as above: the extension call's result is not checked.
  HANDLE getVkSemaphoreHandle(
      VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
      VkSemaphore& semVkCuda) {
    HANDLE handle;
    VkSemaphoreGetWin32HandleInfoKHR vulkanSemaphoreGetWin32HandleInfoKHR = {};
    vulkanSemaphoreGetWin32HandleInfoKHR.sType =
        VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
    vulkanSemaphoreGetWin32HandleInfoKHR.pNext = NULL;
    vulkanSemaphoreGetWin32HandleInfoKHR.semaphore = semVkCuda;
    vulkanSemaphoreGetWin32HandleInfoKHR.handleType =
        externalSemaphoreHandleType;
    fpGetSemaphoreWin32HandleKHR(device, &vulkanSemaphoreGetWin32HandleInfoKHR,
                                 &handle);
    return handle;
  }
#else
  // Exports an opaque POSIX file descriptor to textureImageMemory so CUDA can
  // import it; the caller takes ownership of the fd. Returns -1 for any
  // handle type other than OPAQUE_FD.
  int getVkImageMemHandle(
      VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
    if (externalMemoryHandleType ==
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
      int fd;
      VkMemoryGetFdInfoKHR vkMemoryGetFdInfoKHR = {};
      vkMemoryGetFdInfoKHR.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
      vkMemoryGetFdInfoKHR.pNext = NULL;
      vkMemoryGetFdInfoKHR.memory = textureImageMemory;
      vkMemoryGetFdInfoKHR.handleType =
          VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
      fpGetMemoryFdKHR(device, &vkMemoryGetFdInfoKHR, &fd);
      return fd;
    }
    return -1;
  }
  // Exports an opaque fd for the given Vulkan semaphore; returns -1 for any
  // handle type other than OPAQUE_FD. The caller takes ownership of the fd.
  int getVkSemaphoreHandle(
      VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
      VkSemaphore& semVkCuda) {
    if (externalSemaphoreHandleType ==
        VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
      int fd;
      VkSemaphoreGetFdInfoKHR vulkanSemaphoreGetFdInfoKHR = {};
      vulkanSemaphoreGetFdInfoKHR.sType =
          VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
      vulkanSemaphoreGetFdInfoKHR.pNext = NULL;
      vulkanSemaphoreGetFdInfoKHR.semaphore = semVkCuda;
      vulkanSemaphoreGetFdInfoKHR.handleType =
          VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
      fpGetSemaphoreFdKHR(device, &vulkanSemaphoreGetFdInfoKHR, &fd);
      return fd;
    }
    return -1;
  }
#endif
  // Creates the sampled-image view over the full mip chain of the texture.
  // NOTE(review): the view format is R8G8B8A8_UNORM while createTextureImage()
  // creates the image as R8G8B8A8_UINT; without
  // VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT that is a view/image format mismatch —
  // confirm the intended formats.
  void createTextureImageView() {
    textureImageView = createImageView(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
  }
  // Creates the trilinear sampler used for the mip-mapped texture: repeat
  // addressing on all axes, 16x anisotropy, and an LOD range covering the
  // whole mip chain.
  void createTextureSampler() {
    VkSamplerCreateInfo samplerInfo = {};
    samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    samplerInfo.magFilter = VK_FILTER_LINEAR;
    samplerInfo.minFilter = VK_FILTER_LINEAR;
    samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
    // NOTE(review): anisotropic filtering requires the samplerAnisotropy
    // device feature to be enabled at device creation — confirm.
    samplerInfo.anisotropyEnable = VK_TRUE;
    samplerInfo.maxAnisotropy = 16;
    samplerInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
    samplerInfo.unnormalizedCoordinates = VK_FALSE;
    samplerInfo.compareEnable = VK_FALSE;
    samplerInfo.compareOp = VK_COMPARE_OP_ALWAYS;
    // Linear filtering between mip levels; maxLod spans all levels.
    samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    samplerInfo.minLod = 0;  // Optional
    samplerInfo.maxLod = static_cast<float>(mipLevels);
    samplerInfo.mipLodBias = 0;  // Optional
    if (vkCreateSampler(device, &samplerInfo, nullptr, &textureSampler) !=
        VK_SUCCESS) {
      throw std::runtime_error("failed to create texture sampler!");
    }
  }
VkImageView createImageView(VkImage image, VkFormat format) {
VkImageViewCreateInfo viewInfo = {};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.image = image;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewInfo.format = format;
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewInfo.subresourceRange.baseMipLevel = 0;
viewInfo.subresourceRange.levelCount = mipLevels;
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
VkImageView imageView;
if (vkCreateImageView(device, &viewInfo, nullptr, &imageView) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create texture image view!");
}
return imageView;
}
void createImage(uint32_t width, uint32_t height, VkFormat format,
VkImageTiling tiling, VkImageUsageFlags usage,
VkMemoryPropertyFlags properties, VkImage& image,
VkDeviceMemory& imageMemory) {
VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.extent.width = width;
imageInfo.extent.height = height;
imageInfo.extent.depth = 1;
imageInfo.mipLevels = mipLevels;
imageInfo.arrayLayers = 1;
imageInfo.format = format;
imageInfo.tiling = tiling;
imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageInfo.usage = usage;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkExternalMemoryImageCreateInfo vkExternalMemImageCreateInfo = {};
vkExternalMemImageCreateInfo.sType =
VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
vkExternalMemImageCreateInfo.pNext = NULL;
vkExternalMemImageCreateInfo.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
imageInfo.pNext = &vkExternalMemImageCreateInfo;
if (vkCreateImage(device, &imageInfo, nullptr, &image) != VK_SUCCESS) {
throw std::runtime_error("failed to create image!");
}
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(device, image, &memRequirements);
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportMemoryWin32HandleInfoKHR vulkanExportMemoryWin32HandleInfoKHR = {};
vulkanExportMemoryWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
vulkanExportMemoryWin32HandleInfoKHR.pNext = NULL;
vulkanExportMemoryWin32HandleInfoKHR.pAttributes = &winSecurityAttributes;
vulkanExportMemoryWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportMemoryWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportMemoryAllocateInfoKHR vulkanExportMemoryAllocateInfoKHR = {};
vulkanExportMemoryAllocateInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
#ifdef _WIN64
vulkanExportMemoryAllocateInfoKHR.pNext =
IsWindows8OrGreater() ? &vulkanExportMemoryWin32HandleInfoKHR : NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportMemoryAllocateInfoKHR.pNext = NULL;
vulkanExportMemoryAllocateInfoKHR.handleTypes =
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.pNext = &vulkanExportMemoryAllocateInfoKHR;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
VkMemoryRequirements vkMemoryRequirements = {};
vkGetImageMemoryRequirements(device, image, &vkMemoryRequirements);
totalImageMemSize = vkMemoryRequirements.size;
if (vkAllocateMemory(device, &allocInfo, nullptr, &textureImageMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate image memory!");
}
vkBindImageMemory(device, image, textureImageMemory, 0);
}
  // Imports the two Vulkan interop semaphores into CUDA as external
  // semaphores: cudaUpdateVkSemaphore (CUDA signals, Vulkan waits) and
  // vkUpdateCudaSemaphore (Vulkan signals, CUDA waits). On Windows the export
  // handle type depends on the OS version; on Linux opaque fds are used.
  void cudaVkImportSemaphore() {
    cudaExternalSemaphoreHandleDesc externalSemaphoreHandleDesc;
    memset(&externalSemaphoreHandleDesc, 0,
           sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
    externalSemaphoreHandleDesc.type =
        IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
                              : cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
    externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
        IsWindows8OrGreater()
            ? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
            : VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
        cudaUpdateVkSemaphore);
#else
    externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
    externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
        VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, cudaUpdateVkSemaphore);
#endif
    externalSemaphoreHandleDesc.flags = 0;
    checkCudaErrors(cudaImportExternalSemaphore(&cudaExtCudaUpdateVkSemaphore,
                                                &externalSemaphoreHandleDesc));
    // Reuse the same descriptor for the second semaphore.
    memset(&externalSemaphoreHandleDesc, 0,
           sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
    externalSemaphoreHandleDesc.type =
        IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
                              : cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
    ;
    externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
        IsWindows8OrGreater()
            ? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
            : VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
        vkUpdateCudaSemaphore);
#else
    externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
    externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
        VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, vkUpdateCudaSemaphore);
#endif
    externalSemaphoreHandleDesc.flags = 0;
    checkCudaErrors(cudaImportExternalSemaphore(&cudaExtVkUpdateCudaSemaphore,
                                                &externalSemaphoreHandleDesc));
    printf("CUDA Imported Vulkan semaphore\n");
  }
  // Imports the exported Vulkan texture memory into CUDA and builds all the
  // objects the filter kernels use:
  //  - cudaMipmappedImageArray: mapped view of the Vulkan image's mip chain;
  //  - cudaMipmappedImageArrayTemp/-Orig: CUDA-owned mip chains holding the
  //    intermediate filter result and the pristine source data;
  //  - per-level surface objects (plus device-side arrays of them) and a
  //    trilinear texture object over the original data.
  void cudaVkImportImageMem() {
    cudaExternalMemoryHandleDesc cudaExtMemHandleDesc;
    memset(&cudaExtMemHandleDesc, 0, sizeof(cudaExtMemHandleDesc));
#ifdef _WIN64
    cudaExtMemHandleDesc.type =
        IsWindows8OrGreater() ? cudaExternalMemoryHandleTypeOpaqueWin32
                              : cudaExternalMemoryHandleTypeOpaqueWin32Kmt;
    cudaExtMemHandleDesc.handle.win32.handle = getVkImageMemHandle(
        IsWindows8OrGreater()
            ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
            : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT);
#else
    cudaExtMemHandleDesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
    cudaExtMemHandleDesc.handle.fd =
        getVkImageMemHandle(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
#endif
    // Must be the full allocation size recorded when the memory was created.
    cudaExtMemHandleDesc.size = totalImageMemSize;
    checkCudaErrors(cudaImportExternalMemory(&cudaExtMemImageBuffer,
                                             &cudaExtMemHandleDesc));
    // Map the imported memory as a mipmapped array of uchar4 texels
    // (8 bits per channel, unsigned); depth 0 selects a 2D extent.
    cudaExternalMemoryMipmappedArrayDesc externalMemoryMipmappedArrayDesc;
    memset(&externalMemoryMipmappedArrayDesc, 0,
           sizeof(externalMemoryMipmappedArrayDesc));
    hipExtent extent = make_hipExtent(imageWidth, imageHeight, 0);
    hipChannelFormatDesc formatDesc;
    formatDesc.x = 8;
    formatDesc.y = 8;
    formatDesc.z = 8;
    formatDesc.w = 8;
    formatDesc.f = hipChannelFormatKindUnsigned;
    externalMemoryMipmappedArrayDesc.offset = 0;
    externalMemoryMipmappedArrayDesc.formatDesc = formatDesc;
    externalMemoryMipmappedArrayDesc.extent = extent;
    externalMemoryMipmappedArrayDesc.flags = 0;
    externalMemoryMipmappedArrayDesc.numLevels = mipLevels;
    checkCudaErrors(cudaExternalMemoryGetMappedMipmappedArray(
        &cudaMipmappedImageArray, cudaExtMemImageBuffer,
        &externalMemoryMipmappedArrayDesc));
    // CUDA-owned scratch (Temp) and pristine-source (Orig) mip chains with
    // the same shape as the imported image.
    checkCudaErrors(hipMallocMipmappedArray(&cudaMipmappedImageArrayTemp,
                                            &formatDesc, extent, mipLevels));
    checkCudaErrors(hipMallocMipmappedArray(&cudaMipmappedImageArrayOrig,
                                            &formatDesc, extent, mipLevels));
    // NOTE(review): signed loop index compared against unsigned mipLevels.
    for (int mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
      hipArray_t cudaMipLevelArray, cudaMipLevelArrayTemp,
          cudaMipLevelArrayOrig;
      hipResourceDesc resourceDesc;
      checkCudaErrors(hipGetMipmappedArrayLevel(
          &cudaMipLevelArray, cudaMipmappedImageArray, mipLevelIdx));
      checkCudaErrors(hipGetMipmappedArrayLevel(
          &cudaMipLevelArrayTemp, cudaMipmappedImageArrayTemp, mipLevelIdx));
      checkCudaErrors(hipGetMipmappedArrayLevel(
          &cudaMipLevelArrayOrig, cudaMipmappedImageArrayOrig, mipLevelIdx));
      // Per-level dimensions, clamped to at least 1 texel.
      uint32_t width =
          (imageWidth >> mipLevelIdx) ? (imageWidth >> mipLevelIdx) : 1;
      uint32_t height =
          (imageHeight >> mipLevelIdx) ? (imageHeight >> mipLevelIdx) : 1;
      // Preserve the original pixels of this level for the texture input.
      checkCudaErrors(hipMemcpy2DArrayToArray(
          cudaMipLevelArrayOrig, 0, 0, cudaMipLevelArray, 0, 0,
          width * sizeof(uchar4), height, hipMemcpyDeviceToDevice));
      // Writable surface over the shared (Vulkan-visible) level.
      memset(&resourceDesc, 0, sizeof(resourceDesc));
      resourceDesc.resType = hipResourceTypeArray;
      resourceDesc.res.array.array = cudaMipLevelArray;
      hipSurfaceObject_t surfaceObject;
      checkCudaErrors(hipCreateSurfaceObject(&surfaceObject, &resourceDesc));
      surfaceObjectList.push_back(surfaceObject);
      // Writable surface over the scratch level.
      memset(&resourceDesc, 0, sizeof(resourceDesc));
      resourceDesc.resType = hipResourceTypeArray;
      resourceDesc.res.array.array = cudaMipLevelArrayTemp;
      hipSurfaceObject_t surfaceObjectTemp;
      checkCudaErrors(
          hipCreateSurfaceObject(&surfaceObjectTemp, &resourceDesc));
      surfaceObjectListTemp.push_back(surfaceObjectTemp);
    }
    // Trilinear, normalized-coordinate texture over the pristine mip chain.
    hipResourceDesc resDescr;
    memset(&resDescr, 0, sizeof(hipResourceDesc));
    resDescr.resType = hipResourceTypeMipmappedArray;
    resDescr.res.mipmap.mipmap = cudaMipmappedImageArrayOrig;
    hipTextureDesc texDescr;
    memset(&texDescr, 0, sizeof(hipTextureDesc));
    texDescr.normalizedCoords = true;
    texDescr.filterMode = hipFilterModeLinear;
    texDescr.mipmapFilterMode = hipFilterModeLinear;
    texDescr.addressMode[0] = hipAddressModeWrap;
    texDescr.addressMode[1] = hipAddressModeWrap;
    texDescr.maxMipmapLevelClamp = float(mipLevels - 1);
    texDescr.readMode = hipReadModeNormalizedFloat;
    checkCudaErrors(hipCreateTextureObject(&textureObjMipMapInput, &resDescr,
                                           &texDescr, NULL));
    // Device-visible copies of the surface-object lists for kernel use.
    checkCudaErrors(hipMalloc((void**)&d_surfaceObjectList,
                              sizeof(hipSurfaceObject_t) * mipLevels));
    checkCudaErrors(hipMalloc((void**)&d_surfaceObjectListTemp,
                              sizeof(hipSurfaceObject_t) * mipLevels));
    checkCudaErrors(hipMemcpy(d_surfaceObjectList, surfaceObjectList.data(),
                              sizeof(hipSurfaceObject_t) * mipLevels,
                              hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(
        d_surfaceObjectListTemp, surfaceObjectListTemp.data(),
        sizeof(hipSurfaceObject_t) * mipLevels, hipMemcpyHostToDevice));
    printf("CUDA Kernel Vulkan image buffer\n");
  }
  // Runs one CUDA box-filter pass over the shared texture: waits on the
  // Vulkan-signalled semaphore, launches the separable x/y filter kernels on
  // streamToRun, then signals Vulkan that CUDA has finished writing.
  void cudaUpdateVkImage() {
    cudaVkSemaphoreWait(cudaExtVkUpdateCudaSemaphore);
    int nthreads = 128;
    /*Perform 2D box filter on image using CUDA */
    // NOTE(review): the grid sizes assume imageHeight and imageWidth are
    // exact multiples of nthreads (128); leftover rows/columns would get no
    // blocks — confirm the image dimensions satisfy this.
    hipLaunchKernelGGL(( d_boxfilter_rgba_x), dim3(imageHeight / nthreads), dim3(nthreads), 0, streamToRun, 
        d_surfaceObjectListTemp, textureObjMipMapInput, imageWidth, imageHeight,
        mipLevels, filter_radius);
    hipLaunchKernelGGL(( d_boxfilter_rgba_y), dim3(imageWidth / nthreads), dim3(nthreads), 0, streamToRun, 
        d_surfaceObjectList, d_surfaceObjectListTemp, imageWidth, imageHeight,
        mipLevels, filter_radius);
    varySigma();
    cudaVkSemaphoreSignal(cudaExtCudaUpdateVkSemaphore);
  }
  // Records and submits a one-off pipeline barrier moving every mip level of
  // `image` from oldLayout to newLayout. Only the two transitions this app
  // needs are supported — UNDEFINED -> TRANSFER_DST_OPTIMAL (before upload)
  // and TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL (after upload);
  // anything else throws std::invalid_argument. `format` is currently unused.
  void transitionImageLayout(VkImage image, VkFormat format,
                             VkImageLayout oldLayout, VkImageLayout newLayout) {
    VkCommandBuffer commandBuffer = beginSingleTimeCommands();
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    // Covers the whole mip chain, not just level 0.
    barrier.subresourceRange.baseMipLevel = 0;
    barrier.subresourceRange.levelCount = mipLevels;
    barrier.subresourceRange.baseArrayLayer = 0;
    barrier.subresourceRange.layerCount = 1;
    VkPipelineStageFlags sourceStage;
    VkPipelineStageFlags destinationStage;
    if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
        newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
      // Nothing to wait on; gate the transfer writes only.
      barrier.srcAccessMask = 0;
      barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
      sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
      destinationStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
    } else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
               newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
      // Make transfer writes visible to fragment-shader reads.
      barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
      barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
      sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
      destinationStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    } else {
      throw std::invalid_argument("unsupported layout transition!");
    }
    vkCmdPipelineBarrier(commandBuffer, sourceStage, destinationStage, 0, 0,
                         nullptr, 0, nullptr, 1, &barrier);
    endSingleTimeCommands(commandBuffer);
  }
void copyBufferToImage(VkBuffer buffer, VkImage image, uint32_t width,
uint32_t height) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferImageCopy region = {};
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.mipLevel = 0;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset = {0, 0, 0};
region.imageExtent = {width, height, 1};
vkCmdCopyBufferToImage(commandBuffer, buffer, image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, ®ion);
endSingleTimeCommands(commandBuffer);
}
void createVertexBuffer() {
VkDeviceSize bufferSize = sizeof(vertices[0]) * vertices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, vertices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vertexBuffer, vertexBufferMemory);
copyBuffer(stagingBuffer, vertexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createIndexBuffer() {
VkDeviceSize bufferSize = sizeof(indices[0]) * indices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, indices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, indexBuffer, indexBufferMemory);
copyBuffer(stagingBuffer, indexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createUniformBuffers() {
VkDeviceSize bufferSize = sizeof(UniformBufferObject);
uniformBuffers.resize(swapChainImages.size());
uniformBuffersMemory.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
uniformBuffers[i], uniformBuffersMemory[i]);
}
}
void createDescriptorPool() {
std::array<VkDescriptorPoolSize, 2> poolSizes = {};
poolSizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSizes[0].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
poolSizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSizes[1].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
poolInfo.pPoolSizes = poolSizes.data();
poolInfo.maxSets = static_cast<uint32_t>(swapChainImages.size());
if (vkCreateDescriptorPool(device, &poolInfo, nullptr, &descriptorPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor pool!");
}
}
// Allocates one descriptor set per swap-chain image (all using the same
// layout) and points each set at the per-frame uniform buffer (binding 0)
// and the shared texture sampler (binding 1).
void createDescriptorSets() {
std::vector<VkDescriptorSetLayout> layouts(swapChainImages.size(),
descriptorSetLayout);
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount =
static_cast<uint32_t>(swapChainImages.size());
allocInfo.pSetLayouts = layouts.data();
descriptorSets.resize(swapChainImages.size());
if (vkAllocateDescriptorSets(device, &allocInfo, descriptorSets.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor sets!");
}
for (size_t i = 0; i < swapChainImages.size(); i++) {
// Binding 0: this frame's uniform buffer (whole UBO range).
VkDescriptorBufferInfo bufferInfo = {};
bufferInfo.buffer = uniformBuffers[i];
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
// Binding 1: the texture view/sampler shared by all frames.
VkDescriptorImageInfo imageInfo = {};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = textureImageView;
imageInfo.sampler = textureSampler;
std::array<VkWriteDescriptorSet, 2> descriptorWrites = {};
descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].dstSet = descriptorSets[i];
descriptorWrites[0].dstBinding = 0;
descriptorWrites[0].dstArrayElement = 0;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].pBufferInfo = &bufferInfo;
descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].dstSet = descriptorSets[i];
descriptorWrites[1].dstBinding = 1;
descriptorWrites[1].dstArrayElement = 0;
descriptorWrites[1].descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].pImageInfo = &imageInfo;
// bufferInfo/imageInfo are stack-locals; the update happens inside this
// iteration, before they go out of scope, so the pointers stay valid.
vkUpdateDescriptorSets(device,
static_cast<uint32_t>(descriptorWrites.size()),
descriptorWrites.data(), 0, nullptr);
}
}
// Creates a VkBuffer of `size` bytes with the given usage, allocates device
// memory of a type satisfying `properties`, and binds the two together.
// Outputs are returned through `buffer` and `bufferMemory`; throws on
// creation/allocation failure.
void createBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
VkMemoryPropertyFlags properties, VkBuffer& buffer,
VkDeviceMemory& bufferMemory) {
VkBufferCreateInfo bufferInfo = {};
bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferInfo.size = size;
bufferInfo.usage = usage;
// EXCLUSIVE: the buffer is only ever used by one queue family at a time.
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if (vkCreateBuffer(device, &bufferInfo, nullptr, &buffer) != VK_SUCCESS) {
throw std::runtime_error("failed to create buffer!");
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
VkMemoryAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocInfo.allocationSize = memRequirements.size;
allocInfo.memoryTypeIndex =
findMemoryType(memRequirements.memoryTypeBits, properties);
if (vkAllocateMemory(device, &allocInfo, nullptr, &bufferMemory) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate buffer memory!");
}
vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
// Allocates a primary command buffer from the shared command pool and begins
// recording it with ONE_TIME_SUBMIT semantics. Pair every call with
// endSingleTimeCommands(), which submits, waits, and frees it.
VkCommandBuffer beginSingleTimeCommands() {
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandPool = commandPool;
allocInfo.commandBufferCount = 1;
VkCommandBuffer commandBuffer;
vkAllocateCommandBuffers(device, &allocInfo, &commandBuffer);
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(commandBuffer, &beginInfo);
return commandBuffer;
}
// Ends the given one-shot command buffer, submits it to the graphics queue,
// blocks until the queue is idle (so the work is fully complete), then frees
// the command buffer. Synchronous by design — setup-time use only.
void endSingleTimeCommands(VkCommandBuffer commandBuffer) {
vkEndCommandBuffer(commandBuffer);
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffer;
vkQueueSubmit(graphicsQueue, 1, &submitInfo, VK_NULL_HANDLE);
// Coarse but simple: wait for the whole queue rather than using a fence.
vkQueueWaitIdle(graphicsQueue);
vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer);
}
// Copies `size` bytes from srcBuffer to dstBuffer via a one-shot command
// buffer; blocks until the copy completes (endSingleTimeCommands waits on
// the graphics queue).
void copyBuffer(VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) {
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferCopy copyRegion = {};
copyRegion.size = size;
// Fixed: the region argument had been mangled to the literal "©Region"
// (an HTML-entity corruption of "&copyRegion"), which does not compile.
vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, &copyRegion);
endSingleTimeCommands(commandBuffer);
}
// Returns the index of the first device memory type that is both allowed by
// `typeFilter` (a bitmask from VkMemoryRequirements::memoryTypeBits) and has
// at least all of the requested `properties` flags. Throws if none matches.
uint32_t findMemoryType(uint32_t typeFilter,
VkMemoryPropertyFlags properties) {
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
// Bit i of typeFilter set => memory type i is acceptable for the resource.
if ((typeFilter & (1 << i)) &&
(memProperties.memoryTypes[i].propertyFlags & properties) ==
properties) {
return i;
}
}
throw std::runtime_error("failed to find suitable memory type!");
}
// Records one command buffer per swap-chain framebuffer: begin the render
// pass, bind pipeline + vertex/index buffers + that frame's descriptor set,
// and issue an indexed draw of the textured quad. Buffers are recorded once
// and re-submitted every frame (SIMULTANEOUS_USE allows re-submission while
// still pending).
void createCommandBuffers() {
commandBuffers.resize(swapChainFramebuffers.size());
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)commandBuffers.size();
if (vkAllocateCommandBuffers(device, &allocInfo, commandBuffers.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate command buffers!");
}
for (size_t i = 0; i < commandBuffers.size(); i++) {
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
if (vkBeginCommandBuffer(commandBuffers[i], &beginInfo) != VK_SUCCESS) {
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = swapChainFramebuffers[i];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = swapChainExtent;
// Clear to opaque black at render-pass load.
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffers[i], &renderPassInfo,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS,
graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
// UINT16 matches the std::vector<uint16_t> `indices` at file scope.
vkCmdBindIndexBuffer(commandBuffers[i], indexBuffer, 0,
VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(commandBuffers[i],
VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
0, 1, &descriptorSets[i], 0, nullptr);
vkCmdDrawIndexed(commandBuffers[i], static_cast<uint32_t>(indices.size()),
1, 0, 0, 0);
// vkCmdDraw(commandBuffers[i], static_cast<uint32_t>(vertices.size()), 1,
// 0, 0);
vkCmdEndRenderPass(commandBuffers[i]);
if (vkEndCommandBuffer(commandBuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to record command buffer!");
}
}
}
// Creates the per-frame synchronization objects used by drawFrame():
// image-available and render-finished semaphores plus an in-flight fence
// for each of the MAX_FRAMES frames. Fences start signaled so the very
// first vkWaitForFences does not deadlock.
void createSyncObjects() {
imageAvailableSemaphores.resize(MAX_FRAMES);
renderFinishedSemaphores.resize(MAX_FRAMES);
inFlightFences.resize(MAX_FRAMES);
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
for (size_t i = 0; i < MAX_FRAMES; i++) {
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&imageAvailableSemaphores[i]) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&renderFinishedSemaphores[i]) != VK_SUCCESS ||
vkCreateFence(device, &fenceInfo, nullptr, &inFlightFences[i]) !=
VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a frame!");
}
}
}
// Creates the two exportable semaphores used to hand ownership back and
// forth between the Vulkan graphics queue and CUDA:
//   cudaUpdateVkSemaphore — signaled by CUDA, waited on by Vulkan;
//   vkUpdateCudaSemaphore — signaled by Vulkan, waited on by CUDA.
// Handle type is an opaque FD on Linux; on Windows it is an opaque Win32
// handle (KMT flavor before Windows 8).
void createSyncObjectsExt() {
// Fixed: removed a redundant memset + duplicate sType assignment that
// re-initialized the already zero-initialized create-info struct.
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
#ifdef _WIN64
WindowsSecurityAttributes winSecurityAttributes;
VkExportSemaphoreWin32HandleInfoKHR
vulkanExportSemaphoreWin32HandleInfoKHR = {};
vulkanExportSemaphoreWin32HandleInfoKHR.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
vulkanExportSemaphoreWin32HandleInfoKHR.pNext = NULL;
vulkanExportSemaphoreWin32HandleInfoKHR.pAttributes =
&winSecurityAttributes;
vulkanExportSemaphoreWin32HandleInfoKHR.dwAccess =
DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
vulkanExportSemaphoreWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
VkExportSemaphoreCreateInfoKHR vulkanExportSemaphoreCreateInfo = {};
vulkanExportSemaphoreCreateInfo.sType =
VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
#ifdef _WIN64
vulkanExportSemaphoreCreateInfo.pNext =
IsWindows8OrGreater() ? &vulkanExportSemaphoreWin32HandleInfoKHR : NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
IsWindows8OrGreater()
? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
: VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
vulkanExportSemaphoreCreateInfo.pNext = NULL;
vulkanExportSemaphoreCreateInfo.handleTypes =
VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
semaphoreInfo.pNext = &vulkanExportSemaphoreCreateInfo;
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&cudaUpdateVkSemaphore) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&vkUpdateCudaSemaphore) != VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a CUDA-Vulkan!");
}
}
// Recomputes the model/view/projection matrices (fixed camera, model rotated
// 135 degrees about Z) and copies the UBO into every per-frame uniform
// buffer via map/memcpy/unmap (memory is HOST_COHERENT, so no flush needed).
void updateUniformBuffer() {
UniformBufferObject ubo = {};
mat4x4_identity(ubo.model);
mat4x4 Model;
mat4x4_dup(Model, ubo.model);
mat4x4_rotate(ubo.model, Model, 0.0f, 0.0f, 1.0f, degreesToRadians(135.0f));
vec3 eye = {2.0f, 2.0f, 2.0f};
vec3 center = {0.0f, 0.0f, 0.0f};
vec3 up = {0.0f, 0.0f, 1.0f};
mat4x4_look_at(ubo.view, eye, center, up);
mat4x4_perspective(ubo.proj, degreesToRadians(45.0f),
swapChainExtent.width / (float)swapChainExtent.height,
0.1f, 10.0f);
// GL-style projection has Y inverted relative to Vulkan clip space.
ubo.proj[1][1] *= -1;
for (size_t i = 0; i < swapChainImages.size(); i++) {
void* data;
vkMapMemory(device, uniformBuffersMemory[i], 0, sizeof(ubo), 0, &data);
memcpy(data, &ubo, sizeof(ubo));
vkUnmapMemory(device, uniformBuffersMemory[i]);
}
}
void drawFrame() {
static int startSubmit = 0;
vkWaitForFences(device, 1, &inFlightFences[currentFrame], VK_TRUE,
std::numeric_limits<uint64_t>::max());
uint32_t imageIndex;
VkResult result = vkAcquireNextImageKHR(
device, swapChain, std::numeric_limits<uint64_t>::max(),
imageAvailableSemaphores[currentFrame], VK_NULL_HANDLE, &imageIndex);
if (result == VK_ERROR_OUT_OF_DATE_KHR) {
recreateSwapChain();
return;
} else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
throw std::runtime_error("failed to acquire swap chain image!");
}
vkResetFences(device, 1, &inFlightFences[currentFrame]);
if (!startSubmit) {
submitVulkan(imageIndex);
startSubmit = 1;
} else {
submitVulkanCuda(imageIndex);
}
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame]};
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = signalSemaphores;
VkSwapchainKHR swapChains[] = {swapChain};
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &imageIndex;
presentInfo.pResults = nullptr; // Optional
result = vkQueuePresentKHR(presentQueue, &presentInfo);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR ||
framebufferResized) {
framebufferResized = false;
recreateSwapChain();
} else if (result != VK_SUCCESS) {
throw std::runtime_error("failed to present swap chain image!");
}
cudaUpdateVkImage();
currentFrame = (currentFrame + 1) % MAX_FRAMES;
// Added sleep of 10 millisecs so that CPU does not submit too much work to
// GPU
std::this_thread::sleep_for(std::chrono::microseconds(10000));
char title[256];
sprintf(title, "Vulkan Image CUDA Box Filter (radius=%d)", filter_radius);
glfwSetWindowTitle(window, title);
}
// Asynchronously signals an imported Vulkan semaphore from the CUDA stream
// `streamToRun` (binary-semaphore semantics: fence value is ignored/0).
void cudaVkSemaphoreSignal(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreSignalParams extSemaphoreSignalParams;
memset(&extSemaphoreSignalParams, 0, sizeof(extSemaphoreSignalParams));
extSemaphoreSignalParams.params.fence.value = 0;
extSemaphoreSignalParams.flags = 0;
checkCudaErrors(cudaSignalExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreSignalParams, 1, streamToRun));
}
// Enqueues a wait on an imported Vulkan semaphore into the CUDA stream
// `streamToRun`; subsequent work in the stream runs only after Vulkan
// signals it (binary-semaphore semantics: fence value is ignored/0).
void cudaVkSemaphoreWait(cudaExternalSemaphore_t& extSemaphore) {
cudaExternalSemaphoreWaitParams extSemaphoreWaitParams;
memset(&extSemaphoreWaitParams, 0, sizeof(extSemaphoreWaitParams));
extSemaphoreWaitParams.params.fence.value = 0;
extSemaphoreWaitParams.flags = 0;
checkCudaErrors(cudaWaitExternalSemaphoresAsync(
&extSemaphore, &extSemaphoreWaitParams, 1, streamToRun));
}
// First-frame submission: waits only on image acquisition, and signals both
// the present semaphore and vkUpdateCudaSemaphore so CUDA can start its
// first filter pass. The frame's fence is signaled on completion.
void submitVulkan(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame]};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
// Steady-state submission: additionally waits on cudaUpdateVkSemaphore (the
// CUDA filter pass from the previous frame) before rendering, and again
// signals the present semaphore plus vkUpdateCudaSemaphore for the next
// CUDA pass.
void submitVulkanCuda(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame],
cudaUpdateVkSemaphore};
// ALL_COMMANDS for the CUDA semaphore: no stage may start before CUDA done.
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submitInfo.waitSemaphoreCount = 2;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
// Wraps raw SPIR-V bytecode (as read by readFile) in a VkShaderModule.
// NOTE(review): the reinterpret_cast assumes code.data() is suitably aligned
// for uint32_t; std::vector<char>'s allocator normally guarantees this.
VkShaderModule createShaderModule(const std::vector<char>& code) {
VkShaderModuleCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
createInfo.codeSize = code.size();
createInfo.pCode = reinterpret_cast<const uint32_t*>(code.data());
VkShaderModule shaderModule;
if (vkCreateShaderModule(device, &createInfo, nullptr, &shaderModule) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create shader module!");
}
return shaderModule;
}
// Picks the swap-chain surface format: prefers 8-bit BGRA with the sRGB
// non-linear color space, otherwise falls back to the first format the
// surface advertises.
VkSurfaceFormatKHR chooseSwapSurfaceFormat(
const std::vector<VkSurfaceFormatKHR>& availableFormats) {
const VkSurfaceFormatKHR preferred = {VK_FORMAT_B8G8R8A8_UNORM,
VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
// A single VK_FORMAT_UNDEFINED entry means the surface imposes no
// preference, so we are free to pick ours.
if (availableFormats.size() == 1 &&
availableFormats[0].format == VK_FORMAT_UNDEFINED) {
return preferred;
}
for (const VkSurfaceFormatKHR& candidate : availableFormats) {
if (candidate.format == preferred.format &&
candidate.colorSpace == preferred.colorSpace) {
return candidate;
}
}
return availableFormats[0];
}
// Picks the presentation mode: MAILBOX wins outright if available,
// IMMEDIATE beats the fallback, and FIFO (always supported) is the default.
VkPresentModeKHR chooseSwapPresentMode(
const std::vector<VkPresentModeKHR>& availablePresentModes) {
VkPresentModeKHR fallback = VK_PRESENT_MODE_FIFO_KHR;
for (VkPresentModeKHR mode : availablePresentModes) {
if (mode == VK_PRESENT_MODE_MAILBOX_KHR) {
return mode; // best choice: triple-buffer style, no tearing
}
if (mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
fallback = mode; // better than FIFO if MAILBOX never shows up
}
}
return fallback;
}
// Chooses the swap-chain extent. If the surface dictates a fixed extent,
// use it verbatim; otherwise take the framebuffer size from GLFW, clamped
// to the surface's min/max image extents.
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) {
if (capabilities.currentExtent.width !=
std::numeric_limits<uint32_t>::max()) {
return capabilities.currentExtent;
} else {
int width, height;
glfwGetFramebufferSize(window, &width, &height);
VkExtent2D actualExtent = {static_cast<uint32_t>(width),
static_cast<uint32_t>(height)};
// Fixed: the clamps used bare "::max"/"::min", which do not name the
// <algorithm> functions; qualify them with std::.
actualExtent.width = std::max(
capabilities.minImageExtent.width,
std::min(capabilities.maxImageExtent.width, actualExtent.width));
actualExtent.height = std::max(
capabilities.minImageExtent.height,
std::min(capabilities.maxImageExtent.height, actualExtent.height));
return actualExtent;
}
}
// Queries the surface capabilities, supported formats, and present modes of
// `device` for the window surface, using the usual two-call count/fill
// enumeration pattern.
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device) {
SwapChainSupportDetails details;
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface,
&details.capabilities);
uint32_t formatCount;
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
nullptr);
if (formatCount != 0) {
details.formats.resize(formatCount);
vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &formatCount,
details.formats.data());
}
uint32_t presentModeCount;
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface,
&presentModeCount, nullptr);
if (presentModeCount != 0) {
details.presentModes.resize(presentModeCount);
vkGetPhysicalDeviceSurfacePresentModesKHR(
device, surface, &presentModeCount, details.presentModes.data());
}
return details;
}
// A physical device is usable if it has graphics + present queue families,
// supports all required device extensions, offers at least one surface
// format and one present mode, and supports anisotropic sampling.
bool isDeviceSuitable(VkPhysicalDevice device) {
QueueFamilyIndices indices = findQueueFamilies(device);
bool extensionsSupported = checkDeviceExtensionSupport(device);
bool swapChainAdequate = false;
// Only query swap-chain support once the swapchain extension is known
// to be present.
if (extensionsSupported) {
SwapChainSupportDetails swapChainSupport = querySwapChainSupport(device);
swapChainAdequate = !swapChainSupport.formats.empty() &&
!swapChainSupport.presentModes.empty();
}
VkPhysicalDeviceFeatures supportedFeatures;
vkGetPhysicalDeviceFeatures(device, &supportedFeatures);
return indices.isComplete() && extensionsSupported && swapChainAdequate &&
supportedFeatures.samplerAnisotropy;
}
// Returns true iff every extension listed in `deviceExtensions` is exposed
// by the given physical device.
bool checkDeviceExtensionSupport(VkPhysicalDevice device) {
uint32_t extensionCount;
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
nullptr);
std::vector<VkExtensionProperties> available(extensionCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &extensionCount,
available.data());
// Look each required name up in the device's list; fail on first miss.
for (const char* required : deviceExtensions) {
bool found = false;
for (const VkExtensionProperties& props : available) {
if (std::string(props.extensionName) == required) {
found = true;
break;
}
}
if (!found) {
return false;
}
}
return true;
}
// Finds the indices of a graphics-capable queue family and a family that
// can present to the window surface (these may be the same family or two
// different ones). Stops as soon as both are found.
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device) {
QueueFamilyIndices indices;
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount,
queueFamilies.data());
int i = 0;
for (const auto& queueFamily : queueFamilies) {
if (queueFamily.queueCount > 0 &&
queueFamily.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
indices.graphicsFamily = i;
}
VkBool32 presentSupport = false;
vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (queueFamily.queueCount > 0 && presentSupport) {
indices.presentFamily = i;
}
if (indices.isComplete()) {
break;
}
i++;
}
return indices;
}
// Returns the instance extensions GLFW needs for surface creation, plus the
// debug-utils extension when validation layers are enabled.
std::vector<const char*> getRequiredExtensions() {
uint32_t glfwExtensionCount = 0;
const char** glfwExtensions;
glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);
std::vector<const char*> extensions(glfwExtensions,
glfwExtensions + glfwExtensionCount);
if (enableValidationLayers) {
extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
}
return extensions;
}
bool checkValidationLayerSupport() {
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
std::vector<VkLayerProperties> availableLayers(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
for (const char* layerName : validationLayers) {
bool layerFound = false;
for (const auto& layerProperties : availableLayers) {
if (strcmp(layerName, layerProperties.layerName) == 0) {
layerFound = true;
break;
}
}
if (!layerFound) {
return false;
}
}
return true;
}
// Reads an entire binary file (located relative to the executable via
// sdkFindFilePath) into a byte vector; used to load SPIR-V shader blobs.
// Throws std::runtime_error if the file cannot be located or opened.
static std::vector<char> readFile(const std::string& filename) {
char* file_path = sdkFindFilePath(filename.c_str(), execution_path.c_str());
// Fixed: sdkFindFilePath returns null when the file is not found, and
// constructing the ifstream from a null char* is undefined behavior.
if (file_path == nullptr) {
throw std::runtime_error("failed to locate file: " + filename);
}
// std::ios::ate opens at end-of-file so tellg() yields the file size.
std::ifstream file(file_path, std::ios::ate | std::ios::binary);
if (!file.is_open()) {
throw std::runtime_error("failed to open file!");
}
size_t fileSize = (size_t)file.tellg();
std::vector<char> buffer(fileSize);
file.seekg(0);
file.read(buffer.data(), fileSize);
file.close();
return buffer;
}
// Validation-layer message callback: prints the message to stderr.
// Returning VK_FALSE tells the layer not to abort the triggering call.
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
VkDebugUtilsMessageTypeFlagsEXT messageType,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* pUserData) {
std::cerr << "validation layer: " << pCallbackData->pMessage << std::endl;
return VK_FALSE;
}
};
// Entry point: parses the optional -file=<name> argument (PPM image to
// filter), then runs the Vulkan-CUDA interop app. Returns EXIT_FAILURE on
// any exception.
int main(int argc, char** argv) {
execution_path = argv[0];
std::string image_filename = "lenaRGB.ppm";
if (checkCmdLineFlag(argc, (const char**)argv, "file")) {
// Fixed: the original passed (char**)&image_filename, aiming a raw
// char* write at the internals of a std::string (undefined behavior).
// Receive the C string separately and assign it.
char* fname = nullptr;
getCmdLineArgumentString(argc, (const char**)argv, "file", &fname);
if (fname != nullptr) {
image_filename = fname;
}
}
vulkanImageCUDA app;
try {
// This app only works on ppm images
app.loadImageData(image_filename);
app.run();
} catch (const std::exception& e) {
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
| e7a368ed9d07cf2372c5bef45d308a473afb069f.cu | /*
* Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#define GLFW_INCLUDE_VULKAN
#ifdef _WIN64
#include <aclapi.h>
#include <dxgi1_2.h>
#include <windows.h>
#include <VersionHelpers.h>
#define _USE_MATH_DEFINES
#endif
#include <GLFW/glfw3.h>
#include <vulkan/vulkan.h>
#ifdef _WIN64
#include <vulkan/vulkan_win32.h>
#endif
#include <algorithm>
#include <array>
#include <chrono>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <set>
#include <stdexcept>
#include <thread>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_image.h>
#include <helper_math.h>
#include "linmath.h"
// Window dimensions and the number of frames allowed in flight.
#define WIDTH 800
#define HEIGHT 600
const int MAX_FRAMES = 4;
const std::vector<const char*> validationLayers = {
"VK_LAYER_KHRONOS_validation"};
// NOTE(review): both branches set this to false, so validation is disabled
// even in debug builds — confirm this is intentional.
#ifdef NDEBUG
const bool enableValidationLayers = false;
#else
const bool enableValidationLayers = false;
#endif
// Path of the running executable; used by sdkFindFilePath to locate assets.
std::string execution_path;
// Resolves the extension entry point vkCreateDebugUtilsMessengerEXT at
// runtime and forwards the call; reports the extension as absent when the
// loader cannot provide it.
VkResult CreateDebugUtilsMessengerEXT(
VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugUtilsMessengerEXT* pDebugMessenger) {
PFN_vkCreateDebugUtilsMessengerEXT proc =
reinterpret_cast<PFN_vkCreateDebugUtilsMessengerEXT>(
vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT"));
if (proc == nullptr) {
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
return proc(instance, pCreateInfo, pAllocator, pDebugMessenger);
}
// Device extensions required: swap-chain presentation plus the external
// memory/semaphore extensions used for Vulkan-CUDA interop (Win32 handle
// flavor on Windows, POSIX file-descriptor flavor elsewhere).
const std::vector<const char*> deviceExtensions = {
VK_KHR_SWAPCHAIN_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME,
#ifdef _WIN64
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME,
#else
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
#endif
};
#ifdef _WIN64
// RAII wrapper building a SECURITY_ATTRIBUTES granting Everyone full access;
// used as pAttributes when exporting sharable Win32 handles for CUDA interop.
// operator& yields the SECURITY_ATTRIBUTES* the Vulkan export structs expect.
class WindowsSecurityAttributes {
protected:
SECURITY_ATTRIBUTES m_winSecurityAttributes;
// Security descriptor plus trailing storage for the SID and ACL pointers
// (allocated as one block in the constructor, freed in the destructor).
PSECURITY_DESCRIPTOR m_winPSecurityDescriptor;
public:
WindowsSecurityAttributes();
SECURITY_ATTRIBUTES* operator&();
~WindowsSecurityAttributes();
};
// Builds a descriptor with a DACL that grants the well-known "world" SID
// (Everyone) all standard and specific rights; the handle is inheritable.
WindowsSecurityAttributes::WindowsSecurityAttributes() {
// Single allocation: descriptor followed by slots for the SID and ACL
// pointers so the destructor can find and free them.
m_winPSecurityDescriptor = (PSECURITY_DESCRIPTOR)calloc(
1, SECURITY_DESCRIPTOR_MIN_LENGTH + 2 * sizeof(void**));
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
InitializeSecurityDescriptor(m_winPSecurityDescriptor,
SECURITY_DESCRIPTOR_REVISION);
SID_IDENTIFIER_AUTHORITY sidIdentifierAuthority =
SECURITY_WORLD_SID_AUTHORITY;
AllocateAndInitializeSid(&sidIdentifierAuthority, 1, SECURITY_WORLD_RID, 0, 0,
0, 0, 0, 0, 0, ppSID);
EXPLICIT_ACCESS explicitAccess;
ZeroMemory(&explicitAccess, sizeof(EXPLICIT_ACCESS));
explicitAccess.grfAccessPermissions =
STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL;
explicitAccess.grfAccessMode = SET_ACCESS;
explicitAccess.grfInheritance = INHERIT_ONLY;
explicitAccess.Trustee.TrusteeForm = TRUSTEE_IS_SID;
explicitAccess.Trustee.TrusteeType = TRUSTEE_IS_WELL_KNOWN_GROUP;
explicitAccess.Trustee.ptstrName = (LPTSTR)*ppSID;
SetEntriesInAcl(1, &explicitAccess, NULL, ppACL);
SetSecurityDescriptorDacl(m_winPSecurityDescriptor, TRUE, *ppACL, FALSE);
m_winSecurityAttributes.nLength = sizeof(m_winSecurityAttributes);
m_winSecurityAttributes.lpSecurityDescriptor = m_winPSecurityDescriptor;
m_winSecurityAttributes.bInheritHandle = TRUE;
}
// Exposes the underlying SECURITY_ATTRIBUTES for APIs taking a pointer.
SECURITY_ATTRIBUTES* WindowsSecurityAttributes::operator&() {
return &m_winSecurityAttributes;
}
// Releases the SID, the ACL, and the combined descriptor allocation.
WindowsSecurityAttributes::~WindowsSecurityAttributes() {
PSID* ppSID =
(PSID*)((PBYTE)m_winPSecurityDescriptor + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppSID + sizeof(PSID*));
if (*ppSID) {
FreeSid(*ppSID);
}
if (*ppACL) {
LocalFree(*ppACL);
}
free(m_winPSecurityDescriptor);
}
#endif
// Resolves vkDestroyDebugUtilsMessengerEXT at runtime and destroys the
// messenger; silently a no-op when the extension entry point is unavailable.
void DestroyDebugUtilsMessengerEXT(VkInstance instance,
VkDebugUtilsMessengerEXT debugMessenger,
const VkAllocationCallbacks* pAllocator) {
PFN_vkDestroyDebugUtilsMessengerEXT proc =
reinterpret_cast<PFN_vkDestroyDebugUtilsMessengerEXT>(
vkGetInstanceProcAddr(instance, "vkDestroyDebugUtilsMessengerEXT"));
if (proc != nullptr) {
proc(instance, debugMessenger, pAllocator);
}
}
// Queue-family indices discovered by findQueueFamilies; -1 means not found.
struct QueueFamilyIndices {
int graphicsFamily = -1;
int presentFamily = -1;
// True once both a graphics and a present family have been located.
bool isComplete() { return graphicsFamily >= 0 && presentFamily >= 0; }
};
// Aggregated result of querySwapChainSupport for one physical device.
struct SwapChainSupportDetails {
VkSurfaceCapabilitiesKHR capabilities;
std::vector<VkSurfaceFormatKHR> formats;
std::vector<VkPresentModeKHR> presentModes;
};
// 2-component float array to match linmath.h's vec3/vec4 style.
typedef float vec2[2];
// Vertex layout used by the quad: position (vec4), color (vec3), texture
// coordinates (vec2), matching shader locations 0/1/2.
struct Vertex {
vec4 pos;
vec3 color;
vec2 texCoord;
// Single interleaved binding, advanced per vertex.
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
return bindingDescription;
}
// Attribute formats/offsets for pos, color, and texCoord respectively.
static std::array<VkVertexInputAttributeDescription, 3>
getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
attributeDescriptions[2].binding = 0;
attributeDescriptions[2].location = 2;
attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
return attributeDescriptions;
}
};
// Per-frame MVP matrices; 16-byte alignment matches std140 layout rules
// expected by the vertex shader's uniform block.
struct UniformBufferObject {
alignas(16) mat4x4 model;
alignas(16) mat4x4 view;
alignas(16) mat4x4 proj;
};
// Full-screen quad (positions span [-1, 1]) with per-corner colors and
// texture coordinates covering [0, 1].
const std::vector<Vertex> vertices = {
{{-1.0f, -1.0f, 0.0f, 1.0f}, {1.0f, 0.0f, 0.0f}, {0.0f, 0.0f}},
{{1.0f, -1.0f, 0.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {1.0f, 0.0f}},
{{1.0f, 1.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 1.0f}, {1.0f, 1.0f}},
{{-1.0f, 1.0f, 0.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {0.0f, 1.0f}}};
// Two triangles forming the quad (drawn with VK_INDEX_TYPE_UINT16).
const std::vector<uint16_t> indices = {0, 1, 2, 2, 3, 0};
// convert floating point rgba color to 32-bit integer
// Each channel is clamped to [0, 1] with __saturatef, scaled to [0, 255],
// and packed as 0xAABBGGRR (w in the high byte, x in the low byte).
__device__ unsigned int rgbaFloatToInt(float4 rgba) {
unsigned int r = (unsigned int)(__saturatef(rgba.x) * 255.0f);
unsigned int g = (unsigned int)(__saturatef(rgba.y) * 255.0f);
unsigned int b = (unsigned int)(__saturatef(rgba.z) * 255.0f);
unsigned int a = (unsigned int)(__saturatef(rgba.w) * 255.0f);
return (a << 24) | (b << 16) | (g << 8) | r;
}
// Unpacks a 0xAABBGGRR 32-bit color into per-channel floats in [0, 1].
__device__ float4 rgbaIntToFloat(unsigned int c) {
const float norm = 0.003921568627f; // precomputed 1/255
return make_float4((c & 0xff) * norm,
((c >> 8) & 0xff) * norm,
((c >> 16) & 0xff) * norm,
((c >> 24) & 0xff) * norm);
}
// Current box-filter radius and the direction it is animating in (+1 / -1).
int filter_radius = 14;
int g_nFilterSign = 1;
// This varies the filter radius, so we can see automatic animation
// Steps the radius one unit in the current direction and bounces the
// direction when it hits either end of the [0, 64] range.
void varySigma() {
int next = filter_radius + g_nFilterSign;
if (next > 64) {
next = 64; // hit the ceiling: clamp and start shrinking
g_nFilterSign = -1;
} else if (next < 0) {
next = 0; // hit the floor: clamp and start growing
g_nFilterSign = 1;
}
filter_radius = next;
}
// row pass using texture lookups
// Horizontal sliding-window box filter over every mip level: one thread per
// image row (launched on a 1D grid sized for baseHeight). Reads through a
// texture with normalized coordinates (px/py = 1/width, 1/height) so edge
// addressing is handled by the sampler; writes packed RGBA8 through the
// per-level surface objects.
// NOTE(review): 1.0 / width uses a double literal — presumably fine for
// precision, but 1.0f would avoid double math on the device; confirm intent.
__global__ void d_boxfilter_rgba_x(cudaSurfaceObject_t* dstSurfMipMapArray,
cudaTextureObject_t textureMipMapInput,
size_t baseWidth, size_t baseHeight,
size_t mipLevels, int filter_radius) {
// Normalization for the (2r+1)-wide window sum.
float scale = 1.0f / (float)((filter_radius << 1) + 1);
unsigned int y = blockIdx.x * blockDim.x + threadIdx.x;
if (y < baseHeight) {
for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
// Mip dimensions halve per level, clamped to at least 1 texel.
uint32_t width =
(baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
uint32_t height =
(baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
if (y < height && filter_radius < width) {
float px = 1.0 / width;
float py = 1.0 / height;
float4 t = make_float4(0.0f);
// Prime the window sum centered on x = 0.
for (int x = -filter_radius; x <= filter_radius; x++) {
t += tex2DLod<float4>(textureMipMapInput, x * px, y * py,
(float)mipLevelIdx);
}
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], 0, y);
// Slide the window: add the entering texel, drop the leaving one.
for (int x = 1; x < width; x++) {
t += tex2DLod<float4>(textureMipMapInput, (x + filter_radius) * px,
y * py, (float)mipLevelIdx);
t -=
tex2DLod<float4>(textureMipMapInput, (x - filter_radius - 1) * px,
y * py, (float)mipLevelIdx);
unsigned int dataB = rgbaFloatToInt(t * scale);
// surf2Dwrite x-coordinate is in bytes, hence * sizeof(uchar4).
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx],
x * sizeof(uchar4), y);
}
}
}
}
}
// column pass using coalesced global memory reads
// Vertical sliding-window box filter over every mip level: one thread per
// image column (1D grid). Reads and writes packed RGBA8 through per-level
// surface objects; edges are handled by replicating the first/last row.
__global__ void d_boxfilter_rgba_y(cudaSurfaceObject_t* dstSurfMipMapArray,
cudaSurfaceObject_t* srcSurfMipMapArray,
size_t baseWidth, size_t baseHeight,
size_t mipLevels, int filter_radius) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
// Normalization for the (2r+1)-tall window sum.
float scale = 1.0f / (float)((filter_radius << 1) + 1);
for (uint32_t mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
uint32_t width =
(baseWidth >> mipLevelIdx) ? (baseWidth >> mipLevelIdx) : 1;
uint32_t height =
(baseHeight >> mipLevelIdx) ? (baseHeight >> mipLevelIdx) : 1;
if (x < width && height > filter_radius) {
float4 t;
// do left edge
// surf2Dread/write x-coordinate is in bytes.
int colInBytes = x * sizeof(uchar4);
unsigned int pixFirst = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, 0);
// Replicate row 0 for the part of the window above the image.
t = rgbaIntToFloat(pixFirst) * filter_radius;
for (int y = 0; (y < (filter_radius + 1)) && (y < height); y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y);
t += rgbaIntToFloat(pix);
}
unsigned int dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, 0);
// Top region: entering rows are real, leaving rows are the replicated
// first row.
for (int y = 1; (y < filter_radius + 1) && ((y + filter_radius) < height);
y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
t += rgbaIntToFloat(pix);
t -= rgbaIntToFloat(pixFirst);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
// main loop
for (int y = (filter_radius + 1); y < (height - filter_radius); y++) {
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y + filter_radius);
t += rgbaIntToFloat(pix);
pix = surf2Dread<unsigned int>(srcSurfMipMapArray[mipLevelIdx],
colInBytes, y - filter_radius - 1);
t -= rgbaIntToFloat(pix);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
// do right edge
// Replicate the last row for the part of the window below the image.
unsigned int pixLast = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, height - 1);
for (int y = height - filter_radius;
(y < height) && ((y - filter_radius - 1) > 1); y++) {
t += rgbaIntToFloat(pixLast);
unsigned int pix = surf2Dread<unsigned int>(
srcSurfMipMapArray[mipLevelIdx], colInBytes, y - filter_radius - 1);
t -= rgbaIntToFloat(pix);
dataB = rgbaFloatToInt(t * scale);
surf2Dwrite(dataB, dstSurfMipMapArray[mipLevelIdx], colInBytes, y);
}
}
}
}
class vulkanImageCUDA {
public:
// Loads the source image (PPM, forced to 4 channels) into host memory so
// imageWidth/imageHeight are known before the window and texture are
// created. Exits the process on a missing or unreadable file.
// NOTE(review): image_path returned by sdkFindFilePath is presumably
// heap-allocated by the helper and is never freed here — confirm against
// helper_string.h before adding a free().
void loadImageData(const std::string& filename) {
  // load image (needed so we can get the width and height before we create
  // the window
  char* image_path =
      sdkFindFilePath(filename.c_str(), execution_path.c_str());
  if (image_path == 0) {
    printf("Error finding image file '%s'\n", filename.c_str());
    exit(EXIT_FAILURE);
  }
  sdkLoadPPM4(image_path, (unsigned char**)&image_data, &imageWidth,
              &imageHeight);
  if (!image_data) {
    printf("Error opening file '%s'\n", image_path);
    exit(EXIT_FAILURE);
  }
  printf("Loaded '%s', %d x %d pixels\n", image_path, imageWidth,
         imageHeight);
}
// Application entry point: window -> Vulkan objects -> CUDA interop
// objects -> render loop -> teardown. The order is fixed; initCuda()
// imports memory/semaphores created by initVulkan().
void run() {
  initWindow();
  initVulkan();
  initCuda();
  mainLoop();
  cleanup();
}
private:
// --- windowing / core Vulkan handles ---
GLFWwindow* window;
VkInstance instance;
VkDebugUtilsMessengerEXT debugMessenger;
VkSurfaceKHR surface;
VkPhysicalDevice physicalDevice = VK_NULL_HANDLE;
VkDevice device;
// UUID of the Vulkan physical device, used to pick the matching CUDA device.
uint8_t vkDeviceUUID[VK_UUID_SIZE];
VkQueue graphicsQueue;
VkQueue presentQueue;
// --- swapchain state ---
VkSwapchainKHR swapChain;
std::vector<VkImage> swapChainImages;
VkFormat swapChainImageFormat;
VkExtent2D swapChainExtent;
std::vector<VkImageView> swapChainImageViews;
std::vector<VkFramebuffer> swapChainFramebuffers;
// --- pipeline / descriptors ---
VkRenderPass renderPass;
VkDescriptorSetLayout descriptorSetLayout;
VkPipelineLayout pipelineLayout;
VkPipeline graphicsPipeline;
VkCommandPool commandPool;
// --- texture shared with CUDA ---
VkImage textureImage;
VkDeviceMemory textureImageMemory;
VkImageView textureImageView;
VkSampler textureSampler;
// --- geometry and uniforms ---
VkBuffer vertexBuffer;
VkDeviceMemory vertexBufferMemory;
VkBuffer indexBuffer;
VkDeviceMemory indexBufferMemory;
std::vector<VkBuffer> uniformBuffers;
std::vector<VkDeviceMemory> uniformBuffersMemory;
VkDescriptorPool descriptorPool;
std::vector<VkDescriptorSet> descriptorSets;
std::vector<VkCommandBuffer> commandBuffers;
// --- per-frame synchronization (plus the two cross-API semaphores) ---
std::vector<VkSemaphore> imageAvailableSemaphores;
std::vector<VkSemaphore> renderFinishedSemaphores;
VkSemaphore cudaUpdateVkSemaphore, vkUpdateCudaSemaphore;
std::vector<VkFence> inFlightFences;
size_t currentFrame = 0;
bool framebufferResized = false;
// Platform-specific external-memory/semaphore export entry points.
#ifdef _WIN64
PFN_vkGetMemoryWin32HandleKHR fpGetMemoryWin32HandleKHR;
PFN_vkGetSemaphoreWin32HandleKHR fpGetSemaphoreWin32HandleKHR;
#else
PFN_vkGetMemoryFdKHR fpGetMemoryFdKHR = NULL;
PFN_vkGetSemaphoreFdKHR fpGetSemaphoreFdKHR = NULL;
#endif
PFN_vkGetPhysicalDeviceProperties2 fpGetPhysicalDeviceProperties2;
// --- host-side image data ---
unsigned int* image_data = NULL;
unsigned int imageWidth, imageHeight;
unsigned int mipLevels;
size_t totalImageMemSize;
// CUDA objects
cudaExternalMemory_t cudaExtMemImageBuffer;
cudaMipmappedArray_t cudaMipmappedImageArray, cudaMipmappedImageArrayTemp,
    cudaMipmappedImageArrayOrig;
std::vector<cudaSurfaceObject_t> surfaceObjectList, surfaceObjectListTemp;
cudaSurfaceObject_t *d_surfaceObjectList, *d_surfaceObjectListTemp;
cudaTextureObject_t textureObjMipMapInput;
cudaExternalSemaphore_t cudaExtCudaUpdateVkSemaphore;
cudaExternalSemaphore_t cudaExtVkUpdateCudaSemaphore;
cudaStream_t streamToRun;
// Creates the GLFW window without an OpenGL context (GLFW_NO_API) since
// rendering goes through Vulkan, and registers the resize callback. The
// window hint must be set before glfwCreateWindow.
void initWindow() {
  glfwInit();
  glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
  window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Image CUDA Box Filter",
                            nullptr, nullptr);
  // Stash `this` so the static resize callback can reach the instance.
  glfwSetWindowUserPointer(window, this);
  glfwSetFramebufferSizeCallback(window, framebufferResizeCallback);
}
// GLFW framebuffer-resize hook: flags the owning application instance so
// the render loop recreates the swapchain at the new size.
static void framebufferResizeCallback(GLFWwindow* window, int width,
                                      int height) {
  auto self =
      static_cast<vulkanImageCUDA*>(glfwGetWindowUserPointer(window));
  self->framebufferResized = true;
}
// Builds the full Vulkan object graph in dependency order: instance ->
// surface -> device -> swapchain -> pipeline -> resources -> sync objects.
// The order of these calls is significant; each step consumes handles
// created by earlier steps.
void initVulkan() {
  createInstance();
  setupDebugMessenger();
  createSurface();
  pickPhysicalDevice();
  createLogicalDevice();
  getKhrExtensionsFn();
  createSwapChain();
  createImageViews();
  createRenderPass();
  createDescriptorSetLayout();
  createGraphicsPipeline();
  createFramebuffers();
  createCommandPool();
  createTextureImage();
  createTextureImageView();
  createTextureSampler();
  createVertexBuffer();
  createIndexBuffer();
  createUniformBuffers();
  createDescriptorPool();
  createDescriptorSets();
  createCommandBuffers();
  createSyncObjects();
  // Extra semaphores exported to CUDA for cross-API synchronization.
  createSyncObjectsExt();
}
// Selects the CUDA device matching the Vulkan device UUID, creates the
// work stream, then imports the Vulkan texture memory and the two
// cross-API semaphores into CUDA. Must run after initVulkan().
void initCuda() {
  setCudaVkDevice();
  checkCudaErrors(cudaStreamCreate(&streamToRun));
  cudaVkImportImageMem();
  cudaVkImportSemaphore();
}
// Render loop: the uniform buffer is written once up front, then frames
// are drawn until the window is closed. Waits for the device to go idle
// before returning so cleanup can destroy resources safely.
void mainLoop() {
  updateUniformBuffer();
  while (!glfwWindowShouldClose(window)) {
    glfwPollEvents();
    drawFrame();
  }
  vkDeviceWaitIdle(device);
}
// Destroys everything that depends on the swapchain (also used from
// recreateSwapChain). Destruction order matters: framebuffers and command
// buffers reference the pipeline/render pass, which reference the image
// views, which reference the swapchain images.
void cleanupSwapChain() {
  for (auto framebuffer : swapChainFramebuffers) {
    vkDestroyFramebuffer(device, framebuffer, nullptr);
  }
  vkFreeCommandBuffers(device, commandPool,
                       static_cast<uint32_t>(commandBuffers.size()),
                       commandBuffers.data());
  vkDestroyPipeline(device, graphicsPipeline, nullptr);
  vkDestroyPipelineLayout(device, pipelineLayout, nullptr);
  vkDestroyRenderPass(device, renderPass, nullptr);
  for (auto imageView : swapChainImageViews) {
    vkDestroyImageView(device, imageView, nullptr);
  }
  vkDestroySwapchainKHR(device, swapChain, nullptr);
  // Uniform buffers are per-swapchain-image, so they are torn down here too.
  for (size_t i = 0; i < swapChainImages.size(); i++) {
    vkDestroyBuffer(device, uniformBuffers[i], nullptr);
    vkFreeMemory(device, uniformBuffersMemory[i], nullptr);
  }
  vkDestroyDescriptorPool(device, descriptorPool, nullptr);
}
// Final teardown: swapchain-dependent objects first, then the CUDA interop
// objects (surfaces, mipmapped arrays, external memory/semaphores), then
// the remaining Vulkan resources in reverse creation order, and finally
// the window/GLFW state.
void cleanup() {
  cleanupSwapChain();
  vkDestroySampler(device, textureSampler, nullptr);
  vkDestroyImageView(device, textureImageView, nullptr);
  // Fix: mipLevels is unsigned, so iterate with a matching unsigned index
  // instead of `int` (removes the signed/unsigned comparison).
  for (uint32_t i = 0; i < mipLevels; i++) {
    checkCudaErrors(cudaDestroySurfaceObject(surfaceObjectList[i]));
    checkCudaErrors(cudaDestroySurfaceObject(surfaceObjectListTemp[i]));
  }
  checkCudaErrors(cudaFree(d_surfaceObjectList));
  checkCudaErrors(cudaFree(d_surfaceObjectListTemp));
  checkCudaErrors(cudaFreeMipmappedArray(cudaMipmappedImageArrayTemp));
  checkCudaErrors(cudaFreeMipmappedArray(cudaMipmappedImageArrayOrig));
  checkCudaErrors(cudaFreeMipmappedArray(cudaMipmappedImageArray));
  checkCudaErrors(cudaDestroyTextureObject(textureObjMipMapInput));
  // External memory/semaphores must be released on the CUDA side before
  // the underlying Vulkan objects are destroyed below.
  checkCudaErrors(cudaDestroyExternalMemory(cudaExtMemImageBuffer));
  checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtCudaUpdateVkSemaphore));
  checkCudaErrors(cudaDestroyExternalSemaphore(cudaExtVkUpdateCudaSemaphore));
  vkDestroyImage(device, textureImage, nullptr);
  vkFreeMemory(device, textureImageMemory, nullptr);
  vkDestroyDescriptorSetLayout(device, descriptorSetLayout, nullptr);
  vkDestroyBuffer(device, indexBuffer, nullptr);
  vkFreeMemory(device, indexBufferMemory, nullptr);
  vkDestroyBuffer(device, vertexBuffer, nullptr);
  vkFreeMemory(device, vertexBufferMemory, nullptr);
  for (size_t i = 0; i < MAX_FRAMES; i++) {
    vkDestroySemaphore(device, renderFinishedSemaphores[i], nullptr);
    vkDestroySemaphore(device, imageAvailableSemaphores[i], nullptr);
    vkDestroyFence(device, inFlightFences[i], nullptr);
  }
  vkDestroyCommandPool(device, commandPool, nullptr);
  vkDestroyDevice(device, nullptr);
  if (enableValidationLayers) {
    DestroyDebugUtilsMessengerEXT(instance, debugMessenger, nullptr);
  }
  vkDestroySurfaceKHR(instance, surface, nullptr);
  vkDestroyInstance(instance, nullptr);
  glfwDestroyWindow(window);
  glfwTerminate();
}
// Rebuilds the swapchain and everything derived from it after a resize.
// Blocks while the window is minimized (framebuffer size 0x0), since a
// zero-extent swapchain cannot be created.
void recreateSwapChain() {
  int width = 0, height = 0;
  do {
    glfwGetFramebufferSize(window, &width, &height);
    glfwWaitEvents();
  } while (width == 0 || height == 0);
  vkDeviceWaitIdle(device);
  cleanupSwapChain();
  // Recreate in the same dependency order as initVulkan().
  createSwapChain();
  createImageViews();
  createRenderPass();
  createGraphicsPipeline();
  createFramebuffers();
  createUniformBuffers();
  createDescriptorPool();
  createDescriptorSets();
  createCommandBuffers();
}
// Creates the VkInstance with the required extensions (and validation
// layers when enabled), then resolves the instance-level proc addresses
// needed for external-memory export and device-UUID queries.
void createInstance() {
  if (enableValidationLayers && !checkValidationLayerSupport()) {
    throw std::runtime_error(
        "validation layers requested, but not available!");
  }
  VkApplicationInfo appInfo = {};
  appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
  appInfo.pApplicationName = "Vulkan Image CUDA Interop";
  appInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0);
  appInfo.pEngineName = "No Engine";
  appInfo.engineVersion = VK_MAKE_VERSION(1, 0, 0);
  appInfo.apiVersion = VK_API_VERSION_1_0;
  VkInstanceCreateInfo createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
  createInfo.pApplicationInfo = &appInfo;
  auto extensions = getRequiredExtensions();
  createInfo.enabledExtensionCount = static_cast<uint32_t>(extensions.size());
  createInfo.ppEnabledExtensionNames = extensions.data();
  // Declared outside the if-block so it stays alive (it is chained into
  // createInfo.pNext) until vkCreateInstance returns.
  VkDebugUtilsMessengerCreateInfoEXT debugCreateInfo;
  if (enableValidationLayers) {
    createInfo.enabledLayerCount =
        static_cast<uint32_t>(validationLayers.size());
    createInfo.ppEnabledLayerNames = validationLayers.data();
    // Chaining the messenger info covers instance create/destroy itself.
    populateDebugMessengerCreateInfo(debugCreateInfo);
    createInfo.pNext = (VkDebugUtilsMessengerCreateInfoEXT*)&debugCreateInfo;
  } else {
    createInfo.enabledLayerCount = 0;
    createInfo.pNext = nullptr;
  }
  if (vkCreateInstance(&createInfo, nullptr, &instance) != VK_SUCCESS) {
    throw std::runtime_error("failed to create instance!");
  }
  // Needed to read the physical-device UUID for CUDA device matching.
  fpGetPhysicalDeviceProperties2 =
      (PFN_vkGetPhysicalDeviceProperties2)vkGetInstanceProcAddr(
          instance, "vkGetPhysicalDeviceProperties2");
  if (fpGetPhysicalDeviceProperties2 == NULL) {
    throw std::runtime_error(
        "Vulkan: Proc address for \"vkGetPhysicalDeviceProperties2KHR\" not "
        "found.\n");
  }
#ifdef _WIN64
  fpGetMemoryWin32HandleKHR =
      (PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(
          instance, "vkGetMemoryWin32HandleKHR");
  if (fpGetMemoryWin32HandleKHR == NULL) {
    throw std::runtime_error(
        "Vulkan: Proc address for \"vkGetMemoryWin32HandleKHR\" not "
        "found.\n");
  }
#else
  fpGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(
      instance, "vkGetMemoryFdKHR");
  if (fpGetMemoryFdKHR == NULL) {
    throw std::runtime_error(
        "Vulkan: Proc address for \"vkGetMemoryFdKHR\" not found.\n");
  } else {
    std::cout << "Vulkan proc address for vkGetMemoryFdKHR - "
              << fpGetMemoryFdKHR << std::endl;
  }
#endif
}
// Fills a debug-messenger create-info: zero the struct, enable every
// severity and message type this sample cares about, and route messages
// to the static debugCallback.
void populateDebugMessengerCreateInfo(
    VkDebugUtilsMessengerCreateInfoEXT& createInfo) {
  createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
  createInfo.pfnUserCallback = debugCallback;
  createInfo.messageSeverity =
      VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT |
      VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
      VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT;
  createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT |
                           VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
                           VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT;
}
// Registers the validation-layer debug messenger. No-op when validation
// layers are disabled (release builds).
void setupDebugMessenger() {
  if (!enableValidationLayers) return;
  VkDebugUtilsMessengerCreateInfoEXT createInfo;
  populateDebugMessengerCreateInfo(createInfo);
  if (CreateDebugUtilsMessengerEXT(instance, &createInfo, nullptr,
                                   &debugMessenger) != VK_SUCCESS) {
    throw std::runtime_error("failed to set up debug messenger!");
  }
}
// Creates the platform window surface via GLFW's Vulkan helper.
void createSurface() {
  VkResult result =
      glfwCreateWindowSurface(instance, window, nullptr, &surface);
  if (result != VK_SUCCESS) {
    throw std::runtime_error("failed to create window surface!");
  }
}
// Picks the first suitable physical device and captures its UUID via
// VkPhysicalDeviceIDProperties; setCudaVkDevice() later matches this UUID
// against CUDA device UUIDs so both APIs use the same GPU.
void pickPhysicalDevice() {
  uint32_t deviceCount = 0;
  vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
  if (deviceCount == 0) {
    throw std::runtime_error("failed to find GPUs with Vulkan support!");
  }
  std::vector<VkPhysicalDevice> devices(deviceCount);
  vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
  for (const auto& device : devices) {
    if (isDeviceSuitable(device)) {
      physicalDevice = device;
      break;
    }
  }
  if (physicalDevice == VK_NULL_HANDLE) {
    throw std::runtime_error("failed to find a suitable GPU!");
  }
  std::cout << "Selected physical device = " << physicalDevice << std::endl;
  // Query the device UUID through the pNext chain of properties2.
  VkPhysicalDeviceIDProperties vkPhysicalDeviceIDProperties = {};
  vkPhysicalDeviceIDProperties.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES;
  vkPhysicalDeviceIDProperties.pNext = NULL;
  VkPhysicalDeviceProperties2 vkPhysicalDeviceProperties2 = {};
  vkPhysicalDeviceProperties2.sType =
      VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
  vkPhysicalDeviceProperties2.pNext = &vkPhysicalDeviceIDProperties;
  fpGetPhysicalDeviceProperties2(physicalDevice,
                                 &vkPhysicalDeviceProperties2);
  memcpy(vkDeviceUUID, vkPhysicalDeviceIDProperties.deviceUUID,
         sizeof(vkDeviceUUID));
}
// Resolves the device-level semaphore-export entry point for the current
// platform (Win32 handle on Windows, POSIX fd elsewhere). Must run after
// createLogicalDevice().
void getKhrExtensionsFn() {
#ifdef _WIN64
  fpGetSemaphoreWin32HandleKHR =
      (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(
          device, "vkGetSemaphoreWin32HandleKHR");
  if (fpGetSemaphoreWin32HandleKHR == NULL) {
    throw std::runtime_error(
        "Vulkan: Proc address for \"vkGetSemaphoreWin32HandleKHR\" not "
        "found.\n");
  }
#else
  fpGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(
      device, "vkGetSemaphoreFdKHR");
  if (fpGetSemaphoreFdKHR == NULL) {
    throw std::runtime_error(
        "Vulkan: Proc address for \"vkGetSemaphoreFdKHR\" not found.\n");
  }
#endif
}
// Finds the CUDA device whose UUID matches the Vulkan physical device
// (captured in vkDeviceUUID by pickPhysicalDevice) and makes it current.
// Returns the CUDA device ordinal, or -1 if no matching device exists.
// Exits the process when there are no CUDA devices at all, or when every
// device is in prohibited compute mode.
int setCudaVkDevice() {
  int current_device = 0;
  int device_count = 0;
  int devices_prohibited = 0;
  cudaDeviceProp deviceProp;
  checkCudaErrors(cudaGetDeviceCount(&device_count));
  if (device_count == 0) {
    fprintf(stderr, "CUDA error: no devices supporting CUDA.\n");
    exit(EXIT_FAILURE);
  }
  // Find the GPU which is selected by Vulkan
  while (current_device < device_count) {
    // Fix: this call was previously unchecked, unlike every other CUDA
    // runtime call in this file.
    checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device));
    if ((deviceProp.computeMode != cudaComputeModeProhibited)) {
      // Compare the cuda device UUID with vulkan UUID
      int ret = memcmp(&deviceProp.uuid, &vkDeviceUUID, VK_UUID_SIZE);
      if (ret == 0) {
        checkCudaErrors(cudaSetDevice(current_device));
        checkCudaErrors(cudaGetDeviceProperties(&deviceProp, current_device));
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n",
               current_device, deviceProp.name, deviceProp.major,
               deviceProp.minor);
        return current_device;
      }
    } else {
      devices_prohibited++;
    }
    current_device++;
  }
  if (devices_prohibited == device_count) {
    fprintf(stderr,
            "CUDA error:"
            " No Vulkan-CUDA Interop capable GPU found.\n");
    exit(EXIT_FAILURE);
  }
  // No UUID match (and at least one non-prohibited device existed).
  return -1;
}
// Creates the logical device with one queue per unique queue family
// (graphics and present may be the same family), the required device
// extensions, and retrieves the queue handles.
void createLogicalDevice() {
  QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
  std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
  // std::set deduplicates when graphics and present share a family.
  std::set<int> uniqueQueueFamilies = {indices.graphicsFamily,
                                       indices.presentFamily};
  float queuePriority = 1.0f;
  for (int queueFamily : uniqueQueueFamilies) {
    VkDeviceQueueCreateInfo queueCreateInfo = {};
    queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queueCreateInfo.queueFamilyIndex = queueFamily;
    queueCreateInfo.queueCount = 1;
    queueCreateInfo.pQueuePriorities = &queuePriority;
    queueCreateInfos.push_back(queueCreateInfo);
  }
  VkPhysicalDeviceFeatures deviceFeatures = {};
  VkDeviceCreateInfo createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
  createInfo.pQueueCreateInfos = queueCreateInfos.data();
  createInfo.queueCreateInfoCount = queueCreateInfos.size();
  createInfo.pEnabledFeatures = &deviceFeatures;
  std::vector<const char*> enabledExtensionNameList;
  for (int i = 0; i < deviceExtensions.size(); i++) {
    enabledExtensionNameList.push_back(deviceExtensions[i]);
  }
  if (enableValidationLayers) {
    createInfo.enabledLayerCount =
        static_cast<uint32_t>(validationLayers.size());
    createInfo.ppEnabledLayerNames = validationLayers.data();
  } else {
    createInfo.enabledLayerCount = 0;
  }
  createInfo.enabledExtensionCount =
      static_cast<uint32_t>(enabledExtensionNameList.size());
  createInfo.ppEnabledExtensionNames = enabledExtensionNameList.data();
  if (vkCreateDevice(physicalDevice, &createInfo, nullptr, &device) !=
      VK_SUCCESS) {
    throw std::runtime_error("failed to create logical device!");
  }
  vkGetDeviceQueue(device, indices.graphicsFamily, 0, &graphicsQueue);
  vkGetDeviceQueue(device, indices.presentFamily, 0, &presentQueue);
}
// Creates the swapchain with the preferred surface format, present mode,
// and extent, requesting one image more than the minimum (clamped to the
// surface maximum), then fetches the swapchain image handles.
void createSwapChain() {
  SwapChainSupportDetails swapChainSupport =
      querySwapChainSupport(physicalDevice);
  VkSurfaceFormatKHR surfaceFormat =
      chooseSwapSurfaceFormat(swapChainSupport.formats);
  VkPresentModeKHR presentMode =
      chooseSwapPresentMode(swapChainSupport.presentModes);
  VkExtent2D extent = chooseSwapExtent(swapChainSupport.capabilities);
  // +1 over the minimum to avoid waiting on the driver; maxImageCount of 0
  // means "no limit".
  uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1;
  if (swapChainSupport.capabilities.maxImageCount > 0 &&
      imageCount > swapChainSupport.capabilities.maxImageCount) {
    imageCount = swapChainSupport.capabilities.maxImageCount;
  }
  VkSwapchainCreateInfoKHR createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
  createInfo.surface = surface;
  createInfo.minImageCount = imageCount;
  createInfo.imageFormat = surfaceFormat.format;
  createInfo.imageColorSpace = surfaceFormat.colorSpace;
  createInfo.imageExtent = extent;
  createInfo.imageArrayLayers = 1;
  createInfo.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
  QueueFamilyIndices indices = findQueueFamilies(physicalDevice);
  uint32_t queueFamilyIndices[] = {(uint32_t)indices.graphicsFamily,
                                   (uint32_t)indices.presentFamily};
  // Concurrent sharing avoids explicit ownership transfers when graphics
  // and present live in different queue families.
  if (indices.graphicsFamily != indices.presentFamily) {
    createInfo.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
    createInfo.queueFamilyIndexCount = 2;
    createInfo.pQueueFamilyIndices = queueFamilyIndices;
  } else {
    createInfo.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
  }
  createInfo.preTransform = swapChainSupport.capabilities.currentTransform;
  createInfo.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
  createInfo.presentMode = presentMode;
  createInfo.clipped = VK_TRUE;
  if (vkCreateSwapchainKHR(device, &createInfo, nullptr, &swapChain) !=
      VK_SUCCESS) {
    throw std::runtime_error("failed to create swap chain!");
  }
  // The implementation may create more images than requested; query twice.
  vkGetSwapchainImagesKHR(device, swapChain, &imageCount, nullptr);
  swapChainImages.resize(imageCount);
  vkGetSwapchainImagesKHR(device, swapChain, &imageCount,
                          swapChainImages.data());
  swapChainImageFormat = surfaceFormat.format;
  swapChainExtent = extent;
}
// Creates one image view per swapchain image (all share the swapchain
// format).
void createImageViews() {
  swapChainImageViews.clear();
  swapChainImageViews.reserve(swapChainImages.size());
  for (const auto& image : swapChainImages) {
    swapChainImageViews.push_back(
        createImageView(image, swapChainImageFormat));
  }
}
// Creates a single-subpass render pass with one color attachment that is
// cleared on load and transitioned to present layout on store, plus an
// external dependency so the attachment write waits for image acquisition.
void createRenderPass() {
  VkAttachmentDescription colorAttachment = {};
  colorAttachment.format = swapChainImageFormat;
  colorAttachment.samples = VK_SAMPLE_COUNT_1_BIT;
  colorAttachment.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
  colorAttachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
  colorAttachment.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
  colorAttachment.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
  colorAttachment.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  colorAttachment.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
  VkAttachmentReference colorAttachmentRef = {};
  colorAttachmentRef.attachment = 0;
  colorAttachmentRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
  VkSubpassDescription subpass = {};
  subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
  subpass.colorAttachmentCount = 1;
  subpass.pColorAttachments = &colorAttachmentRef;
  // Wait on the swapchain image before writing color output.
  VkSubpassDependency dependency = {};
  dependency.srcSubpass = VK_SUBPASS_EXTERNAL;
  dependency.dstSubpass = 0;
  dependency.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
  dependency.srcAccessMask = 0;
  dependency.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
  dependency.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
  VkRenderPassCreateInfo renderPassInfo = {};
  renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
  renderPassInfo.attachmentCount = 1;
  renderPassInfo.pAttachments = &colorAttachment;
  renderPassInfo.subpassCount = 1;
  renderPassInfo.pSubpasses = &subpass;
  renderPassInfo.dependencyCount = 1;
  renderPassInfo.pDependencies = &dependency;
  if (vkCreateRenderPass(device, &renderPassInfo, nullptr, &renderPass) !=
      VK_SUCCESS) {
    throw std::runtime_error("failed to create render pass!");
  }
}
// Declares the shader resource layout: binding 0 is a uniform buffer for
// the vertex stage (MVP matrices), binding 1 is a combined image sampler
// for the fragment stage (the CUDA-filtered texture).
void createDescriptorSetLayout() {
  VkDescriptorSetLayoutBinding uboLayoutBinding = {};
  uboLayoutBinding.binding = 0;
  uboLayoutBinding.descriptorCount = 1;
  uboLayoutBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
  uboLayoutBinding.pImmutableSamplers = nullptr;
  uboLayoutBinding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
  VkDescriptorSetLayoutBinding samplerLayoutBinding = {};
  samplerLayoutBinding.binding = 1;
  samplerLayoutBinding.descriptorCount = 1;
  samplerLayoutBinding.descriptorType =
      VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
  samplerLayoutBinding.pImmutableSamplers = nullptr;
  samplerLayoutBinding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
  std::array<VkDescriptorSetLayoutBinding, 2> bindings = {
      uboLayoutBinding, samplerLayoutBinding};
  VkDescriptorSetLayoutCreateInfo layoutInfo = {};
  layoutInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
  layoutInfo.bindingCount = static_cast<uint32_t>(bindings.size());
  layoutInfo.pBindings = bindings.data();
  if (vkCreateDescriptorSetLayout(device, &layoutInfo, nullptr,
                                  &descriptorSetLayout) != VK_SUCCESS) {
    throw std::runtime_error("failed to create descriptor set layout!");
  }
}
// Builds the single graphics pipeline: textured-quad vertex/fragment
// shaders, fixed-function state for a fullscreen textured draw, no depth
// testing or blending. All the *CreateInfo locals must stay alive until
// vkCreateGraphicsPipelines returns, since pipelineInfo holds pointers
// into them.
void createGraphicsPipeline() {
  // NOTE(review): shaders are loaded by relative path — presumably SPIR-V
  // binaries despite the .vert/.frag names; confirm against readFile().
  auto vertShaderCode = readFile("shader.vert");
  auto fragShaderCode = readFile("shader.frag");
  VkShaderModule vertShaderModule = createShaderModule(vertShaderCode);
  VkShaderModule fragShaderModule = createShaderModule(fragShaderCode);
  VkPipelineShaderStageCreateInfo vertShaderStageInfo = {};
  vertShaderStageInfo.sType =
      VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
  vertShaderStageInfo.stage = VK_SHADER_STAGE_VERTEX_BIT;
  vertShaderStageInfo.module = vertShaderModule;
  vertShaderStageInfo.pName = "main";
  VkPipelineShaderStageCreateInfo fragShaderStageInfo = {};
  fragShaderStageInfo.sType =
      VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
  fragShaderStageInfo.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
  fragShaderStageInfo.module = fragShaderModule;
  fragShaderStageInfo.pName = "main";
  VkPipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo,
                                                    fragShaderStageInfo};
  // Vertex layout comes from the Vertex struct defined elsewhere in file.
  VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
  vertexInputInfo.sType =
      VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
  auto bindingDescription = Vertex::getBindingDescription();
  auto attributeDescriptions = Vertex::getAttributeDescriptions();
  vertexInputInfo.vertexBindingDescriptionCount = 1;
  vertexInputInfo.vertexAttributeDescriptionCount =
      static_cast<uint32_t>(attributeDescriptions.size());
  vertexInputInfo.pVertexBindingDescriptions = &bindingDescription;
  vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
  VkPipelineInputAssemblyStateCreateInfo inputAssembly = {};
  inputAssembly.sType =
      VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
  inputAssembly.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
  inputAssembly.primitiveRestartEnable = VK_FALSE;
  // Static viewport/scissor covering the full swapchain extent; the
  // pipeline is rebuilt on resize (see recreateSwapChain).
  VkViewport viewport = {};
  viewport.x = 0.0f;
  viewport.y = 0.0f;
  viewport.width = (float)swapChainExtent.width;
  viewport.height = (float)swapChainExtent.height;
  viewport.minDepth = 0.0f;
  viewport.maxDepth = 1.0f;
  VkRect2D scissor = {};
  scissor.offset = {0, 0};
  scissor.extent = swapChainExtent;
  VkPipelineViewportStateCreateInfo viewportState = {};
  viewportState.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
  viewportState.viewportCount = 1;
  viewportState.pViewports = &viewport;
  viewportState.scissorCount = 1;
  viewportState.pScissors = &scissor;
  VkPipelineRasterizationStateCreateInfo rasterizer = {};
  rasterizer.sType =
      VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
  rasterizer.depthClampEnable = VK_FALSE;
  rasterizer.rasterizerDiscardEnable = VK_FALSE;
  rasterizer.polygonMode = VK_POLYGON_MODE_FILL;
  rasterizer.lineWidth = 1.0f;
  rasterizer.cullMode = VK_CULL_MODE_BACK_BIT;
  rasterizer.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
  rasterizer.depthBiasEnable = VK_FALSE;
  VkPipelineMultisampleStateCreateInfo multisampling = {};
  multisampling.sType =
      VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
  multisampling.sampleShadingEnable = VK_FALSE;
  multisampling.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
  // Blending disabled: fragment output overwrites the attachment.
  VkPipelineColorBlendAttachmentState colorBlendAttachment = {};
  colorBlendAttachment.colorWriteMask =
      VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
      VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
  colorBlendAttachment.blendEnable = VK_FALSE;
  VkPipelineColorBlendStateCreateInfo colorBlending = {};
  colorBlending.sType =
      VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
  colorBlending.logicOpEnable = VK_FALSE;
  colorBlending.logicOp = VK_LOGIC_OP_COPY;
  colorBlending.attachmentCount = 1;
  colorBlending.pAttachments = &colorBlendAttachment;
  colorBlending.blendConstants[0] = 0.0f;
  colorBlending.blendConstants[1] = 0.0f;
  colorBlending.blendConstants[2] = 0.0f;
  colorBlending.blendConstants[3] = 0.0f;
  VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
  pipelineLayoutInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
  pipelineLayoutInfo.setLayoutCount = 1;
  pipelineLayoutInfo.pSetLayouts = &descriptorSetLayout;
  if (vkCreatePipelineLayout(device, &pipelineLayoutInfo, nullptr,
                             &pipelineLayout) != VK_SUCCESS) {
    throw std::runtime_error("failed to create pipeline layout!");
  }
  VkGraphicsPipelineCreateInfo pipelineInfo = {};
  pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
  pipelineInfo.stageCount = 2;
  pipelineInfo.pStages = shaderStages;
  pipelineInfo.pVertexInputState = &vertexInputInfo;
  pipelineInfo.pInputAssemblyState = &inputAssembly;
  pipelineInfo.pViewportState = &viewportState;
  pipelineInfo.pRasterizationState = &rasterizer;
  pipelineInfo.pMultisampleState = &multisampling;
  pipelineInfo.pColorBlendState = &colorBlending;
  pipelineInfo.layout = pipelineLayout;
  pipelineInfo.renderPass = renderPass;
  pipelineInfo.subpass = 0;
  pipelineInfo.basePipelineHandle = VK_NULL_HANDLE;
  if (vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &pipelineInfo,
                                nullptr, &graphicsPipeline) != VK_SUCCESS) {
    throw std::runtime_error("failed to create graphics pipeline!");
  }
  // Shader modules are only needed during pipeline creation.
  vkDestroyShaderModule(device, fragShaderModule, nullptr);
  vkDestroyShaderModule(device, vertShaderModule, nullptr);
}
// Creates one framebuffer per swapchain image view. The create-info is
// identical for every framebuffer except the attachment pointer, so the
// invariant fields are filled once outside the loop.
void createFramebuffers() {
  swapChainFramebuffers.resize(swapChainImageViews.size());
  VkFramebufferCreateInfo framebufferInfo = {};
  framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
  framebufferInfo.renderPass = renderPass;
  framebufferInfo.attachmentCount = 1;
  framebufferInfo.width = swapChainExtent.width;
  framebufferInfo.height = swapChainExtent.height;
  framebufferInfo.layers = 1;
  for (size_t idx = 0; idx < swapChainImageViews.size(); ++idx) {
    framebufferInfo.pAttachments = &swapChainImageViews[idx];
    if (vkCreateFramebuffer(device, &framebufferInfo, nullptr,
                            &swapChainFramebuffers[idx]) != VK_SUCCESS) {
      throw std::runtime_error("failed to create framebuffer!");
    }
  }
}
// Creates the command pool on the graphics queue family (no reset/transient
// flags; command buffers are recorded once and reused).
void createCommandPool() {
  QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice);
  VkCommandPoolCreateInfo poolInfo = {};
  poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
  poolInfo.queueFamilyIndex = queueFamilyIndices.graphicsFamily;
  VkResult result =
      vkCreateCommandPool(device, &poolInfo, nullptr, &commandPool);
  if (result != VK_SUCCESS) {
    throw std::runtime_error("failed to create graphics command pool!");
  }
}
// Uploads the loaded image into a device-local, mipmapped VkImage that is
// shared with CUDA: stage into a host-visible buffer, copy to the image,
// and generate the mip chain. Uses a UINT format so CUDA can alias the
// texels as packed integers.
void createTextureImage() {
  VkDeviceSize imageSize = imageWidth * imageHeight * 4;
  // Full mip chain down to 1x1.
  mipLevels = static_cast<uint32_t>(
                  std::floor(std::log2(std::max(imageWidth, imageHeight)))) +
              1;
  printf("mipLevels = %d\n", mipLevels);
  if (!image_data) {
    throw std::runtime_error("failed to load texture image!");
  }
  VkBuffer stagingBuffer;
  VkDeviceMemory stagingBufferMemory;
  createBuffer(imageSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
               VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                   VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
               stagingBuffer, stagingBufferMemory);
  void* data;
  vkMapMemory(device, stagingBufferMemory, 0, imageSize, 0, &data);
  memcpy(data, image_data, static_cast<size_t>(imageSize));
  vkUnmapMemory(device, stagingBufferMemory);
  // VK_FORMAT_R8G8B8A8_UNORM changed to VK_FORMAT_R8G8B8A8_UINT
  createImage(
      imageWidth, imageHeight, VK_FORMAT_R8G8B8A8_UINT,
      VK_IMAGE_TILING_OPTIMAL,
      VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
          VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, textureImage, textureImageMemory);
  transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
                        VK_IMAGE_LAYOUT_UNDEFINED,
                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
  copyBufferToImage(stagingBuffer, textureImage,
                    static_cast<uint32_t>(imageWidth),
                    static_cast<uint32_t>(imageHeight));
  transitionImageLayout(textureImage, VK_FORMAT_R8G8B8A8_UINT,
                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  vkDestroyBuffer(device, stagingBuffer, nullptr);
  vkFreeMemory(device, stagingBufferMemory, nullptr);
  // NOTE(review): the image was created as R8G8B8A8_UINT but the mipmap
  // format-feature check below is done against R8G8B8A8_UNORM, and
  // generateMipmaps blits with VK_FILTER_LINEAR — verify this against the
  // Vulkan spec's blit format requirements before relying on it.
  generateMipmaps(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
// Generates the mip chain on the GPU: for each level i, transition level
// i-1 to TRANSFER_SRC, blit it (linear filter, half size) into level i,
// then transition i-1 to SHADER_READ_ONLY. Assumes every level except 0
// is currently in TRANSFER_DST layout. `imageFormat` is only used for the
// linear-blit capability check.
void generateMipmaps(VkImage image, VkFormat imageFormat) {
  VkFormatProperties formatProperties;
  vkGetPhysicalDeviceFormatProperties(physicalDevice, imageFormat,
                                      &formatProperties);
  if (!(formatProperties.optimalTilingFeatures &
        VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) {
    throw std::runtime_error(
        "texture image format does not support linear blitting!");
  }
  VkCommandBuffer commandBuffer = beginSingleTimeCommands();
  // One reusable barrier; only baseMipLevel/layouts/access masks change.
  VkImageMemoryBarrier barrier = {};
  barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
  barrier.image = image;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  barrier.subresourceRange.baseArrayLayer = 0;
  barrier.subresourceRange.layerCount = 1;
  barrier.subresourceRange.levelCount = 1;
  int32_t mipWidth = imageWidth;
  int32_t mipHeight = imageHeight;
  for (uint32_t i = 1; i < mipLevels; i++) {
    // Make level i-1 readable as a blit source.
    barrier.subresourceRange.baseMipLevel = i - 1;
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0,
                         nullptr, 1, &barrier);
    // Downsample level i-1 into level i (dimensions halve, min 1).
    VkImageBlit blit = {};
    blit.srcOffsets[0] = {0, 0, 0};
    blit.srcOffsets[1] = {mipWidth, mipHeight, 1};
    blit.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    blit.srcSubresource.mipLevel = i - 1;
    blit.srcSubresource.baseArrayLayer = 0;
    blit.srcSubresource.layerCount = 1;
    blit.dstOffsets[0] = {0, 0, 0};
    blit.dstOffsets[1] = {mipWidth > 1 ? mipWidth / 2 : 1,
                          mipHeight > 1 ? mipHeight / 2 : 1, 1};
    blit.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    blit.dstSubresource.mipLevel = i;
    blit.dstSubresource.baseArrayLayer = 0;
    blit.dstSubresource.layerCount = 1;
    vkCmdBlitImage(commandBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                   image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit,
                   VK_FILTER_LINEAR);
    // Level i-1 is finished; hand it to the fragment shader.
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
                         0, nullptr, 1, &barrier);
    if (mipWidth > 1) mipWidth /= 2;
    if (mipHeight > 1) mipHeight /= 2;
  }
  // The last level was only ever a blit destination; transition it too.
  barrier.subresourceRange.baseMipLevel = mipLevels - 1;
  barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
  barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
  barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
  barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
  vkCmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                       VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, 0, nullptr,
                       0, nullptr, 1, &barrier);
  endSingleTimeCommands(commandBuffer);
}
// ---------------------------------------------------------------------
// Exporters that hand Vulkan objects to CUDA. On Windows they return a
// Win32 HANDLE, elsewhere an opaque file descriptor.
// ---------------------------------------------------------------------
#ifdef _WIN64 // For windows
// Exports the texture image's device memory (always the
// textureImageMemory member, regardless of argument) as a Win32 handle
// of the requested external-memory type.
// NOTE(review): the fpGetMemoryWin32HandleKHR result is unchecked; on
// failure the returned handle is uninitialized -- confirm acceptable.
HANDLE getVkImageMemHandle(
    VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
  HANDLE handle;
  VkMemoryGetWin32HandleInfoKHR vkMemoryGetWin32HandleInfoKHR = {};
  vkMemoryGetWin32HandleInfoKHR.sType =
      VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR;
  vkMemoryGetWin32HandleInfoKHR.pNext = NULL;
  // Always exports the texture's allocation, not a caller-chosen one.
  vkMemoryGetWin32HandleInfoKHR.memory = textureImageMemory;
  vkMemoryGetWin32HandleInfoKHR.handleType =
      (VkExternalMemoryHandleTypeFlagBitsKHR)externalMemoryHandleType;
  fpGetMemoryWin32HandleKHR(device, &vkMemoryGetWin32HandleInfoKHR, &handle);
  return handle;
}
// Exports the given semaphore as a Win32 handle of the requested type so
// CUDA can import it.
HANDLE getVkSemaphoreHandle(
    VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
    VkSemaphore& semVkCuda) {
  HANDLE handle;
  VkSemaphoreGetWin32HandleInfoKHR vulkanSemaphoreGetWin32HandleInfoKHR = {};
  vulkanSemaphoreGetWin32HandleInfoKHR.sType =
      VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR;
  vulkanSemaphoreGetWin32HandleInfoKHR.pNext = NULL;
  vulkanSemaphoreGetWin32HandleInfoKHR.semaphore = semVkCuda;
  vulkanSemaphoreGetWin32HandleInfoKHR.handleType =
      externalSemaphoreHandleType;
  fpGetSemaphoreWin32HandleKHR(device, &vulkanSemaphoreGetWin32HandleInfoKHR,
                               &handle);
  return handle;
}
#else
// Exports the texture image's device memory (always the
// textureImageMemory member) as an opaque file descriptor.
// Returns -1 for any handle type other than opaque FD.
int getVkImageMemHandle(
    VkExternalMemoryHandleTypeFlagsKHR externalMemoryHandleType) {
  if (externalMemoryHandleType ==
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
    int fd;
    VkMemoryGetFdInfoKHR vkMemoryGetFdInfoKHR = {};
    vkMemoryGetFdInfoKHR.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR;
    vkMemoryGetFdInfoKHR.pNext = NULL;
    vkMemoryGetFdInfoKHR.memory = textureImageMemory;
    vkMemoryGetFdInfoKHR.handleType =
        VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
    // NOTE(review): fpGetMemoryFdKHR result unchecked -- fd may be
    // uninitialized on failure; confirm acceptable for this sample.
    fpGetMemoryFdKHR(device, &vkMemoryGetFdInfoKHR, &fd);
    return fd;
  }
  return -1;
}
// Exports the given semaphore as an opaque file descriptor so CUDA can
// import it. Returns -1 for unsupported handle types.
int getVkSemaphoreHandle(
    VkExternalSemaphoreHandleTypeFlagBitsKHR externalSemaphoreHandleType,
    VkSemaphore& semVkCuda) {
  if (externalSemaphoreHandleType ==
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
    int fd;
    VkSemaphoreGetFdInfoKHR vulkanSemaphoreGetFdInfoKHR = {};
    vulkanSemaphoreGetFdInfoKHR.sType =
        VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    vulkanSemaphoreGetFdInfoKHR.pNext = NULL;
    vulkanSemaphoreGetFdInfoKHR.semaphore = semVkCuda;
    vulkanSemaphoreGetFdInfoKHR.handleType =
        VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
    fpGetSemaphoreFdKHR(device, &vulkanSemaphoreGetFdInfoKHR, &fd);
    return fd;
  }
  return -1;
}
#endif
void createTextureImageView() {
textureImageView = createImageView(textureImage, VK_FORMAT_R8G8B8A8_UNORM);
}
void createTextureSampler() {
VkSamplerCreateInfo samplerInfo = {};
samplerInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
samplerInfo.magFilter = VK_FILTER_LINEAR;
samplerInfo.minFilter = VK_FILTER_LINEAR;
samplerInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
samplerInfo.anisotropyEnable = VK_TRUE;
samplerInfo.maxAnisotropy = 16;
samplerInfo.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
samplerInfo.unnormalizedCoordinates = VK_FALSE;
samplerInfo.compareEnable = VK_FALSE;
samplerInfo.compareOp = VK_COMPARE_OP_ALWAYS;
samplerInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
samplerInfo.minLod = 0; // Optional
samplerInfo.maxLod = static_cast<float>(mipLevels);
samplerInfo.mipLodBias = 0; // Optional
if (vkCreateSampler(device, &samplerInfo, nullptr, &textureSampler) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create texture sampler!");
}
}
// Wraps `image` in a 2D, single-layer color view of the given format
// spanning every mip level (0..mipLevels).
// Returns the new view; throws std::runtime_error on failure.
VkImageView createImageView(VkImage image, VkFormat format) {
  VkImageViewCreateInfo createInfo = {};
  createInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
  createInfo.image = image;
  createInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
  createInfo.format = format;
  // Color aspect, all mips, one array layer.
  createInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  createInfo.subresourceRange.baseMipLevel = 0;
  createInfo.subresourceRange.levelCount = mipLevels;
  createInfo.subresourceRange.baseArrayLayer = 0;
  createInfo.subresourceRange.layerCount = 1;

  VkImageView view;
  VkResult res = vkCreateImageView(device, &createInfo, nullptr, &view);
  if (res != VK_SUCCESS) {
    throw std::runtime_error("failed to create texture image view!");
  }
  return view;
}
// Creates a 2D image (mipLevels levels, 1 sample, exclusive sharing) whose
// backing memory is allocated as exportable — opaque FD on Linux, Win32
// handle (KMT pre-Win8) on Windows — so CUDA can import it later.
//
// width/height: image extent. format/tiling/usage/properties: forwarded to
// the Vulkan create/allocate calls. The allocation size is recorded in
// totalImageMemSize for the CUDA import.
// NOTE(review): the `imageMemory` out-parameter is never written — the
// allocation always lands in the textureImageMemory member and is bound to
// `image`; confirm callers expect this.
// NOTE(review): vkExternalMemImageCreateInfo.handleTypes is opaque-FD even
// when _WIN64 is defined; should presumably be OPAQUE_WIN32 there — confirm.
// Throws std::runtime_error if image creation or allocation fails.
void createImage(uint32_t width, uint32_t height, VkFormat format,
                 VkImageTiling tiling, VkImageUsageFlags usage,
                 VkMemoryPropertyFlags properties, VkImage& image,
                 VkDeviceMemory& imageMemory) {
  VkImageCreateInfo imageInfo = {};
  imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
  imageInfo.imageType = VK_IMAGE_TYPE_2D;
  imageInfo.extent.width = width;
  imageInfo.extent.height = height;
  imageInfo.extent.depth = 1;
  imageInfo.mipLevels = mipLevels;
  imageInfo.arrayLayers = 1;
  imageInfo.format = format;
  imageInfo.tiling = tiling;
  imageInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
  imageInfo.usage = usage;
  imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
  imageInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
  // Declare at create time that this image's memory will be exported.
  VkExternalMemoryImageCreateInfo vkExternalMemImageCreateInfo = {};
  vkExternalMemImageCreateInfo.sType =
      VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO;
  vkExternalMemImageCreateInfo.pNext = NULL;
  vkExternalMemImageCreateInfo.handleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
  imageInfo.pNext = &vkExternalMemImageCreateInfo;
  if (vkCreateImage(device, &imageInfo, nullptr, &image) != VK_SUCCESS) {
    throw std::runtime_error("failed to create image!");
  }
  VkMemoryRequirements memRequirements;
  vkGetImageMemoryRequirements(device, image, &memRequirements);
#ifdef _WIN64
  WindowsSecurityAttributes winSecurityAttributes;
  VkExportMemoryWin32HandleInfoKHR vulkanExportMemoryWin32HandleInfoKHR = {};
  vulkanExportMemoryWin32HandleInfoKHR.sType =
      VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR;
  vulkanExportMemoryWin32HandleInfoKHR.pNext = NULL;
  vulkanExportMemoryWin32HandleInfoKHR.pAttributes = &winSecurityAttributes;
  vulkanExportMemoryWin32HandleInfoKHR.dwAccess =
      DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
  vulkanExportMemoryWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
  // Chain the export info into the allocation so the memory itself is
  // exportable with the platform-appropriate handle type.
  VkExportMemoryAllocateInfoKHR vulkanExportMemoryAllocateInfoKHR = {};
  vulkanExportMemoryAllocateInfoKHR.sType =
      VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
#ifdef _WIN64
  vulkanExportMemoryAllocateInfoKHR.pNext =
      IsWindows8OrGreater() ? &vulkanExportMemoryWin32HandleInfoKHR : NULL;
  vulkanExportMemoryAllocateInfoKHR.handleTypes =
      IsWindows8OrGreater()
          ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
          : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
  vulkanExportMemoryAllocateInfoKHR.pNext = NULL;
  vulkanExportMemoryAllocateInfoKHR.handleTypes =
      VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
  VkMemoryAllocateInfo allocInfo = {};
  allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
  allocInfo.allocationSize = memRequirements.size;
  allocInfo.pNext = &vulkanExportMemoryAllocateInfoKHR;
  allocInfo.memoryTypeIndex =
      findMemoryType(memRequirements.memoryTypeBits, properties);
  // Reuse the requirements queried above (the original queried twice);
  // CUDA needs the full allocation size when importing this memory.
  totalImageMemSize = memRequirements.size;
  if (vkAllocateMemory(device, &allocInfo, nullptr, &textureImageMemory) !=
      VK_SUCCESS) {
    throw std::runtime_error("failed to allocate image memory!");
  }
  vkBindImageMemory(device, image, textureImageMemory, 0);
}
// Imports the two exported Vulkan semaphores into CUDA:
//   cudaUpdateVkSemaphore -> cudaExtCudaUpdateVkSemaphore
//       (CUDA signals after filtering; Vulkan waits before rendering)
//   vkUpdateCudaSemaphore -> cudaExtVkUpdateCudaSemaphore
//       (Vulkan signals after rendering; CUDA waits before filtering)
// Handle type matches the platform: opaque Win32 handle (KMT on pre-Win8)
// on Windows, opaque file descriptor elsewhere.
// (Fixed: removed a stray empty statement after the second type assignment.)
void cudaVkImportSemaphore() {
  cudaExternalSemaphoreHandleDesc externalSemaphoreHandleDesc;
  memset(&externalSemaphoreHandleDesc, 0,
         sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
  externalSemaphoreHandleDesc.type =
      IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
                            : cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
  externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
      IsWindows8OrGreater()
          ? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
          : VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
      cudaUpdateVkSemaphore);
#else
  externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
  externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, cudaUpdateVkSemaphore);
#endif
  externalSemaphoreHandleDesc.flags = 0;
  checkCudaErrors(cudaImportExternalSemaphore(&cudaExtCudaUpdateVkSemaphore,
                                              &externalSemaphoreHandleDesc));
  // Reuse the descriptor for the second (Vulkan -> CUDA) semaphore.
  memset(&externalSemaphoreHandleDesc, 0,
         sizeof(externalSemaphoreHandleDesc));
#ifdef _WIN64
  externalSemaphoreHandleDesc.type =
      IsWindows8OrGreater() ? cudaExternalSemaphoreHandleTypeOpaqueWin32
                            : cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt;
  externalSemaphoreHandleDesc.handle.win32.handle = getVkSemaphoreHandle(
      IsWindows8OrGreater()
          ? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
          : VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
      vkUpdateCudaSemaphore);
#else
  externalSemaphoreHandleDesc.type = cudaExternalSemaphoreHandleTypeOpaqueFd;
  externalSemaphoreHandleDesc.handle.fd = getVkSemaphoreHandle(
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT, vkUpdateCudaSemaphore);
#endif
  externalSemaphoreHandleDesc.flags = 0;
  checkCudaErrors(cudaImportExternalSemaphore(&cudaExtVkUpdateCudaSemaphore,
                                              &externalSemaphoreHandleDesc));
  printf("CUDA Imported Vulkan semaphore\n");
}
// Imports the exported Vulkan image memory into CUDA as a mipmapped array,
// snapshots the original texels, and builds the surface/texture objects the
// box-filter kernels use.
//
// Members written: cudaExtMemImageBuffer, cudaMipmappedImageArray,
// cudaMipmappedImageArrayTemp, cudaMipmappedImageArrayOrig,
// surfaceObjectList / surfaceObjectListTemp, textureObjMipMapInput,
// d_surfaceObjectList / d_surfaceObjectListTemp.
void cudaVkImportImageMem() {
  cudaExternalMemoryHandleDesc cudaExtMemHandleDesc;
  memset(&cudaExtMemHandleDesc, 0, sizeof(cudaExtMemHandleDesc));
#ifdef _WIN64
  // Win32 (or KMT on pre-Win8) handle exported by getVkImageMemHandle().
  cudaExtMemHandleDesc.type =
      IsWindows8OrGreater() ? cudaExternalMemoryHandleTypeOpaqueWin32
                            : cudaExternalMemoryHandleTypeOpaqueWin32Kmt;
  cudaExtMemHandleDesc.handle.win32.handle = getVkImageMemHandle(
      IsWindows8OrGreater()
          ? VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT
          : VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT);
#else
  // Opaque file descriptor exported by getVkImageMemHandle().
  cudaExtMemHandleDesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
  cudaExtMemHandleDesc.handle.fd =
      getVkImageMemHandle(VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR);
#endif
  // totalImageMemSize was recorded when the Vulkan allocation was made.
  cudaExtMemHandleDesc.size = totalImageMemSize;
  checkCudaErrors(cudaImportExternalMemory(&cudaExtMemImageBuffer,
                                           &cudaExtMemHandleDesc));
  cudaExternalMemoryMipmappedArrayDesc externalMemoryMipmappedArrayDesc;
  memset(&externalMemoryMipmappedArrayDesc, 0,
         sizeof(externalMemoryMipmappedArrayDesc));
  // Depth 0 => 2D; 8/8/8/8 unsigned matches the RGBA8 UNORM Vulkan format
  // used by the texture views.
  cudaExtent extent = make_cudaExtent(imageWidth, imageHeight, 0);
  cudaChannelFormatDesc formatDesc;
  formatDesc.x = 8;
  formatDesc.y = 8;
  formatDesc.z = 8;
  formatDesc.w = 8;
  formatDesc.f = cudaChannelFormatKindUnsigned;
  externalMemoryMipmappedArrayDesc.offset = 0;
  externalMemoryMipmappedArrayDesc.formatDesc = formatDesc;
  externalMemoryMipmappedArrayDesc.extent = extent;
  externalMemoryMipmappedArrayDesc.flags = 0;
  externalMemoryMipmappedArrayDesc.numLevels = mipLevels;
  // View the imported Vulkan allocation as a CUDA mipmapped array.
  checkCudaErrors(cudaExternalMemoryGetMappedMipmappedArray(
      &cudaMipmappedImageArray, cudaExtMemImageBuffer,
      &externalMemoryMipmappedArrayDesc));
  // Scratch array for the filter's intermediate pass plus a copy of the
  // unfiltered original texels.
  checkCudaErrors(cudaMallocMipmappedArray(&cudaMipmappedImageArrayTemp,
                                           &formatDesc, extent, mipLevels));
  checkCudaErrors(cudaMallocMipmappedArray(&cudaMipmappedImageArrayOrig,
                                           &formatDesc, extent, mipLevels));
  for (int mipLevelIdx = 0; mipLevelIdx < mipLevels; mipLevelIdx++) {
    cudaArray_t cudaMipLevelArray, cudaMipLevelArrayTemp,
        cudaMipLevelArrayOrig;
    cudaResourceDesc resourceDesc;
    checkCudaErrors(cudaGetMipmappedArrayLevel(
        &cudaMipLevelArray, cudaMipmappedImageArray, mipLevelIdx));
    checkCudaErrors(cudaGetMipmappedArrayLevel(
        &cudaMipLevelArrayTemp, cudaMipmappedImageArrayTemp, mipLevelIdx));
    checkCudaErrors(cudaGetMipmappedArrayLevel(
        &cudaMipLevelArrayOrig, cudaMipmappedImageArrayOrig, mipLevelIdx));
    // Mip dimensions halve per level, clamped to at least 1 texel.
    uint32_t width =
        (imageWidth >> mipLevelIdx) ? (imageWidth >> mipLevelIdx) : 1;
    uint32_t height =
        (imageHeight >> mipLevelIdx) ? (imageHeight >> mipLevelIdx) : 1;
    // Snapshot this level's pristine texels before any filtering runs.
    checkCudaErrors(cudaMemcpy2DArrayToArray(
        cudaMipLevelArrayOrig, 0, 0, cudaMipLevelArray, 0, 0,
        width * sizeof(uchar4), height, cudaMemcpyDeviceToDevice));
    // Surface object over the Vulkan-visible level (kernel output).
    memset(&resourceDesc, 0, sizeof(resourceDesc));
    resourceDesc.resType = cudaResourceTypeArray;
    resourceDesc.res.array.array = cudaMipLevelArray;
    cudaSurfaceObject_t surfaceObject;
    checkCudaErrors(cudaCreateSurfaceObject(&surfaceObject, &resourceDesc));
    surfaceObjectList.push_back(surfaceObject);
    // Surface object over the scratch level (intermediate pass output).
    memset(&resourceDesc, 0, sizeof(resourceDesc));
    resourceDesc.resType = cudaResourceTypeArray;
    resourceDesc.res.array.array = cudaMipLevelArrayTemp;
    cudaSurfaceObject_t surfaceObjectTemp;
    checkCudaErrors(
        cudaCreateSurfaceObject(&surfaceObjectTemp, &resourceDesc));
    surfaceObjectListTemp.push_back(surfaceObjectTemp);
  }
  // Texture object over the unfiltered original: normalized coordinates,
  // linear filtering between texels and mips, normalized-float reads.
  cudaResourceDesc resDescr;
  memset(&resDescr, 0, sizeof(cudaResourceDesc));
  resDescr.resType = cudaResourceTypeMipmappedArray;
  resDescr.res.mipmap.mipmap = cudaMipmappedImageArrayOrig;
  cudaTextureDesc texDescr;
  memset(&texDescr, 0, sizeof(cudaTextureDesc));
  texDescr.normalizedCoords = true;
  texDescr.filterMode = cudaFilterModeLinear;
  texDescr.mipmapFilterMode = cudaFilterModeLinear;
  texDescr.addressMode[0] = cudaAddressModeWrap;
  texDescr.addressMode[1] = cudaAddressModeWrap;
  texDescr.maxMipmapLevelClamp = float(mipLevels - 1);
  texDescr.readMode = cudaReadModeNormalizedFloat;
  checkCudaErrors(cudaCreateTextureObject(&textureObjMipMapInput, &resDescr,
                                          &texDescr, NULL));
  // Device-side copies of the surface-object lists (one entry per mip)
  // so the kernels can index them by level.
  checkCudaErrors(cudaMalloc((void**)&d_surfaceObjectList,
                             sizeof(cudaSurfaceObject_t) * mipLevels));
  checkCudaErrors(cudaMalloc((void**)&d_surfaceObjectListTemp,
                             sizeof(cudaSurfaceObject_t) * mipLevels));
  checkCudaErrors(cudaMemcpy(d_surfaceObjectList, surfaceObjectList.data(),
                             sizeof(cudaSurfaceObject_t) * mipLevels,
                             cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(
      d_surfaceObjectListTemp, surfaceObjectListTemp.data(),
      sizeof(cudaSurfaceObject_t) * mipLevels, cudaMemcpyHostToDevice));
  printf("CUDA Kernel Vulkan image buffer\n");
}
// Runs one round of the CUDA box filter on the shared texture image.
// Waits on the Vulkan->CUDA semaphore, launches the separable x- then
// y-pass kernels on streamToRun (the y-pass writes the Vulkan-visible
// surfaces), calls varySigma() (presumably animating filter_radius --
// confirm in its definition), then signals the CUDA->Vulkan semaphore.
void cudaUpdateVkImage() {
  cudaVkSemaphoreWait(cudaExtVkUpdateCudaSemaphore);
  int nthreads = 128;
  /* Perform 2D box filter on image using CUDA */
  // x-pass: reads the pristine texture, writes the intermediate surfaces.
  // NOTE(review): grid sizes assume imageHeight/imageWidth are multiples
  // of nthreads; remainder rows/columns would be skipped -- confirm inputs.
  d_boxfilter_rgba_x<<<imageHeight / nthreads, nthreads, 0, streamToRun>>>(
      d_surfaceObjectListTemp, textureObjMipMapInput, imageWidth, imageHeight,
      mipLevels, filter_radius);
  // y-pass: reads intermediates, writes the Vulkan-visible surfaces.
  d_boxfilter_rgba_y<<<imageWidth / nthreads, nthreads, 0, streamToRun>>>(
      d_surfaceObjectList, d_surfaceObjectListTemp, imageWidth, imageHeight,
      mipLevels, filter_radius);
  varySigma();
  cudaVkSemaphoreSignal(cudaExtCudaUpdateVkSemaphore);
}
// Records and submits a one-shot pipeline barrier transitioning all mip
// levels of `image` between the two supported layout pairs:
//   UNDEFINED            -> TRANSFER_DST_OPTIMAL
//   TRANSFER_DST_OPTIMAL -> SHADER_READ_ONLY_OPTIMAL
// Any other combination throws std::invalid_argument. `format` is accepted
// for interface compatibility but not consulted.
void transitionImageLayout(VkImage image, VkFormat format,
                           VkImageLayout oldLayout, VkImageLayout newLayout) {
  VkCommandBuffer cmd = beginSingleTimeCommands();

  VkImageMemoryBarrier barrier = {};
  barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
  barrier.oldLayout = oldLayout;
  barrier.newLayout = newLayout;
  barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
  barrier.image = image;
  // Cover the whole image: every mip level, single color layer.
  barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  barrier.subresourceRange.baseMipLevel = 0;
  barrier.subresourceRange.levelCount = mipLevels;
  barrier.subresourceRange.baseArrayLayer = 0;
  barrier.subresourceRange.layerCount = 1;

  const bool undefinedToDst =
      oldLayout == VK_IMAGE_LAYOUT_UNDEFINED &&
      newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
  const bool dstToShaderRead =
      oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
      newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

  VkPipelineStageFlags srcStage;
  VkPipelineStageFlags dstStage;
  if (undefinedToDst) {
    // Fresh image: nothing to wait on; make it writable for transfers.
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    srcStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    dstStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
  } else if (dstToShaderRead) {
    // Transfer writes must finish before fragment-shader reads begin.
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    srcStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
    dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
  } else {
    throw std::invalid_argument("unsupported layout transition!");
  }

  vkCmdPipelineBarrier(cmd, srcStage, dstStage, 0, 0, nullptr, 0, nullptr, 1,
                       &barrier);
  endSingleTimeCommands(cmd);
}
// Records a one-shot copy of a tightly-packed buffer into mip level 0 of
// `image`, which must already be in TRANSFER_DST_OPTIMAL layout.
void copyBufferToImage(VkBuffer buffer, VkImage image, uint32_t width,
                       uint32_t height) {
  VkCommandBuffer cmd = beginSingleTimeCommands();

  VkBufferImageCopy region = {};
  region.bufferOffset = 0;
  // Zero row length / image height mean "tightly packed" source data.
  region.bufferRowLength = 0;
  region.bufferImageHeight = 0;
  region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
  region.imageSubresource.mipLevel = 0;
  region.imageSubresource.baseArrayLayer = 0;
  region.imageSubresource.layerCount = 1;
  region.imageOffset = {0, 0, 0};
  region.imageExtent = {width, height, 1};

  vkCmdCopyBufferToImage(cmd, buffer, image,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
  endSingleTimeCommands(cmd);
}
void createVertexBuffer() {
VkDeviceSize bufferSize = sizeof(vertices[0]) * vertices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, vertices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, vertexBuffer, vertexBufferMemory);
copyBuffer(stagingBuffer, vertexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createIndexBuffer() {
VkDeviceSize bufferSize = sizeof(indices[0]) * indices.size();
VkBuffer stagingBuffer;
VkDeviceMemory stagingBufferMemory;
createBuffer(bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
stagingBuffer, stagingBufferMemory);
void* data;
vkMapMemory(device, stagingBufferMemory, 0, bufferSize, 0, &data);
memcpy(data, indices.data(), (size_t)bufferSize);
vkUnmapMemory(device, stagingBufferMemory);
createBuffer(
bufferSize,
VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, indexBuffer, indexBufferMemory);
copyBuffer(stagingBuffer, indexBuffer, bufferSize);
vkDestroyBuffer(device, stagingBuffer, nullptr);
vkFreeMemory(device, stagingBufferMemory, nullptr);
}
void createUniformBuffers() {
VkDeviceSize bufferSize = sizeof(UniformBufferObject);
uniformBuffers.resize(swapChainImages.size());
uniformBuffersMemory.resize(swapChainImages.size());
for (size_t i = 0; i < swapChainImages.size(); i++) {
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
uniformBuffers[i], uniformBuffersMemory[i]);
}
}
void createDescriptorPool() {
std::array<VkDescriptorPoolSize, 2> poolSizes = {};
poolSizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSizes[0].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
poolSizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSizes[1].descriptorCount =
static_cast<uint32_t>(swapChainImages.size());
VkDescriptorPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.poolSizeCount = static_cast<uint32_t>(poolSizes.size());
poolInfo.pPoolSizes = poolSizes.data();
poolInfo.maxSets = static_cast<uint32_t>(swapChainImages.size());
if (vkCreateDescriptorPool(device, &poolInfo, nullptr, &descriptorPool) !=
VK_SUCCESS) {
throw std::runtime_error("failed to create descriptor pool!");
}
}
void createDescriptorSets() {
std::vector<VkDescriptorSetLayout> layouts(swapChainImages.size(),
descriptorSetLayout);
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount =
static_cast<uint32_t>(swapChainImages.size());
allocInfo.pSetLayouts = layouts.data();
descriptorSets.resize(swapChainImages.size());
if (vkAllocateDescriptorSets(device, &allocInfo, descriptorSets.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate descriptor sets!");
}
for (size_t i = 0; i < swapChainImages.size(); i++) {
VkDescriptorBufferInfo bufferInfo = {};
bufferInfo.buffer = uniformBuffers[i];
bufferInfo.offset = 0;
bufferInfo.range = sizeof(UniformBufferObject);
VkDescriptorImageInfo imageInfo = {};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = textureImageView;
imageInfo.sampler = textureSampler;
std::array<VkWriteDescriptorSet, 2> descriptorWrites = {};
descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].dstSet = descriptorSets[i];
descriptorWrites[0].dstBinding = 0;
descriptorWrites[0].dstArrayElement = 0;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].pBufferInfo = &bufferInfo;
descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].dstSet = descriptorSets[i];
descriptorWrites[1].dstBinding = 1;
descriptorWrites[1].dstArrayElement = 0;
descriptorWrites[1].descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].pImageInfo = &imageInfo;
vkUpdateDescriptorSets(device,
static_cast<uint32_t>(descriptorWrites.size()),
descriptorWrites.data(), 0, nullptr);
}
}
// Creates an exclusive-mode VkBuffer of `size` bytes, allocates memory
// matching `properties` for it, and binds the two together.
// Out-params: `buffer` and `bufferMemory` receive the new handles.
// Throws std::runtime_error on creation or allocation failure.
void createBuffer(VkDeviceSize size, VkBufferUsageFlags usage,
                  VkMemoryPropertyFlags properties, VkBuffer& buffer,
                  VkDeviceMemory& bufferMemory) {
  VkBufferCreateInfo bufInfo = {};
  bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
  bufInfo.size = size;
  bufInfo.usage = usage;
  bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
  if (vkCreateBuffer(device, &bufInfo, nullptr, &buffer) != VK_SUCCESS) {
    throw std::runtime_error("failed to create buffer!");
  }

  // Pick a memory type compatible with both the buffer's requirements and
  // the caller's requested property flags.
  VkMemoryRequirements memReq;
  vkGetBufferMemoryRequirements(device, buffer, &memReq);

  VkMemoryAllocateInfo alloc = {};
  alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
  alloc.allocationSize = memReq.size;
  alloc.memoryTypeIndex = findMemoryType(memReq.memoryTypeBits, properties);
  if (vkAllocateMemory(device, &alloc, nullptr, &bufferMemory) !=
      VK_SUCCESS) {
    throw std::runtime_error("failed to allocate buffer memory!");
  }

  vkBindBufferMemory(device, buffer, bufferMemory, 0);
}
// Allocates a primary command buffer from the shared pool and begins
// recording it with ONE_TIME_SUBMIT semantics.
// Pair every call with endSingleTimeCommands().
VkCommandBuffer beginSingleTimeCommands() {
  VkCommandBufferAllocateInfo alloc = {};
  alloc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
  alloc.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
  alloc.commandPool = commandPool;
  alloc.commandBufferCount = 1;

  VkCommandBuffer cmd;
  vkAllocateCommandBuffers(device, &alloc, &cmd);

  VkCommandBufferBeginInfo begin = {};
  begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
  begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
  vkBeginCommandBuffer(cmd, &begin);

  return cmd;
}
// Ends recording, submits the buffer on the graphics queue, blocks until
// the queue drains, then frees the command buffer.
void endSingleTimeCommands(VkCommandBuffer commandBuffer) {
  vkEndCommandBuffer(commandBuffer);

  VkSubmitInfo submit = {};
  submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
  submit.commandBufferCount = 1;
  submit.pCommandBuffers = &commandBuffer;
  vkQueueSubmit(graphicsQueue, 1, &submit, VK_NULL_HANDLE);
  // Simple but blocking: wait for the whole queue instead of a fence.
  vkQueueWaitIdle(graphicsQueue);

  vkFreeCommandBuffers(device, commandPool, 1, &commandBuffer);
}
// One-shot GPU copy of `size` bytes from srcBuffer to dstBuffer.
void copyBuffer(VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size) {
  VkCommandBuffer cmd = beginSingleTimeCommands();
  VkBufferCopy copyRegion = {};
  copyRegion.size = size;
  vkCmdCopyBuffer(cmd, srcBuffer, dstBuffer, 1, &copyRegion);
  endSingleTimeCommands(cmd);
}
uint32_t findMemoryType(uint32_t typeFilter,
VkMemoryPropertyFlags properties) {
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) {
if ((typeFilter & (1 << i)) &&
(memProperties.memoryTypes[i].propertyFlags & properties) ==
properties) {
return i;
}
}
throw std::runtime_error("failed to find suitable memory type!");
}
void createCommandBuffers() {
commandBuffers.resize(swapChainFramebuffers.size());
VkCommandBufferAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
allocInfo.commandPool = commandPool;
allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
allocInfo.commandBufferCount = (uint32_t)commandBuffers.size();
if (vkAllocateCommandBuffers(device, &allocInfo, commandBuffers.data()) !=
VK_SUCCESS) {
throw std::runtime_error("failed to allocate command buffers!");
}
for (size_t i = 0; i < commandBuffers.size(); i++) {
VkCommandBufferBeginInfo beginInfo = {};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
if (vkBeginCommandBuffer(commandBuffers[i], &beginInfo) != VK_SUCCESS) {
throw std::runtime_error("failed to begin recording command buffer!");
}
VkRenderPassBeginInfo renderPassInfo = {};
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = renderPass;
renderPassInfo.framebuffer = swapChainFramebuffers[i];
renderPassInfo.renderArea.offset = {0, 0};
renderPassInfo.renderArea.extent = swapChainExtent;
VkClearValue clearColor = {0.0f, 0.0f, 0.0f, 1.0f};
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffers[i], &renderPassInfo,
VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS,
graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindIndexBuffer(commandBuffers[i], indexBuffer, 0,
VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(commandBuffers[i],
VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
0, 1, &descriptorSets[i], 0, nullptr);
vkCmdDrawIndexed(commandBuffers[i], static_cast<uint32_t>(indices.size()),
1, 0, 0, 0);
// vkCmdDraw(commandBuffers[i], static_cast<uint32_t>(vertices.size()), 1,
// 0, 0);
vkCmdEndRenderPass(commandBuffers[i]);
if (vkEndCommandBuffer(commandBuffers[i]) != VK_SUCCESS) {
throw std::runtime_error("failed to record command buffer!");
}
}
}
void createSyncObjects() {
imageAvailableSemaphores.resize(MAX_FRAMES);
renderFinishedSemaphores.resize(MAX_FRAMES);
inFlightFences.resize(MAX_FRAMES);
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
for (size_t i = 0; i < MAX_FRAMES; i++) {
if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&imageAvailableSemaphores[i]) != VK_SUCCESS ||
vkCreateSemaphore(device, &semaphoreInfo, nullptr,
&renderFinishedSemaphores[i]) != VK_SUCCESS ||
vkCreateFence(device, &fenceInfo, nullptr, &inFlightFences[i]) !=
VK_SUCCESS) {
throw std::runtime_error(
"failed to create synchronization objects for a frame!");
}
}
}
// Creates the two cross-API semaphores (cudaUpdateVkSemaphore and
// vkUpdateCudaSemaphore), marked exportable with the platform-appropriate
// external handle type so CUDA can later import them.
// (Fixed: the original zero-initialized semaphoreInfo twice — once via
// `= {}` and again via memset — with sType assigned both times.)
// Throws std::runtime_error if either semaphore fails to create.
void createSyncObjectsExt() {
  VkSemaphoreCreateInfo semaphoreInfo = {};
  semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
#ifdef _WIN64
  WindowsSecurityAttributes winSecurityAttributes;
  VkExportSemaphoreWin32HandleInfoKHR
      vulkanExportSemaphoreWin32HandleInfoKHR = {};
  vulkanExportSemaphoreWin32HandleInfoKHR.sType =
      VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR;
  vulkanExportSemaphoreWin32HandleInfoKHR.pNext = NULL;
  vulkanExportSemaphoreWin32HandleInfoKHR.pAttributes =
      &winSecurityAttributes;
  vulkanExportSemaphoreWin32HandleInfoKHR.dwAccess =
      DXGI_SHARED_RESOURCE_READ | DXGI_SHARED_RESOURCE_WRITE;
  vulkanExportSemaphoreWin32HandleInfoKHR.name = (LPCWSTR)NULL;
#endif
  // Chain export info so the semaphores can be shared with CUDA.
  VkExportSemaphoreCreateInfoKHR vulkanExportSemaphoreCreateInfo = {};
  vulkanExportSemaphoreCreateInfo.sType =
      VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR;
#ifdef _WIN64
  vulkanExportSemaphoreCreateInfo.pNext =
      IsWindows8OrGreater() ? &vulkanExportSemaphoreWin32HandleInfoKHR : NULL;
  vulkanExportSemaphoreCreateInfo.handleTypes =
      IsWindows8OrGreater()
          ? VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT
          : VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT;
#else
  vulkanExportSemaphoreCreateInfo.pNext = NULL;
  vulkanExportSemaphoreCreateInfo.handleTypes =
      VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
#endif
  semaphoreInfo.pNext = &vulkanExportSemaphoreCreateInfo;
  if (vkCreateSemaphore(device, &semaphoreInfo, nullptr,
                        &cudaUpdateVkSemaphore) != VK_SUCCESS ||
      vkCreateSemaphore(device, &semaphoreInfo, nullptr,
                        &vkUpdateCudaSemaphore) != VK_SUCCESS) {
    throw std::runtime_error(
        "failed to create synchronization objects for a CUDA-Vulkan!");
  }
}
// Recomputes the model/view/projection matrices and writes the same UBO
// into every swap-chain image's uniform buffer.
void updateUniformBuffer() {
  UniformBufferObject ubo = {};
  // Model: rotate 135 degrees about the Z axis (linmath writes the result
  // into ubo.model from the copy in Model).
  mat4x4_identity(ubo.model);
  mat4x4 Model;
  mat4x4_dup(Model, ubo.model);
  mat4x4_rotate(ubo.model, Model, 0.0f, 0.0f, 1.0f, degreesToRadians(135.0f));
  // View: camera at (2,2,2) looking at the origin with Z up.
  vec3 eye = {2.0f, 2.0f, 2.0f};
  vec3 center = {0.0f, 0.0f, 0.0f};
  vec3 up = {0.0f, 0.0f, 1.0f};
  mat4x4_look_at(ubo.view, eye, center, up);
  // Projection: 45-degree FOV at the swap-chain aspect ratio, near 0.1,
  // far 10.
  mat4x4_perspective(ubo.proj, degreesToRadians(45.0f),
                     swapChainExtent.width / (float)swapChainExtent.height,
                     0.1f, 10.0f);
  // Negate Y to account for Vulkan's inverted clip-space Y axis.
  ubo.proj[1][1] *= -1;
  // Host-visible/coherent memory: map, copy, unmap for every frame's UBO.
  for (size_t i = 0; i < swapChainImages.size(); i++) {
    void* data;
    vkMapMemory(device, uniformBuffersMemory[i], 0, sizeof(ubo), 0, &data);
    memcpy(data, &ubo, sizeof(ubo));
    vkUnmapMemory(device, uniformBuffersMemory[i]);
  }
}
// Renders one frame:
//  1. Waits on this frame's fence, acquires a swap-chain image.
//  2. The very first submission uses submitVulkan (CUDA has not signalled
//     anything yet); every later frame uses submitVulkanCuda, which also
//     waits on the CUDA->Vulkan semaphore.
//  3. Presents, then launches the CUDA box filter for the next frame.
// Recreates the swap chain on out-of-date/suboptimal results or resize.
// (Fixed: sprintf replaced with snprintf to bound the title write.)
void drawFrame() {
  static int startSubmit = 0;
  vkWaitForFences(device, 1, &inFlightFences[currentFrame], VK_TRUE,
                  std::numeric_limits<uint64_t>::max());
  uint32_t imageIndex;
  VkResult result = vkAcquireNextImageKHR(
      device, swapChain, std::numeric_limits<uint64_t>::max(),
      imageAvailableSemaphores[currentFrame], VK_NULL_HANDLE, &imageIndex);
  if (result == VK_ERROR_OUT_OF_DATE_KHR) {
    recreateSwapChain();
    return;
  } else if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
    throw std::runtime_error("failed to acquire swap chain image!");
  }
  vkResetFences(device, 1, &inFlightFences[currentFrame]);
  if (!startSubmit) {
    // First frame: do not wait on the CUDA->Vulkan semaphore, since CUDA
    // has not signalled it yet.
    submitVulkan(imageIndex);
    startSubmit = 1;
  } else {
    submitVulkanCuda(imageIndex);
  }
  VkPresentInfoKHR presentInfo = {};
  presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
  VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame]};
  presentInfo.waitSemaphoreCount = 1;
  presentInfo.pWaitSemaphores = signalSemaphores;
  VkSwapchainKHR swapChains[] = {swapChain};
  presentInfo.swapchainCount = 1;
  presentInfo.pSwapchains = swapChains;
  presentInfo.pImageIndices = &imageIndex;
  presentInfo.pResults = nullptr;  // Optional
  result = vkQueuePresentKHR(presentQueue, &presentInfo);
  if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR ||
      framebufferResized) {
    framebufferResized = false;
    recreateSwapChain();
  } else if (result != VK_SUCCESS) {
    throw std::runtime_error("failed to present swap chain image!");
  }
  // Run the CUDA box filter so the next frame samples updated texels.
  cudaUpdateVkImage();
  currentFrame = (currentFrame + 1) % MAX_FRAMES;
  // Added sleep of 10 millisecs so that CPU does not submit too much work
  // to GPU
  std::this_thread::sleep_for(std::chrono::microseconds(10000));
  char title[256];
  // snprintf bounds the write to the buffer; sprintf did not.
  snprintf(title, sizeof(title), "Vulkan Image CUDA Box Filter (radius=%d)",
           filter_radius);
  glfwSetWindowTitle(window, title);
}
// Enqueues an asynchronous signal of the imported Vulkan semaphore on
// streamToRun (fence value 0; flags 0).
void cudaVkSemaphoreSignal(cudaExternalSemaphore_t& extSemaphore) {
  cudaExternalSemaphoreSignalParams signalParams;
  memset(&signalParams, 0, sizeof(signalParams));
  signalParams.params.fence.value = 0;
  signalParams.flags = 0;
  checkCudaErrors(cudaSignalExternalSemaphoresAsync(
      &extSemaphore, &signalParams, 1, streamToRun));
}
// Enqueues an asynchronous wait on the imported Vulkan semaphore on
// streamToRun (fence value 0; flags 0); later work on the stream will not
// run until the semaphore is signalled.
void cudaVkSemaphoreWait(cudaExternalSemaphore_t& extSemaphore) {
  cudaExternalSemaphoreWaitParams waitParams;
  memset(&waitParams, 0, sizeof(waitParams));
  waitParams.params.fence.value = 0;
  waitParams.flags = 0;
  checkCudaErrors(cudaWaitExternalSemaphoresAsync(&extSemaphore, &waitParams,
                                                  1, streamToRun));
}
void submitVulkan(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame]};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
void submitVulkanCuda(uint32_t imageIndex) {
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
VkSemaphore waitSemaphores[] = {imageAvailableSemaphores[currentFrame],
cudaUpdateVkSemaphore};
VkPipelineStageFlags waitStages[] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submitInfo.waitSemaphoreCount = 2;
submitInfo.pWaitSemaphores = waitSemaphores;
submitInfo.pWaitDstStageMask = waitStages;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &commandBuffers[imageIndex];
VkSemaphore signalSemaphores[] = {renderFinishedSemaphores[currentFrame],
vkUpdateCudaSemaphore};
submitInfo.signalSemaphoreCount = 2;
submitInfo.pSignalSemaphores = signalSemaphores;
if (vkQueueSubmit(graphicsQueue, 1, &submitInfo, inFlightFences[currentFrame]) !=
VK_SUCCESS) {
throw std::runtime_error("failed to submit draw command buffer!");
}
}
VkShaderModule createShaderModule(const std::vector<char>& code) {
  // Wrap raw SPIR-V bytes in a VkShaderModule.  Vulkan wants the code as
  // uint32_t*; vector's heap storage is suitably aligned in practice.
  VkShaderModuleCreateInfo info = {};
  info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
  info.codeSize = code.size();
  info.pCode = reinterpret_cast<const uint32_t*>(code.data());

  VkShaderModule shaderModule;
  if (vkCreateShaderModule(device, &info, nullptr, &shaderModule) !=
      VK_SUCCESS) {
    throw std::runtime_error("failed to create shader module!");
  }
  return shaderModule;
}
VkSurfaceFormatKHR chooseSwapSurfaceFormat(
    const std::vector<VkSurfaceFormatKHR>& availableFormats) {
  // A single VK_FORMAT_UNDEFINED entry means the surface has no preferred
  // format, so we are free to pick our ideal one.
  if (availableFormats.size() == 1 &&
      availableFormats[0].format == VK_FORMAT_UNDEFINED) {
    return {VK_FORMAT_B8G8R8A8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
  }
  // Otherwise search the list for the preferred format/colorspace pair.
  for (size_t i = 0; i < availableFormats.size(); ++i) {
    const VkSurfaceFormatKHR& fmt = availableFormats[i];
    if (fmt.format == VK_FORMAT_B8G8R8A8_UNORM &&
        fmt.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
      return fmt;
    }
  }
  // Settle for whatever the surface lists first.
  return availableFormats[0];
}
VkPresentModeKHR chooseSwapPresentMode(
    const std::vector<VkPresentModeKHR>& availablePresentModes) {
  // Prefer MAILBOX; otherwise remember IMMEDIATE if seen, and finally fall
  // back to FIFO, which the spec guarantees to be available.
  VkPresentModeKHR fallback = VK_PRESENT_MODE_FIFO_KHR;
  for (size_t i = 0; i < availablePresentModes.size(); ++i) {
    VkPresentModeKHR mode = availablePresentModes[i];
    if (mode == VK_PRESENT_MODE_MAILBOX_KHR) {
      return mode;
    }
    if (mode == VK_PRESENT_MODE_IMMEDIATE_KHR) {
      fallback = mode;
    }
  }
  return fallback;
}
VkExtent2D chooseSwapExtent(const VkSurfaceCapabilitiesKHR& capabilities) {
  // currentExtent == UINT32_MAX signals that the window manager lets the
  // application choose the extent; any other value must be used as-is.
  if (capabilities.currentExtent.width !=
      std::numeric_limits<uint32_t>::max()) {
    return capabilities.currentExtent;
  }
  int width = 0, height = 0;
  glfwGetFramebufferSize(window, &width, &height);
  VkExtent2D extent = {static_cast<uint32_t>(width),
                       static_cast<uint32_t>(height)};
  // Clamp the framebuffer size into the surface's supported range.
  extent.width =
      std::max(capabilities.minImageExtent.width,
               std::min(capabilities.maxImageExtent.width, extent.width));
  extent.height =
      std::max(capabilities.minImageExtent.height,
               std::min(capabilities.maxImageExtent.height, extent.height));
  return extent;
}
SwapChainSupportDetails querySwapChainSupport(VkPhysicalDevice device) {
  // Collect surface capabilities plus all supported formats and present
  // modes for this physical device (standard two-call enumeration).
  SwapChainSupportDetails details;
  vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, surface,
                                            &details.capabilities);

  uint32_t count = 0;
  vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &count, nullptr);
  if (count != 0) {
    details.formats.resize(count);
    vkGetPhysicalDeviceSurfaceFormatsKHR(device, surface, &count,
                                         details.formats.data());
  }

  count = 0;
  vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &count,
                                            nullptr);
  if (count != 0) {
    details.presentModes.resize(count);
    vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &count,
                                              details.presentModes.data());
  }
  return details;
}
bool isDeviceSuitable(VkPhysicalDevice device) {
  // A device qualifies when it exposes the required queue families,
  // supports every required extension, has a usable swap chain, and
  // offers anisotropic sampling.
  QueueFamilyIndices indices = findQueueFamilies(device);
  bool extensionsSupported = checkDeviceExtensionSupport(device);

  bool swapChainAdequate = false;
  if (extensionsSupported) {
    SwapChainSupportDetails support = querySwapChainSupport(device);
    swapChainAdequate =
        !support.formats.empty() && !support.presentModes.empty();
  }

  VkPhysicalDeviceFeatures features;
  vkGetPhysicalDeviceFeatures(device, &features);
  return indices.isComplete() && extensionsSupported && swapChainAdequate &&
         features.samplerAnisotropy;
}
bool checkDeviceExtensionSupport(VkPhysicalDevice device) {
  // Cross every required extension off a working set as it is found; any
  // leftovers mean the device lacks support.
  uint32_t count = 0;
  vkEnumerateDeviceExtensionProperties(device, nullptr, &count, nullptr);
  std::vector<VkExtensionProperties> available(count);
  vkEnumerateDeviceExtensionProperties(device, nullptr, &count,
                                       available.data());

  std::set<std::string> missing(deviceExtensions.begin(),
                                deviceExtensions.end());
  for (size_t i = 0; i < available.size(); ++i) {
    missing.erase(available[i].extensionName);
  }
  return missing.empty();
}
QueueFamilyIndices findQueueFamilies(VkPhysicalDevice device) {
  // Find queue families able to run graphics commands and to present to
  // the surface; the two capabilities may live in different families.
  QueueFamilyIndices indices;

  uint32_t count = 0;
  vkGetPhysicalDeviceQueueFamilyProperties(device, &count, nullptr);
  std::vector<VkQueueFamilyProperties> families(count);
  vkGetPhysicalDeviceQueueFamilyProperties(device, &count, families.data());

  for (uint32_t i = 0; i < count; ++i) {
    const VkQueueFamilyProperties& family = families[i];
    if (family.queueCount > 0 &&
        (family.queueFlags & VK_QUEUE_GRAPHICS_BIT)) {
      indices.graphicsFamily = i;
    }
    VkBool32 presentSupport = false;
    vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface,
                                         &presentSupport);
    if (family.queueCount > 0 && presentSupport) {
      indices.presentFamily = i;
    }
    // Stop as soon as both families are located.
    if (indices.isComplete()) {
      break;
    }
  }
  return indices;
}
std::vector<const char*> getRequiredExtensions() {
  // Start from the instance extensions GLFW needs for surface creation,
  // then append the debug-messenger extension when validation is enabled.
  uint32_t glfwCount = 0;
  const char** glfwList = glfwGetRequiredInstanceExtensions(&glfwCount);
  std::vector<const char*> extensions(glfwList, glfwList + glfwCount);
  if (enableValidationLayers) {
    extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
  }
  return extensions;
}
bool checkValidationLayerSupport() {
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
std::vector<VkLayerProperties> availableLayers(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
for (const char* layerName : validationLayers) {
bool layerFound = false;
for (const auto& layerProperties : availableLayers) {
if (strcmp(layerName, layerProperties.layerName) == 0) {
layerFound = true;
break;
}
}
if (!layerFound) {
return false;
}
}
return true;
}
// Locate `filename` via the CUDA samples search helper and read the whole
// file into a byte buffer.  Opens at the end (std::ios::ate) so tellg()
// yields the file size.  Throws std::runtime_error if the file cannot be
// located or opened.
static std::vector<char> readFile(const std::string& filename) {
  char* file_path = sdkFindFilePath(filename.c_str(), execution_path.c_str());
  // sdkFindFilePath returns NULL when the file is not found; passing a
  // null char* to the ifstream constructor is undefined behavior, so fail
  // loudly here instead.
  if (file_path == nullptr) {
    throw std::runtime_error("failed to locate file: " + filename);
  }
  std::ifstream file(file_path, std::ios::ate | std::ios::binary);
  if (!file.is_open()) {
    throw std::runtime_error("failed to open file!");
  }
  size_t fileSize = (size_t)file.tellg();
  std::vector<char> buffer(fileSize);
  file.seekg(0);
  file.read(buffer.data(), fileSize);
  file.close();
  return buffer;
}
// Validation-layer message hook: prints every message to stderr.
// Returning VK_FALSE tells the loader not to abort the triggering call.
static VKAPI_ATTR VkBool32 VKAPI_CALL
debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
              VkDebugUtilsMessageTypeFlagsEXT messageType,
              const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
              void* pUserData) {
  std::cerr << "validation layer: " << pCallbackData->pMessage << std::endl;
  return VK_FALSE;
}
};
// Entry point: parse the optional "-file=<name>" argument, load the input
// PPM image, and run the Vulkan/CUDA box-filter demo.
int main(int argc, char** argv) {
  execution_path = argv[0];

  // Default input image; may be overridden on the command line.
  std::string image_filename = "lenaRGB.ppm";
  if (checkCmdLineFlag(argc, (const char**)argv, "file")) {
    // getCmdLineArgumentString writes a char* through its out-parameter.
    // Receive it in a raw pointer first: casting &std::string to char**
    // (as the original code did) overwrites the string object's internals
    // and is undefined behavior.
    char* file_arg = nullptr;
    getCmdLineArgumentString(argc, (const char**)argv, "file", &file_arg);
    if (file_arg != nullptr) {
      image_filename = file_arg;
    }
  }

  vulkanImageCUDA app;
  try {
    // This app only works on ppm images
    app.loadImageData(image_filename);
    app.run();
  } catch (const std::exception& e) {
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
|
7a72b3c6e6a789991fdf6a13f1195701dbc68665.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// FDTD E-field update (HIP): advance (ex,ey,ez) by the discrete curl of
// the H field.  One thread per lattice cell; flat index with z fastest:
// idx = (i*ny + j)*nz + k.  DTYPE, DX (block width), ARGS_CE, PAD and the
// CEX/CEY/CEZ coefficients are macros supplied at compile time.
__global__ void update_e(int nx, int ny, int nz, DTYPE *ex, DTYPE *ey, DTYPE *ez, DTYPE *hx, DTYPE *hy, DTYPE *hz ARGS_CE) {
  int tx = threadIdx.x;
  int idx = blockIdx.x * blockDim.x + tx;
  int i, j, k;
  // Block-local staging of H; sx/sy carry one extra halo slot for the +1
  // forward difference along the fastest (z) direction; sz needs no halo.
  __shared__ DTYPE sx[DX+1], sy[DX+1], sz[DX];
  //while( idx < nx*ny*nz ) {
  sx[tx] = hx[idx];
  sy[tx] = hy[idx];
  sz[tx] = hz[idx];
  if( tx == DX-1 ) {
    // Last thread of the block loads the halo element from the next cell.
    // NOTE(review): hx[idx+1]/hy[idx+1] are read unguarded — presumably
    // the arrays are padded past the last cell; confirm at the call site.
    sx[tx+1] = hx[idx+1];
    sy[tx+1] = hy[idx+1];
  }
  __syncthreads();
  // Recover (i,j,k) coordinates from the flat index.
  i = idx/(ny*nz);
  j = (idx - i*ny*nz)/nz;
  k = idx%nz;
  // Curl terms: the +1-in-y neighbor (idx+nz) and +1-in-x neighbor
  // (idx+ny*nz) come from global memory; +1 in z comes from the shared halo.
  if( j<ny-1 && k<nz-1 PAD ) ex[idx] += CEX * ((hz[idx+nz] - sz[tx]) - (sy[tx+1] - sy[tx]));
  if( i<nx-1 && k<nz-1 PAD ) ey[idx] += CEY * ((sx[tx+1] - sx[tx]) - (hz[idx+ny*nz] - sz[tx]));
  if( i<nx-1 && j<ny-1 && k<nz PAD) ez[idx] += CEZ * ((hy[idx+ny*nz] - sy[tx]) - (hx[idx+nz] - sx[tx]));
  //idx += blockDim.x * gridDim.x;
  //__syncthreads();
  //}
}
// FDTD H-field update (HIP): advance (hx,hy,hz) by the discrete curl of
// the E field using backward differences, so the shared-memory halo sits
// at index tx-1.  The three staging arrays are carved out of one shared
// buffer s[3*DX+2] such that sy[-1] aliases s[DX] (the spare slot after
// sz) and sx[-1] aliases s[2*DX+1] — the "negative" halo indices below
// are therefore valid offsets inside s.
__global__ void update_h(int nx, int ny, int nz, DTYPE *ex, DTYPE *ey, DTYPE *ez, DTYPE *hx, DTYPE *hy, DTYPE *hz ARGS_CH) {
  int tx = threadIdx.x;
  int idx = blockIdx.x * blockDim.x + tx;
  int i, j, k;
  __shared__ DTYPE s[3*DX+2];
  // NOTE(review): every thread writes identical values into these shared
  // pointers before any divergence — a benign race, but worth tidying.
  __shared__ DTYPE *sx, *sy, *sz;
  sz = s;
  sy = &sz[DX+1];
  sx = &sy[DX+1];
  //while( idx < nx*ny*nz ) {
  sx[tx] = ex[idx];
  sy[tx] = ey[idx];
  sz[tx] = ez[idx];
  if( tx == 0 ) {
    // First thread loads the backward halo; index -1 lands in the
    // preceding sub-array of s (see the layout note above).
    // NOTE(review): ex[idx-1]/ey[idx-1] underflow for the very first
    // cell of the whole grid — presumably the arrays are padded; confirm.
    sx[tx-1] = ex[idx-1];
    sy[tx-1] = ey[idx-1];
  }
  __syncthreads();
  // Recover (i,j,k) from the flat index (z fastest).
  i = idx/(ny*nz);
  j = (idx - i*ny*nz)/nz;
  k = idx%nz;
  // Backward-difference curl: -1-in-y neighbor is idx-nz, -1-in-x is
  // idx-ny*nz (global reads); -1 in z comes from the shared halo.
  if( j>0 && k>0 && k<nz PAD ) hx[idx] -= CHX * ((sz[tx] - ez[idx-nz]) - (sy[tx] - sy[tx-1]));
  if( i>0 && k>0 && k<nz PAD ) hy[idx] -= CHY * ((sx[tx] - sx[tx-1]) - (sz[tx] - ez[idx-ny*nz]));
  if( i>0 && j>0 && k<nz PAD ) hz[idx] -= CHZ * ((sy[tx] - ey[idx-ny*nz]) - (sx[tx] - ex[idx-nz]));
  //idx += blockDim.x * gridDim.x;
  //__syncthreads();
  //}
}
// 7a72b3c6e6a789991fdf6a13f1195701dbc68665.cu
// FDTD E-field update (CUDA original): adds the discrete curl of H to
// (ex,ey,ez).  Flat index with z fastest: idx = (i*ny + j)*nz + k.
// DTYPE, DX, ARGS_CE, PAD and CEX/CEY/CEZ are compile-time macros.
__global__ void update_e(int nx, int ny, int nz, DTYPE *ex, DTYPE *ey, DTYPE *ez, DTYPE *hx, DTYPE *hy, DTYPE *hz ARGS_CE) {
  const int lane = threadIdx.x;
  const int idx = blockIdx.x * blockDim.x + lane;

  // Stage this block's H values; sx/sy carry a +1 halo slot for the
  // forward difference along z, loaded by the block's last thread.
  __shared__ DTYPE sx[DX+1], sy[DX+1], sz[DX];
  sx[lane] = hx[idx];
  sy[lane] = hy[idx];
  sz[lane] = hz[idx];
  if( lane == DX-1 ) {
    sx[lane+1] = hx[idx+1];
    sy[lane+1] = hy[idx+1];
  }
  __syncthreads();

  // Recover lattice coordinates from the flat index.
  const int i = idx/(ny*nz);
  const int j = (idx - i*ny*nz)/nz;
  const int k = idx%nz;

  // Curl terms: neighbors at +1 in y (idx+nz) and +1 in x (idx+ny*nz)
  // come from global memory; +1 in z comes from the shared halo.
  if( j<ny-1 && k<nz-1 PAD ) ex[idx] += CEX * ((hz[idx+nz] - sz[lane]) - (sy[lane+1] - sy[lane]));
  if( i<nx-1 && k<nz-1 PAD ) ey[idx] += CEY * ((sx[lane+1] - sx[lane]) - (hz[idx+ny*nz] - sz[lane]));
  if( i<nx-1 && j<ny-1 && k<nz PAD) ez[idx] += CEZ * ((hy[idx+ny*nz] - sy[lane]) - (hx[idx+nz] - sx[lane]));
}
// FDTD H-field update (CUDA original): advance (hx,hy,hz) by the discrete
// curl of E using backward differences, so the shared halo sits at tx-1.
// The staging arrays are carved from one shared buffer s[3*DX+2] so that
// sy[-1] aliases s[DX] and sx[-1] aliases s[2*DX+1]; the "negative"
// indices below are therefore valid offsets inside s.
__global__ void update_h(int nx, int ny, int nz, DTYPE *ex, DTYPE *ey, DTYPE *ez, DTYPE *hx, DTYPE *hy, DTYPE *hz ARGS_CH) {
  int tx = threadIdx.x;
  int idx = blockIdx.x * blockDim.x + tx;
  int i, j, k;
  __shared__ DTYPE s[3*DX+2];
  // NOTE(review): all threads write identical values into these shared
  // pointers before divergence — a benign race, but worth tidying.
  __shared__ DTYPE *sx, *sy, *sz;
  sz = s;
  sy = &sz[DX+1];
  sx = &sy[DX+1];
  //while( idx < nx*ny*nz ) {
  sx[tx] = ex[idx];
  sy[tx] = ey[idx];
  sz[tx] = ez[idx];
  if( tx == 0 ) {
    // First thread loads the backward halo; index -1 lands in the
    // preceding sub-array of s (see layout note above).
    // NOTE(review): ex[idx-1]/ey[idx-1] underflow for the grid's very
    // first cell — presumably the arrays are padded; confirm.
    sx[tx-1] = ex[idx-1];
    sy[tx-1] = ey[idx-1];
  }
  __syncthreads();
  // Recover (i,j,k) from the flat index (z fastest).
  i = idx/(ny*nz);
  j = (idx - i*ny*nz)/nz;
  k = idx%nz;
  // Backward-difference curl: -1-in-y is idx-nz, -1-in-x is idx-ny*nz
  // (global reads); -1 in z comes from the shared halo.
  if( j>0 && k>0 && k<nz PAD ) hx[idx] -= CHX * ((sz[tx] - ez[idx-nz]) - (sy[tx] - sy[tx-1]));
  if( i>0 && k>0 && k<nz PAD ) hy[idx] -= CHY * ((sx[tx] - sx[tx-1]) - (sz[tx] - ez[idx-ny*nz]));
  if( i>0 && j>0 && k<nz PAD ) hz[idx] -= CHZ * ((sy[tx] - ey[idx-ny*nz]) - (sx[tx] - ex[idx-nz]));
  //idx += blockDim.x * gridDim.x;
  //__syncthreads();
  //}
}
|
72a49d8af6381eeecef2cd177c4a5f099b5656b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <read_gauge.h>
#include <gauge_field.h>
#include <hisq_force_quda.h>
#include <hw_quda.h>
#include <hisq_force_macros.h>
#include<utility>
//DEBUG : control compile
#define COMPILE_HISQ_DP_18
#define COMPILE_HISQ_DP_12
#define COMPILE_HISQ_SP_18
#define COMPILE_HISQ_SP_12
// Disable texture read for now. Need to revisit this.
#define HISQ_SITE_MATRIX_LOAD_TEX 1
#define HISQ_NEW_OPROD_LOAD_TEX 1
namespace quda {
namespace fermion_force {
// Launch geometry for the HISQ force kernels: total thread count, local
// sub-volume dimensions D1..D4 (D1h is presumably D1/2 for even/odd
// checkerboard storage — confirm at the fill site), the base offset of
// the sub-volume inside the extended lattice, and per-dimension flags
// marking which directions are partitioned across GPUs.
typedef struct hisq_kernel_param_s{
  unsigned long threads;
  int D1, D2,D3, D4, D1h;
  int base_idx[4];
  int ghostDim[4];
}hisq_kernel_param_t;
texture<int4, 1> newOprod0TexDouble;
texture<int4, 1> newOprod1TexDouble;
texture<float2, 1, hipReadModeElementType> newOprod0TexSingle;
texture<float2, 1, hipReadModeElementType> newOprod1TexSingle;
// One-time upload of the fattening-force constants (strides for site
// links, color matrices and momentum) to the device symbol `hf`.
// Guarded by a static flag so repeated calls are no-ops.
// NOTE(review): the guard is not thread-safe; fine only if QUDA
// initialization is single-threaded.
void hisqForceInitCuda(QudaGaugeParam* param)
{
  static int hisq_force_init_cuda_flag = 0;
  if (hisq_force_init_cuda_flag){
    return;
  }
  hisq_force_init_cuda_flag=1;
  // Even/odd half-volume of the local lattice.
  int Vh = param->X[0]*param->X[1]*param->X[2]*param->X[3]/2;
  fat_force_const_t hf_h;
#ifdef MULTI_GPU
  // With multi-GPU partitioning the gauge field carries a 2-site halo in
  // every direction, so strides use the extended half-volume.
  int Vh_ex = (param->X[0]+4)*(param->X[1]+4)*(param->X[2]+4)*(param->X[3]+4)/2;
  hf_h.site_ga_stride = Vh_ex + param->site_ga_pad;;
  hf_h.color_matrix_stride = Vh_ex;
#else
  hf_h.site_ga_stride = Vh + param->site_ga_pad;
  hf_h.color_matrix_stride = Vh;
#endif
  hf_h.mom_ga_stride = Vh + param->mom_ga_pad;
  // NOTE(review): with hip-clang, hipMemcpyToSymbol usually requires
  // HIP_SYMBOL(hf) rather than the bare symbol — confirm this compiles.
  hipMemcpyToSymbol(hf, &hf_h, sizeof(fat_force_const_t));
  checkCudaError();
}
// struct for holding the fattening path coefficients
// One weight per staple length in the HISQ link-fattening expansion
// (1-link, 3-, 5-, 7-link staples, the Naik term, and the Lepage term).
template<class Real>
struct PathCoefficients
{
  Real one;
  Real three;
  Real five;
  Real seven;
  Real naik;
  Real lepage;
};
// Scalar * two-component scale, single precision.
inline __device__ float2 operator*(float a, const float2 & b)
{
  return make_float2(a*b.x,a*b.y);
}
// Scalar * two-component scale, double precision.
inline __device__ double2 operator*(double a, const double2 & b)
{
  return make_double2(a*b.x,a*b.y);
}
// Component-wise accumulate for float2.
inline __device__ const float2 & operator+=(float2 & a, const float2 & b)
{
  a.x += b.x;
  a.y += b.y;
  return a;
}
// Component-wise accumulate for double2.
inline __device__ const double2 & operator+=(double2 & a, const double2 & b)
{
  a.x += b.x;
  a.y += b.y;
  return a;
}
// Component-wise accumulate for float4.
inline __device__ const float4 & operator+=(float4 & a, const float4 & b)
{
  a.x += b.x;
  a.y += b.y;
  a.z += b.z;
  a.w += b.w;
  return a;
}
// Replication of code
// This structure is already defined in
// unitarize_utilities.h
// Maps a two-component complex storage type to its underlying real
// scalar type (float2 -> float, double2 -> double).
template<class T>
struct RealTypeId;
template<>
struct RealTypeId<float2>
{
  typedef float Type;
};
template<>
struct RealTypeId<double2>
{
  typedef double Type;
};
// In-place conjugate transpose (dagger) of a 3x3 complex matrix stored
// row-major as 9 two-component entries.  CONJ_INDEX maps (row,col) to the
// transposed linear index; `tmp` holds one element of each off-diagonal
// pair so the swap does not clobber it.
template<class T>
inline __device__
void adjointMatrix(T* mat)
{
#define CONJ_INDEX(i,j) j*3 + i
  T tmp;
  // Diagonal: conjugate in place.
  mat[CONJ_INDEX(0,0)] = conj(mat[0]);
  mat[CONJ_INDEX(1,1)] = conj(mat[4]);
  mat[CONJ_INDEX(2,2)] = conj(mat[8]);
  // Off-diagonal pairs: conjugate and swap.
  tmp  = conj(mat[1]);
  mat[CONJ_INDEX(1,0)] = conj(mat[3]);
  mat[CONJ_INDEX(0,1)] = tmp;
  tmp = conj(mat[2]);
  mat[CONJ_INDEX(2,0)] = conj(mat[6]);
  mat[CONJ_INDEX(0,2)] = tmp;
  tmp = conj(mat[5]);
  mat[CONJ_INDEX(2,1)] = conj(mat[7]);
  mat[CONJ_INDEX(1,2)] = tmp;
#undef CONJ_INDEX
  return;
}
// Load N strided entries of the link matrix in direction `dir` at site
// `idx` from the even/odd-split field into the local array `mat`.
template<int N, class T>
inline __device__
void loadMatrixFromField(const T* const field_even, const T* const field_odd,
                         int dir, int idx, T* const mat, int oddness, int stride)
{
  const T* const field = (oddness)?field_odd:field_even;
  for(int i = 0;i < N ;i++){
    mat[i] = field[idx + dir*N*stride + i*stride];
  }
  return;
}

// Convenience wrapper: load a full 3x3 matrix (9 two-component entries).
template<class T>
inline __device__
void loadMatrixFromField(const T* const field_even, const T* const field_odd,
                         int dir, int idx, T* const mat, int oddness, int stride)
{
  loadMatrixFromField<9> (field_even, field_odd, dir, idx, mat, oddness, stride);
  return;
}

// float4-packed overload: three float4 loads yield six float2 entries
// (presumably the first two rows of the matrix for 12-reconstruct
// storage — confirm against the packing code).
inline __device__
void loadMatrixFromField(const float4* const field_even, const float4* const field_odd,
                         int dir, int idx, float2* const mat, int oddness, int stride)
{
  const float4* const field = oddness?field_odd: field_even;
  float4 tmp;
  tmp = field[idx + dir*stride*3];
  mat[0] = make_float2(tmp.x, tmp.y);
  mat[1] = make_float2(tmp.z, tmp.w);
  tmp = field[idx + dir*stride*3 + stride];
  mat[2] = make_float2(tmp.x, tmp.y);
  mat[3] = make_float2(tmp.z, tmp.w);
  tmp = field[idx + dir*stride*3 + 2*stride];
  mat[4] = make_float2(tmp.x, tmp.y);
  mat[5] = make_float2(tmp.z, tmp.w);
  return;
}

// Direction-less overload: load a full 9-entry matrix at site `idx` with
// the loads fully unrolled.
template<class T>
inline __device__
void loadMatrixFromField(const T* const field_even, const T* const field_odd, int idx, T* const mat, int oddness, int stride)
{
  const T* const field = (oddness)?field_odd:field_even;
  mat[0] = field[idx];
  mat[1] = field[idx + stride];
  mat[2] = field[idx + stride*2];
  mat[3] = field[idx + stride*3];
  mat[4] = field[idx + stride*4];
  mat[5] = field[idx + stride*5];
  mat[6] = field[idx + stride*6];
  mat[7] = field[idx + stride*7];
  mat[8] = field[idx + stride*8];
  return;
}
// Accumulate coeff*mat into direction `dir` of the new outer-product
// field at site `idx`.  Implemented as a macro (not a function) so the
// LOAD_TEX_ENTRY read path (texture fetch vs. direct array access) can be
// selected per precision by the preprocessor.  The read-modify-write is
// not atomic — NOTE(review): presumably exactly one thread touches each
// (site, dir) entry; confirm against the launching kernels.
#define addMatrixToNewOprod(mat, dir, idx, coeff, field_even, field_odd, oddness) do { \
    RealA* const field = (oddness)?field_odd: field_even; \
    RealA value[9]; \
    value[0] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9); \
    value[1] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + hf.color_matrix_stride); \
    value[2] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 2*hf.color_matrix_stride); \
    value[3] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 3*hf.color_matrix_stride); \
    value[4] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 4*hf.color_matrix_stride); \
    value[5] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 5*hf.color_matrix_stride); \
    value[6] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 6*hf.color_matrix_stride); \
    value[7] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 7*hf.color_matrix_stride); \
    value[8] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 8*hf.color_matrix_stride); \
    field[idx + dir*hf.color_matrix_stride*9] = value[0] + coeff*mat[0]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride] = value[1] + coeff*mat[1]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*2] = value[2] + coeff*mat[2]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*3] = value[3] + coeff*mat[3]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*4] = value[4] + coeff*mat[4]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*5] = value[5] + coeff*mat[5]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*6] = value[6] + coeff*mat[6]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*7] = value[7] + coeff*mat[7]; \
    field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*8] = value[8] + coeff*mat[8]; \
  }while(0)
// only works if Promote<T,U>::Type = T
// Accumulate coeff*mat into direction `dir` of an even/odd-split matrix
// field at site `idx` (non-atomic read-modify-write).
template<class T, class U>
inline __device__
void addMatrixToField(const T* const mat, int dir, int idx, U coeff,
                      T* const field_even, T* const field_odd, int oddness)
{
  T* const field = (oddness)?field_odd: field_even;
  field[idx + dir*hf.color_matrix_stride*9]          += coeff*mat[0];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride]     += coeff*mat[1];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*2]   += coeff*mat[2];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*3]   += coeff*mat[3];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*4]   += coeff*mat[4];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*5]   += coeff*mat[5];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*6]   += coeff*mat[6];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*7]   += coeff*mat[7];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*8]   += coeff*mat[8];
  return;
}

// Direction-less variant: accumulate coeff*mat at site `idx`.
template<class T, class U>
inline __device__
void addMatrixToField(const T* const mat, int idx, U coeff, T* const field_even,
                      T* const field_odd, int oddness)
{
  T* const field = (oddness)?field_odd: field_even;
  field[idx ]         += coeff*mat[0];
  field[idx + hf.color_matrix_stride]     += coeff*mat[1];
  field[idx + hf.color_matrix_stride*2]   += coeff*mat[2];
  field[idx + hf.color_matrix_stride*3]   += coeff*mat[3];
  field[idx + hf.color_matrix_stride*4]   += coeff*mat[4];
  field[idx + hf.color_matrix_stride*5]   += coeff*mat[5];
  field[idx + hf.color_matrix_stride*6]   += coeff*mat[6];
  field[idx + hf.color_matrix_stride*7]   += coeff*mat[7];
  field[idx + hf.color_matrix_stride*8]   += coeff*mat[8];
  return;
}

// Debug variant of the above: identical accumulation plus a device-side
// printf of the first component (compiled in only for sm_20+).
template<class T, class U>
inline __device__
void addMatrixToField_test(const T* const mat, int idx, U coeff, T* const field_even,
                           T* const field_odd, int oddness)
{
  T* const field = (oddness)?field_odd: field_even;
  //T oldvalue=field[idx];
  field[idx ]         += coeff*mat[0];
  field[idx + hf.color_matrix_stride]     += coeff*mat[1];
  field[idx + hf.color_matrix_stride*2]   += coeff*mat[2];
  field[idx + hf.color_matrix_stride*3]   += coeff*mat[3];
  field[idx + hf.color_matrix_stride*4]   += coeff*mat[4];
  field[idx + hf.color_matrix_stride*5]   += coeff*mat[5];
  field[idx + hf.color_matrix_stride*6]   += coeff*mat[6];
  field[idx + hf.color_matrix_stride*7]   += coeff*mat[7];
  field[idx + hf.color_matrix_stride*8]   += coeff*mat[8];
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
  printf("value is coeff(%f) * mat[0].x(%f)=%f\n", coeff, mat[0].x, field[idx].x);
#endif
  return;
}
// Overwrite direction `dir` of the even/odd-split matrix field at site
// `idx` with the 9 entries of `mat`.
template<class T>
inline __device__
void storeMatrixToField(const T* const mat, int dir, int idx, T* const field_even, T* const field_odd, int oddness)
{
  T* const field = (oddness)?field_odd: field_even;
  field[idx + dir*hf.color_matrix_stride*9]          = mat[0];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride]     = mat[1];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*2]   = mat[2];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*3]   = mat[3];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*4]   = mat[4];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*5]   = mat[5];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*6]   = mat[6];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*7]   = mat[7];
  field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*8]   = mat[8];
  return;
}

// Direction-less variant: overwrite the matrix at site `idx`.
template<class T>
inline __device__
void storeMatrixToField(const T* const mat, int idx, T* const field_even, T* const field_odd, int oddness)
{
  T* const field = (oddness)?field_odd: field_even;
  field[idx]          = mat[0];
  field[idx + hf.color_matrix_stride]     = mat[1];
  field[idx + hf.color_matrix_stride*2]   = mat[2];
  field[idx + hf.color_matrix_stride*3]   = mat[3];
  field[idx + hf.color_matrix_stride*4]   = mat[4];
  field[idx + hf.color_matrix_stride*5]   = mat[5];
  field[idx + hf.color_matrix_stride*6]   = mat[6];
  field[idx + hf.color_matrix_stride*7]   = mat[7];
  field[idx + hf.color_matrix_stride*8]   = mat[8];
  return;
}
// Store coeff * (traceless anti-hermitian part of mat) into the momentum
// field's compressed 5-entry layout: three (m[i][j] - conj(m[j][i]))/2
// off-diagonal entries, then the trace-subtracted imaginary diagonal
// packed into the last two entries.
template<class T, class U>
inline __device__
void storeMatrixToMomentumField(const T* const mat, int dir, int idx, U coeff,
                                T* const mom_even, T* const mom_odd, int oddness)
{
  T* const mom_field = (oddness)?mom_odd:mom_even;
  T temp2;
  // Off-diagonal (0,1), (0,2), (1,2): antisymmetric real part,
  // symmetric imaginary part.
  temp2.x = (mat[1].x - mat[3].x)*0.5*coeff;
  temp2.y = (mat[1].y + mat[3].y)*0.5*coeff;
  mom_field[idx + dir*hf.mom_ga_stride*5] = temp2;
  temp2.x = (mat[2].x - mat[6].x)*0.5*coeff;
  temp2.y = (mat[2].y + mat[6].y)*0.5*coeff;
  mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride] = temp2;
  temp2.x = (mat[5].x - mat[7].x)*0.5*coeff;
  temp2.y = (mat[5].y + mat[7].y)*0.5*coeff;
  mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride*2] = temp2;
  // Imaginary diagonal with the average (trace/3) removed.
  const typename RealTypeId<T>::Type temp = (mat[0].y + mat[4].y + mat[8].y)*0.3333333333333333333333333;
  temp2.x =  (mat[0].y-temp)*coeff;
  temp2.y =  (mat[4].y-temp)*coeff;
  mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride*3] = temp2;
  temp2.x = (mat[8].y - temp)*coeff;
  temp2.y =  0.0;
  mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride*4] = temp2;
  return;
}
// Struct to determine the coefficient sign at compile time
// result is +1 when pos_dir == odd_lattice, otherwise -1 (the primary
// template covers the <1,0> case; the specializations enumerate the rest).
template<int pos_dir, int odd_lattice>
struct CoeffSign
{
  static const int result = -1;
};
template<>
struct CoeffSign<0,1>
{
  static const int result = -1;
};
template<>
struct CoeffSign<0,0>
{
  static const int result = 1;
};
template<>
struct CoeffSign<1,1>
{
  static const int result = 1;
};

// Parity sign: +1 on the even sublattice, -1 on the odd one.
template<int odd_lattice>
struct Sign
{
  static const int result = 1;
};
template<>
struct Sign<1>
{
  static const int result = -1;
};

// Number of storage elements per color matrix: 9 two-component entries in
// general, 5 float4s for the packed float4 layout.
template<class RealX>
struct ArrayLength
{
  static const int result=9;
};
template<>
struct ArrayLength<float4>
{
  static const int result=5;
};
// Compute the per-direction sign factor used when reconstructing a
// compressed (recon-12) link at lattice coordinate i[4].  The sign is a
// staggered-style phase depending on the parity of the preceding
// coordinates; the T direction instead applies the sign at the time
// boundary (the local edge slices under MULTI_GPU, the last slice
// otherwise).
template<typename T>
__device__ void reconstructSign(int* const sign, int dir, const T i[4]){
  *sign=1;
  switch(dir){
  case XUP:
    // Phase depends on t parity.
    if( (i[3]&1)==1) *sign=-1;
    break;
  case YUP:
    // Phase depends on (t+x) parity.
    if( ((i[3]+i[0])&1) == 1) *sign=-1;
    break;
  case ZUP:
    // Phase depends on (t+x+y) parity.
    if( ((i[3]+i[0]+i[1])&1) == 1) *sign=-1;
    break;
  case TUP:
#ifdef MULTI_GPU
    // Boundary sign on the first/last time slice of the global lattice
    // (Pt0 / PtNm1 mark whether this rank owns those slices).
    if( (i[3] == X4+1 && PtNm1)
        || (i[3] == 1 && Pt0)) {
      *sign=-1;
    }
#else
    if(i[3] == X4m1) *sign=-1;
#endif
    break;
  default:
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
    printf("Error: invalid dir\n");
#endif
    break;
  }
  return;
}
// One-link term of the HISQ force: for forward directions `sig`, add
// coeff * oprod(sig) at every site of this parity to the output field.
// Under MULTI_GPU the interior-volume thread index is remapped into the
// 2-site-extended lattice (E* strides) before the fields are indexed.
template<class RealA, int oddBit>
__global__ void
do_one_link_term_kernel(const RealA* const oprodEven, const RealA* const oprodOdd,
                        int sig, typename RealTypeId<RealA>::Type coeff,
                        RealA* const outputEven, RealA* const outputOdd, const int threads)
{
  int sid = blockIdx.x * blockDim.x + threadIdx.x;
  if (sid >= threads) return;
#ifdef MULTI_GPU
  // Decompose the checkerboard index into (x0,x1,x2,x3), then rebuild the
  // half-index with the +2 halo offset in every dimension.
  int x[4];
  int z1 = sid/X1h;
  int x1h = sid - z1*X1h;
  int z2 = z1/X2;
  x[1] = z1 - z2*X2;
  x[3] = z2/X3;
  x[2] = z2 - x[3]*X3;
  int x1odd = (x[1] + x[2] + x[3] + oddBit) & 1;
  x[0] = 2*x1h + x1odd;
  //int X = 2*sid + x1odd;
  int new_sid = ( (x[3]+2)*E3E2E1+(x[2]+2)*E2E1+(x[1]+2)*E1+(x[0]+2))>>1 ;
#else
  int new_sid = sid;
#endif
  RealA COLOR_MAT_W[ArrayLength<RealA>::result];
  if(GOES_FORWARDS(sig)){
    loadMatrixFromField(oprodEven, oprodOdd, sig, new_sid, COLOR_MAT_W, oddBit, hf.color_matrix_stride);
    addMatrixToField(COLOR_MAT_W, sig, new_sid, coeff, outputEven, outputOdd, oddBit);
  }
  return;
}
#define DD_CONCAT(n,r) n ## r ## kernel
#define HISQ_KERNEL_NAME(a,b) DD_CONCAT(a,b)
//precision: 0 is for double, 1 is for single
#define NEWOPROD_EVEN_TEX newOprod0TexDouble
#define NEWOPROD_ODD_TEX newOprod1TexDouble
#if (HISQ_NEW_OPROD_LOAD_TEX == 1)
#define LOAD_TEX_ENTRY(tex, field, idx) READ_DOUBLE2_TEXTURE(tex, field, idx)
#else
#define LOAD_TEX_ENTRY(tex, field, idx) field[idx]
#endif
//double precision, recon=18
#define PRECISION 0
#define RECON 18
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_18_DOUBLE_TEX((oddness)?siteLink1TexDouble:siteLink0TexDouble, (oddness)?linkOdd:linkEven, dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
//double precision, recon=12
#define PRECISION 0
#define RECON 12
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_12_DOUBLE_TEX((oddness)?siteLink1TexDouble:siteLink0TexDouble, (oddness)?linkOdd:linkEven,dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField<6>(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x) reconstructSign(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign) FF_RECONSTRUCT_LINK_12(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
#undef NEWOPROD_EVEN_TEX
#undef NEWOPROD_ODD_TEX
#undef LOAD_TEX_ENTRY
#define NEWOPROD_EVEN_TEX newOprod0TexSingle
#define NEWOPROD_ODD_TEX newOprod1TexSingle
#if (HISQ_NEW_OPROD_LOAD_TEX==1)
#define LOAD_TEX_ENTRY(tex, field, idx) tex1Dfetch(tex,idx)
#else
#define LOAD_TEX_ENTRY(tex, field, idx) field[idx]
#endif
//single precision, recon=18
#define PRECISION 1
#define RECON 18
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_18_SINGLE_TEX((oddness)?siteLink1TexSingle:siteLink0TexSingle, dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
//single precision, recon=12
#define PRECISION 1
#define RECON 12
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_12_SINGLE_TEX((oddness)?siteLink1TexSingle_recon:siteLink0TexSingle_recon, dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x) reconstructSign(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign) FF_RECONSTRUCT_LINK_12(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
#undef NEWOPROD_EVEN_TEX
#undef NEWOPROD_ODD_TEX
#undef LOAD_TEX_ENTRY
template<class RealA, class RealB>
class MiddleLink : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &oprod;
const cudaGaugeField &Qprev;
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &Pmu;
cudaGaugeField &P3;
cudaGaugeField &Qmu;
cudaGaugeField &newOprod;
const hisq_kernel_param_t &kparam;
// These kernels use no dynamic shared memory, so tuning ignores it.
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// generalize Tunable::advanceBlockDim() to also set gridDim, with extra checking to ensure that gridDim isn't too large for the device
// Step the candidate block size by one warp.  When the device limits are
// exceeded, reset to the smallest block that still covers kparam.threads
// within the grid-size limit and report false (tuning exhausted); the
// grid is recomputed to cover the volume for whichever block size results.
bool advanceBlockDim(TuneParam &param) const
{
  const unsigned int max_threads = deviceProp.maxThreadsDim[0];
  const unsigned int max_blocks = deviceProp.maxGridSize[0];
  const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
  const int step = deviceProp.warpSize;
  bool ret;

  param.block.x += step;
  if (param.block.x > max_threads || sharedBytesPerThread()*param.block.x > max_shared) {
    // Wrap around: pick the minimal block that keeps the grid in range.
    param.block = dim3((kparam.threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
    param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
    if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
    ret = false;
  } else {
    ret = true;
  }
  param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
  return ret;
}
public:
// Primary constructor: binds every input/output field and the path
// parameters (sig, mu, coeff) for one middle-link contribution.
MiddleLink(const cudaGaugeField &link,
           const cudaGaugeField &oprod,
           const cudaGaugeField &Qprev,
           int sig, int mu,
           const typename RealTypeId<RealA>::Type &coeff,
           cudaGaugeField &Pmu, // write only
           cudaGaugeField &P3,  // write only
           cudaGaugeField &Qmu,
           cudaGaugeField &newOprod,
           const hisq_kernel_param_t &kparam) :
  link(link), oprod(oprod), Qprev(Qprev), sig(sig), mu(mu),
  coeff(coeff), Pmu(Pmu), P3(P3), Qmu(Qmu), newOprod(newOprod), kparam(kparam)
{ ; }

// need alternative constructor to hack around null pointer passing
// (no Qprev in this variant; `link` is bound in its place purely so the
// reference member remains valid).
MiddleLink(const cudaGaugeField &link,
           const cudaGaugeField &oprod,
           int sig, int mu,
           const typename RealTypeId<RealA>::Type &coeff,
           cudaGaugeField &Pmu, // write only
           cudaGaugeField &P3,  // write only
           cudaGaugeField &Qmu,
           cudaGaugeField &newOprod,
           const hisq_kernel_param_t &kparam) :
  link(link), oprod(oprod), Qprev(link), sig(sig), mu(mu),
  coeff(coeff), Pmu(Pmu), P3(P3), Qmu(Qmu), newOprod(newOprod), kparam(kparam)
{ ; }

virtual ~MiddleLink() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA,hipLaunchKernelGGL(( typeB)) , dim3(tp.grid), dim3(tp.block), 0, 0, \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeA*)Qprev_even, (typeA*)Qprev_odd, \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, coeff, \
(typeA*)Pmu.Even_p(), (typeA*)Pmu.Odd_p(), \
(typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeA*)Qmu.Even_p(), (typeA*)Qmu.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), kparam)
#define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change ==0 ){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
const void *Qprev_even = (&Qprev == &link) ? NULL : Qprev.Even_p();
const void *Qprev_odd = (&Qprev == &link) ? NULL : Qprev.Odd_p();
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(0,1);
}else{
CALL_MIDDLE_LINK_KERNEL(0,0);
}
}
#undef CALL_ARGUMENTS
#undef CALL_MIDDLE_LINK_KERNEL
void preTune() {
Pmu.backup();
P3.backup();
Qmu.backup();
newOprod.backup();
}
void postTune() {
Pmu.restore();
P3.restore();
Qmu.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const int step = deviceProp.warpSize;
param.block = dim3((kparam.threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
param.shared_bytes = sharedBytesPerThread()*param.block.x > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x : sharedBytesPerBlock(param);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
long long flops() const { return 0; }
};
template<class RealA, class RealB>
class LepageMiddleLink : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &oprod;
const cudaGaugeField &Qprev;
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &P3; // write only
cudaGaugeField &newOprod;
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
LepageMiddleLink(const cudaGaugeField &link,
const cudaGaugeField &oprod,
const cudaGaugeField &Qprev,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
cudaGaugeField &P3, cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), oprod(oprod), Qprev(Qprev), sig(sig), mu(mu),
coeff(coeff), P3(P3), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~LepageMiddleLink() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA,hipLaunchKernelGGL(( typeB)) , dim3(tp.grid), dim3(tp.block), 0, 0, \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeA*)Qprev.Even_p(), (typeA*)Qprev.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, coeff, \
(typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), \
kparam)
#define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(0,1);
}else{
CALL_MIDDLE_LINK_KERNEL(0,0);
}
}
#undef CALL_ARGUMENTS
#undef CALL_MIDDLE_LINK_KERNEL
void preTune() {
P3.backup();
newOprod.backup();
}
void postTune() {
P3.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; }
};
template<class RealA, class RealB>
class SideLink : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &P3;
const cudaGaugeField &oprod;
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
const typename RealTypeId<RealA>::Type &accumu_coeff;
cudaGaugeField &shortP;
cudaGaugeField &newOprod;
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
SideLink(const cudaGaugeField &link,
const cudaGaugeField &P3,
const cudaGaugeField &oprod,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
const typename RealTypeId<RealA>::Type &accumu_coeff,
cudaGaugeField &shortP,
cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), P3(P3), oprod(oprod),
sig(sig), mu(mu), coeff(coeff), accumu_coeff(accumu_coeff),
shortP(shortP), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~SideLink() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA,hipLaunchKernelGGL(( typeB)) , dim3(tp.grid), dim3(tp.block), 0, 0, \
(typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, \
coeff, \
(typename RealTypeId<typeA>::Type) accumu_coeff, \
(typeA*)shortP.Even_p(), (typeA*)shortP.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), \
kparam)
#define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(0,1);
}else{
CALL_SIDE_LINK_KERNEL(0,0);
}
}
#undef CALL_SIDE_LINK_KERNEL
#undef CALL_ARGUMENTS
void preTune() {
shortP.backup();
newOprod.backup();
}
void postTune() {
shortP.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; }
};
template<class RealA, class RealB>
class SideLinkShort : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &P3;
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &newOprod;
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
SideLinkShort(const cudaGaugeField &link, const cudaGaugeField &P3, int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff, cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), P3(P3), sig(sig), mu(mu), coeff(coeff), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~SideLinkShort() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA,hipLaunchKernelGGL(( typeB)) , dim3(tp.grid), dim3(tp.block), 0, 0, \
(typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, (typename RealTypeId<typeA>::Type) coeff, \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), kparam)
#define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(0,1);
}else{
CALL_SIDE_LINK_KERNEL(0,0);
}
}
#undef CALL_SIDE_LINK_KERNEL
#undef CALL_ARGUMENTS
void preTune() {
newOprod.backup();
}
void postTune() {
newOprod.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; }
};
template<class RealA, class RealB>
class AllLink : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &oprod;
const cudaGaugeField &Qprev;
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
const typename RealTypeId<RealA>::Type &accumu_coeff;
cudaGaugeField &shortP;
cudaGaugeField &newOprod;
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
AllLink(const cudaGaugeField &link,
const cudaGaugeField &oprod,
const cudaGaugeField &Qprev,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
const typename RealTypeId<RealA>::Type &accumu_coeff,
cudaGaugeField &shortP, cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), oprod(oprod), Qprev(Qprev), sig(sig), mu(mu),
coeff(coeff), accumu_coeff(accumu_coeff), shortP(shortP),
newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~AllLink() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA,hipLaunchKernelGGL(( typeB)) , dim3(tp.grid), dim3(tp.block), 0, 0, \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeA*)Qprev.Even_p(), (typeA*)Qprev.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), sig, mu, \
(typename RealTypeId<typeA>::Type)coeff, \
(typename RealTypeId<typeA>::Type)accumu_coeff, \
(typeA*)shortP.Even_p(),(typeA*)shortP.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), kparam)
#define CALL_ALL_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(1, 1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_ALL_LINK_KERNEL(1, 0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(0, 1);
}else{
CALL_ALL_LINK_KERNEL(0, 0);
}
return;
}
#undef CALL_ARGUMENTS
#undef CALL_ALL_LINK_KERNEL
void preTune() {
shortP.backup();
newOprod.backup();
}
void postTune() {
shortP.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; }
};
template<class RealA, class RealB>
class OneLinkTerm : public Tunable {
private:
const cudaGaugeField &oprod;
const int sig;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &ForceMatrix;
const int* X;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads + param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
OneLinkTerm(const cudaGaugeField &oprod, int sig,
const typename RealTypeId<RealA>::Type &coeff,
cudaGaugeField &ForceMatrix, const int* _X) :
oprod(oprod), sig(sig), coeff(coeff), ForceMatrix(ForceMatrix), X(_X)
{ ; }
virtual ~OneLinkTerm() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << X[0] << "x";
vol << X[1] << "x";
vol << X[2] << "x";
vol << X[3];
int threads = X[0]*X[1]*X[2]*X[3]/2;
aux << "threads=" << threads << ",prec=" << oprod.Precision();
aux << ",sig=" << sig << ",coeff=" << coeff;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
int threads = X[0]*X[1]*X[2]*X[3]/2;
if(GOES_FORWARDS(sig)){
hipLaunchKernelGGL(( do_one_link_term_kernel<RealA,0>), dim3(tp.grid),dim3(tp.block), 0, 0, static_cast<const RealA*>(oprod.Even_p()),
static_cast<const RealA*>(oprod.Odd_p()),
sig, coeff,
static_cast<RealA*>(ForceMatrix.Even_p()),
static_cast<RealA*>(ForceMatrix.Odd_p()),
threads);
hipLaunchKernelGGL(( do_one_link_term_kernel<RealA,1>), dim3(tp.grid),dim3(tp.block), 0, 0, static_cast<const RealA*>(oprod.Even_p()),
static_cast<const RealA*>(oprod.Odd_p()),
sig, coeff,
static_cast<RealA*>(ForceMatrix.Even_p()),
static_cast<RealA*>(ForceMatrix.Odd_p()),
threads);
}
}
void preTune() {
ForceMatrix.backup();
}
void postTune() {
ForceMatrix.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; }
};
template<class RealA, class RealB>
class LongLinkTerm : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &naikOprod;
const int sig;
const typename RealTypeId<RealA>::Type &naik_coeff;
cudaGaugeField &output;
const int * X;
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam ¶m) const { return false; }
bool advanceBlockDim(TuneParam ¶m) const {
bool rtn = Tunable::advanceBlockDim(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads + param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
LongLinkTerm(const cudaGaugeField &link, const cudaGaugeField &naikOprod,
int sig, const typename RealTypeId<RealA>::Type &naik_coeff,
cudaGaugeField &output, const int* _X, const hisq_kernel_param_t &kparam) :
link(link), naikOprod(naikOprod), sig(sig), naik_coeff(naik_coeff), output(output),
X(_X), kparam(kparam)
{ ; }
virtual ~LongLinkTerm() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << X[0] << "x";
vol << X[1] << "x";
vol << X[2] << "x";
vol << X[3];
int threads = X[0]*X[1]*X[2]*X[3]/2;
aux << "threads=" << threads << ",prec=" << link.Precision();
aux << ",sig=" << sig;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA,hipLaunchKernelGGL(( typeB)) , dim3(tp.grid),dim3(tp.block), 0, 0, \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
(typeA*)naikOprod.Even_p(), (typeA*)naikOprod.Odd_p(), \
sig, naik_coeff, \
(typeA*)output.Even_p(), (typeA*)output.Odd_p(), \
kparam);
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
if(GOES_BACKWARDS(sig)) errorQuda("sig does not go forward\n");
if(sizeof(RealA) == sizeof(float2)){
if(recon == QUDA_RECONSTRUCT_NO){
do_longlink_sp_18_kernel<float2,float2, 0> CALL_ARGUMENTS(float2, float2);
do_longlink_sp_18_kernel<float2,float2, 1> CALL_ARGUMENTS(float2, float2);
}else{
do_longlink_sp_12_kernel<float2,float4, 0> CALL_ARGUMENTS(float2, float4);
do_longlink_sp_12_kernel<float2,float4, 1> CALL_ARGUMENTS(float2, float4);
}
}else{
if(recon == QUDA_RECONSTRUCT_NO){
do_longlink_dp_18_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_longlink_dp_18_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}else{
do_longlink_dp_12_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_longlink_dp_12_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}
}
}
#undef CALL_ARGUMENTS
void preTune() {
output.backup();
}
void postTune() {
output.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
Tunable::initTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const
{
Tunable::defaultTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; }
};
/**
 * CompleteForce: converts the accumulated outer-product field into the final
 * momentum update for one direction (sig), dispatching to the precision- and
 * reconstruct-specific kernels.  Runs one kernel per parity over the half
 * volume X[0]*X[1]*X[2]*X[3]/2.
 */
template<class RealA, class RealB>
class CompleteForce : public Tunable {
private:
const cudaGaugeField &link;   // gauge-link field (read only)
const cudaGaugeField &oprod;  // accumulated outer product (read only)
const int sig;                // direction index, 0..3
cudaGaugeField &mom;          // momentum field, updated in place
const int* X;                 // local lattice dimensions
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
// BUGFIX: "¶m" mojibake (HTML entity for "&param") restored to "&param"
// throughout this class.
bool advanceGridDim(TuneParam &param) const { return false; }
// Advance the block size, recomputing the grid so grid*block covers the half volume.
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads + param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
CompleteForce(const cudaGaugeField &link, const cudaGaugeField &oprod,
int sig, cudaGaugeField &mom, const int* _X) :
link(link), oprod(oprod), sig(sig), mom(mom), X(_X)
{ ; }
virtual ~CompleteForce() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << X[0] << "x";
vol << X[1] << "x";
vol << X[2] << "x";
vol << X[3];
int threads = X[0]*X[1]*X[2]*X[3]/2;
aux << "threads=" << threads << ",prec=" << link.Precision() << ",sig=" << sig;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// BUGFIX: the automatic hipify pass mangled this macro by splicing
// hipLaunchKernelGGL into its parameter list, which does not compile.
// hip-clang supports the CUDA triple-chevron launch syntax, so the original
// postfix launch form used at the call sites below is restored.
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>>( \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
sig, \
(typeA*)mom.Even_p(), (typeA*)mom.Odd_p(), \
X[0]*X[1]*X[2]*X[3]/2)
// Launch the momentum-update kernels for both parities.  NOTE(review): the
// stream argument is currently unused (launches go to the default stream),
// matching the original behavior.
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct(); // stray ';;' removed
if(sizeof(RealA) == sizeof(float2)){
if(recon == QUDA_RECONSTRUCT_NO){
do_complete_force_sp_18_kernel<float2,float2, 0> CALL_ARGUMENTS(float2, float2);
do_complete_force_sp_18_kernel<float2,float2, 1> CALL_ARGUMENTS(float2, float2);
}else{
do_complete_force_sp_12_kernel<float2,float4, 0> CALL_ARGUMENTS(float2, float4);
do_complete_force_sp_12_kernel<float2,float4, 1> CALL_ARGUMENTS(float2, float4);
}
}else{
if(recon == QUDA_RECONSTRUCT_NO){
do_complete_force_dp_18_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_complete_force_dp_18_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}else{
do_complete_force_dp_12_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_complete_force_dp_12_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}
}
}
#undef CALL_ARGUMENTS
// Save/restore the momentum field around destructive tuning launches.
void preTune() {
mom.backup();
}
void postTune() {
mom.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // FLOP count not tracked; tuning is time-based
};
// Bind the site-link and new-outer-product fields to the textures read by the
// force kernels.  Each parity half of a field occupies Bytes()/2; the texture
// chosen for the links depends on precision and reconstruction.
static void
bind_tex_link(const cudaGaugeField& link, const cudaGaugeField& newOprod)
{
const size_t linkBytes  = link.Bytes()/2;     // one parity of the link field
const size_t oprodBytes = newOprod.Bytes()/2; // one parity of the oprod field
if(link.Precision() == QUDA_DOUBLE_PRECISION){
hipBindTexture(0, siteLink0TexDouble, link.Even_p(), linkBytes);
hipBindTexture(0, siteLink1TexDouble, link.Odd_p(),  linkBytes);
hipBindTexture(0, newOprod0TexDouble, newOprod.Even_p(), oprodBytes);
hipBindTexture(0, newOprod1TexDouble, newOprod.Odd_p(),  oprodBytes);
}else{
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
hipBindTexture(0, siteLink0TexSingle, link.Even_p(), linkBytes);
hipBindTexture(0, siteLink1TexSingle, link.Odd_p(),  linkBytes);
}else{
hipBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), linkBytes);
hipBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(),  linkBytes);
}
hipBindTexture(0, newOprod0TexSingle, newOprod.Even_p(), oprodBytes);
hipBindTexture(0, newOprod1TexSingle, newOprod.Odd_p(),  oprodBytes);
}
}
static void
unbind_tex_link(const cudaGaugeField& link, const cudaGaugeField& newOprod)
{
// Release the texture bindings established by bind_tex_link(); the set of
// textures unbound mirrors exactly what was bound for this precision and
// reconstruction type.  newOprod is unused here beyond symmetry with bind.
if(link.Precision() == QUDA_DOUBLE_PRECISION){
hipUnbindTexture(siteLink0TexDouble);
hipUnbindTexture(siteLink1TexDouble);
hipUnbindTexture(newOprod0TexDouble);
hipUnbindTexture(newOprod1TexDouble);
}else{
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
hipUnbindTexture(siteLink0TexSingle);
hipUnbindTexture(siteLink1TexSingle);
}else{
hipUnbindTexture(siteLink0TexSingle_recon);
hipUnbindTexture(siteLink1TexSingle_recon);
}
hipUnbindTexture(newOprod0TexSingle);
hipUnbindTexture(newOprod1TexSingle);
}
}
/**
 * Drive the full HISQ staple-force computation: for every direction pair the
 * 3-link, 5-link, 7-link and Lepage contributions are accumulated into
 * newOprod via the Middle/Side/All-link kernels.  Pmu/P3/P5/Pnumu/Qmu/Qnumu
 * are scratch color-matrix fields.
 */
template<class Real, class RealA, class RealB>
static void
do_hisq_staples_force_cuda( PathCoefficients<Real> act_path_coeff,
                            const QudaGaugeParam& param,
                            const cudaGaugeField &oprod,
                            const cudaGaugeField &link,
                            cudaGaugeField &Pmu,
                            cudaGaugeField &P3,
                            cudaGaugeField &P5,
                            cudaGaugeField &Pnumu,
                            cudaGaugeField &Qmu,
                            cudaGaugeField &Qnumu,
                            cudaGaugeField &newOprod)
{
  Real coeff;
  Real OneLink, Lepage, FiveSt, ThreeSt, SevenSt;
  Real mLepage, mFiveSt, mThreeSt;
  // path coefficients and their negatives (signs come from the force derivation)
  OneLink = act_path_coeff.one;
  ThreeSt = act_path_coeff.three; mThreeSt = -ThreeSt;
  FiveSt = act_path_coeff.five; mFiveSt = -FiveSt;
  SevenSt = act_path_coeff.seven;
  Lepage = act_path_coeff.lepage; mLepage = -Lepage;
  // one-link term: independent of mu, handled up front for all forward directions
  for(int sig=0; sig<8; ++sig){
    if(GOES_FORWARDS(sig)){
      OneLinkTerm<RealA, RealB> oneLink(oprod, sig, OneLink, newOprod, param.X);
      oneLink.apply(0);
      checkCudaError();
    } // GOES_FORWARDS(sig)
  }
  int ghostDim[4]={
    commDimPartitioned(0),
    commDimPartitioned(1),
    commDimPartitioned(2),
    commDimPartitioned(3)
  };
  hisq_kernel_param_t kparam_1g, kparam_2g;
#ifdef MULTI_GPU
  // kernel geometries with one-site (1g) and two-site (2g) ghost borders
  // in each partitioned dimension
  kparam_1g.D1 = commDimPartitioned(0)?(param.X[0]+2):(param.X[0]);
  kparam_1g.D2 = commDimPartitioned(1)?(param.X[1]+2):(param.X[1]);
  kparam_1g.D3 = commDimPartitioned(2)?(param.X[2]+2):(param.X[2]);
  kparam_1g.D4 = commDimPartitioned(3)?(param.X[3]+2):(param.X[3]);
  kparam_1g.D1h = kparam_1g.D1/2;
  kparam_1g.base_idx[0]=commDimPartitioned(0)?1:2;
  kparam_1g.base_idx[1]=commDimPartitioned(1)?1:2;
  kparam_1g.base_idx[2]=commDimPartitioned(2)?1:2;
  kparam_1g.base_idx[3]=commDimPartitioned(3)?1:2;
  kparam_1g.threads = kparam_1g.D1*kparam_1g.D2*kparam_1g.D3*kparam_1g.D4/2;
  kparam_2g.D1 = commDimPartitioned(0)?(param.X[0]+4):(param.X[0]);
  kparam_2g.D2 = commDimPartitioned(1)?(param.X[1]+4):(param.X[1]);
  kparam_2g.D3 = commDimPartitioned(2)?(param.X[2]+4):(param.X[2]);
  kparam_2g.D4 = commDimPartitioned(3)?(param.X[3]+4):(param.X[3]);
  kparam_2g.D1h = kparam_2g.D1/2;
  kparam_2g.base_idx[0]=commDimPartitioned(0)?0:2;
  kparam_2g.base_idx[1]=commDimPartitioned(1)?0:2;
  kparam_2g.base_idx[2]=commDimPartitioned(2)?0:2;
  kparam_2g.base_idx[3]=commDimPartitioned(3)?0:2;
  kparam_2g.threads = kparam_2g.D1*kparam_2g.D2*kparam_2g.D3*kparam_2g.D4/2;
  for(int i=0;i < 4; i++){
    // BUGFIX: was a redundant self-referencing chained assignment
    kparam_1g.ghostDim[i] = kparam_2g.ghostDim[i] = ghostDim[i];
  }
#else
  hisq_kernel_param_t kparam;
  kparam.D1 = param.X[0];
  kparam.D2 = param.X[1];
  kparam.D3 = param.X[2];
  kparam.D4 = param.X[3];
  kparam.D1h = param.X[0]/2;
  kparam.threads=param.X[0]*param.X[1]*param.X[2]*param.X[3]/2;
  kparam.base_idx[0]=0;
  kparam.base_idx[1]=0;
  kparam.base_idx[2]=0;
  kparam.base_idx[3]=0;
  // BUGFIX: ghostDim was left uninitialized in the single-GPU path and then
  // copied into kparam_1g/kparam_2g below; initialize it explicitly (all
  // entries are 0 here since no dimension is partitioned).
  for(int i=0; i<4; i++) kparam.ghostDim[i] = ghostDim[i];
  kparam_2g = kparam_1g = kparam;
#endif
  for(int sig=0; sig<8; sig++){
    for(int mu=0; mu<8; mu++){
      if ( (mu == sig) || (mu == OPP_DIR(sig))){
        continue;
      }
      //3-link
      //Kernel A: middle link
      MiddleLink<RealA,RealB> middleLink( link, oprod, // read only
                                          sig, mu, mThreeSt,
                                          Pmu, P3, Qmu, // write only
                                          newOprod, kparam_2g);
      middleLink.apply(0);
      checkCudaError();
      for(int nu=0; nu < 8; nu++){
        if (nu == sig || nu == OPP_DIR(sig)
            || nu == mu || nu == OPP_DIR(mu)){
          continue;
        }
        //5-link: middle link
        //Kernel B
        MiddleLink<RealA,RealB> middleLink( link, Pmu, Qmu, // read only
                                            sig, nu, FiveSt,
                                            Pnumu, P5, Qnumu, // write only
                                            newOprod, kparam_1g);
        middleLink.apply(0);
        checkCudaError();
        for(int rho = 0; rho < 8; rho++){
          if (rho == sig || rho == OPP_DIR(sig)
              || rho == mu || rho == OPP_DIR(mu)
              || rho == nu || rho == OPP_DIR(nu)){
            continue;
          }
          //7-link: middle link and side link
          // seven-link side coefficient is relative to the five-link one
          if(FiveSt != 0)coeff = SevenSt/FiveSt; else coeff = 0;
          AllLink<RealA,RealB> allLink(link, Pnumu, Qnumu, sig, rho, SevenSt, coeff,
                                       P5, newOprod, kparam_1g);
          allLink.apply(0);
          checkCudaError();
        }//rho
        //5-link: side link
        if(ThreeSt != 0)coeff = FiveSt/ThreeSt; else coeff = 0;
        SideLink<RealA,RealB> sideLink(link, P5, Qmu, //read only
                                       sig, nu, mFiveSt, coeff,
                                       P3, // write only
                                       newOprod, kparam_1g);
        sideLink.apply(0);
        checkCudaError();
      } //nu
      //lepage
      if(Lepage != 0.){
        LepageMiddleLink<RealA,RealB>
          lepageMiddleLink ( link, Pmu, Qmu, // read only
                             sig, mu, Lepage,
                             P5, // write only
                             newOprod, kparam_2g);
        lepageMiddleLink.apply(0);
        checkCudaError();
        if(ThreeSt != 0)coeff = Lepage/ThreeSt ; else coeff = 0;
        SideLink<RealA, RealB> sideLink(link, P5, Qmu, // read only
                                        sig, mu, mLepage, coeff,
                                        P3, //write only
                                        newOprod, kparam_2g);
        sideLink.apply(0);
        checkCudaError();
      } // Lepage != 0.0
      //3-link side link
      SideLinkShort<RealA,RealB> sideLinkShort(link, P3, // read only
                                               sig, mu, ThreeSt,
                                               newOprod, kparam_1g);
      sideLinkShort.apply(0);
      checkCudaError();
    }//mu
  }//sig
  return;
} // do_hisq_staples_force_cuda
#undef Pmu
#undef Pnumu
#undef P3
#undef P5
#undef Qmu
#undef Qnumu
/**
 * Convert the accumulated outer product into the final momentum update,
 * launching one CompleteForce kernel per positive direction.
 * BUGFIX: the reference declarator was garbled to "¶m" (HTML-entity
 * mojibake for "&param"); restored so the signature compiles.
 */
void hisqCompleteForceCuda(const QudaGaugeParam &param,
                           const cudaGaugeField &oprod,
                           const cudaGaugeField &link,
                           cudaGaugeField* force)
{
  bind_tex_link(link, oprod);
  for(int sig=0; sig<4; sig++){
    if(param.cuda_prec == QUDA_DOUBLE_PRECISION){
      CompleteForce<double2,double2> completeForce(link, oprod, sig, *force, param.X);
      completeForce.apply(0);
      checkCudaError();
    }else if(param.cuda_prec == QUDA_SINGLE_PRECISION){
      CompleteForce<float2,float2> completeForce(link, oprod, sig, *force, param.X);
      completeForce.apply(0);
      checkCudaError();
    }else{
      errorQuda("Unsupported precision");
    }
  } // loop over directions
  unbind_tex_link(link, oprod);
  return;
}
/**
 * Add the Naik (long-link) contribution to the new outer product, one
 * LongLinkTerm kernel per positive direction.
 * BUGFIX: the reference declarator was garbled to "¶m" (HTML-entity
 * mojibake for "&param"); restored so the signature compiles.
 */
void hisqLongLinkForceCuda(double coeff,
                           const QudaGaugeParam &param,
                           const cudaGaugeField &oldOprod,
                           const cudaGaugeField &link,
                           cudaGaugeField *newOprod)
{
  bind_tex_link(link, *newOprod);
  const int volume = param.X[0]*param.X[1]*param.X[2]*param.X[3];
  hisq_kernel_param_t kparam;
  // record which dimensions are partitioned across GPUs
  for(int i =0;i < 4;i++){
    kparam.ghostDim[i] = commDimPartitioned(i);
  }
  kparam.threads = volume/2; // one thread per site of a single parity
  for(int sig=0; sig<4; ++sig){
    if(param.cuda_prec == QUDA_DOUBLE_PRECISION){
      LongLinkTerm<double2,double2> longLink(link, oldOprod, sig, coeff, *newOprod, param.X, kparam);
      longLink.apply(0);
      checkCudaError();
    }else if(param.cuda_prec == QUDA_SINGLE_PRECISION){
      LongLinkTerm<float2,float2> longLink(link, oldOprod, sig, static_cast<float>(coeff),
                                           *newOprod, param.X, kparam);
      longLink.apply(0);
      checkCudaError();
    }else{
      errorQuda("Unsupported precision");
    }
  } // loop over directions
  unbind_tex_link(link, *newOprod);
  return;
}
/**
 * Entry point for the HISQ staple force.  Allocates the intermediate
 * color-matrix fields, fills the path coefficients
 * (path_coeff_array = {one, naik, three, five, seven, lepage}) and dispatches
 * to the precision-specific driver.
 * BUGFIX: the reference declarator was garbled to "¶m" (mojibake for
 * "&param"); restored.  The event-based timing computed an elapsed time that
 * was never consumed, so the dead timing readout is removed and only the host
 * synchronization it provided is kept.
 */
void
hisqStaplesForceCuda(const double path_coeff_array[6],
                     const QudaGaugeParam &param,
                     const cudaGaugeField &oprod,
                     const cudaGaugeField &link,
                     cudaGaugeField* newOprod)
{
#ifdef MULTI_GPU
  // temporaries live on the halo-extended lattice (two sites on each side)
  int X[4] = {
    param.X[0]+4, param.X[1]+4, param.X[2]+4, param.X[3]+4
  };
#else
  int X[4] = {
    param.X[0], param.X[1], param.X[2], param.X[3]
  };
#endif
  // create color matrix fields with zero padding
  int pad = 0;
  GaugeFieldParam gauge_param(X, param.cuda_prec, QUDA_RECONSTRUCT_NO, pad, QUDA_SCALAR_GEOMETRY);
  cudaGaugeField Pmu(gauge_param);
  cudaGaugeField P3(gauge_param);
  cudaGaugeField P5(gauge_param);
  cudaGaugeField Pnumu(gauge_param);
  cudaGaugeField Qmu(gauge_param);
  cudaGaugeField Qnumu(gauge_param);
  bind_tex_link(link, *newOprod);
  if (param.cuda_prec == QUDA_DOUBLE_PRECISION){
    PathCoefficients<double> act_path_coeff;
    act_path_coeff.one    = path_coeff_array[0];
    act_path_coeff.naik   = path_coeff_array[1];
    act_path_coeff.three  = path_coeff_array[2];
    act_path_coeff.five   = path_coeff_array[3];
    act_path_coeff.seven  = path_coeff_array[4];
    act_path_coeff.lepage = path_coeff_array[5];
    do_hisq_staples_force_cuda<double,double2,double2>( act_path_coeff,
                                                        param,
                                                        oprod,
                                                        link,
                                                        Pmu,
                                                        P3,
                                                        P5,
                                                        Pnumu,
                                                        Qmu,
                                                        Qnumu,
                                                        *newOprod);
  }else if(param.cuda_prec == QUDA_SINGLE_PRECISION){
    PathCoefficients<float> act_path_coeff;
    act_path_coeff.one    = path_coeff_array[0];
    act_path_coeff.naik   = path_coeff_array[1];
    act_path_coeff.three  = path_coeff_array[2];
    act_path_coeff.five   = path_coeff_array[3];
    act_path_coeff.seven  = path_coeff_array[4];
    act_path_coeff.lepage = path_coeff_array[5];
    do_hisq_staples_force_cuda<float,float2,float2>( act_path_coeff,
                                                     param,
                                                     oprod,
                                                     link,
                                                     Pmu,
                                                     P3,
                                                     P5,
                                                     Pnumu,
                                                     Qmu,
                                                     Qnumu,
                                                     *newOprod);
  }else{
    errorQuda("Unsupported precision");
  }
  // wait for all staple kernels to finish before the temporaries go out of scope
  hipDeviceSynchronize();
  checkCudaError();
  unbind_tex_link(link, *newOprod);
  return;
}
} // namespace fermion_force
} // namespace quda
| 72a49d8af6381eeecef2cd177c4a5f099b5656b4.cu | #include <read_gauge.h>
#include <gauge_field.h>
#include <hisq_force_quda.h>
#include <hw_quda.h>
#include <hisq_force_macros.h>
#include<utility>
//DEBUG : control compile
#define COMPILE_HISQ_DP_18
#define COMPILE_HISQ_DP_12
#define COMPILE_HISQ_SP_18
#define COMPILE_HISQ_SP_12
// Disable texture read for now. Need to revisit this.
#define HISQ_SITE_MATRIX_LOAD_TEX 1
#define HISQ_NEW_OPROD_LOAD_TEX 1
namespace quda {
namespace fermion_force {
// Per-launch geometry handed to the HISQ force kernels.
typedef struct hisq_kernel_param_s{
unsigned long threads; // number of sites of one parity the kernel must cover
int D1, D2,D3, D4, D1h; // local (possibly halo-extended) dimensions; D1h = D1/2
int base_idx[4]; // per-dimension start offset into the extended lattice
int ghostDim[4]; // 1 if that dimension is partitioned across GPUs, else 0
}hisq_kernel_param_t;
texture<int4, 1> newOprod0TexDouble;
texture<int4, 1> newOprod1TexDouble;
texture<float2, 1, cudaReadModeElementType> newOprod0TexSingle;
texture<float2, 1, cudaReadModeElementType> newOprod1TexSingle;
/**
 * One-time upload of the fat-force constants (array strides) to device
 * constant memory.  Subsequent calls are no-ops.
 * NOTE(review): the init guard is not thread-safe; assumed to be called from
 * a single host thread, as in the original code.
 */
void hisqForceInitCuda(QudaGaugeParam* param)
{
  static bool hisq_force_init_cuda_flag = false; // was a non-idiomatic int flag
  if (hisq_force_init_cuda_flag){
    return;
  }
  hisq_force_init_cuda_flag = true;
  int Vh = param->X[0]*param->X[1]*param->X[2]*param->X[3]/2;
  fat_force_const_t hf_h;
#ifdef MULTI_GPU
  // extended (halo) volume: two ghost sites on each side of every dimension
  int Vh_ex = (param->X[0]+4)*(param->X[1]+4)*(param->X[2]+4)*(param->X[3]+4)/2;
  hf_h.site_ga_stride = Vh_ex + param->site_ga_pad; // stray ';;' removed
  hf_h.color_matrix_stride = Vh_ex;
#else
  hf_h.site_ga_stride = Vh + param->site_ga_pad;
  hf_h.color_matrix_stride = Vh;
#endif
  hf_h.mom_ga_stride = Vh + param->mom_ga_pad;
  cudaMemcpyToSymbol(hf, &hf_h, sizeof(fat_force_const_t));
  checkCudaError();
}
// struct for holding the fattening path coefficients
template<class Real>
struct PathCoefficients
{
Real one; // 1-link coefficient
Real three; // 3-link (staple) coefficient
Real five; // 5-link coefficient
Real seven; // 7-link coefficient
Real naik; // Naik (long-link) coefficient
Real lepage; // Lepage-term coefficient
};
// scalar * complex (single precision): scale both components
inline __device__ float2 operator*(float a, const float2 & b)
{
float2 r; r.x = a*b.x; r.y = a*b.y; return r;
}
// scalar * complex (double precision): scale both components
inline __device__ double2 operator*(double a, const double2 & b)
{
double2 r; r.x = a*b.x; r.y = a*b.y; return r;
}
// in-place component-wise add (single precision complex)
inline __device__ const float2 & operator+=(float2 & a, const float2 & b)
{
a.x = a.x + b.x;
a.y = a.y + b.y;
return a;
}
// in-place component-wise add (double precision complex)
inline __device__ const double2 & operator+=(double2 & a, const double2 & b)
{
a.x = a.x + b.x;
a.y = a.y + b.y;
return a;
}
// in-place component-wise add of a packed float4
inline __device__ const float4 & operator+=(float4 & a, const float4 & b)
{
a.x = a.x + b.x;
a.y = a.y + b.y;
a.z = a.z + b.z;
a.w = a.w + b.w;
return a;
}
// Replication of code
// This structure is already defined in
// unitarize_utilities.h
// Maps a CUDA vector type to its underlying real scalar type
// (float2 -> float, double2 -> double).
template<class T>
struct RealTypeId;
template<>
struct RealTypeId<float2>
{
typedef float Type;
};
template<>
struct RealTypeId<double2>
{
typedef double Type;
};
// In-place conjugate transpose (dagger) of a 3x3 complex matrix stored in
// mat[0..8].  Diagonal entries are conjugated in place; each off-diagonal
// pair is conjugated and swapped through a temporary.
template<class T>
inline __device__
void adjointMatrix(T* mat)
{
#define CONJ_INDEX(i,j) j*3 + i
T tmp;
// diagonal: conjugate only
mat[CONJ_INDEX(0,0)] = conj(mat[0]);
mat[CONJ_INDEX(1,1)] = conj(mat[4]);
mat[CONJ_INDEX(2,2)] = conj(mat[8]);
// off-diagonal pairs: conjugate and transpose
tmp = conj(mat[1]);
mat[CONJ_INDEX(1,0)] = conj(mat[3]);
mat[CONJ_INDEX(0,1)] = tmp;
tmp = conj(mat[2]);
mat[CONJ_INDEX(2,0)] = conj(mat[6]);
mat[CONJ_INDEX(0,2)] = tmp;
tmp = conj(mat[5]);
mat[CONJ_INDEX(2,1)] = conj(mat[7]);
mat[CONJ_INDEX(1,2)] = tmp;
#undef CONJ_INDEX
return;
}
// Load the N strided entries of the matrix for direction 'dir' at site 'idx'
// from the field of the requested parity.
template<int N, class T>
inline __device__
void loadMatrixFromField(const T* const field_even, const T* const field_odd,
int dir, int idx, T* const mat, int oddness, int stride)
{
const T* const src = (oddness) ? field_odd : field_even;
const int base = idx + dir*N*stride;
for(int k = 0; k < N; ++k){
mat[k] = src[base + k*stride];
}
return;
}
// Convenience overload: load a full 3x3 matrix (9 complex entries).
template<class T>
inline __device__
void loadMatrixFromField(const T* const field_even, const T* const field_odd,
int dir, int idx, T* const mat, int oddness, int stride)
{
loadMatrixFromField<9>(field_even, field_odd, dir, idx, mat, oddness, stride);
return;
}
// Each float4 packs two float2 complex numbers; three float4 loads yield the
// six float2 entries of a compressed (reconstruct-12) link matrix.
inline __device__
void loadMatrixFromField(const float4* const field_even, const float4* const field_odd,
int dir, int idx, float2* const mat, int oddness, int stride)
{
const float4* const src = (oddness) ? field_odd : field_even;
const int base = idx + dir*stride*3;
for(int k = 0; k < 3; ++k){
const float4 v = src[base + k*stride];
mat[2*k]   = make_float2(v.x, v.y);
mat[2*k+1] = make_float2(v.z, v.w);
}
return;
}
// Direction-less variant: the nine strided entries start directly at 'idx'.
template<class T>
inline __device__
void loadMatrixFromField(const T* const field_even, const T* const field_odd, int idx, T* const mat, int oddness, int stride)
{
const T* const src = (oddness) ? field_odd : field_even;
for(int k = 0; k < 9; ++k){
mat[k] = src[idx + k*stride];
}
return;
}
// Accumulate coeff*mat into the new outer-product field at (dir, idx):
// current values are read through the bound texture (LOAD_TEX_ENTRY) and the
// updated values written straight back.  Implemented as a macro because
// NEWOPROD_EVEN_TEX/NEWOPROD_ODD_TEX are rebound per precision via #define
// before each inclusion of the kernel core.
#define addMatrixToNewOprod(mat, dir, idx, coeff, field_even, field_odd, oddness) do { \
RealA* const field = (oddness)?field_odd: field_even; \
RealA value[9]; \
value[0] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9); \
value[1] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + hf.color_matrix_stride); \
value[2] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 2*hf.color_matrix_stride); \
value[3] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 3*hf.color_matrix_stride); \
value[4] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 4*hf.color_matrix_stride); \
value[5] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 5*hf.color_matrix_stride); \
value[6] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 6*hf.color_matrix_stride); \
value[7] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 7*hf.color_matrix_stride); \
value[8] = LOAD_TEX_ENTRY( ((oddness)?NEWOPROD_ODD_TEX:NEWOPROD_EVEN_TEX), field, idx+dir*hf.color_matrix_stride*9 + 8*hf.color_matrix_stride); \
field[idx + dir*hf.color_matrix_stride*9] = value[0] + coeff*mat[0]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride] = value[1] + coeff*mat[1]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*2] = value[2] + coeff*mat[2]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*3] = value[3] + coeff*mat[3]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*4] = value[4] + coeff*mat[4]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*5] = value[5] + coeff*mat[5]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*6] = value[6] + coeff*mat[6]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*7] = value[7] + coeff*mat[7]; \
field[idx + dir*hf.color_matrix_stride*9 + hf.color_matrix_stride*8] = value[8] + coeff*mat[8]; \
}while(0)
// only works if Promote<T,U>::Type = T
// field[dir][idx] += coeff * mat, entry by entry (9 strided complex entries).
template<class T, class U>
inline __device__
void addMatrixToField(const T* const mat, int dir, int idx, U coeff,
T* const field_even, T* const field_odd, int oddness)
{
T* const dst = (oddness) ? field_odd : field_even;
const int base = idx + dir*hf.color_matrix_stride*9;
for(int k = 0; k < 9; ++k){
dst[base + k*hf.color_matrix_stride] += coeff*mat[k];
}
return;
}
// Direction-less variant: field[idx] += coeff * mat (9 strided entries).
template<class T, class U>
inline __device__
void addMatrixToField(const T* const mat, int idx, U coeff, T* const field_even,
T* const field_odd, int oddness)
{
T* const dst = (oddness) ? field_odd : field_even;
for(int k = 0; k < 9; ++k){
dst[idx + k*hf.color_matrix_stride] += coeff*mat[k];
}
return;
}
// Debug-only variant of addMatrixToField: identical accumulation, but also
// prints the first updated entry from the device (printf requires sm_20+,
// hence the compute-capability guard).
template<class T, class U>
inline __device__
void addMatrixToField_test(const T* const mat, int idx, U coeff, T* const field_even,
T* const field_odd, int oddness)
{
T* const field = (oddness)?field_odd: field_even;
//T oldvalue=field[idx];
field[idx ] += coeff*mat[0];
field[idx + hf.color_matrix_stride] += coeff*mat[1];
field[idx + hf.color_matrix_stride*2] += coeff*mat[2];
field[idx + hf.color_matrix_stride*3] += coeff*mat[3];
field[idx + hf.color_matrix_stride*4] += coeff*mat[4];
field[idx + hf.color_matrix_stride*5] += coeff*mat[5];
field[idx + hf.color_matrix_stride*6] += coeff*mat[6];
field[idx + hf.color_matrix_stride*7] += coeff*mat[7];
field[idx + hf.color_matrix_stride*8] += coeff*mat[8];
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("value is coeff(%f) * mat[0].x(%f)=%f\n", coeff, mat[0].x, field[idx].x);
#endif
return;
}
// Overwrite the 9 strided entries of field[dir][idx] with mat.
template<class T>
inline __device__
void storeMatrixToField(const T* const mat, int dir, int idx, T* const field_even, T* const field_odd, int oddness)
{
T* const dst = (oddness) ? field_odd : field_even;
const int base = idx + dir*hf.color_matrix_stride*9;
for(int k = 0; k < 9; ++k){
dst[base + k*hf.color_matrix_stride] = mat[k];
}
return;
}
// Direction-less variant: overwrite the 9 strided entries at field[idx].
template<class T>
inline __device__
void storeMatrixToField(const T* const mat, int idx, T* const field_even, T* const field_odd, int oddness)
{
T* const dst = (oddness) ? field_odd : field_even;
for(int k = 0; k < 9; ++k){
dst[idx + k*hf.color_matrix_stride] = mat[k];
}
return;
}
// Project the 3x3 matrix onto its traceless anti-Hermitian part, scale by
// coeff, and store in the compressed 5-element momentum layout: three
// off-diagonal complex entries followed by the traceless imaginary diagonal
// packed into two entries.
template<class T, class U>
inline __device__
void storeMatrixToMomentumField(const T* const mat, int dir, int idx, U coeff,
T* const mom_even, T* const mom_odd, int oddness)
{
T* const mom_field = (oddness)?mom_odd:mom_even;
T temp2;
// off-diagonal (0,1): (M - M^dag)/2 -> real parts subtract, imaginary parts add
temp2.x = (mat[1].x - mat[3].x)*0.5*coeff;
temp2.y = (mat[1].y + mat[3].y)*0.5*coeff;
mom_field[idx + dir*hf.mom_ga_stride*5] = temp2;
// off-diagonal (0,2)
temp2.x = (mat[2].x - mat[6].x)*0.5*coeff;
temp2.y = (mat[2].y + mat[6].y)*0.5*coeff;
mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride] = temp2;
// off-diagonal (1,2)
temp2.x = (mat[5].x - mat[7].x)*0.5*coeff;
temp2.y = (mat[5].y + mat[7].y)*0.5*coeff;
mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride*2] = temp2;
// subtract one third of the imaginary trace to make the diagonal traceless
const typename RealTypeId<T>::Type temp = (mat[0].y + mat[4].y + mat[8].y)*0.3333333333333333333333333;
temp2.x = (mat[0].y-temp)*coeff;
temp2.y = (mat[4].y-temp)*coeff;
mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride*3] = temp2;
temp2.x = (mat[8].y - temp)*coeff;
temp2.y = 0.0;
mom_field[idx + dir*hf.mom_ga_stride*5 + hf.mom_ga_stride*4] = temp2;
return;
}
// Struct to determine the coefficient sign at compile time
// Compile-time coefficient sign as a function of direction orientation
// (pos_dir) and sublattice parity (odd_lattice): result is +1 when
// pos_dir == odd_lattice, -1 otherwise.
template<int pos_dir, int odd_lattice>
struct CoeffSign
{
static const int result = -1;
};
template<>
struct CoeffSign<0,1>
{
static const int result = -1;
};
template<>
struct CoeffSign<0,0>
{
static const int result = 1;
};
template<>
struct CoeffSign<1,1>
{
static const int result = 1;
};
// Compile-time parity sign: +1 on the even sublattice, -1 on the odd one.
template<int odd_lattice>
struct Sign
{
static const int result = 1;
};
template<>
struct Sign<1>
{
static const int result = -1;
};
// Number of vector-type elements used to hold one color matrix in registers:
// 9 two-component entries in general; 5 for float4 storage (the matrix data
// is packed more densely into four-component vectors).
template<class RealX>
struct ArrayLength
{
static const int result=9;
};
template<>
struct ArrayLength<float4>
{
static const int result=5;
};
// reconstructSign doesn't do anything right now,
// but it will, soon.
template<typename T>
__device__ void reconstructSign(int* const sign, int dir, const T i[4]){
// Compute the sign (+1/-1) applied when reconstructing a compressed link in
// direction 'dir' at site coordinates i[0..3].  The per-direction parity
// tests below appear to follow the usual staggered-phase convention;
// NOTE(review): TUP additionally flips on the time boundary (with
// MULTI_GPU, only on ranks owning the boundary via Pt0/PtNm1).
*sign=1;
switch(dir){
case XUP:
if( (i[3]&1)==1) *sign=-1;
break;
case YUP:
if( ((i[3]+i[0])&1) == 1) *sign=-1;
break;
case ZUP:
if( ((i[3]+i[0]+i[1])&1) == 1) *sign=-1;
break;
case TUP:
#ifdef MULTI_GPU
if( (i[3] == X4+1 && PtNm1)
|| (i[3] == 1 && Pt0)) {
*sign=-1;
}
#else
if(i[3] == X4m1) *sign=-1;
#endif
break;
default:
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("Error: invalid dir\n");
#endif
break;
}
return;
}
// One-link force term: for a forward direction 'sig', accumulate
// coeff * oprod into the output field, one thread per site of the given
// parity (oddBit).  No grid-stride loop, so the launch must cover 'threads'.
template<class RealA, int oddBit>
__global__ void
do_one_link_term_kernel(const RealA* const oprodEven, const RealA* const oprodOdd,
int sig, typename RealTypeId<RealA>::Type coeff,
RealA* const outputEven, RealA* const outputOdd, const int threads)
{
int sid = blockIdx.x * blockDim.x + threadIdx.x;
if (sid >= threads) return; // guard the grid tail
#ifdef MULTI_GPU
// Decompose the interior site index into coordinates, then re-linearize
// into the halo-extended lattice (offset of 2 in each dimension).
int x[4];
int z1 = sid/X1h;
int x1h = sid - z1*X1h;
int z2 = z1/X2;
x[1] = z1 - z2*X2;
x[3] = z2/X3;
x[2] = z2 - x[3]*X3;
int x1odd = (x[1] + x[2] + x[3] + oddBit) & 1;
x[0] = 2*x1h + x1odd;
//int X = 2*sid + x1odd;
int new_sid = ( (x[3]+2)*E3E2E1+(x[2]+2)*E2E1+(x[1]+2)*E1+(x[0]+2))>>1 ;
#else
int new_sid = sid;
#endif
RealA COLOR_MAT_W[ArrayLength<RealA>::result];
if(GOES_FORWARDS(sig)){
loadMatrixFromField(oprodEven, oprodOdd, sig, new_sid, COLOR_MAT_W, oddBit, hf.color_matrix_stride);
addMatrixToField(COLOR_MAT_W, sig, new_sid, coeff, outputEven, outputOdd, oddBit);
}
return;
}
#define DD_CONCAT(n,r) n ## r ## kernel
#define HISQ_KERNEL_NAME(a,b) DD_CONCAT(a,b)
//precision: 0 is for double, 1 is for single
#define NEWOPROD_EVEN_TEX newOprod0TexDouble
#define NEWOPROD_ODD_TEX newOprod1TexDouble
#if (HISQ_NEW_OPROD_LOAD_TEX == 1)
#define LOAD_TEX_ENTRY(tex, field, idx) READ_DOUBLE2_TEXTURE(tex, field, idx)
#else
#define LOAD_TEX_ENTRY(tex, field, idx) field[idx]
#endif
//double precision, recon=18
#define PRECISION 0
#define RECON 18
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_18_DOUBLE_TEX((oddness)?siteLink1TexDouble:siteLink0TexDouble, (oddness)?linkOdd:linkEven, dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
//double precision, recon=12
#define PRECISION 0
#define RECON 12
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_12_DOUBLE_TEX((oddness)?siteLink1TexDouble:siteLink0TexDouble, (oddness)?linkOdd:linkEven,dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField<6>(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x) reconstructSign(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign) FF_RECONSTRUCT_LINK_12(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
#undef NEWOPROD_EVEN_TEX
#undef NEWOPROD_ODD_TEX
#undef LOAD_TEX_ENTRY
#define NEWOPROD_EVEN_TEX newOprod0TexSingle
#define NEWOPROD_ODD_TEX newOprod1TexSingle
#if (HISQ_NEW_OPROD_LOAD_TEX==1)
#define LOAD_TEX_ENTRY(tex, field, idx) tex1Dfetch(tex,idx)
#else
#define LOAD_TEX_ENTRY(tex, field, idx) field[idx]
#endif
//single precision, recon=18
#define PRECISION 1
#define RECON 18
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_18_SINGLE_TEX((oddness)?siteLink1TexSingle:siteLink0TexSingle, dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
//single precision, recon=12
#define PRECISION 1
#define RECON 12
#if (HISQ_SITE_MATRIX_LOAD_TEX == 1)
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) HISQ_LOAD_MATRIX_12_SINGLE_TEX((oddness)?siteLink1TexSingle_recon:siteLink0TexSingle_recon, dir, idx, var, hf.site_ga_stride)
#else
#define HISQ_LOAD_LINK(linkEven, linkOdd, dir, idx, var, oddness) loadMatrixFromField(linkEven, linkOdd, dir, idx, var, oddness, hf.site_ga_stride)
#endif
#define COMPUTE_LINK_SIGN(sign, dir, x) reconstructSign(sign, dir, x)
#define RECONSTRUCT_SITE_LINK(var, sign) FF_RECONSTRUCT_LINK_12(var, sign)
#include "hisq_paths_force_core.h"
#undef PRECISION
#undef RECON
#undef HISQ_LOAD_LINK
#undef COMPUTE_LINK_SIGN
#undef RECONSTRUCT_SITE_LINK
#undef NEWOPROD_EVEN_TEX
#undef NEWOPROD_ODD_TEX
#undef LOAD_TEX_ENTRY
template<class RealA, class RealB>
class MiddleLink : public Tunable {
private:
const cudaGaugeField &link;
const cudaGaugeField &oprod;
const cudaGaugeField &Qprev;
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &Pmu;
cudaGaugeField &P3;
cudaGaugeField &Qmu;
cudaGaugeField &newOprod;
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; } // no dynamic shared memory used
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
// BUGFIX: "¶m" mojibake (HTML entity for "&param") restored.
bool advanceGridDim(TuneParam &param) const { return false; }
// generalize Tunable::advanceBlockDim() to also set gridDim, with extra checking to ensure that gridDim isn't too large for the device
// BUGFIX: "¶m" mojibake (HTML entity for "&param") restored.
bool advanceBlockDim(TuneParam &param) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const unsigned int max_shared = 16384; // FIXME: use deviceProp.sharedMemPerBlock;
const int step = deviceProp.warpSize; // block sizes advance in warp-sized steps
bool ret;
param.block.x += step;
if (param.block.x > max_threads || sharedBytesPerThread()*param.block.x > max_shared) {
// wrapped around: restart with the smallest block that still fits in the grid limit
param.block = dim3((kparam.threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
ret = false;
} else {
ret = true;
}
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return ret;
}
public:
// Full constructor: previous-step Q field (Qprev) supplied explicitly.
MiddleLink(const cudaGaugeField &link,
const cudaGaugeField &oprod,
const cudaGaugeField &Qprev,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
cudaGaugeField &Pmu, // write only
cudaGaugeField &P3, // write only
cudaGaugeField &Qmu,
cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), oprod(oprod), Qprev(Qprev), sig(sig), mu(mu),
coeff(coeff), Pmu(Pmu), P3(P3), Qmu(Qmu), newOprod(newOprod), kparam(kparam)
{ ; }
// need alternative constructor to hack around null pointer passing
// (Qprev is aliased to link when there is no previous Q field).
MiddleLink(const cudaGaugeField &link,
const cudaGaugeField &oprod,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
cudaGaugeField &Pmu, // write only
cudaGaugeField &P3, // write only
cudaGaugeField &Qmu,
cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), oprod(oprod), Qprev(link), sig(sig), mu(mu),
coeff(coeff), Pmu(Pmu), P3(P3), Qmu(Qmu), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~MiddleLink() { ; }
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>> \
((typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeA*)Qprev_even, (typeA*)Qprev_odd, \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, coeff, \
(typeA*)Pmu.Even_p(), (typeA*)Pmu.Odd_p(), \
(typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeA*)Qmu.Even_p(), (typeA*)Qmu.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), kparam)
#define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change ==0 ){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
const void *Qprev_even = (&Qprev == &link) ? NULL : Qprev.Even_p();
const void *Qprev_odd = (&Qprev == &link) ? NULL : Qprev.Odd_p();
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(0,1);
}else{
CALL_MIDDLE_LINK_KERNEL(0,0);
}
}
#undef CALL_ARGUMENTS
#undef CALL_MIDDLE_LINK_KERNEL
void preTune() {
Pmu.backup();
P3.backup();
Qmu.backup();
newOprod.backup();
}
void postTune() {
Pmu.restore();
P3.restore();
Qmu.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam ¶m) const
{
const unsigned int max_threads = deviceProp.maxThreadsDim[0];
const unsigned int max_blocks = deviceProp.maxGridSize[0];
const int step = deviceProp.warpSize;
param.block = dim3((kparam.threads+max_blocks-1)/max_blocks, 1, 1); // ensure the blockDim is large enough, given the limit on gridDim
param.block.x = ((param.block.x+step-1) / step) * step; // round up to the nearest "step"
if (param.block.x > max_threads) errorQuda("Local lattice volume is too large for device");
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
param.shared_bytes = sharedBytesPerThread()*param.block.x > sharedBytesPerBlock(param) ?
sharedBytesPerThread()*param.block.x : sharedBytesPerBlock(param);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam ¶m) const {
initTuneParam(param);
}
long long flops() const { return 0; }
};
// Tunable launcher for the Lepage "middle link" term of the HISQ force.
// Like MiddleLink but writes only P3 and accumulates into newOprod; Qprev
// is mandatory here (no aliasing hack).  Kernel selection follows the same
// precision / reconstruction / direction-sign / parity-offset scheme.
template<class RealA, class RealB>
class LepageMiddleLink : public Tunable {
private:
const cudaGaugeField &link;   // gauge links (input)
const cudaGaugeField &oprod;  // outer product (input)
const cudaGaugeField &Qprev;  // previous Q field (input)
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &P3; // write only
cudaGaugeField &newOprod;     // accumulated output
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// advance block via the base class, then rederive the grid for our thread count
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
LepageMiddleLink(const cudaGaugeField &link,
const cudaGaugeField &oprod,
const cudaGaugeField &Qprev,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
cudaGaugeField &P3, cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), oprod(oprod), Qprev(Qprev), sig(sig), mu(mu),
coeff(coeff), P3(P3), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~LepageMiddleLink() { ; }
// Tuning cache key: volume plus thread count, precision, reconstruction
// and the (sig, mu) direction pair.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// Common argument list; launches on the default stream (<<<grid, block>>>).
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>> \
((typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeA*)Qprev.Even_p(), (typeA*)Qprev.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, coeff, \
(typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), \
kparam)
// Dispatch on oddness_change / precision / reconstruction; two launches
// per branch, one per parity (the 0/1 template argument).
#define CALL_MIDDLE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_lepage_middle_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_lepage_middle_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_lepage_middle_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
// NOTE(review): `stream` is accepted but not forwarded to the launches
// (they run on the default stream) -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
// parity offset of the sub-lattice origin selects the 0/1 oddness variant
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_MIDDLE_LINK_KERNEL(0,1);
}else{
CALL_MIDDLE_LINK_KERNEL(0,0);
}
}
#undef CALL_ARGUMENTS
#undef CALL_MIDDLE_LINK_KERNEL
// back up the fields written by the kernel around autotuning trials
void preTune() {
P3.backup();
newOprod.backup();
}
void postTune() {
P3.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
// Tunable launcher for the "side link" contribution of the HISQ force.
// Consumes P3 and oprod, accumulates into shortP (scaled by accumu_coeff)
// and newOprod.  Kernel selection follows the same precision /
// reconstruction / direction-sign / parity-offset scheme as MiddleLink.
template<class RealA, class RealB>
class SideLink : public Tunable {
private:
const cudaGaugeField &link;   // gauge links (input)
const cudaGaugeField &P3;     // input
const cudaGaugeField &oprod;  // input
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
const typename RealTypeId<RealA>::Type &accumu_coeff; // scale for the shortP accumulation
cudaGaugeField &shortP;       // accumulated output
cudaGaugeField &newOprod;     // accumulated output
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// advance block via the base class, then rederive the grid for our thread count
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
SideLink(const cudaGaugeField &link,
const cudaGaugeField &P3,
const cudaGaugeField &oprod,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
const typename RealTypeId<RealA>::Type &accumu_coeff,
cudaGaugeField &shortP,
cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), P3(P3), oprod(oprod),
sig(sig), mu(mu), coeff(coeff), accumu_coeff(accumu_coeff),
shortP(shortP), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~SideLink() { ; }
// Tuning cache key: volume plus thread count, precision, reconstruction
// and the (sig, mu) direction pair.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// Common argument list; launches on the default stream (<<<grid, block>>>).
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>> \
((typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, \
coeff, \
(typename RealTypeId<typeA>::Type) accumu_coeff, \
(typeA*)shortP.Even_p(), (typeA*)shortP.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), \
kparam)
// Dispatch on oddness_change / precision / reconstruction; two launches
// per branch, one per parity (the 0/1 template argument).
#define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_side_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_side_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
// NOTE(review): `stream` is accepted but not forwarded to the launches
// (they run on the default stream) -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
// parity offset of the sub-lattice origin selects the 0/1 oddness variant
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(0,1);
}else{
CALL_SIDE_LINK_KERNEL(0,0);
}
}
#undef CALL_SIDE_LINK_KERNEL
#undef CALL_ARGUMENTS
// back up the fields written by the kernel around autotuning trials
void preTune() {
shortP.backup();
newOprod.backup();
}
void postTune() {
shortP.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
// Tunable launcher for the short form of the "side link" term: consumes
// only P3 (no oprod / shortP accumulation) and accumulates into newOprod.
// Kernel selection follows the same precision / reconstruction /
// direction-sign / parity-offset scheme as the other link classes.
template<class RealA, class RealB>
class SideLinkShort : public Tunable {
private:
const cudaGaugeField &link;  // gauge links (input)
const cudaGaugeField &P3;    // input
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
cudaGaugeField &newOprod;    // accumulated output
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// advance block via the base class, then rederive the grid for our thread count
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
SideLinkShort(const cudaGaugeField &link, const cudaGaugeField &P3, int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff, cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), P3(P3), sig(sig), mu(mu), coeff(coeff), newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~SideLinkShort() { ; }
// Tuning cache key: volume plus thread count, precision, reconstruction
// and the (sig, mu) direction pair.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// Common argument list; launches on the default stream (<<<grid, block>>>).
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>> \
((typeA*)P3.Even_p(), (typeA*)P3.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
sig, mu, (typename RealTypeId<typeA>::Type) coeff, \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), kparam)
// Dispatch on oddness_change / precision / reconstruction; two launches
// per branch, one per parity (the 0/1 template argument).
#define CALL_SIDE_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_side_link_short_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_side_link_short_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_side_link_short_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
// NOTE(review): `stream` is accepted but not forwarded to the launches
// (they run on the default stream) -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
// parity offset of the sub-lattice origin selects the 0/1 oddness variant
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_SIDE_LINK_KERNEL(1,0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_SIDE_LINK_KERNEL(0,1);
}else{
CALL_SIDE_LINK_KERNEL(0,0);
}
}
#undef CALL_SIDE_LINK_KERNEL
#undef CALL_ARGUMENTS
// back up the field written by the kernel around autotuning trials
void preTune() {
newOprod.backup();
}
void postTune() {
newOprod.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
// Tunable launcher for the "all link" contribution of the HISQ force.
// Consumes oprod and Qprev, accumulates into shortP (scaled by
// accumu_coeff) and newOprod.  Kernel selection follows the same
// precision / reconstruction / direction-sign / parity-offset scheme as
// the other link classes.
template<class RealA, class RealB>
class AllLink : public Tunable {
private:
const cudaGaugeField &link;   // gauge links (input)
const cudaGaugeField &oprod;  // outer product (input)
const cudaGaugeField &Qprev;  // previous Q field (input)
const int sig;
const int mu;
const typename RealTypeId<RealA>::Type &coeff;
const typename RealTypeId<RealA>::Type &accumu_coeff; // scale for the shortP accumulation
cudaGaugeField &shortP;       // accumulated output
cudaGaugeField &newOprod;     // accumulated output
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// advance block via the base class, then rederive the grid for our thread count
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
AllLink(const cudaGaugeField &link,
const cudaGaugeField &oprod,
const cudaGaugeField &Qprev,
int sig, int mu,
const typename RealTypeId<RealA>::Type &coeff,
const typename RealTypeId<RealA>::Type &accumu_coeff,
cudaGaugeField &shortP, cudaGaugeField &newOprod,
const hisq_kernel_param_t &kparam) :
link(link), oprod(oprod), Qprev(Qprev), sig(sig), mu(mu),
coeff(coeff), accumu_coeff(accumu_coeff), shortP(shortP),
newOprod(newOprod), kparam(kparam)
{ ; }
virtual ~AllLink() { ; }
// Tuning cache key: volume plus thread count, precision, reconstruction
// and the (sig, mu) direction pair.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << kparam.D1 << "x";
vol << kparam.D2 << "x";
vol << kparam.D3 << "x";
vol << kparam.D4;
aux << "threads=" << kparam.threads << ",prec=" << link.Precision();
aux << ",recon=" << link.Reconstruct() << ",sig=" << sig << ",mu=" << mu;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// Common argument list; launches on the default stream (<<<grid, block>>>).
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>> \
((typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
(typeA*)Qprev.Even_p(), (typeA*)Qprev.Odd_p(), \
(typeB*)link.Even_p(), (typeB*)link.Odd_p(), sig, mu, \
(typename RealTypeId<typeA>::Type)coeff, \
(typename RealTypeId<typeA>::Type)accumu_coeff, \
(typeA*)shortP.Even_p(),(typeA*)shortP.Odd_p(), \
(typeA*)newOprod.Even_p(), (typeA*)newOprod.Odd_p(), kparam)
// Dispatch on oddness_change / precision / reconstruction; two launches
// per branch, one per parity (the 0/1 template argument).
#define CALL_ALL_LINK_KERNEL(sig_sign, mu_sign) \
if(oddness_change == 0){ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float2); \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(float2, float4); \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 0> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 0> CALL_ARGUMENTS(double2, double2); \
} \
} \
}else{ \
if(sizeof(RealA) == sizeof(float2)){ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float2); \
do_all_link_sp_18_kernel<float2, float2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float2); \
}else{ \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(float2, float4); \
do_all_link_sp_12_kernel<float2, float4, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(float2, float4); \
} \
}else{ \
if(recon == QUDA_RECONSTRUCT_NO){ \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_18_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
}else{ \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 0, 1> CALL_ARGUMENTS(double2, double2); \
do_all_link_dp_12_kernel<double2, double2, sig_sign, mu_sign, 1, 1> CALL_ARGUMENTS(double2, double2); \
} \
} \
}
// NOTE(review): `stream` is accepted but not forwarded to the launches
// (they run on the default stream) -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
// parity offset of the sub-lattice origin selects the 0/1 oddness variant
int oddness_change = (kparam.base_idx[0] + kparam.base_idx[1]
+ kparam.base_idx[2] + kparam.base_idx[3])&1;
if (GOES_FORWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(1, 1);
}else if (GOES_FORWARDS(sig) && GOES_BACKWARDS(mu)){
CALL_ALL_LINK_KERNEL(1, 0);
}else if (GOES_BACKWARDS(sig) && GOES_FORWARDS(mu)){
CALL_ALL_LINK_KERNEL(0, 1);
}else{
CALL_ALL_LINK_KERNEL(0, 0);
}
return;
}
#undef CALL_ARGUMENTS
#undef CALL_ALL_LINK_KERNEL
// back up the fields written by the kernel around autotuning trials
void preTune() {
shortP.backup();
newOprod.backup();
}
void postTune() {
shortP.restore();
newOprod.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
param.grid = dim3((kparam.threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
// Tunable launcher for the one-link contribution to the HISQ force.
// Accumulates coeff * oprod into ForceMatrix, launching once per parity,
// but only when sig points in a forward direction.
template<class RealA, class RealB>
class OneLinkTerm : public Tunable {
private:
const cudaGaugeField &oprod;                    // input outer product
const int sig;                                  // direction index
const typename RealTypeId<RealA>::Type &coeff;  // accumulation weight
cudaGaugeField &ForceMatrix;                    // accumulated output
const int* X;                                   // local lattice dimensions
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// the grid is always derived from the block size; never tuned on its own
bool advanceGridDim(TuneParam &param) const { return false; }
// step the block size via the base class, then rebuild a matching grid
bool advanceBlockDim(TuneParam &param) const {
const bool moved = Tunable::advanceBlockDim(param);
const int halfVolume = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((halfVolume + param.block.x - 1)/param.block.x, 1, 1);
return moved;
}
public:
OneLinkTerm(const cudaGaugeField &oprod, int sig,
const typename RealTypeId<RealA>::Type &coeff,
cudaGaugeField &ForceMatrix, const int* _X) :
oprod(oprod), sig(sig), coeff(coeff), ForceMatrix(ForceMatrix), X(_X)
{ ; }
virtual ~OneLinkTerm() { ; }
// Tuning cache key: volume string plus thread count, precision, sig and coeff.
TuneKey tuneKey() const {
std::stringstream vol_ss, aux_ss;
vol_ss << X[0] << "x" << X[1] << "x" << X[2] << "x" << X[3];
const int halfVolume = X[0]*X[1]*X[2]*X[3]/2;
aux_ss << "threads=" << halfVolume << ",prec=" << oprod.Precision();
aux_ss << ",sig=" << sig << ",coeff=" << coeff;
return TuneKey(vol_ss.str(), typeid(*this).name(), aux_ss.str());
}
// NOTE(review): `stream` is accepted but the launches below use the
// default stream -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
const int halfVolume = X[0]*X[1]*X[2]*X[3]/2;
if(!GOES_FORWARDS(sig)) return;   // backward sig: nothing to accumulate
const RealA *oprodEven = static_cast<const RealA*>(oprod.Even_p());
const RealA *oprodOdd  = static_cast<const RealA*>(oprod.Odd_p());
RealA *forceEven = static_cast<RealA*>(ForceMatrix.Even_p());
RealA *forceOdd  = static_cast<RealA*>(ForceMatrix.Odd_p());
// one launch per parity (the 0/1 template argument)
do_one_link_term_kernel<RealA,0><<<tp.grid,tp.block>>>(oprodEven, oprodOdd,
sig, coeff,
forceEven, forceOdd,
halfVolume);
do_one_link_term_kernel<RealA,1><<<tp.grid,tp.block>>>(oprodEven, oprodOdd,
sig, coeff,
forceEven, forceOdd,
halfVolume);
}
// back up / restore the accumulated output around autotuning trials
void preTune() {
ForceMatrix.backup();
}
void postTune() {
ForceMatrix.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
const int halfVolume = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((halfVolume + param.block.x - 1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
const int halfVolume = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((halfVolume + param.block.x - 1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
// Tunable launcher for the long-link (Naik) contribution to the HISQ
// force.  Combines link and naikOprod scaled by naik_coeff into `output`;
// requires a forward-going sig.  Kernel variant chosen on precision and
// reconstruction; one launch per parity.
template<class RealA, class RealB>
class LongLinkTerm : public Tunable {
private:
const cudaGaugeField &link;       // gauge links (input)
const cudaGaugeField &naikOprod;  // Naik outer product (input)
const int sig;
const typename RealTypeId<RealA>::Type &naik_coeff;
cudaGaugeField &output;           // accumulated output
const int * X;                    // local lattice dimensions
const hisq_kernel_param_t &kparam;
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// advance block via the base class, then rederive the grid for the
// half-volume thread count
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads + param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
LongLinkTerm(const cudaGaugeField &link, const cudaGaugeField &naikOprod,
int sig, const typename RealTypeId<RealA>::Type &naik_coeff,
cudaGaugeField &output, const int* _X, const hisq_kernel_param_t &kparam) :
link(link), naikOprod(naikOprod), sig(sig), naik_coeff(naik_coeff), output(output),
X(_X), kparam(kparam)
{ ; }
virtual ~LongLinkTerm() { ; }
// Tuning cache key: volume plus thread count, precision and sig.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << X[0] << "x";
vol << X[1] << "x";
vol << X[2] << "x";
vol << X[3];
int threads = X[0]*X[1]*X[2]*X[3]/2;
aux << "threads=" << threads << ",prec=" << link.Precision();
aux << ",sig=" << sig;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// Common argument list; launches on the default stream (<<<grid, block>>>).
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid,tp.block>>> \
((typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
(typeA*)naikOprod.Even_p(), (typeA*)naikOprod.Odd_p(), \
sig, naik_coeff, \
(typeA*)output.Even_p(), (typeA*)output.Odd_p(), \
kparam);
// NOTE(review): `stream` is accepted but not forwarded to the launches
// (they run on the default stream) -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
if(GOES_BACKWARDS(sig)) errorQuda("sig does not go forward\n");
// dispatch on precision / reconstruction; each pair of launches covers
// both parities (the 0/1 template argument)
if(sizeof(RealA) == sizeof(float2)){
if(recon == QUDA_RECONSTRUCT_NO){
do_longlink_sp_18_kernel<float2,float2, 0> CALL_ARGUMENTS(float2, float2);
do_longlink_sp_18_kernel<float2,float2, 1> CALL_ARGUMENTS(float2, float2);
}else{
do_longlink_sp_12_kernel<float2,float4, 0> CALL_ARGUMENTS(float2, float4);
do_longlink_sp_12_kernel<float2,float4, 1> CALL_ARGUMENTS(float2, float4);
}
}else{
if(recon == QUDA_RECONSTRUCT_NO){
do_longlink_dp_18_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_longlink_dp_18_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}else{
do_longlink_dp_12_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_longlink_dp_12_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}
}
}
#undef CALL_ARGUMENTS
// back up / restore the accumulated output around autotuning trials
void preTune() {
output.backup();
}
void postTune() {
output.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
// Tunable launcher for the final step of the HISQ force: combines the
// gauge links with the accumulated outer product `oprod` into the
// momentum field `mom`.  Kernel variant chosen on precision and
// reconstruction; one launch per parity (the 0/1 template argument).
// Fixes vs. previous revision: stray double semicolon on the `recon`
// declaration removed, and the trailing `;` dropped from CALL_ARGUMENTS
// so call sites (which already end in `;`) no longer expand to `;;`.
template<class RealA, class RealB>
class CompleteForce : public Tunable {
private:
const cudaGaugeField &link;   // gauge links (input)
const cudaGaugeField &oprod;  // accumulated outer product (input)
const int sig;
cudaGaugeField &mom;          // momentum field (output)
const int* X;                 // local lattice dimensions
int sharedBytesPerThread() const { return 0; }
int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool advanceGridDim(TuneParam &param) const { return false; }
// advance block via the base class, then rederive the grid for the
// half-volume thread count
bool advanceBlockDim(TuneParam &param) const {
bool rtn = Tunable::advanceBlockDim(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads + param.block.x-1)/param.block.x, 1, 1);
return rtn;
}
public:
CompleteForce(const cudaGaugeField &link, const cudaGaugeField &oprod,
int sig, cudaGaugeField &mom, const int* _X) :
link(link), oprod(oprod), sig(sig), mom(mom), X(_X)
{ ; }
virtual ~CompleteForce() { ; }
// Tuning cache key: volume plus thread count, precision and sig.
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << X[0] << "x";
vol << X[1] << "x";
vol << X[2] << "x";
vol << X[3];
int threads = X[0]*X[1]*X[2]*X[3]/2;
aux << "threads=" << threads << ",prec=" << link.Precision() << ",sig=" << sig;
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
// Common argument list; launches on the default stream (<<<grid, block>>>).
// The macro deliberately carries no trailing semicolon -- call sites add it.
#define CALL_ARGUMENTS(typeA, typeB) <<<tp.grid, tp.block>>> \
((typeB*)link.Even_p(), (typeB*)link.Odd_p(), \
(typeA*)oprod.Even_p(), (typeA*)oprod.Odd_p(), \
sig, \
(typeA*)mom.Even_p(), (typeA*)mom.Odd_p(), \
X[0] * X[1] * X[2] * X[3]/2)
// NOTE(review): `stream` is accepted but not forwarded to the launches
// (they run on the default stream) -- confirm this is intended.
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
QudaReconstructType recon = link.Reconstruct();
if(sizeof(RealA) == sizeof(float2)){
if(recon == QUDA_RECONSTRUCT_NO){
do_complete_force_sp_18_kernel<float2,float2, 0> CALL_ARGUMENTS(float2, float2);
do_complete_force_sp_18_kernel<float2,float2, 1> CALL_ARGUMENTS(float2, float2);
}else{
do_complete_force_sp_12_kernel<float2,float4, 0> CALL_ARGUMENTS(float2, float4);
do_complete_force_sp_12_kernel<float2,float4, 1> CALL_ARGUMENTS(float2, float4);
}
}else{
if(recon == QUDA_RECONSTRUCT_NO){
do_complete_force_dp_18_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_complete_force_dp_18_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}else{
do_complete_force_dp_12_kernel<double2,double2, 0> CALL_ARGUMENTS(double2, double2);
do_complete_force_dp_12_kernel<double2,double2, 1> CALL_ARGUMENTS(double2, double2);
}
}
}
#undef CALL_ARGUMENTS
// back up / restore the momentum output around autotuning trials
void preTune() {
mom.backup();
}
void postTune() {
mom.restore();
}
virtual void initTuneParam(TuneParam &param) const
{
Tunable::initTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const
{
Tunable::defaultTuneParam(param);
int threads = X[0]*X[1]*X[2]*X[3]/2;
param.grid = dim3((threads+param.block.x-1)/param.block.x, 1, 1);
}
long long flops() const { return 0; } // flop counting not implemented
};
static void
bind_tex_link(const cudaGaugeField& link, const cudaGaugeField& newOprod)
{
if(link.Precision() == QUDA_DOUBLE_PRECISION){
cudaBindTexture(0, siteLink0TexDouble, link.Even_p(), link.Bytes()/2);
cudaBindTexture(0, siteLink1TexDouble, link.Odd_p(), link.Bytes()/2);
cudaBindTexture(0, newOprod0TexDouble, newOprod.Even_p(), newOprod.Bytes()/2);
cudaBindTexture(0, newOprod1TexDouble, newOprod.Odd_p(), newOprod.Bytes()/2);
}else{
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
cudaBindTexture(0, siteLink0TexSingle, link.Even_p(), link.Bytes()/2);
cudaBindTexture(0, siteLink1TexSingle, link.Odd_p(), link.Bytes()/2);
}else{
cudaBindTexture(0, siteLink0TexSingle_recon, link.Even_p(), link.Bytes()/2);
cudaBindTexture(0, siteLink1TexSingle_recon, link.Odd_p(), link.Bytes()/2);
}
cudaBindTexture(0, newOprod0TexSingle, newOprod.Even_p(), newOprod.Bytes()/2);
cudaBindTexture(0, newOprod1TexSingle, newOprod.Odd_p(), newOprod.Bytes()/2);
}
}
static void
unbind_tex_link(const cudaGaugeField& link, const cudaGaugeField& newOprod)
{
if(link.Precision() == QUDA_DOUBLE_PRECISION){
cudaUnbindTexture(siteLink0TexDouble);
cudaUnbindTexture(siteLink1TexDouble);
cudaUnbindTexture(newOprod0TexDouble);
cudaUnbindTexture(newOprod1TexDouble);
}else{
if(link.Reconstruct() == QUDA_RECONSTRUCT_NO){
cudaUnbindTexture(siteLink0TexSingle);
cudaUnbindTexture(siteLink1TexSingle);
}else{
cudaUnbindTexture(siteLink0TexSingle_recon);
cudaUnbindTexture(siteLink1TexSingle_recon);
}
cudaUnbindTexture(newOprod0TexSingle);
cudaUnbindTexture(newOprod1TexSingle);
}
}
template<class Real, class RealA, class RealB>
static void
do_hisq_staples_force_cuda( PathCoefficients<Real> act_path_coeff,
const QudaGaugeParam& param,
const cudaGaugeField &oprod,
const cudaGaugeField &link,
cudaGaugeField &Pmu,
cudaGaugeField &P3,
cudaGaugeField &P5,
cudaGaugeField &Pnumu,
cudaGaugeField &Qmu,
cudaGaugeField &Qnumu,
cudaGaugeField &newOprod)
{
Real coeff;
Real OneLink, Lepage, FiveSt, ThreeSt, SevenSt;
Real mLepage, mFiveSt, mThreeSt;
OneLink = act_path_coeff.one;
ThreeSt = act_path_coeff.three; mThreeSt = -ThreeSt;
FiveSt = act_path_coeff.five; mFiveSt = -FiveSt;
SevenSt = act_path_coeff.seven;
Lepage = act_path_coeff.lepage; mLepage = -Lepage;
for(int sig=0; sig<8; ++sig){
if(GOES_FORWARDS(sig)){
OneLinkTerm<RealA, RealB> oneLink(oprod, sig, OneLink, newOprod, param.X);
oneLink.apply(0);
checkCudaError();
} // GOES_FORWARDS(sig)
}
int ghostDim[4]={
commDimPartitioned(0),
commDimPartitioned(1),
commDimPartitioned(2),
commDimPartitioned(3)
};
hisq_kernel_param_t kparam_1g, kparam_2g;
#ifdef MULTI_GPU
kparam_1g.D1 = commDimPartitioned(0)?(param.X[0]+2):(param.X[0]);
kparam_1g.D2 = commDimPartitioned(1)?(param.X[1]+2):(param.X[1]);
kparam_1g.D3 = commDimPartitioned(2)?(param.X[2]+2):(param.X[2]);
kparam_1g.D4 = commDimPartitioned(3)?(param.X[3]+2):(param.X[3]);
kparam_1g.D1h = kparam_1g.D1/2;
kparam_1g.base_idx[0]=commDimPartitioned(0)?1:2;
kparam_1g.base_idx[1]=commDimPartitioned(1)?1:2;
kparam_1g.base_idx[2]=commDimPartitioned(2)?1:2;
kparam_1g.base_idx[3]=commDimPartitioned(3)?1:2;
kparam_1g.threads = kparam_1g.D1*kparam_1g.D2*kparam_1g.D3*kparam_1g.D4/2;
kparam_2g.D1 = commDimPartitioned(0)?(param.X[0]+4):(param.X[0]);
kparam_2g.D2 = commDimPartitioned(1)?(param.X[1]+4):(param.X[1]);
kparam_2g.D3 = commDimPartitioned(2)?(param.X[2]+4):(param.X[2]);
kparam_2g.D4 = commDimPartitioned(3)?(param.X[3]+4):(param.X[3]);
kparam_2g.D1h = kparam_2g.D1/2;
kparam_2g.base_idx[0]=commDimPartitioned(0)?0:2;
kparam_2g.base_idx[1]=commDimPartitioned(1)?0:2;
kparam_2g.base_idx[2]=commDimPartitioned(2)?0:2;
kparam_2g.base_idx[3]=commDimPartitioned(3)?0:2;
kparam_2g.threads = kparam_2g.D1*kparam_2g.D2*kparam_2g.D3*kparam_2g.D4/2;
for(int i=0;i < 4; i++){
kparam_1g.ghostDim[i] = kparam_2g.ghostDim[i]=kparam_1g.ghostDim[i]=kparam_2g.ghostDim[i] = ghostDim[i];
}
#else
hisq_kernel_param_t kparam;
kparam.D1 = param.X[0];
kparam.D2 = param.X[1];
kparam.D3 = param.X[2];
kparam.D4 = param.X[3];
kparam.D1h = param.X[0]/2;
kparam.threads=param.X[0]*param.X[1]*param.X[2]*param.X[3]/2;
kparam.base_idx[0]=0;
kparam.base_idx[1]=0;
kparam.base_idx[2]=0;
kparam.base_idx[3]=0;
kparam_2g = kparam_1g = kparam;
#endif
for(int sig=0; sig<8; sig++){
for(int mu=0; mu<8; mu++){
if ( (mu == sig) || (mu == OPP_DIR(sig))){
continue;
}
//3-link
//Kernel A: middle link
MiddleLink<RealA,RealB> middleLink( link, oprod, // read only
sig, mu, mThreeSt,
Pmu, P3, Qmu, // write only
newOprod, kparam_2g);
middleLink.apply(0);
checkCudaError();
for(int nu=0; nu < 8; nu++){
if (nu == sig || nu == OPP_DIR(sig)
|| nu == mu || nu == OPP_DIR(mu)){
continue;
}
//5-link: middle link
//Kernel B
MiddleLink<RealA,RealB> middleLink( link, Pmu, Qmu, // read only
sig, nu, FiveSt,
Pnumu, P5, Qnumu, // write only
newOprod, kparam_1g);
middleLink.apply(0);
checkCudaError();
for(int rho = 0; rho < 8; rho++){
if (rho == sig || rho == OPP_DIR(sig)
|| rho == mu || rho == OPP_DIR(mu)
|| rho == nu || rho == OPP_DIR(nu)){
continue;
}
//7-link: middle link and side link
if(FiveSt != 0)coeff = SevenSt/FiveSt; else coeff = 0;
AllLink<RealA,RealB> allLink(link, Pnumu, Qnumu, sig, rho, SevenSt, coeff,
P5, newOprod, kparam_1g);
allLink.apply(0);
checkCudaError();
//return;
}//rho
//5-link: side link
if(ThreeSt != 0)coeff = FiveSt/ThreeSt; else coeff = 0;
SideLink<RealA,RealB> sideLink(link, P5, Qmu, //read only
sig, nu, mFiveSt, coeff,
P3, // write only
newOprod, kparam_1g);
sideLink.apply(0);
checkCudaError();
} //nu
//lepage
if(Lepage != 0.){
LepageMiddleLink<RealA,RealB>
lepageMiddleLink ( link, Pmu, Qmu, // read only
sig, mu, Lepage,
P5, // write only
newOprod, kparam_2g);
lepageMiddleLink.apply(0);
checkCudaError();
if(ThreeSt != 0)coeff = Lepage/ThreeSt ; else coeff = 0;
SideLink<RealA, RealB> sideLink(link, P5, Qmu, // read only
sig, mu, mLepage, coeff,
P3, //write only
newOprod, kparam_2g);
sideLink.apply(0);
checkCudaError();
} // Lepage != 0.0
//3-link side link
SideLinkShort<RealA,RealB> sideLinkShort(link, P3, // read only
sig, mu, ThreeSt,
newOprod, kparam_1g);
sideLinkShort.apply(0);
checkCudaError();
}//mu
}//sig
return;
} // do_hisq_staples_force_cuda
#undef Pmu
#undef Pnumu
#undef P3
#undef P5
#undef Qmu
#undef Qnumu
void hisqCompleteForceCuda(const QudaGaugeParam ¶m,
const cudaGaugeField &oprod,
const cudaGaugeField &link,
cudaGaugeField* force)
{
bind_tex_link(link, oprod);
for(int sig=0; sig<4; sig++){
if(param.cuda_prec == QUDA_DOUBLE_PRECISION){
CompleteForce<double2,double2> completeForce(link, oprod, sig, *force, param.X);
completeForce.apply(0);
checkCudaError();
}else if(param.cuda_prec == QUDA_SINGLE_PRECISION){
CompleteForce<float2,float2> completeForce(link, oprod, sig, *force, param.X);
completeForce.apply(0);
checkCudaError();
}else{
errorQuda("Unsupported precision");
}
} // loop over directions
unbind_tex_link(link, oprod);
return;
}
void hisqLongLinkForceCuda(double coeff,
const QudaGaugeParam ¶m,
const cudaGaugeField &oldOprod,
const cudaGaugeField &link,
cudaGaugeField *newOprod)
{
bind_tex_link(link, *newOprod);
const int volume = param.X[0]*param.X[1]*param.X[2]*param.X[3];
hisq_kernel_param_t kparam;
for(int i =0;i < 4;i++){
kparam.ghostDim[i] = commDimPartitioned(i);
}
kparam.threads = volume/2;
for(int sig=0; sig<4; ++sig){
if(param.cuda_prec == QUDA_DOUBLE_PRECISION){
LongLinkTerm<double2,double2> longLink(link, oldOprod, sig, coeff, *newOprod, param.X, kparam);
longLink.apply(0);
checkCudaError();
}else if(param.cuda_prec == QUDA_SINGLE_PRECISION){
LongLinkTerm<float2,float2> longLink(link, oldOprod, sig, static_cast<float>(coeff),
*newOprod, param.X, kparam);
longLink.apply(0);
checkCudaError();
}else{
errorQuda("Unsupported precision");
}
} // loop over directions
unbind_tex_link(link, *newOprod);
return;
}
void
hisqStaplesForceCuda(const double path_coeff_array[6],
const QudaGaugeParam ¶m,
const cudaGaugeField &oprod,
const cudaGaugeField &link,
cudaGaugeField* newOprod)
{
#ifdef MULTI_GPU
int X[4] = {
param.X[0]+4, param.X[1]+4, param.X[2]+4, param.X[3]+4
};
#else
int X[4] = {
param.X[0], param.X[1], param.X[2], param.X[3]
};
#endif
// create color matrix fields with zero padding
int pad = 0;
GaugeFieldParam gauge_param(X, param.cuda_prec, QUDA_RECONSTRUCT_NO, pad, QUDA_SCALAR_GEOMETRY);
cudaGaugeField Pmu(gauge_param);
cudaGaugeField P3(gauge_param);
cudaGaugeField P5(gauge_param);
cudaGaugeField Pnumu(gauge_param);
cudaGaugeField Qmu(gauge_param);
cudaGaugeField Qnumu(gauge_param);
bind_tex_link(link, *newOprod);
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
if (param.cuda_prec == QUDA_DOUBLE_PRECISION){
PathCoefficients<double> act_path_coeff;
act_path_coeff.one = path_coeff_array[0];
act_path_coeff.naik = path_coeff_array[1];
act_path_coeff.three = path_coeff_array[2];
act_path_coeff.five = path_coeff_array[3];
act_path_coeff.seven = path_coeff_array[4];
act_path_coeff.lepage = path_coeff_array[5];
do_hisq_staples_force_cuda<double,double2,double2>( act_path_coeff,
param,
oprod,
link,
Pmu,
P3,
P5,
Pnumu,
Qmu,
Qnumu,
*newOprod);
}else if(param.cuda_prec == QUDA_SINGLE_PRECISION){
PathCoefficients<float> act_path_coeff;
act_path_coeff.one = path_coeff_array[0];
act_path_coeff.naik = path_coeff_array[1];
act_path_coeff.three = path_coeff_array[2];
act_path_coeff.five = path_coeff_array[3];
act_path_coeff.seven = path_coeff_array[4];
act_path_coeff.lepage = path_coeff_array[5];
do_hisq_staples_force_cuda<float,float2,float2>( act_path_coeff,
param,
oprod,
link,
Pmu,
P3,
P5,
Pnumu,
Qmu,
Qnumu,
*newOprod);
}else{
errorQuda("Unsupported precision");
}
cudaEventRecord(end);
cudaEventSynchronize(end);
float runtime;
cudaEventElapsedTime(&runtime, start, end);
unbind_tex_link(link, *newOprod);
cudaEventDestroy(start);
cudaEventDestroy(end);
return;
}
} // namespace fermion_force
} // namespace quda
|
222129a84826e20a77fdceeed65e7bdf50a037ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) {
return (row * width + col);
}
__global__ void GaussianNBSumKernel(const float *d_data, const int *d_labels, float *feature_means_, int *class_count_, unsigned int n_samples_, unsigned int n_classes_, unsigned int n_features_) {
// Each thread will take care of one feature for all training samples
unsigned int tidx = threadIdx.x;
unsigned int feat_col = tidx + (blockIdx.x * blockDim.x);
unsigned int i = 0, row = 0;
if (feat_col < n_features_) { /* End condition check */
for (i = 0; i < n_samples_; ++i) { /* For each training sample */
row = d_labels[i];
// No race condition since each thread deals with one feature only
feature_means_[RM_Index(row, feat_col, n_features_)] +=
d_data[RM_Index(i, feat_col, n_features_)];
// WARNING: thread divergence :/
if (feat_col == 0) {
class_count_[row] += 1;
}
}
}
return;
} | 222129a84826e20a77fdceeed65e7bdf50a037ea.cu | #include "includes.h"
__device__ inline unsigned int RM_Index(unsigned int row, unsigned int col, unsigned int width) {
return (row * width + col);
}
__global__ void GaussianNBSumKernel(const float *d_data, const int *d_labels, float *feature_means_, int *class_count_, unsigned int n_samples_, unsigned int n_classes_, unsigned int n_features_) {
// Each thread will take care of one feature for all training samples
unsigned int tidx = threadIdx.x;
unsigned int feat_col = tidx + (blockIdx.x * blockDim.x);
unsigned int i = 0, row = 0;
if (feat_col < n_features_) { /* End condition check */
for (i = 0; i < n_samples_; ++i) { /* For each training sample */
row = d_labels[i];
// No race condition since each thread deals with one feature only
feature_means_[RM_Index(row, feat_col, n_features_)] +=
d_data[RM_Index(i, feat_col, n_features_)];
// WARNING: thread divergence :/
if (feat_col == 0) {
class_count_[row] += 1;
}
}
}
return;
} |
1f18b4deedafeb4081c3dd97ceccfa5e19d4eedb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/********************************************************************
* sample.cu
* This is a example of the CUDA program.
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "definitions.h"
#include "qsort.h"
#define float_lt(a,b) ((*a)<(*b))
__device__ void int_qsort(float *arr, unsigned n);
__device__ void torben(float m[], int n, float* med);
/// filtruje rodek Obrazka
__global__ static void CUDAMedianMain(Matrix din, Matrix dout, Mask Okno, unsigned int startw, unsigned int starth)
{
unsigned int r,c,rb,cb,rr,cc,licznik=0;
float* rowin;
float* rowout;
int less, greater, equal;
int n = Okno.e;
int row,col;
float min, max, guess, maxltguess, mingtguess, ret;
r = blockIdx.y*blockDim.y + threadIdx.y + Okno.py + starth; // srodek okna
c = blockIdx.x*blockDim.x + threadIdx.x + Okno.px + startw;
rb = r - Okno.py; // lewy grny rg okna
cb = c - Okno.px;
if(r>=din.height-Okno.py || c>=din.width-Okno.px)
return;
rowin = (float*)((char*)din.data + rb*din.pitch);
min = max = rowin[0];
for (row=rb ; row<rb+Okno.wy ; row++)
for (col=cb ; col<cb+Okno.wx ; col++)
{
rowin = (float*)((char*)din.data + row*din.pitch);
if (rowin[col]<min) min=rowin[col];
if (rowin[col]>max) max=rowin[col];
}
while (1) {
guess = (min+max)/2;
less = 0; greater = 0; equal = 0;
maxltguess = min ;
mingtguess = max ;
for (row=rb; row<rb+Okno.wy; row++)
for (col=cb; col<cb+Okno.wx; col++)
{
rowin = (float*)((char*)din.data + row*din.pitch);
if (rowin[col]<guess) {
less++;
if (rowin[col]>maxltguess) maxltguess = rowin[col] ;
} else if (rowin[col]>guess) {
greater++;
if (rowin[col]<mingtguess) mingtguess = rowin[col] ;
} else equal++;
}
if (less <= (n+1)/2 && greater <= (n+1)/2) break ;
else if (less>greater) max = maxltguess ;
else min = mingtguess;
}
if (less >= (n+1)/2) ret = maxltguess;
else if (less+equal >= (n+1)/2) ret = guess;
else ret = mingtguess;
rowout = (float*)((char*)dout.data + r*dout.pitch);
rowout[c] = ret;
}
///
/// dxx, dyy - poowa z wielkoci nieparzystego okna - po to aby zacz obrbk w pierwszm przebiegu od tych fragmentw obrazu ktre nie s brzegami (mieszcz cae okno)
/// startw, starth - indexy od ktrych powinien zacz si wykonywa grid (bd dodawane do tych obliczonych)
void RunCUDA(Matrix din, Matrix dout, dim3 tpb, dim3 nob, Mask Okno, unsigned int startw, unsigned int starth)
{
// CUDAMedianMain<<<nob, tpb, Okno.e*sizeof(float)>>>(din, dout, Okno, startw, starth);
hipLaunchKernelGGL(( CUDAMedianMain), dim3(nob), dim3(tpb), 0, 0, din, dout, Okno, startw, starth);
}
/*
__device__ void int_qsort(float *arr, unsigned n)
{
QSORT(float, arr, n, float_lt);
}*/
/*
__device__ void torben(float m[], int n, float* med)
{
int i, less, greater, equal;
float min, max, guess, maxltguess, mingtguess;
min = max = m[0] ;
for (i=1 ; i<n ; i++) {
if (m[i]<min) min=m[i];
if (m[i]>max) max=m[i];
}
while (1) {
guess = (min+max)/2;
less = 0; greater = 0; equal = 0;
maxltguess = min ;
mingtguess = max ;
for (i=0; i<n; i++) {
if (m[i]<guess) {
less++;
if (m[i]>maxltguess) maxltguess = m[i] ;
} else if (m[i]>guess) {
greater++;
if (m[i]<mingtguess) mingtguess = m[i] ;
} else equal++;
}
if (less <= (n+1)/2 && greater <= (n+1)/2) break ;
else if (less>greater) max = maxltguess ;
else min = mingtguess;
}
if (less >= (n+1)/2) *(med)= maxltguess;
else if (less+equal >= (n+1)/2) *(med) = guess;
else *(med) = mingtguess;
}*/
| 1f18b4deedafeb4081c3dd97ceccfa5e19d4eedb.cu | /********************************************************************
* sample.cu
* This is a example of the CUDA program.
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "definitions.h"
#include "qsort.h"
#define float_lt(a,b) ((*a)<(*b))
__device__ void int_qsort(float *arr, unsigned n);
__device__ void torben(float m[], int n, float* med);
/// filtruje środek Obrazka
__global__ static void CUDAMedianMain(Matrix din, Matrix dout, Mask Okno, unsigned int startw, unsigned int starth)
{
unsigned int r,c,rb,cb,rr,cc,licznik=0;
float* rowin;
float* rowout;
int less, greater, equal;
int n = Okno.e;
int row,col;
float min, max, guess, maxltguess, mingtguess, ret;
r = blockIdx.y*blockDim.y + threadIdx.y + Okno.py + starth; // srodek okna
c = blockIdx.x*blockDim.x + threadIdx.x + Okno.px + startw;
rb = r - Okno.py; // lewy górny róg okna
cb = c - Okno.px;
if(r>=din.height-Okno.py || c>=din.width-Okno.px)
return;
rowin = (float*)((char*)din.data + rb*din.pitch);
min = max = rowin[0];
for (row=rb ; row<rb+Okno.wy ; row++)
for (col=cb ; col<cb+Okno.wx ; col++)
{
rowin = (float*)((char*)din.data + row*din.pitch);
if (rowin[col]<min) min=rowin[col];
if (rowin[col]>max) max=rowin[col];
}
while (1) {
guess = (min+max)/2;
less = 0; greater = 0; equal = 0;
maxltguess = min ;
mingtguess = max ;
for (row=rb; row<rb+Okno.wy; row++)
for (col=cb; col<cb+Okno.wx; col++)
{
rowin = (float*)((char*)din.data + row*din.pitch);
if (rowin[col]<guess) {
less++;
if (rowin[col]>maxltguess) maxltguess = rowin[col] ;
} else if (rowin[col]>guess) {
greater++;
if (rowin[col]<mingtguess) mingtguess = rowin[col] ;
} else equal++;
}
if (less <= (n+1)/2 && greater <= (n+1)/2) break ;
else if (less>greater) max = maxltguess ;
else min = mingtguess;
}
if (less >= (n+1)/2) ret = maxltguess;
else if (less+equal >= (n+1)/2) ret = guess;
else ret = mingtguess;
rowout = (float*)((char*)dout.data + r*dout.pitch);
rowout[c] = ret;
}
///
/// dxx, dyy - połowa z wielkości nieparzystego okna - po to aby zacząć obróbkę w pierwszm przebiegu od tych fragmentów obrazu które nie są brzegami (mieszczą całe okno)
/// startw, starth - indexy od których powinien zacząć się wykonywać grid (będą dodawane do tych obliczonych)
void RunCUDA(Matrix din, Matrix dout, dim3 tpb, dim3 nob, Mask Okno, unsigned int startw, unsigned int starth)
{
// CUDAMedianMain<<<nob, tpb, Okno.e*sizeof(float)>>>(din, dout, Okno, startw, starth);
CUDAMedianMain<<<nob, tpb>>>(din, dout, Okno, startw, starth);
}
/*
__device__ void int_qsort(float *arr, unsigned n)
{
QSORT(float, arr, n, float_lt);
}*/
/*
__device__ void torben(float m[], int n, float* med)
{
int i, less, greater, equal;
float min, max, guess, maxltguess, mingtguess;
min = max = m[0] ;
for (i=1 ; i<n ; i++) {
if (m[i]<min) min=m[i];
if (m[i]>max) max=m[i];
}
while (1) {
guess = (min+max)/2;
less = 0; greater = 0; equal = 0;
maxltguess = min ;
mingtguess = max ;
for (i=0; i<n; i++) {
if (m[i]<guess) {
less++;
if (m[i]>maxltguess) maxltguess = m[i] ;
} else if (m[i]>guess) {
greater++;
if (m[i]<mingtguess) mingtguess = m[i] ;
} else equal++;
}
if (less <= (n+1)/2 && greater <= (n+1)/2) break ;
else if (less>greater) max = maxltguess ;
else min = mingtguess;
}
if (less >= (n+1)/2) *(med)= maxltguess;
else if (less+equal >= (n+1)/2) *(med) = guess;
else *(med) = mingtguess;
}*/
|
fc6df0524d9c81433d3e21a69f99836f6136949f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
"Persistent speculative while-while" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define NODES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define TRIANGLES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define LOAD_BALANCER_BATCH_SIZE 96 // Number of rays to fetch at a time. Must be a multiple of 32.
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//extern "C" __device__ int g_warpCounter; // Work counter for persistent threads.
//__device__ int g_warpCounter; // Work counter for persistent threads.
//------------------------------------------------------------------------
/*
extern "C" __global__ void queryConfig(void)
{
#if (defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_AOS;
#elif (defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_SOA;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_AOS;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_SOA;
#endif
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 6; // 6*32 = 192 threads, optimal for GTX285.
g_config.usePersistentThreads = 1;
}
*/
//------------------------------------------------------------------------
__device__ void IntersectTesla(const float4 o,
const float4 d,
const float4* nodesA,
const float4* nodesB,
const float4* nodesC,
const float4* nodesD,
const float4* trisA,
const float4* trisB,
const float4* trisC,
bool anyHit,
int& retHitIndex,
float& retHitT,
FW::Vec2f& retBari)
{
RayStruct auxRay;
RayStruct* aux = &auxRay;
int traversalStack[STACK_SIZE];
// int traversalStack[STACK_SIZE + 3];
// traversalStack[STACK_SIZE + 0] = threadIdx.x; // Forced to local mem => saves a register.
// traversalStack[STACK_SIZE + 1] = threadIdx.y;
// traversalStack[STACK_SIZE + 2] holds ray index.
// Live state during traversal, stored in registers.
float origx, origy, origz; // Ray origin.
int stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Initialize persistent threads.
// Fetch ray.
origx = o.x, origy = o.y, origz = o.z;
aux->tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
aux->idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
aux->idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
aux->idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
// traversalStack[STACK_SIZE + 2] = rayidx; // Spill.
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = 0;
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
float oodx = origx * aux->idirx;
float oody = origy * aux->idiry;
float oodz = origz * aux->idirz;
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr*4+0, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesA, nodeAddr*4+1, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesA, nodeAddr*4+2, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesB, nodeAddr, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesC, nodeAddr, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * aux->idirx - oodx;
float c0hix = n0xy.y * aux->idirx - oodx;
float c0loy = n0xy.z * aux->idiry - oody;
float c0hiy = n0xy.w * aux->idiry - oody;
float c0loz = nz.x * aux->idirz - oodz;
float c0hiz = nz.y * aux->idirz - oodz;
float c1loz = nz.z * aux->idirz - oodz;
float c1hiz = nz.w * aux->idirz - oodz;
float c0min = max4(fminf(c0lox, c0hix), fminf(c0loy, c0hiy), fminf(c0loz, c0hiz), aux->tmin);
float c0max = min4(fmaxf(c0lox, c0hix), fmaxf(c0loy, c0hiy), fmaxf(c0loz, c0hiz), hitT);
float c1lox = n1xy.x * aux->idirx - oodx;
float c1hix = n1xy.y * aux->idirx - oodx;
float c1loy = n1xy.z * aux->idiry - oody;
float c1hiy = n1xy.w * aux->idiry - oody;
float c1min = max4(fminf(c1lox, c1hix), fminf(c1loy, c1hiy), fminf(c1loz, c1hiz), aux->tmin);
float c1max = min4(fmaxf(c1lox, c1hix), fmaxf(c1loy, c1hiy), fmaxf(c1loz, c1hiz), hitT);
// Decide where to go next.
// Differs from "while-while" because this just happened to produce better code here.
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
nodeAddr = __float_as_int(cnodes.x); // stored as int
int nodeAddrChild1 = __float_as_int(cnodes.y); // stored as int
if (!traverseChild1) { nodeAddrChild1 = EntrypointSentinel; }
if (!traverseChild0) { nodeAddr = nodeAddrChild1; nodeAddrChild1 = EntrypointSentinel; }
// Neither child was intersected => pop.
if (nodeAddr == EntrypointSentinel)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// Both children were intersected => push the farther one.
else if (nodeAddrChild1 != EntrypointSentinel)
{
if (c1min < c0min)
swap(nodeAddr, nodeAddrChild1);
++stackPtr;
traversalStack[stackPtr] = nodeAddrChild1;
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// All SIMD lanes have found a leaf => process them.
if(!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Fetch the start and end of the triangle list.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 leaf=FETCH_TEXTURE(nodesA, (-leafAddr-1)*4+3, float4);
#else
float4 leaf=FETCH_TEXTURE(nodesD, (-nodeAddr-1), float4);
#endif
int triAddr = __float_as_int(leaf.x); // stored as int
int triAddr2 = __float_as_int(leaf.y); // stored as int
// Intersect the ray against each triangle using Sven Woop's algorithm.
for(; triAddr < triAddr2; triAddr++)
{
// Compute and check intersection t-value.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v00 = FETCH_GLOBAL(trisA, triAddr*4+0, float4);
float4 v11 = FETCH_GLOBAL(trisA, triAddr*4+1, float4);
#else
float4 v00 = FETCH_GLOBAL(trisA, triAddr, float4);
float4 v11 = FETCH_GLOBAL(trisB, triAddr, float4);
#endif
float dirx = 1.0f / aux->idirx;
float diry = 1.0f / aux->idiry;
float dirz = 1.0f / aux->idirz;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > aux->tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v22 = FETCH_GLOBAL(trisA, triAddr*4+2, float4);
#else
float4 v22 = FETCH_GLOBAL(trisC, triAddr, float4);
#endif
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
retBari.x = u;
retBari.y = v;
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
/*
if (hitIndex != -1)
hitIndex = FETCH_TEXTURE(triIndices, hitIndex, int);
STORE_RESULT(traversalStack[STACK_SIZE + 2], hitIndex, hitT);
*/
if (hitIndex != -1) {
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
}
retHitIndex = hitIndex;
retHitT = hitT;
}
//------------------------------------------------------------------------
| fc6df0524d9c81433d3e21a69f99836f6136949f.cu | /*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
"Persistent speculative while-while" kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define NODES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define TRIANGLES_ARRAY_OF_STRUCTURES // Define for AOS, comment out for SOA.
#define LOAD_BALANCER_BATCH_SIZE 96 // Number of rays to fetch at a time. Must be a multiple of 32.
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//extern "C" __device__ int g_warpCounter; // Work counter for persistent threads.
//__device__ int g_warpCounter; // Work counter for persistent threads.
//------------------------------------------------------------------------
/*
extern "C" __global__ void queryConfig(void)
{
#if (defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_AOS;
#elif (defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_AOS_SOA;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_AOS;
#elif (!defined(NODES_ARRAY_OF_STRUCTURES) && !defined(TRIANGLES_ARRAY_OF_STRUCTURES))
g_config.bvhLayout = BVHLayout_SOA_SOA;
#endif
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 6; // 6*32 = 192 threads, optimal for GTX285.
g_config.usePersistentThreads = 1;
}
*/
//------------------------------------------------------------------------
// Ray vs. BVH intersection using Aila & Laine's "speculative while-while"
// traversal (Tesla-era variant), repackaged as a callable device function.
//   o       : ray origin; o.w = tmin.
//   d       : ray direction; d.w = tmax.
//   nodesA-D: BVH node planes/links (only nodesA is read in the AOS layout
//             compiled here via NODES_ARRAY_OF_STRUCTURES).
//   trisA-C : Woop-transformed triangle data (only trisA read in AOS layout).
//   anyHit  : if true, terminate on the first accepted hit (shadow rays);
//             note retBari is NOT written on that early-out path.
//   Outputs : retHitIndex (remapped triangle index, -1 if miss), retHitT
//             (hit distance), retBari (barycentric u,v of the recorded hit).
__device__ void IntersectTesla(const float4 o,
const float4 d,
const float4* nodesA,
const float4* nodesB,
const float4* nodesC,
const float4* nodesD,
const float4* trisA,
const float4* trisB,
const float4* trisC,
bool anyHit,
int& retHitIndex,
float& retHitT,
FW::Vec2f& retBari)
{
RayStruct auxRay;
RayStruct* aux = &auxRay;
int traversalStack[STACK_SIZE];
// int traversalStack[STACK_SIZE + 3];
// traversalStack[STACK_SIZE + 0] = threadIdx.x; // Forced to local mem => saves a register.
// traversalStack[STACK_SIZE + 1] = threadIdx.y;
// traversalStack[STACK_SIZE + 2] holds ray index.
// Live state during traversal, stored in registers.
float origx, origy, origz; // Ray origin.
int stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Initialize persistent threads.
// Fetch ray.
origx = o.x, origy = o.y, origz = o.z;
aux->tmin = o.w;
// Clamp tiny direction components so the reciprocal stays finite.
float ooeps = exp2f(-80.0f); // Avoid div by zero.
aux->idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
aux->idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
aux->idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
// traversalStack[STACK_SIZE + 2] = rayidx; // Spill.
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = 0;
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Precomputed origin/direction products for the slab test.
float oodx = origx * aux->idirx;
float oody = origy * aux->idiry;
float oodz = origz * aux->idirz;
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr*4+0, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesA, nodeAddr*4+1, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesA, nodeAddr*4+2, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesA, nodeAddr*4+3, float4);
#else
float4 n0xy = FETCH_TEXTURE(nodesA, nodeAddr, float4); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = FETCH_TEXTURE(nodesB, nodeAddr, float4); // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = FETCH_TEXTURE(nodesC, nodeAddr, float4); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
float4 cnodes=FETCH_TEXTURE(nodesD, nodeAddr, float4);
#endif
// Intersect the ray against the child nodes (slab test per axis).
float c0lox = n0xy.x * aux->idirx - oodx;
float c0hix = n0xy.y * aux->idirx - oodx;
float c0loy = n0xy.z * aux->idiry - oody;
float c0hiy = n0xy.w * aux->idiry - oody;
float c0loz = nz.x * aux->idirz - oodz;
float c0hiz = nz.y * aux->idirz - oodz;
float c1loz = nz.z * aux->idirz - oodz;
float c1hiz = nz.w * aux->idirz - oodz;
float c0min = max4(fminf(c0lox, c0hix), fminf(c0loy, c0hiy), fminf(c0loz, c0hiz), aux->tmin);
float c0max = min4(fmaxf(c0lox, c0hix), fmaxf(c0loy, c0hiy), fmaxf(c0loz, c0hiz), hitT);
float c1lox = n1xy.x * aux->idirx - oodx;
float c1hix = n1xy.y * aux->idirx - oodx;
float c1loy = n1xy.z * aux->idiry - oody;
float c1hiy = n1xy.w * aux->idiry - oody;
float c1min = max4(fminf(c1lox, c1hix), fminf(c1loy, c1hiy), fminf(c1loz, c1hiz), aux->tmin);
float c1max = min4(fmaxf(c1lox, c1hix), fmaxf(c1loy, c1hiy), fmaxf(c1loz, c1hiz), hitT);
// Decide where to go next.
// Differs from "while-while" because this just happened to produce better code here.
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
nodeAddr = __float_as_int(cnodes.x); // stored as int
int nodeAddrChild1 = __float_as_int(cnodes.y); // stored as int
if (!traverseChild1) { nodeAddrChild1 = EntrypointSentinel; }
if (!traverseChild0) { nodeAddr = nodeAddrChild1; nodeAddrChild1 = EntrypointSentinel; }
// Neither child was intersected => pop.
if (nodeAddr == EntrypointSentinel)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// Both children were intersected => push the farther one.
else if (nodeAddrChild1 != EntrypointSentinel)
{
if (c1min < c0min)
swap(nodeAddr, nodeAddrChild1);
++stackPtr;
traversalStack[stackPtr] = nodeAddrChild1;
}
// First leaf => postpone and continue traversal.
// (Leaf addresses are encoded as negative node indices.)
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
// All SIMD lanes have found a leaf => process them.
// NOTE(review): legacy mask-less __any — removed on Volta+; should become
// __any_sync with an explicit participant mask on newer architectures.
if(!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Fetch the start and end of the triangle list.
#ifdef NODES_ARRAY_OF_STRUCTURES
float4 leaf=FETCH_TEXTURE(nodesA, (-leafAddr-1)*4+3, float4);
#else
// NOTE(review): this SOA branch indexes with nodeAddr, unlike the AOS
// branch's leafAddr. Dead code under the current defines, but verify
// before enabling the SOA node layout.
float4 leaf=FETCH_TEXTURE(nodesD, (-nodeAddr-1), float4);
#endif
int triAddr = __float_as_int(leaf.x); // stored as int
int triAddr2 = __float_as_int(leaf.y); // stored as int
// Intersect the ray against each triangle using Sven Woop's algorithm.
for(; triAddr < triAddr2; triAddr++)
{
// Compute and check intersection t-value.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v00 = FETCH_GLOBAL(trisA, triAddr*4+0, float4);
float4 v11 = FETCH_GLOBAL(trisA, triAddr*4+1, float4);
#else
float4 v00 = FETCH_GLOBAL(trisA, triAddr, float4);
float4 v11 = FETCH_GLOBAL(trisB, triAddr, float4);
#endif
// Reconstruct the direction from its cached reciprocal.
float dirx = 1.0f / aux->idirx;
float diry = 1.0f / aux->idiry;
float dirz = 1.0f / aux->idirz;
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > aux->tmin && t < hitT)
{
// Compute and check barycentric u.
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f)
{
// Compute and check barycentric v.
#ifdef TRIANGLES_ARRAY_OF_STRUCTURES
float4 v22 = FETCH_GLOBAL(trisA, triAddr*4+2, float4);
#else
float4 v22 = FETCH_GLOBAL(trisC, triAddr, float4);
#endif
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
retBari.x = u;
retBari.y = v;
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if (nodeAddr < 0)
{
nodeAddr = traversalStack[stackPtr];
--stackPtr;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
/*
if (hitIndex != -1)
hitIndex = FETCH_TEXTURE(triIndices, hitIndex, int);
STORE_RESULT(traversalStack[STACK_SIZE + 2], hitIndex, hitT);
*/
if (hitIndex != -1) {
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
}
retHitIndex = hitIndex;
retHitT = hitT;
}
//------------------------------------------------------------------------
|
ae35b9d2d43d1ace83e0d38c518f4be984f1560d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_kernels.h"
/**
* def shift(x, n_segment, fold_div=3):
nt, c, h, w = x.size()
n_batch = int(nt / n_segment)
x = x.view(n_batch, n_segment, c, h, w)
fold = int(c / fold_div)
left_side = torch.cat((x[:, 1:, :fold], torch.zeros(n_batch, 1, fold, h, w).to(x.device)), dim=1)
middle_side = torch.cat((torch.zeros(n_batch, 1, fold, h, w).to(x.device), x[:, :n_segment - 1, fold: 2 * fold]), dim=1)
out = torch.cat((left_side, middle_side, x[:, :, 2 * fold:]), dim=2)
return out.view(nt, c, h, w)
*/
// grid(c, n_segment, n_batch)
// block(w*h)
// Temporal Shift Module (TSM) kernel: channels [0, fold) are shifted one
// segment backward in time, channels [fold, 2*fold) one segment forward,
// remaining channels copied through unchanged.
// Expected launch: grid = (c, n_segment, n_batch), block = (h*w), so
// blockIdx.x = channel, blockIdx.y = segment, blockIdx.z = batch.
template<typename T>
__global__ void temporal_shift(T *output, const T *input, int n_segment,
int fold) {
// input (n_batch, n_segment, c, h, w)
// Flatten (batch, segment, channel) to a block index; tid is one pixel.
const size_t bid =
(blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
const size_t tid = bid * blockDim.x + threadIdx.x;
if (blockIdx.x < 2 * fold) {
size_t shift_bid;
if (blockIdx.x < fold && blockIdx.y >= 1) {
// left
// Shift backward: write this segment's pixel into segment - 1.
shift_bid = (blockIdx.z * gridDim.y + (blockIdx.y - 1)) * gridDim.x + blockIdx.x;
output[shift_bid * blockDim.x + threadIdx.x] = input[tid];
} else if (blockIdx.x >= fold && blockIdx.y < n_segment - 1) {
// middle
// Shift forward: write this segment's pixel into segment + 1.
shift_bid = (blockIdx.z * gridDim.y + (blockIdx.y + 1)) * gridDim.x + blockIdx.x;
output[shift_bid * blockDim.x + threadIdx.x] = input[tid];
} else {
// output[tid] = input[tid];
// output[tid] = (T)0.0f;
// NOTE(review): boundary segments of shifted channels are left
// unwritten here; the PyTorch reference pads them with zeros, so
// `output` presumably must be zero-initialized by the caller — verify.
}
} else {
// Pass-through for unshifted channels [2*fold, c).
output[tid] = input[tid];
}
// __syncthreads();
}
// grid(c, n_segment, n_batch)
// block(w*h): < 1024
// Host launcher for temporal_shift. nt = n_batch * n_segment; the block
// size is h*w and must not exceed the device limit (1024).
template<typename T>
void temporal_shift_kernelLauncher(T *output, T *input, int nt, int c, int h, int w, int n_segment, int fold_div, hipStream_t stream) {
int n_batch = int(nt / n_segment);
dim3 grid(c, n_segment, n_batch);
int blocSize = h * w;
int fold = int(c / fold_div);
if (std::is_same<T, half>::value) {
// NOTE(review): for half the buffers are reinterpreted as half2 while the
// grid/block dimensions are unchanged, doubling the addressed byte range
// relative to a half element count — confirm callers pass dimensions sized
// for half2 in this case.
hipLaunchKernelGGL(( temporal_shift), dim3(grid), dim3(blocSize), 0, stream, (half2 *) output, (const half2 *) input, n_segment, fold);
} else {
hipLaunchKernelGGL(( temporal_shift), dim3(grid), dim3(blocSize), 0, stream, output, input, n_segment, fold);
}
}
template void temporal_shift_kernelLauncher(float *output, float *input, int nt, int c, int h, int w, int n_segment, int fold_div, hipStream_t stream);
template void temporal_shift_kernelLauncher(half *output, half *input, int nt, int c, int h, int w, int n_segment, int fold_div, hipStream_t stream);
//void temporal_shift_kernelLauncher(float *output, float *input, int nt, int c, int h, int w, int n_segment, int fold_div, hipStream_t stream) {
// int n_batch = int(nt / n_segment);
// dim3 grid(n_batch, n_segment, c);
// int blocSize = h * w;
// int fold = int(c / fold_div);
// hipLaunchKernelGGL(( temporal_shift), dim3(grid), dim3(blocSize), 0, stream, output, input, n_segment, fold);
//}
// 2x2 space-to-depth rearrangement ("Focus" layer): each 2x2 spatial patch of
// an input channel is scattered into 4 output channels, so the output has
// shape (n, 4*c, h/2, w/2). Expected launch: grid = (h, c, n), block = (w).
template<typename T>
__global__ void focus(T *output, const T *input, int h, int w) {
// Source index: one thread per input pixel; blockIdx.x is the input row.
const size_t bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
const size_t tid = threadIdx.x;
const size_t si = bid * blockDim.x + tid;
bool ix = tid % 2 == 0; // even column?
bool iy = blockIdx.x % 2 == 0; // even row?
// tid < w, blockIdx.x < h
// Select the destination channel group (0..3) from the pixel's row/column
// parity; gridDim.y = c, gridDim.x = h, blockDim.x = w.
size_t dst_bid;
if (iy && ix) {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y) * gridDim.x / 2 + blockIdx.x / 2;
} else if ((!iy) && ix) {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y + 1 * gridDim.y) * gridDim.x / 2 + blockIdx.x / 2;
} else if (iy && !ix) {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y + 2 * gridDim.y) * gridDim.x / 2 + blockIdx.x / 2;
} else {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y + 3 * gridDim.y) * gridDim.x / 2 + blockIdx.x / 2;
}
// Halved spatial coordinates inside the destination channel.
auto di = dst_bid * (blockDim.x / 2) + tid / 2;
output[di] = input[si];
}
// Host launcher for the focus (space-to-depth) kernel.
// One block per input row: grid = (h, c, n); one thread per input column.
// The block size w must fit the device limit of 1024 threads.
template<typename T>
void focus_kernelLauncher(T *output, T *input, int n, int c, int h, int w, hipStream_t stream) {
    assert(w <= 1024);
    dim3 gridDims(h, c, n);
    dim3 blockDims(w);
    hipLaunchKernelGGL(( focus), dim3(gridDims), dim3(blockDims), 0, stream, output, input, h, w);
}
template void focus_kernelLauncher(float *output, float *input, int n, int c, int h, int w, hipStream_t stream);
template void focus_kernelLauncher(half *output, half *input, int n, int c, int h, int w, hipStream_t stream);
template void focus_kernelLauncher(half2 *output, half2 *input, int n, int c, int h, int w, hipStream_t stream);
// Logistic sigmoid: 1 / (1 + e^(-x)).
__device__ float sigmoid(float data) {
    const float denom = 1.0f + expf(-data);
    return 1.0f / denom;
}
// YOLOv5-style anchor decoding: applies sigmoid to every prediction channel,
// then converts channels 0..3 from raw outputs to absolute box values
// (center xy via grid cell + stride, wh via anchor scaling).
// Expected launch: grid = (w*h, na, n), block = (no); blockIdx.x = cell,
// blockIdx.y = anchor, threadIdx.x = channel.
template<typename T>
__global__ void anchor_decode(T *output, const T *input, int w, T *anchors, T stride) {
auto sid = ((blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
auto y = sigmoid(input[sid]);
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// v5: https://github.com/ultralytics/yolov5/issues/471
if (threadIdx.x < 4) {
// Recover the cell's (row, col) from the flattened spatial block index.
int row = blockIdx.x / w;
int col = blockIdx.x % w;
int anchor_idx = blockIdx.y;
if (threadIdx.x == 0) {
y = (y * 2.0f - 0.5f + col) * stride; // box center x
} else if (threadIdx.x == 1) {
y = (y * 2.0f - 0.5f + row) * stride; // box center y
} else if (threadIdx.x == 2) {
y = powf((y * 2.0f), 2.0f) * anchors[2 * anchor_idx]; // box width
} else {
y = powf((y * 2.0f), 2.0f) * anchors[2 * anchor_idx + 1]; // box height
}
}
output[sid] = y;
}
// Host launcher for anchor_decode: one thread per output scalar.
// Grid covers (cells, anchors, batch); the block covers the `no` channel dim.
void anchor_decode_kernelLauncher(float *output, const float *input, int n, int na, int no, int h, int w, float *anchors, float stride, hipStream_t stream) {
    dim3 gridDims(w * h, na, n);
    dim3 blockDims(no);
    hipLaunchKernelGGL(( anchor_decode), dim3(gridDims), dim3(blockDims), 0, stream, (float *) output, (const float *) input, w, anchors, stride);
}
| ae35b9d2d43d1ace83e0d38c518f4be984f1560d.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_kernels.h"
/**
* def shift(x, n_segment, fold_div=3):
nt, c, h, w = x.size()
n_batch = int(nt / n_segment)
x = x.view(n_batch, n_segment, c, h, w)
fold = int(c / fold_div)
left_side = torch.cat((x[:, 1:, :fold], torch.zeros(n_batch, 1, fold, h, w).to(x.device)), dim=1)
middle_side = torch.cat((torch.zeros(n_batch, 1, fold, h, w).to(x.device), x[:, :n_segment - 1, fold: 2 * fold]), dim=1)
out = torch.cat((left_side, middle_side, x[:, :, 2 * fold:]), dim=2)
return out.view(nt, c, h, w)
*/
// grid(c, n_segment, n_batch)
// block(w*h)
// Temporal Shift Module (TSM) kernel: channels [0, fold) are shifted one
// segment backward in time, channels [fold, 2*fold) one segment forward,
// remaining channels copied through unchanged.
// Expected launch: grid = (c, n_segment, n_batch), block = (h*w), so
// blockIdx.x = channel, blockIdx.y = segment, blockIdx.z = batch.
template<typename T>
__global__ void temporal_shift(T *output, const T *input, int n_segment,
int fold) {
// input (n_batch, n_segment, c, h, w)
// Flatten (batch, segment, channel) to a block index; tid is one pixel.
const size_t bid =
(blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
const size_t tid = bid * blockDim.x + threadIdx.x;
if (blockIdx.x < 2 * fold) {
size_t shift_bid;
if (blockIdx.x < fold && blockIdx.y >= 1) {
// left
// Shift backward: write this segment's pixel into segment - 1.
shift_bid = (blockIdx.z * gridDim.y + (blockIdx.y - 1)) * gridDim.x + blockIdx.x;
output[shift_bid * blockDim.x + threadIdx.x] = input[tid];
} else if (blockIdx.x >= fold && blockIdx.y < n_segment - 1) {
// middle
// Shift forward: write this segment's pixel into segment + 1.
shift_bid = (blockIdx.z * gridDim.y + (blockIdx.y + 1)) * gridDim.x + blockIdx.x;
output[shift_bid * blockDim.x + threadIdx.x] = input[tid];
} else {
// output[tid] = input[tid];
// output[tid] = (T)0.0f;
// NOTE(review): boundary segments of shifted channels are left
// unwritten here; the PyTorch reference pads them with zeros, so
// `output` presumably must be zero-initialized by the caller — verify.
}
} else {
// Pass-through for unshifted channels [2*fold, c).
output[tid] = input[tid];
}
// __syncthreads();
}
// grid(c, n_segment, n_batch)
// block(w*h): < 1024
// Host launcher for temporal_shift. nt = n_batch * n_segment; the block
// size is h*w and must not exceed the device limit (1024).
template<typename T>
void temporal_shift_kernelLauncher(T *output, T *input, int nt, int c, int h, int w, int n_segment, int fold_div, cudaStream_t stream) {
int n_batch = int(nt / n_segment);
dim3 grid(c, n_segment, n_batch);
int blocSize = h * w;
int fold = int(c / fold_div);
if (std::is_same<T, half>::value) {
// NOTE(review): for half the buffers are reinterpreted as half2 while the
// grid/block dimensions are unchanged, doubling the addressed byte range
// relative to a half element count — confirm callers pass dimensions sized
// for half2 in this case.
temporal_shift<<<grid, blocSize, 0, stream>>>((half2 *) output, (const half2 *) input, n_segment, fold);
} else {
temporal_shift<<<grid, blocSize, 0, stream>>>(output, input, n_segment, fold);
}
}
template void temporal_shift_kernelLauncher(float *output, float *input, int nt, int c, int h, int w, int n_segment, int fold_div, cudaStream_t stream);
template void temporal_shift_kernelLauncher(half *output, half *input, int nt, int c, int h, int w, int n_segment, int fold_div, cudaStream_t stream);
//void temporal_shift_kernelLauncher(float *output, float *input, int nt, int c, int h, int w, int n_segment, int fold_div, cudaStream_t stream) {
// int n_batch = int(nt / n_segment);
// dim3 grid(n_batch, n_segment, c);
// int blocSize = h * w;
// int fold = int(c / fold_div);
// temporal_shift<<<grid, blocSize, 0, stream>>>(output, input, n_segment, fold);
//}
// 2x2 space-to-depth rearrangement ("Focus" layer): each 2x2 spatial patch of
// an input channel is scattered into 4 output channels, so the output has
// shape (n, 4*c, h/2, w/2). Expected launch: grid = (h, c, n), block = (w).
template<typename T>
__global__ void focus(T *output, const T *input, int h, int w) {
// Source index: one thread per input pixel; blockIdx.x is the input row.
const size_t bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
const size_t tid = threadIdx.x;
const size_t si = bid * blockDim.x + tid;
bool ix = tid % 2 == 0; // even column?
bool iy = blockIdx.x % 2 == 0; // even row?
// tid < w, blockIdx.x < h
// Select the destination channel group (0..3) from the pixel's row/column
// parity; gridDim.y = c, gridDim.x = h, blockDim.x = w.
size_t dst_bid;
if (iy && ix) {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y) * gridDim.x / 2 + blockIdx.x / 2;
} else if ((!iy) && ix) {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y + 1 * gridDim.y) * gridDim.x / 2 + blockIdx.x / 2;
} else if (iy && !ix) {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y + 2 * gridDim.y) * gridDim.x / 2 + blockIdx.x / 2;
} else {
dst_bid = (blockIdx.z * (4 * gridDim.y) + blockIdx.y + 3 * gridDim.y) * gridDim.x / 2 + blockIdx.x / 2;
}
// Halved spatial coordinates inside the destination channel.
auto di = dst_bid * (blockDim.x / 2) + tid / 2;
output[di] = input[si];
}
// Host launcher for the focus (space-to-depth) kernel.
// One block per input row: grid = (h, c, n); one thread per input column.
// The block size w must fit the device limit of 1024 threads.
template<typename T>
void focus_kernelLauncher(T *output, T *input, int n, int c, int h, int w, cudaStream_t stream) {
    assert(w <= 1024);
    dim3 gridDims(h, c, n);
    dim3 blockDims(w);
    focus<<<gridDims, blockDims, 0, stream>>>(output, input, h, w);
}
template void focus_kernelLauncher(float *output, float *input, int n, int c, int h, int w, cudaStream_t stream);
template void focus_kernelLauncher(half *output, half *input, int n, int c, int h, int w, cudaStream_t stream);
template void focus_kernelLauncher(half2 *output, half2 *input, int n, int c, int h, int w, cudaStream_t stream);
// Logistic sigmoid: 1 / (1 + e^(-x)).
__device__ float sigmoid(float data) {
    const float denom = 1.0f + expf(-data);
    return 1.0f / denom;
}
// YOLOv5-style anchor decoding: applies sigmoid to every prediction channel,
// then converts channels 0..3 from raw outputs to absolute box values
// (center xy via grid cell + stride, wh via anchor scaling).
// Expected launch: grid = (w*h, na, n), block = (no); blockIdx.x = cell,
// blockIdx.y = anchor, threadIdx.x = channel.
template<typename T>
__global__ void anchor_decode(T *output, const T *input, int w, T *anchors, T stride) {
auto sid = ((blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
auto y = sigmoid(input[sid]);
// pytorch:
// y = x[i].sigmoid()
// y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy
// y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
// v5: https://github.com/ultralytics/yolov5/issues/471
if (threadIdx.x < 4) {
// Recover the cell's (row, col) from the flattened spatial block index.
int row = blockIdx.x / w;
int col = blockIdx.x % w;
int anchor_idx = blockIdx.y;
if (threadIdx.x == 0) {
y = (y * 2.0f - 0.5f + col) * stride; // box center x
} else if (threadIdx.x == 1) {
y = (y * 2.0f - 0.5f + row) * stride; // box center y
} else if (threadIdx.x == 2) {
y = powf((y * 2.0f), 2.0f) * anchors[2 * anchor_idx]; // box width
} else {
y = powf((y * 2.0f), 2.0f) * anchors[2 * anchor_idx + 1]; // box height
}
}
output[sid] = y;
}
// Host launcher for anchor_decode: one thread per output scalar.
// Grid covers (cells, anchors, batch); the block covers the `no` channel dim.
void anchor_decode_kernelLauncher(float *output, const float *input, int n, int na, int no, int h, int w, float *anchors, float stride, cudaStream_t stream) {
    dim3 gridDims(w * h, na, n);
    dim3 blockDims(no);
    anchor_decode<<<gridDims, blockDims, 0, stream>>>((float *) output, (const float *) input, w, anchors, stride);
}
|
f4dcb643b23216b72701711d7a0488fb5280a04d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/vecmath.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace mathfunc
{
//////////////////////////////////////////////////////////////////////////////////////
// Compare
// Binary comparison functor: yields 255 where the operands differ and 0 where
// they are equal (OpenCV's 0/255 mask convention).
template <typename T1, typename T2>
struct NotEqual
{
    __device__ __forceinline__ uchar operator()(const T1& a, const T2& b)
    {
        return (a != b) ? static_cast<uchar>(255) : static_cast<uchar>(0);
    }
};
// Element-wise "not equal" over two 2D device matrices, producing a 0/255
// uchar mask via the generic transform framework.
template <typename T1, typename T2>
inline void compare_ne(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream)
{
    transform(static_cast< DevMem2D_<T1> >(src1), static_cast< DevMem2D_<T2> >(src2), dst, NotEqual<T1, T2>(), stream);
}
// 8UC4 comparison: each 4-channel pixel is compared as one packed 32-bit
// word, so any differing channel marks the pixel as "not equal".
void compare_ne_8uc4(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream)
{
compare_ne<uint, uint>(src1, src2, dst, stream);
}
// 32F comparison: element-wise float inequality (exact bit-level semantics of
// operator!=, including NaN != NaN being true).
void compare_ne_32f(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream)
{
compare_ne<float, float>(src1, src2, dst, stream);
}
//////////////////////////////////////////////////////////////////////////
// Unary bitwise logical matrix operations
// Tag for the unary bitwise operation (only NOT is implemented).
enum { UN_OP_NOT };
// Primary template intentionally left undefined; one specialization per op.
template <typename T, int opid>
struct UnOp;
template <typename T>
struct UnOp<T, UN_OP_NOT>
{
// Bitwise complement.
static __device__ __forceinline__ T call(T v) { return ~v; }
};
// Applies the unary bitwise op to a 2D byte region. Each thread owns 4
// consecutive bytes of one row: full uint words go through a single
// load/op/store, and the ragged tail of a row falls back to byte-wise work.
template <int opid>
__global__ void bitwiseUnOpKernel(int rows, int width, const PtrStep src, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src_ptr = src.ptr(y) + x;
if (x + sizeof(uint) - 1 < width)
{
// Fast path: all 4 bytes lie inside the row — one word-wide op.
*(uint*)dst_ptr = UnOp<uint, opid>::call(*(uint*)src_ptr);
}
else
{
// Tail path: process the remaining 0..3 bytes individually
// (loop body does not execute when x >= width).
const uchar* src_end = src.ptr(y) + width;
while (src_ptr < src_end)
{
*dst_ptr++ = UnOp<uchar, opid>::call(*src_ptr++);
}
}
}
}
// Launches the unmasked unary-op kernel over a rows x width byte region.
// Asynchronous when a non-null stream is supplied; synchronizes otherwise.
template <int opid>
void bitwiseUnOp(int rows, int width, const PtrStep src, PtrStep dst,
hipStream_t stream)
{
    // Each thread handles sizeof(uint) = 4 bytes of a row.
    dim3 threads(16, 16);
    dim3 grid(divUp(width, threads.x * sizeof(uint)),
              divUp(rows, threads.y));
    // Fix: honor the caller-supplied stream — previously the stream argument
    // was accepted but the kernel always ran on the default stream, defeating
    // the async path implied by the `if (stream == 0)` sync below.
    hipLaunchKernelGGL(( bitwiseUnOpKernel<opid>), dim3(grid), dim3(threads), 0, stream, rows, width, src, dst);
    cudaSafeCall( hipGetLastError() );
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Masked element-wise unary bitwise op: element (y, x) is processed only when
// the mask byte of its pixel is non-zero (x / cn collapses channels so all
// channels of a pixel share one mask entry).
template <typename T, int opid>
__global__ void bitwiseUnOpKernel(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src_row = (const T*)src.ptr(y);
dst_row[x] = UnOp<T, opid>::call(src_row[x]);
}
}
// Launches the masked unary-op kernel over a rows x cols element grid.
// Asynchronous when a non-null stream is supplied; synchronizes otherwise.
template <typename T, int opid>
void bitwiseUnOp(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
    dim3 threads(16, 16);
    dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
    // Fix: launch on the caller-supplied stream; previously the stream
    // argument was accepted but the kernel always ran on the default stream.
    hipLaunchKernelGGL(( bitwiseUnOpKernel<T, opid>), dim3(grid), dim3(threads), 0, stream, rows, cols, cn, src, mask, dst);
    cudaSafeCall( hipGetLastError() );
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Entry point for unmasked bitwise NOT: flattens the matrix to a raw byte
// region (cols * elem_size1 * cn bytes per row) and inverts it word-wise.
void bitwiseNotCaller(int rows, int cols, int elem_size1, int cn,
const PtrStep src, PtrStep dst, hipStream_t stream)
{
bitwiseUnOp<UN_OP_NOT>(rows, cols * elem_size1 * cn, src, dst, stream);
}
// Entry point for masked bitwise NOT: operates on cols * cn elements of type
// T per row; cn lets the kernel map each element back to its pixel's mask.
template <typename T>
void bitwiseMaskNotCaller(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseUnOp<T, UN_OP_NOT>(rows, cols * cn, cn, src, mask, dst, stream);
}
// Explicit instantiations for the element sizes used by the host wrappers.
template void bitwiseMaskNotCaller<uchar>(int, int, int, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskNotCaller<ushort>(int, int, int, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskNotCaller<uint>(int, int, int, const PtrStep, const PtrStep, PtrStep, hipStream_t);
//////////////////////////////////////////////////////////////////////////
// Binary bitwise logical matrix operations
// Tags and functors for the binary bitwise operations.
enum { BIN_OP_OR, BIN_OP_AND, BIN_OP_XOR };
// Primary template intentionally left undefined; one specialization per op.
template <typename T, int opid>
struct BinOp;
template <typename T>
struct BinOp<T, BIN_OP_OR>
{
static __device__ __forceinline__ T call(T a, T b) { return a | b; }
};
template <typename T>
struct BinOp<T, BIN_OP_AND>
{
static __device__ __forceinline__ T call(T a, T b) { return a & b; }
};
template <typename T>
struct BinOp<T, BIN_OP_XOR>
{
static __device__ __forceinline__ T call(T a, T b) { return a ^ b; }
};
// Applies the binary bitwise op to two 2D byte regions. Each thread owns 4
// consecutive bytes of one row: full uint words go through a single
// load/op/store, and the ragged tail of a row falls back to byte-wise work.
template <int opid>
__global__ void bitwiseBinOpKernel(int rows, int width, const PtrStep src1,
const PtrStep src2, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src1_ptr = src1.ptr(y) + x;
const uchar* src2_ptr = src2.ptr(y) + x;
if (x + sizeof(uint) - 1 < width)
{
// Fast path: all 4 bytes lie inside the row — one word-wide op.
*(uint*)dst_ptr = BinOp<uint, opid>::call(*(uint*)src1_ptr, *(uint*)src2_ptr);
}
else
{
// Tail path: process the remaining 0..3 bytes individually.
const uchar* src1_end = src1.ptr(y) + width;
while (src1_ptr < src1_end)
{
*dst_ptr++ = BinOp<uchar, opid>::call(*src1_ptr++, *src2_ptr++);
}
}
}
}
// Launches the unmasked binary-op kernel over a rows x width byte region.
// Asynchronous when a non-null stream is supplied; synchronizes otherwise.
template <int opid>
void bitwiseBinOp(int rows, int width, const PtrStep src1, const PtrStep src2,
PtrStep dst, hipStream_t stream)
{
    // Each thread handles sizeof(uint) = 4 bytes of a row.
    dim3 threads(16, 16);
    dim3 grid(divUp(width, threads.x * sizeof(uint)), divUp(rows, threads.y));
    // Fix: honor the caller-supplied stream — previously the stream argument
    // was accepted but the kernel always ran on the default stream.
    hipLaunchKernelGGL(( bitwiseBinOpKernel<opid>), dim3(grid), dim3(threads), 0, stream, rows, width, src1, src2, dst);
    cudaSafeCall( hipGetLastError() );
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Masked element-wise binary bitwise op: element (y, x) is processed only when
// the mask byte of its pixel is non-zero (x / cn collapses channels so all
// channels of a pixel share one mask entry).
template <typename T, int opid>
__global__ void bitwiseBinOpKernel(
int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src1_row = (const T*)src1.ptr(y);
const T* src2_row = (const T*)src2.ptr(y);
dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);
}
}
// Launches the masked binary-op kernel over a rows x cols element grid.
// Asynchronous when a non-null stream is supplied; synchronizes otherwise.
template <typename T, int opid>
void bitwiseBinOp(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
    dim3 threads(16, 16);
    dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
    // Fix: launch on the caller-supplied stream; previously the stream
    // argument was accepted but the kernel always ran on the default stream.
    hipLaunchKernelGGL(( bitwiseBinOpKernel<T, opid>), dim3(grid), dim3(threads), 0, stream, rows, cols, cn, src1, src2, mask, dst);
    cudaSafeCall( hipGetLastError() );
    if (stream == 0)
        cudaSafeCall( hipDeviceSynchronize() );
}
// Entry point for unmasked bitwise OR: flattens the matrices to raw byte
// regions (cols * elem_size1 * cn bytes per row).
void bitwiseOrCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<BIN_OP_OR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
// Masked bitwise OR over cols * cn elements of type T per row.
template <typename T>
void bitwiseMaskOrCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<T, BIN_OP_OR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskOrCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskOrCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskOrCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
// Entry point for unmasked bitwise AND: flattens the matrices to raw byte
// regions (cols * elem_size1 * cn bytes per row).
void bitwiseAndCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<BIN_OP_AND>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
// Masked bitwise AND over cols * cn elements of type T per row.
template <typename T>
void bitwiseMaskAndCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<T, BIN_OP_AND>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskAndCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskAndCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskAndCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
// Entry point for unmasked bitwise XOR: flattens the matrices to raw byte
// regions (cols * elem_size1 * cn bytes per row).
void bitwiseXorCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<BIN_OP_XOR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
// Masked bitwise XOR over cols * cn elements of type T per row.
template <typename T>
void bitwiseMaskXorCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<T, BIN_OP_XOR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskXorCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskXorCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskXorCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
//////////////////////////////////////////////////////////////////////////
// min/max
// Binary device functor yielding the element-wise minimum. The generic
// template handles the integral types; dedicated float/double overloads
// route through fmin so floating-point min semantics apply on the device.
struct MinOp
{
    template <typename T>
    __device__ __forceinline__ T operator()(T lhs, T rhs)
    {
        return min(lhs, rhs);
    }
    __device__ __forceinline__ float operator()(float lhs, float rhs)
    {
        return fmin(lhs, rhs);
    }
    __device__ __forceinline__ double operator()(double lhs, double rhs)
    {
        return fmin(lhs, rhs);
    }
};
// Binary device functor yielding the element-wise maximum; mirrors MinOp,
// with fmax used for the floating-point overloads.
struct MaxOp
{
    template <typename T>
    __device__ __forceinline__ T operator()(T lhs, T rhs)
    {
        return max(lhs, rhs);
    }
    __device__ __forceinline__ float operator()(float lhs, float rhs)
    {
        return fmax(lhs, rhs);
    }
    __device__ __forceinline__ double operator()(double lhs, double rhs)
    {
        return fmax(lhs, rhs);
    }
};
// Unary device functor clamping each element from above by a fixed scalar s,
// i.e. result = min(x, s). Float/double specialisations use fmin.
template <typename T> struct ScalarMinOp
{
    T s;
    explicit ScalarMinOp(T s_) : s(s_) {}
    __device__ __forceinline__ T operator()(T x)
    {
        return min(x, s);
    }
};
template <> struct ScalarMinOp<float>
{
    float s;
    explicit ScalarMinOp(float s_) : s(s_) {}
    __device__ __forceinline__ float operator()(float x)
    {
        return fmin(x, s);
    }
};
template <> struct ScalarMinOp<double>
{
    double s;
    explicit ScalarMinOp(double s_) : s(s_) {}
    __device__ __forceinline__ double operator()(double x)
    {
        return fmin(x, s);
    }
};
// Unary device functor clamping each element from below by a fixed scalar s,
// i.e. result = max(x, s). Float/double specialisations use fmax.
template <typename T> struct ScalarMaxOp
{
    T s;
    explicit ScalarMaxOp(T s_) : s(s_) {}
    __device__ __forceinline__ T operator()(T x)
    {
        return max(x, s);
    }
};
template <> struct ScalarMaxOp<float>
{
    float s;
    explicit ScalarMaxOp(float s_) : s(s_) {}
    __device__ __forceinline__ float operator()(float x)
    {
        return fmax(x, s);
    }
};
template <> struct ScalarMaxOp<double>
{
    double s;
    explicit ScalarMaxOp(double s_) : s(s_) {}
    __device__ __forceinline__ double operator()(double x)
    {
        return fmax(x, s);
    }
};
// Element-wise minimum of two matrices: dst = min(src1, src2), launched via
// the generic transform kernel on the given stream.
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
MinOp op;
transform(src1, src2, dst, op, stream);
}
// Explicit instantiations for every element type supported by the host API.
template void min_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void min_gpu<int   >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, hipStream_t stream);
// Element-wise maximum of two matrices: dst = max(src1, src2).
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
MaxOp op;
transform(src1, src2, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void max_gpu<int   >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, hipStream_t stream);
// Element-wise minimum against a scalar: dst = min(src1, src2) where src2 is
// a single value broadcast over the matrix.
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
ScalarMinOp<T> op(src2);
transform(src1, dst, op, stream);
}
// Explicit instantiations for every element type supported by the host API.
template void min_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, hipStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void min_gpu<int   >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, hipStream_t stream);
// Element-wise maximum against a broadcast scalar: dst = max(src1, src2).
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
ScalarMaxOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, hipStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void max_gpu<int   >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, hipStream_t stream);
//////////////////////////////////////////////////////////////////////////
// threshold
// Thresholding functors, one per OpenCV threshold mode. Each stores the
// parameters at construction and applies the rule per element in operator().
// THRESH_BINARY: src > thresh ? maxVal : 0.
template <typename T> struct ThreshBinary
{
ThreshBinary(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? maxVal : 0;
}
private:
T thresh;
T maxVal;
};
// THRESH_BINARY_INV: src > thresh ? 0 : maxVal.
template <typename T> struct ThreshBinaryInv
{
ThreshBinaryInv(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : maxVal;
}
private:
T thresh;
T maxVal;
};
// THRESH_TRUNC: clamp to thresh (maxVal is accepted but unused, keeping the
// constructor signature uniform across all five functors).
template <typename T> struct ThreshTrunc
{
ThreshTrunc(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return min(src, thresh);
}
private:
T thresh;
};
// Float specialisation routes through fmin for device FP semantics.
template <> struct ThreshTrunc<float>
{
ThreshTrunc(float thresh_, float) : thresh(thresh_) {}
__device__ __forceinline__ float operator()(const float& src) const
{
return fmin(src, thresh);
}
private:
float thresh;
};
template <> struct ThreshTrunc<double>
{
ThreshTrunc(double thresh_, double) : thresh(thresh_) {}
__device__ __forceinline__ double operator()(const double& src) const
{
return fmin(src, thresh);
}
private:
double thresh;
};
// THRESH_TOZERO: keep values above thresh, zero the rest.
template <typename T> struct ThreshToZero
{
public:
ThreshToZero(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? src : 0;
}
private:
T thresh;
};
// THRESH_TOZERO_INV: zero values above thresh, keep the rest.
template <typename T> struct ThreshToZeroInv
{
public:
ThreshToZeroInv(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : src;
}
private:
T thresh;
};
// Builds the thresholding functor Op<T> from (thresh, maxVal) and applies it
// element-wise to src via the generic transform kernel on the given stream.
template <template <typename> class Op, typename T>
void threshold_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
                      hipStream_t stream)
{
    Op<T> functor(thresh, maxVal);
    transform(src, dst, functor, stream);
}
// Dispatches a threshold operation by mode index. The callers[] order must
// match the caller-side threshold-type enum (presumably OpenCV's
// THRESH_BINARY..THRESH_TOZERO_INV ordering — TODO confirm against the host
// wrapper). NOTE(review): `type` is not bounds-checked here; the caller must
// pass a value in [0, 4].
template <typename T>
void threshold_gpu(const DevMem2D& src, const DevMem2D& dst, T thresh, T maxVal, int type,
hipStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
hipStream_t stream);
static const caller_t callers[] =
{
threshold_caller<ThreshBinary, T>,
threshold_caller<ThreshBinaryInv, T>,
threshold_caller<ThreshTrunc, T>,
threshold_caller<ThreshToZero, T>,
threshold_caller<ThreshToZeroInv, T>
};
callers[type]((DevMem2D_<T>)src, (DevMem2D_<T>)dst, thresh, maxVal, stream);
}
// Explicit instantiations for every element type supported by the host API.
template void threshold_gpu<uchar>(const DevMem2D& src, const DevMem2D& dst, uchar thresh, uchar maxVal, int type, hipStream_t stream);
template void threshold_gpu<schar>(const DevMem2D& src, const DevMem2D& dst, schar thresh, schar maxVal, int type, hipStream_t stream);
template void threshold_gpu<ushort>(const DevMem2D& src, const DevMem2D& dst, ushort thresh, ushort maxVal, int type, hipStream_t stream);
template void threshold_gpu<short>(const DevMem2D& src, const DevMem2D& dst, short thresh, short maxVal, int type, hipStream_t stream);
template void threshold_gpu<int>(const DevMem2D& src, const DevMem2D& dst, int thresh, int maxVal, int type, hipStream_t stream);
template void threshold_gpu<float>(const DevMem2D& src, const DevMem2D& dst, float thresh, float maxVal, int type, hipStream_t stream);
template void threshold_gpu<double>(const DevMem2D& src, const DevMem2D& dst, double thresh, double maxVal, int type, hipStream_t stream);
}}}
| f4dcb643b23216b72701711d7a0488fb5280a04d.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/vecmath.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace mathfunc
{
//////////////////////////////////////////////////////////////////////////////////////
// Compare
// Per-element inequality comparison producing an 8-bit mask: 255 where the
// elements differ, 0 where they are equal.
template <typename T1, typename T2>
struct NotEqual
{
__device__ __forceinline__ uchar operator()(const T1& src1, const T2& src2)
{
return static_cast<uchar>(static_cast<int>(src1 != src2) * 255);
}
};
// Generic launcher: reinterprets the untyped inputs as T1/T2 matrices and
// runs the NotEqual functor through the generic transform kernel.
template <typename T1, typename T2>
inline void compare_ne(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
NotEqual<T1, T2> op;
transform(static_cast< DevMem2D_<T1> >(src1), static_cast< DevMem2D_<T2> >(src2), dst, op, stream);
}
// 8UC4 comparison: each pixel's four channels are compared as one 32-bit word,
// so the mask is per pixel, not per channel.
void compare_ne_8uc4(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare_ne<uint, uint>(src1, src2, dst, stream);
}
// 32F comparison (bit-exact float inequality).
void compare_ne_32f(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare_ne<float, float>(src1, src2, dst, stream);
}
//////////////////////////////////////////////////////////////////////////
// Unary bitwise logical matrix operations
// Unary bitwise operation selector; dispatch happens at compile time through
// the UnOp<T, opid> specialisations.
enum { UN_OP_NOT };
template <typename T, int opid>
struct UnOp;
// Bitwise NOT of a single value of type T.
template <typename T>
struct UnOp<T, UN_OP_NOT>
{
static __device__ __forceinline__ T call(T v) { return ~v; }
};
// Byte-oriented unary bitwise kernel. Each thread owns 4 consecutive bytes of
// a row (x is a byte offset); when all 4 bytes are in range it processes them
// as one 32-bit word, otherwise it falls back to a byte loop for the row tail.
// NOTE(review): the uint fast path assumes each row base + x is 4-byte
// aligned — presumably guaranteed by the pitched allocations; verify.
template <int opid>
__global__ void bitwiseUnOpKernel(int rows, int width, const PtrStep src, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src_ptr = src.ptr(y) + x;
// Fast path: the full 4-byte word lies inside the row.
if (x + sizeof(uint) - 1 < width)
{
*(uint*)dst_ptr = UnOp<uint, opid>::call(*(uint*)src_ptr);
}
else
{
// Tail path: fewer than 4 bytes remain; process byte-by-byte.
const uchar* src_end = src.ptr(y) + width;
while (src_ptr < src_end)
{
*dst_ptr++ = UnOp<uchar, opid>::call(*src_ptr++);
}
}
}
}
// Host launcher for the unmasked unary kernel: 16x16 blocks, grid sized so
// each thread covers sizeof(uint) bytes of a row. With the default stream (0)
// it synchronizes so in-kernel errors surface immediately.
template <int opid>
void bitwiseUnOp(int rows, int width, const PtrStep src, PtrStep dst,
cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x * sizeof(uint)),
divUp(rows, threads.y));
bitwiseUnOpKernel<opid><<<grid, threads>>>(rows, width, src, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Masked unary kernel: one thread per element of type T. The mask holds one
// byte per pixel, so an element at column x (channel-interleaved) maps to
// mask[x / cn]; elements with a zero mask byte are left untouched in dst.
template <typename T, int opid>
__global__ void bitwiseUnOpKernel(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src_row = (const T*)src.ptr(y);
dst_row[x] = UnOp<T, opid>::call(src_row[x]);
}
}
// Host launcher for the masked unary kernel: 16x16 blocks, one thread per
// element; synchronizes on the default stream to surface kernel errors.
template <typename T, int opid>
void bitwiseUnOp(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
bitwiseUnOpKernel<T, opid><<<grid, threads>>>(rows, cols, cn, src, mask, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Unmasked bitwise NOT of a whole matrix, treated as a flat byte range
// (cols * elem_size1 * cn bytes per row), so any element type works.
void bitwiseNotCaller(int rows, int cols, int elem_size1, int cn,
const PtrStep src, PtrStep dst, cudaStream_t stream)
{
bitwiseUnOp<UN_OP_NOT>(rows, cols * elem_size1 * cn, src, dst, stream);
}
// Masked bitwise NOT: applied only where mask is non-zero. T selects the
// element width; cols * cn flattens channels into the column count.
template <typename T>
void bitwiseMaskNotCaller(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseUnOp<T, UN_OP_NOT>(rows, cols * cn, cn, src, mask, dst, stream);
}
// Explicit instantiations for the element widths used by the host wrappers.
template void bitwiseMaskNotCaller<uchar>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskNotCaller<ushort>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskNotCaller<uint>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
//////////////////////////////////////////////////////////////////////////
// Binary bitwise logical matrix operations
// Binary bitwise operation selector; dispatch happens at compile time through
// the BinOp<T, opid> specialisations below.
enum { BIN_OP_OR, BIN_OP_AND, BIN_OP_XOR };
template <typename T, int opid>
struct BinOp;
template <typename T>
struct BinOp<T, BIN_OP_OR>
{
static __device__ __forceinline__ T call(T a, T b) { return a | b; }
};
template <typename T>
struct BinOp<T, BIN_OP_AND>
{
static __device__ __forceinline__ T call(T a, T b) { return a & b; }
};
template <typename T>
struct BinOp<T, BIN_OP_XOR>
{
static __device__ __forceinline__ T call(T a, T b) { return a ^ b; }
};
// Byte-oriented binary bitwise kernel. Each thread owns 4 consecutive bytes
// of a row (x is a byte offset); full 4-byte words are processed as one uint,
// and the row tail falls back to a byte loop.
// NOTE(review): the uint fast path assumes row base + x is 4-byte aligned —
// presumably guaranteed by the pitched allocations; verify.
template <int opid>
__global__ void bitwiseBinOpKernel(int rows, int width, const PtrStep src1,
const PtrStep src2, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src1_ptr = src1.ptr(y) + x;
const uchar* src2_ptr = src2.ptr(y) + x;
// Fast path: the full 4-byte word lies inside the row.
if (x + sizeof(uint) - 1 < width)
{
*(uint*)dst_ptr = BinOp<uint, opid>::call(*(uint*)src1_ptr, *(uint*)src2_ptr);
}
else
{
// Tail path: fewer than 4 bytes remain; process byte-by-byte.
const uchar* src1_end = src1.ptr(y) + width;
while (src1_ptr < src1_end)
{
*dst_ptr++ = BinOp<uchar, opid>::call(*src1_ptr++, *src2_ptr++);
}
}
}
}
// Host launcher for the unmasked binary kernel: 16x16 blocks, each thread
// covering sizeof(uint) bytes; synchronizes on the default stream (0).
template <int opid>
void bitwiseBinOp(int rows, int width, const PtrStep src1, const PtrStep src2,
PtrStep dst, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x * sizeof(uint)), divUp(rows, threads.y));
bitwiseBinOpKernel<opid><<<grid, threads>>>(rows, width, src1, src2, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Masked binary kernel: one thread per element of type T. The mask holds one
// byte per pixel, so element column x maps to mask[x / cn]; elements whose
// mask byte is zero are left untouched in dst.
template <typename T, int opid>
__global__ void bitwiseBinOpKernel(
int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src1_row = (const T*)src1.ptr(y);
const T* src2_row = (const T*)src2.ptr(y);
dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);
}
}
// Host launcher for the masked binary kernel: 16x16 blocks, one thread per
// element; synchronizes on the default stream to surface kernel errors.
template <typename T, int opid>
void bitwiseBinOp(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
bitwiseBinOpKernel<T, opid><<<grid, threads>>>(rows, cols, cn, src1, src2, mask, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
// Host entry points for OR/AND/XOR. The unmasked variants flatten each row to
// raw bytes (cols * elem_size1 * cn); the masked variants work per element of
// width T with one mask byte per pixel.
void bitwiseOrCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<BIN_OP_OR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskOrCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<T, BIN_OP_OR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskOrCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskOrCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskOrCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
void bitwiseAndCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<BIN_OP_AND>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskAndCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<T, BIN_OP_AND>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskAndCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskAndCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskAndCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
void bitwiseXorCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<BIN_OP_XOR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskXorCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<T, BIN_OP_XOR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskXorCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskXorCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskXorCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
//////////////////////////////////////////////////////////////////////////
// min/max
// Min/max functors. MinOp/MaxOp are binary (two matrices); ScalarMinOp /
// ScalarMaxOp are unary with a fixed scalar bound s. Float/double overloads
// and specialisations use fmin/fmax for device FP semantics.
struct MinOp
{
template <typename T>
__device__ __forceinline__ T operator()(T a, T b)
{
return min(a, b);
}
__device__ __forceinline__ float operator()(float a, float b)
{
return fmin(a, b);
}
__device__ __forceinline__ double operator()(double a, double b)
{
return fmin(a, b);
}
};
struct MaxOp
{
template <typename T>
__device__ __forceinline__ T operator()(T a, T b)
{
return max(a, b);
}
__device__ __forceinline__ float operator()(float a, float b)
{
return fmax(a, b);
}
__device__ __forceinline__ double operator()(double a, double b)
{
return fmax(a, b);
}
};
// result = min(a, s): clamps each element from above by the scalar s.
template <typename T> struct ScalarMinOp
{
T s;
explicit ScalarMinOp(T s_) : s(s_) {}
__device__ __forceinline__ T operator()(T a)
{
return min(a, s);
}
};
template <> struct ScalarMinOp<float>
{
float s;
explicit ScalarMinOp(float s_) : s(s_) {}
__device__ __forceinline__ float operator()(float a)
{
return fmin(a, s);
}
};
template <> struct ScalarMinOp<double>
{
double s;
explicit ScalarMinOp(double s_) : s(s_) {}
__device__ __forceinline__ double operator()(double a)
{
return fmin(a, s);
}
};
// result = max(a, s): clamps each element from below by the scalar s.
template <typename T> struct ScalarMaxOp
{
T s;
explicit ScalarMaxOp(T s_) : s(s_) {}
__device__ __forceinline__ T operator()(T a)
{
return max(a, s);
}
};
template <> struct ScalarMaxOp<float>
{
float s;
explicit ScalarMaxOp(float s_) : s(s_) {}
__device__ __forceinline__ float operator()(float a)
{
return fmax(a, s);
}
};
template <> struct ScalarMaxOp<double>
{
double s;
explicit ScalarMaxOp(double s_) : s(s_) {}
__device__ __forceinline__ double operator()(double a)
{
return fmax(a, s);
}
};
// Element-wise min/max entry points. The two-matrix variants apply
// MinOp/MaxOp; the scalar variants broadcast src2 over the matrix. All run
// through the generic transform kernel on the given stream.
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
MinOp op;
transform(src1, src2, dst, op, stream);
}
template void min_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void min_gpu<int   >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, cudaStream_t stream);
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
MaxOp op;
transform(src1, src2, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void max_gpu<int   >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, cudaStream_t stream);
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
ScalarMinOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void min_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, cudaStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void min_gpu<int   >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, cudaStream_t stream);
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
ScalarMaxOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, cudaStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void max_gpu<int   >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// threshold
// Thresholding functors, one per threshold mode. Each stores its parameters
// at construction; functors that ignore maxVal still accept it so all five
// share one constructor signature.
// THRESH_BINARY: src > thresh ? maxVal : 0.
template <typename T> struct ThreshBinary
{
ThreshBinary(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? maxVal : 0;
}
private:
T thresh;
T maxVal;
};
// THRESH_BINARY_INV: src > thresh ? 0 : maxVal.
template <typename T> struct ThreshBinaryInv
{
ThreshBinaryInv(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : maxVal;
}
private:
T thresh;
T maxVal;
};
// THRESH_TRUNC: clamp to thresh (fmin in the float/double specialisations).
template <typename T> struct ThreshTrunc
{
ThreshTrunc(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return min(src, thresh);
}
private:
T thresh;
};
template <> struct ThreshTrunc<float>
{
ThreshTrunc(float thresh_, float) : thresh(thresh_) {}
__device__ __forceinline__ float operator()(const float& src) const
{
return fmin(src, thresh);
}
private:
float thresh;
};
template <> struct ThreshTrunc<double>
{
ThreshTrunc(double thresh_, double) : thresh(thresh_) {}
__device__ __forceinline__ double operator()(const double& src) const
{
return fmin(src, thresh);
}
private:
double thresh;
};
// THRESH_TOZERO: keep values above thresh, zero the rest.
template <typename T> struct ThreshToZero
{
public:
ThreshToZero(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? src : 0;
}
private:
T thresh;
};
// THRESH_TOZERO_INV: zero values above thresh, keep the rest.
template <typename T> struct ThreshToZeroInv
{
public:
ThreshToZeroInv(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : src;
}
private:
T thresh;
};
// Builds the functor Op<T>(thresh, maxVal) and applies it element-wise.
template <template <typename> class Op, typename T>
void threshold_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
cudaStream_t stream)
{
Op<T> op(thresh, maxVal);
transform(src, dst, op, stream);
}
// Dispatches by threshold mode. callers[] order must match the caller-side
// threshold-type enum (presumably THRESH_BINARY..THRESH_TOZERO_INV — TODO
// confirm). NOTE(review): `type` is not bounds-checked; must be in [0, 4].
template <typename T>
void threshold_gpu(const DevMem2D& src, const DevMem2D& dst, T thresh, T maxVal, int type,
cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
cudaStream_t stream);
static const caller_t callers[] =
{
threshold_caller<ThreshBinary, T>,
threshold_caller<ThreshBinaryInv, T>,
threshold_caller<ThreshTrunc, T>,
threshold_caller<ThreshToZero, T>,
threshold_caller<ThreshToZeroInv, T>
};
callers[type]((DevMem2D_<T>)src, (DevMem2D_<T>)dst, thresh, maxVal, stream);
}
template void threshold_gpu<uchar>(const DevMem2D& src, const DevMem2D& dst, uchar thresh, uchar maxVal, int type, cudaStream_t stream);
template void threshold_gpu<schar>(const DevMem2D& src, const DevMem2D& dst, schar thresh, schar maxVal, int type, cudaStream_t stream);
template void threshold_gpu<ushort>(const DevMem2D& src, const DevMem2D& dst, ushort thresh, ushort maxVal, int type, cudaStream_t stream);
template void threshold_gpu<short>(const DevMem2D& src, const DevMem2D& dst, short thresh, short maxVal, int type, cudaStream_t stream);
template void threshold_gpu<int>(const DevMem2D& src, const DevMem2D& dst, int thresh, int maxVal, int type, cudaStream_t stream);
template void threshold_gpu<float>(const DevMem2D& src, const DevMem2D& dst, float thresh, float maxVal, int type, cudaStream_t stream);
template void threshold_gpu<double>(const DevMem2D& src, const DevMem2D& dst, double thresh, double maxVal, int type, cudaStream_t stream);
}}}
|
6de8cf5be728ea607db4a6a3c0a1fdb84b24733d.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_grad_grad_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_impl.cuh"
constexpr int THREAD_PER_BLOCK = 256;
constexpr int NUM_PER_THREAD_REDUCE = 4;
constexpr int WARP_SIZE = 32;
constexpr int NUM_SHARED_SUM_INPUT = 7;
constexpr int NUM_SHARED_SUM_GAMMA = 3;
// Device power helper. The exponent is deliberately narrowed to float before
// calling pow; the half specialisation computes in float and converts back.
template <typename T>
inline __device__ T my_pow(T a, double b) {
return pow(a, static_cast<float>(b));
}
template <>
inline __device__ half my_pow(half a, double b) {
return __float2half(pow(__half2float(a), static_cast<float>(b)));
}
// Per-thread partial reduction over the rows of one column `col`. Threads of
// the block stride over chunks of NUM_PER_THREAD_REDUCE rows, accumulating
// three partial sums (part1..part3) used later by the block reduction.
// v1 is the centred input, v2 = (var + epsilon)^(-1/2) (inverse std-dev).
// The meaning of global_sum1/global_sum2 is defined by the caller — TODO
// confirm against the kernel that fills them.
template <typename T>
inline __device__ void GammaAndBetaThreadReduce(const int &col, const int &row_dim, const int &col_dim,
const int &mean_dim, const T &epsilon, const T *dy, const T *x,
const T *mean, const T *var, const T *grad_dx, T *part1, T *part2,
T *part3, const T *global_sum1, const T *global_sum2) {
int loop_num = (row_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
int row = NUM_PER_THREAD_REDUCE * i + j;
if (row >= row_dim) {
// Safe to return (not just break): this thread's row indices only
// grow across both loops, so all remaining rows are out of range too.
return;
}
int pos = row * col_dim + col;
int mean_offset = pos / mean_dim;
T v1 = x[pos] - mean[mean_offset];
T v2 = my_pow(var[mean_offset] + epsilon, -0.5);
part1[0] += dy[pos] * v1 * v2 * global_sum2[pos];
part2[0] += dy[pos] * global_sum1[pos];
part3[0] += dy[pos] * v2 * grad_dx[pos];
}
}
}
// Warp-level reduction: fold the three per-thread partials down to lane 0
// with shuffle-down over the full warp mask.
template <typename T>
inline __device__ void GammaAndBetaWarpReduce(T *part1, T *part2, T *part3) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    *part1 += __shfl_down_sync(0xffffffff, *part1, offset);
    *part2 += __shfl_down_sync(0xffffffff, *part2, offset);
    *part3 += __shfl_down_sync(0xffffffff, *part3, offset);
  }
}
// Block-level reduction for the gamma-gradient pass.  Each warp leader stages
// its three warp-reduced partials into dynamic shared memory (stride
// NUM_SHARED_SUM_GAMMA per warp), a tree reduction folds the per-warp records
// together, and thread 0 writes the final value to d_gamma[col].
template <typename T>
inline __device__ void GammaAndBetaBlockReduce(const int &col, const int &row_dim, T *part1, T *part2, T *part3,
                                               T *d_gamma) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...) keep the data
  DynamicSharedMem<T> share_mem;
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_GAMMA;
    share_mem.addr()[offset] = part1[0];
    share_mem.addr()[offset + 1] = part2[0];
    share_mem.addr()[offset + 2] = part3[0];
  }
  __syncthreads();
  // NOTE(review): no barrier inside this loop; with THREAD_PER_BLOCK=256 the
  // active threads (< blockDim/WARP_SIZE/2 = 4) all fall in warp 0, so this
  // relies on implicit warp synchrony -- verify on architectures with
  // independent thread scheduling.
  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_GAMMA;
      share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA] += share_mem.addr()[offset];
      share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA + 1] += share_mem.addr()[offset + 1];
      share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA + 2] += share_mem.addr()[offset + 2];
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    // final gradient is the sum of the three reduced partials
    d_gamma[col] = share_mem.addr()[0] + share_mem.addr()[1] + share_mem.addr()[2];
  }
}
// Gamma-gradient kernel: one block per column (grid-stride over columns so any
// grid size covers col_dim).  Per-thread, warp, then block reduction produces
// d_gamma[col].  Requires dynamic shared memory sized for
// blockDim/WARP_SIZE * NUM_SHARED_SUM_GAMMA elements of T.
template <typename T>
__global__ void GammaAndBetaPropKernel(const int row_dim, const int col_dim, const int mean_dim, const T epsilon,
                                       const T *dy, const T *x, const T *mean, const T *var, const T *grad_dx,
                                       T *d_gamma, T *global_sum1, T *global_sum2) {
  for (int col = blockIdx.x; col < col_dim; col += gridDim.x) {
    T part1 = 0;
    T part2 = 0;
    T part3 = 0;
    GammaAndBetaThreadReduce(col, row_dim, col_dim, mean_dim, epsilon, dy, x, mean, var, grad_dx, &part1, &part2,
                             &part3, global_sum1, global_sum2);
    GammaAndBetaWarpReduce(&part1, &part2, &part3);
    GammaAndBetaBlockReduce(col, row_dim, &part1, &part2, &part3, d_gamma);
  }
}
// First-pass per-thread reduction over the columns of `row`: accumulates the
// four inner partial sums (sum1..sum4) consumed by the outer-mean pass.
// sum1/sum2 collect negated rstd-weighted grad_dx terms; sum3/sum4 collect
// gamma-scaled dy terms.
// Fix: the third parameter had been corrupted by an HTML entity
// ("¶m_dim", i.e. &para; + "m_dim") and is restored to `&param_dim`.
template <typename T>
inline __device__ void InputThreadReduceInnerMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const T &epsilon, T *sum1, T *sum2, T *sum3, T *sum4,
                                                  const T *dy, const T *x, const T *mean, const T *var,
                                                  const T *gamma, const T *grad_dx) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;  // ceil-div
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }
      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;  // gamma repeats every param_dim elements
      T v1 = x[pos] - mean[row];
      T v2 = my_pow(var[row] + epsilon, -0.5);  // reciprocal stddev
      T v3 = v1 * v2;                           // normalized input
      T v4 = dy[pos] * gamma[gamma_offset];
      sum1[0] -= v2 * grad_dx[pos];
      sum2[0] -= v3 * v2 * grad_dx[pos];
      sum3[0] += v4;
      sum4[0] += v4 * v3;
    }
  }
}
// Warp-level reduction of the four inner-mean partials down to lane 0.
template <typename T>
inline __device__ void InputWarpReduceInnerMean(T *sum1, T *sum2, T *sum3, T *sum4) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    *sum1 += __shfl_down_sync(0xffffffff, *sum1, offset);
    *sum2 += __shfl_down_sync(0xffffffff, *sum2, offset);
    *sum3 += __shfl_down_sync(0xffffffff, *sum3, offset);
    *sum4 += __shfl_down_sync(0xffffffff, *sum4, offset);
  }
}
// Block-level reduction of the four inner-mean partials.  Warp leaders stage
// them in slots 0..3 of each warp's NUM_SHARED_SUM_INPUT-wide shared-memory
// record; a tree reduction leaves the block totals in share_mem[0..3]
// (slots 4..6 are reserved for the outer-mean pass).
template <typename T>
inline __device__ void InputBlockReduceInnerMean(const int &col_dim, T *sum1, T *sum2, T *sum3, T *sum4, T *share_mem) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...) keep the data
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_INPUT;
    share_mem[offset] = sum1[0];
    share_mem[offset + 1] = sum2[0];
    share_mem[offset + 2] = sum3[0];
    share_mem[offset + 3] = sum4[0];
  }
  __syncthreads();
  // NOTE(review): no per-iteration barrier; active threads stay within warp 0
  // for THREAD_PER_BLOCK=256 -- relies on implicit warp synchrony.
  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_INPUT;
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT] += share_mem[offset];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 1] += share_mem[offset + 1];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 2] += share_mem[offset + 2];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 3] += share_mem[offset + 3];
    }
  }
  __syncthreads();
}
// Second-pass per-thread reduction over the columns of `row`.  Consumes the
// block-reduced inner sums left in share_mem[1..3] by
// InputBlockReduceInnerMean, writes an intermediate per-element value into
// d_x, and accumulates the three outer partials sum5..sum7.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const T &epsilon, T *sum5, T *sum6, T *sum7, T *share_mem,
                                                  const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                                                  const T *grad_dx, const T *grad_dg, T *d_x) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }
      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;
      T v1 = x[pos] - mean[row];
      T v2 = my_pow(var[row] + epsilon, -0.5);  // reciprocal stddev
      T v3 = dy[pos] * gamma[gamma_offset];
      // share_mem[1..3] hold the block totals of the inner pass, scaled by 1/col_dim
      T v4 = v3 - share_mem[2] * (1.0 / col_dim) - v1 * v2 * share_mem[3] * (1.0 / col_dim);
      T v5 = v3 * share_mem[1] * (1.0 / col_dim);
      T v6 = grad_dx[pos] * v2 * share_mem[3] * (-1.0 / col_dim);
      T v7 = dy[pos] * grad_dg[gamma_offset];
      T v8 = v5 + v6 + v7;
      T part1 = v4 * grad_dx[pos];
      T part2 = v1 * v8;
      T part3 = v2 * v8;
      d_x[pos] = part3;  // intermediate; InputProp adds the mean-corrected terms later
      sum5[0] += part1;
      sum6[0] += part2;
      sum7[0] -= part3;
    }
  }
}
// half specialization of the outer-mean pass: identical dataflow to the
// generic version, but the 1/col_dim scale factors are computed in float and
// converted with __float2half before the half arithmetic.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <>
inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const half &epsilon, half *sum5, half *sum6, half *sum7,
                                                  half *share_mem, const half *dy, const half *x, const half *mean,
                                                  const half *var, const half *gamma, const half *grad_dx,
                                                  const half *grad_dg, half *d_x) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }
      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;
      half v1 = x[pos] - mean[row];
      half v2 = my_pow(var[row] + epsilon, -0.5);
      half v3 = dy[pos] * gamma[gamma_offset];
      half v4 = v3 - share_mem[2] * __float2half(1.0 / col_dim) - v1 * v2 * share_mem[3] * __float2half(1.0 / col_dim);
      half v5 = v3 * share_mem[1] * __float2half(1.0 / col_dim);
      half v6 = grad_dx[pos] * v2 * share_mem[3] * __float2half(-1.0 / col_dim);
      half v7 = dy[pos] * grad_dg[gamma_offset];
      half v8 = v5 + v6 + v7;
      half part1 = v4 * grad_dx[pos];
      half part2 = v1 * v8;
      half part3 = v2 * v8;
      d_x[pos] = part3;
      sum5[0] += part1;
      sum6[0] += part2;
      sum7[0] -= part3;
    }
  }
}
// Warp-level reduction of the three outer-mean partials down to lane 0.
template <typename T>
inline __device__ void InputWarpReduceOuterMean(T *sum5, T *sum6, T *sum7) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    *sum5 += __shfl_down_sync(0xffffffff, *sum5, offset);
    *sum6 += __shfl_down_sync(0xffffffff, *sum6, offset);
    *sum7 += __shfl_down_sync(0xffffffff, *sum7, offset);
  }
}
// Block-level reduction of the three outer-mean partials into slots 4..6 of
// each warp's shared-memory record (slots 0..3 still hold the inner-pass
// totals and must not be overwritten).
template <typename T>
inline __device__ void InputBlockReduceOuterMean(const int &col_dim, T *sum5, T *sum6, T *sum7, T *share_mem) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...) keep the data
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_INPUT;
    share_mem[offset + 4] = sum5[0]
;
    share_mem[offset + 5] = sum6[0];
    share_mem[offset + 6] = sum7[0];
  }
  __syncthreads();
  // NOTE(review): same implicit-warp-synchrony assumption as the inner-mean
  // block reduction.
  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_INPUT;
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 4] += share_mem[offset + 4];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 5] += share_mem[offset + 5];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 6] += share_mem[offset + 6];
    }
  }
  __syncthreads();
}
// Final per-element pass for `row`: writes d_dy, finalizes d_x with the
// mean-corrected terms (reading the block totals from share_mem[0..6]), and
// exports the 1/col_dim-scaled inner sums to global_sum1/global_sum2 for the
// subsequent gamma-gradient kernel.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
inline __device__ void InputProp(const int &row, const int &col_dim, const int &param_dim, const T &epsilon,
                                 const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                                 const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x,
                                 const T *share_mem, T *global_sum1, T *global_sum2) {
  for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
    int pos = (row * col_dim + col);
    int gamma_offset = pos % param_dim;
    T v1 = x[pos] - mean[row];
    T v2 = my_pow(var[row] + epsilon, -0.5);  // reciprocal stddev
    T v3 = v1 * v2;                           // normalized input
    T part1 = gamma[gamma_offset] * grad_dx[pos] * v2;
    T part2 = gamma[gamma_offset] * share_mem[0] * (1.0 / col_dim);
    T part3 = gamma[gamma_offset] * v3 * share_mem[1] * (1.0 / col_dim);
    T part4 = v3 * grad_dg[gamma_offset];
    d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset];
    // var^{-3/2}-weighted correction built from the outer-pass totals
    T part5 = v1 * (my_pow(var[row] + epsilon, -1.5) * ((share_mem[4] + share_mem[5]) * (-1.0 / col_dim)));
    d_x[pos] += part5 + share_mem[6] * (1.0 / col_dim);
    global_sum1[pos] = share_mem[0] * (1.0 / col_dim);
    global_sum2[pos] = share_mem[1] * (1.0 / col_dim);
  }
}
// half specialization of the final per-element pass; the 1/col_dim scale
// factors are computed in float and converted with __float2half.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <>
inline __device__ void InputProp(const int &row, const int &col_dim, const int &param_dim, const half &epsilon,
                                 const half *dy, const half *x, const half *mean, const half *var, const half *gamma,
                                 const half *grad_dx, const half *grad_dg, const half *grad_db, half *d_dy, half *d_x,
                                 const half *share_mem, half *global_sum1, half *global_sum2) {
  for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
    int pos = (row * col_dim + col);
    int gamma_offset = pos % param_dim;
    half v1 = x[pos] - mean[row];
    half v2 = my_pow(var[row] + epsilon, -0.5);
    half v3 = v1 * v2;
    half part1 = gamma[gamma_offset] * grad_dx[pos] * v2;
    half part2 = gamma[gamma_offset] * share_mem[0] * __float2half(1.0 / col_dim);
    half part3 = gamma[gamma_offset] * v3 * share_mem[1] * __float2half(1.0 / col_dim);
    half part4 = v3 * grad_dg[gamma_offset];
    d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset];
    half part5 = v1 * (my_pow(var[row] + epsilon, -1.5) *
                       ((share_mem[4] + share_mem[5]) * __float2half(-1.0 / col_dim)));
    d_x[pos] += part5 + share_mem[6] * __float2half(1.0 / col_dim);
    global_sum1[pos] = share_mem[0] * __float2half(1.0 / col_dim);
    global_sum2[pos] = share_mem[1] * __float2half(1.0 / col_dim);
  }
}
// Input-gradient kernel: one block per row (grid-stride over rows).  Runs two
// reduce phases (inner then outer) through the shared-memory record, then the
// final per-element pass.  Requires dynamic shared memory sized for
// blockDim/WARP_SIZE * NUM_SHARED_SUM_INPUT elements of T.
template <typename T>
__global__ void InputPropKernel(const int row_dim, const int col_dim, const int param_dim, const T epsilon,
                                const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                                const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x, T *global_sum1,
                                T *global_sum2) {
  for (int row = blockIdx.x; row < row_dim; row += gridDim.x) {
    T sum1 = 0;
    T sum2 = 0;
    T sum3 = 0;
    T sum4 = 0;
    T sum5 = 0;
    T sum6 = 0;
    T sum7 = 0;
    DynamicSharedMem<T> share_mem;
    InputThreadReduceInnerMean(row, col_dim, param_dim, epsilon, &sum1, &sum2, &sum3, &sum4, dy, x, mean, var, gamma,
                               grad_dx);
    InputWarpReduceInnerMean(&sum1, &sum2, &sum3, &sum4);
    InputBlockReduceInnerMean(col_dim, &sum1, &sum2, &sum3, &sum4, share_mem.addr())
;
    InputThreadReduceOuterMean(row, col_dim, param_dim, epsilon, &sum5, &sum6, &sum7, share_mem.addr(), dy, x, mean,
                               var, gamma, grad_dx, grad_dg, d_x);
    InputWarpReduceOuterMean(&sum5, &sum6, &sum7);
    InputBlockReduceOuterMean(col_dim, &sum5, &sum6, &sum7, share_mem.addr());
    InputProp(row, col_dim, param_dim, epsilon, dy, x, mean, var, gamma, grad_dx, grad_dg, grad_db, d_dy, d_x,
              share_mem.addr(), global_sum1, global_sum2);
  }
}
// Host-side launcher for the LayerNorm second-order backward pass.
// First launches the input-side kernel (one block per row), then the
// parameter-side kernel (one block per gamma element).  Dynamic shared memory
// holds the per-warp partial-sum records (7 values per warp, then 3).
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, T *global_sum1, T *global_sum2,
                       const T &epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                       const T* grad_dx, const T* grad_dg, const T* grad_db, T *d_dy, T *d_x, T *d_gamma,
                       hipStream_t stream) {
  int share_mem_size = THREAD_PER_BLOCK / WARP_SIZE * NUM_SHARED_SUM_INPUT * sizeof(T);
  hipLaunchKernelGGL(( InputPropKernel), dim3(row_dim), dim3(THREAD_PER_BLOCK), share_mem_size, stream, row_dim, col_dim, param_dim, epsilon, dy, x,
                     mean, var, gamma, grad_dx, grad_dg, grad_db,
                     d_dy, d_x, global_sum1, global_sum2);
  share_mem_size = THREAD_PER_BLOCK / WARP_SIZE * NUM_SHARED_SUM_GAMMA * sizeof(T);
  // number of rows seen by the gamma reduction (elements per gamma entry)
  int param_reduce_dim = row_dim * col_dim / param_dim;
  hipLaunchKernelGGL(( GammaAndBetaPropKernel), dim3(param_dim), dim3(THREAD_PER_BLOCK), share_mem_size, stream, param_reduce_dim, param_dim,
                     col_dim, epsilon, dy, x, mean, var,
                     grad_dx, d_gamma, global_sum1,
                     global_sum2);
}
// Explicit instantiations for the supported dtypes (float32 / float16).
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, float *global_sum1,
                                float *global_sum2, const float &epsilon, const float *dy, const float *x,
                                const float *mean, const float *var, const float *gamma, const float *grad_dx,
                                const float *grad_dg, const float *grad_db, float *d_dy, float *d_x, float *d_gamma,
                                hipStream_t stream);
template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, half *global_sum1,
                                half *global_sum2, const half &epsilon, const half *dy, const half *x, const half *mean,
                                const half *var, const half *gamma, const half *grad_dx, const half *grad_dg,
                                const half *grad_db, half *d_dy, half *d_x, half *d_gamma, hipStream_t stream);
| 6de8cf5be728ea607db4a6a3c0a1fdb84b24733d.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_grad_grad_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/layer_norm_impl.cuh"
constexpr int THREAD_PER_BLOCK = 256;
constexpr int NUM_PER_THREAD_REDUCE = 4;
constexpr int WARP_SIZE = 32;
constexpr int NUM_SHARED_SUM_INPUT = 7;
constexpr int NUM_SHARED_SUM_GAMMA = 3;
// Raise a to the power b.  The exponent is narrowed to float so the
// single-precision pow overload is used on the device.
template <typename T>
inline __device__ T my_pow(T a, double b) {
  const float exponent = static_cast<float>(b);
  return pow(a, exponent);
}

// half specialization: compute in float and convert the result back to half.
template <>
inline __device__ half my_pow(half a, double b) {
  const float base = __half2float(a);
  const float exponent = static_cast<float>(b);
  return __float2half(pow(base, exponent));
}
// Per-thread partial reduction over the rows of column `col` for the
// gamma-gradient pass.  Each thread strides over rows in chunks of
// NUM_PER_THREAD_REDUCE and accumulates three partial sums into the
// caller-owned single-element accumulators part1/part2/part3.
template <typename T>
inline __device__ void GammaAndBetaThreadReduce(const int &col, const int &row_dim, const int &col_dim,
                                                const int &mean_dim, const T &epsilon, const T *dy, const T *x,
                                                const T *mean, const T *var, const T *grad_dx, T *part1, T *part2,
                                                T *part3, const T *global_sum1, const T *global_sum2) {
  // ceil-divide so a partial final chunk is still covered
  int loop_num = (row_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int row = NUM_PER_THREAD_REDUCE * i + j;
      if (row >= row_dim) {
        return;  // past the last row; every later index this thread would see is also out of range
      }
      int pos = row * col_dim + col;
      int mean_offset = pos / mean_dim;  // mean/var entries are shared by mean_dim consecutive elements
      T v1 = x[pos] - mean[mean_offset];
      T v2 = my_pow(var[mean_offset] + epsilon, -0.5);  // reciprocal stddev
      part1[0] += dy[pos] * v1 * v2 * global_sum2[pos];
      part2[0] += dy[pos] * global_sum1[pos];
      part3[0] += dy[pos] * v2 * grad_dx[pos];
    }
  }
}
// Warp-level reduction: fold the three per-thread partials down to lane 0
// with shuffle-down over the full warp mask.
template <typename T>
inline __device__ void GammaAndBetaWarpReduce(T *part1, T *part2, T *part3) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    *part1 += __shfl_down_sync(0xffffffff, *part1, offset);
    *part2 += __shfl_down_sync(0xffffffff, *part2, offset);
    *part3 += __shfl_down_sync(0xffffffff, *part3, offset);
  }
}
// Block-level reduction for the gamma-gradient pass.  Each warp leader stages
// its three warp-reduced partials into dynamic shared memory (stride
// NUM_SHARED_SUM_GAMMA per warp), a tree reduction folds the per-warp records
// together, and thread 0 writes the final value to d_gamma[col].
template <typename T>
inline __device__ void GammaAndBetaBlockReduce(const int &col, const int &row_dim, T *part1, T *part2, T *part3,
                                               T *d_gamma) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...) keep the data
  DynamicSharedMem<T> share_mem;
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_GAMMA;
    share_mem.addr()[offset] = part1[0];
    share_mem.addr()[offset + 1] = part2[0];
    share_mem.addr()[offset + 2] = part3[0];
  }
  __syncthreads();
  // NOTE(review): no barrier inside this loop; with THREAD_PER_BLOCK=256 the
  // active threads (< blockDim/WARP_SIZE/2 = 4) all fall in warp 0, so this
  // relies on implicit warp synchrony -- verify on Volta+ independent thread
  // scheduling.
  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_GAMMA;
      share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA] += share_mem.addr()[offset];
      share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA + 1] += share_mem.addr()[offset + 1];
      share_mem.addr()[threadIdx.x * NUM_SHARED_SUM_GAMMA + 2] += share_mem.addr()[offset + 2];
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    // final gradient is the sum of the three reduced partials
    d_gamma[col] = share_mem.addr()[0] + share_mem.addr()[1] + share_mem.addr()[2];
  }
}
// Gamma-gradient kernel: one block per column (grid-stride over columns so any
// grid size covers col_dim).  Per-thread, warp, then block reduction produces
// d_gamma[col].  Requires dynamic shared memory sized for
// blockDim/WARP_SIZE * NUM_SHARED_SUM_GAMMA elements of T.
template <typename T>
__global__ void GammaAndBetaPropKernel(const int row_dim, const int col_dim, const int mean_dim, const T epsilon,
                                       const T *dy, const T *x, const T *mean, const T *var, const T *grad_dx,
                                       T *d_gamma, T *global_sum1, T *global_sum2) {
  for (int col = blockIdx.x; col < col_dim; col += gridDim.x) {
    T part1 = 0;
    T part2 = 0;
    T part3 = 0;
    GammaAndBetaThreadReduce(col, row_dim, col_dim, mean_dim, epsilon, dy, x, mean, var, grad_dx, &part1, &part2,
                             &part3, global_sum1, global_sum2);
    GammaAndBetaWarpReduce(&part1, &part2, &part3);
    GammaAndBetaBlockReduce(col, row_dim, &part1, &part2, &part3, d_gamma);
  }
}
// First-pass per-thread reduction over the columns of `row`: accumulates the
// four inner partial sums (sum1..sum4) consumed by the outer-mean pass.
// sum1/sum2 collect negated rstd-weighted grad_dx terms; sum3/sum4 collect
// gamma-scaled dy terms.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
inline __device__ void InputThreadReduceInnerMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const T &epsilon, T *sum1, T *sum2, T *sum3, T *sum4,
                                                  const T *dy, const T *x, const T *mean, const T *var,
                                                  const T *gamma, const T *grad_dx) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;  // ceil-div
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }
      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;  // gamma repeats every param_dim elements
      T v1 = x[pos] - mean[row];
      T v2 = my_pow(var[row] + epsilon, -0.5);  // reciprocal stddev
      T v3 = v1 * v2;                           // normalized input
      T v4 = dy[pos] * gamma[gamma_offset];
      sum1[0] -= v2 * grad_dx[pos];
      sum2[0] -= v3 * v2 * grad_dx[pos];
      sum3[0] += v4;
      sum4[0] += v4 * v3;
    }
  }
}
// Warp-level reduction of the four inner-mean partials down to lane 0.
template <typename T>
inline __device__ void InputWarpReduceInnerMean(T *sum1, T *sum2, T *sum3, T *sum4) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    *sum1 += __shfl_down_sync(0xffffffff, *sum1, offset);
    *sum2 += __shfl_down_sync(0xffffffff, *sum2, offset);
    *sum3 += __shfl_down_sync(0xffffffff, *sum3, offset);
    *sum4 += __shfl_down_sync(0xffffffff, *sum4, offset);
  }
}
// Block-level reduction of the four inner-mean partials.  Warp leaders stage
// them in slots 0..3 of each warp's NUM_SHARED_SUM_INPUT-wide shared-memory
// record; a tree reduction leaves the block totals in share_mem[0..3]
// (slots 4..6 are reserved for the outer-mean pass).
template <typename T>
inline __device__ void InputBlockReduceInnerMean(const int &col_dim, T *sum1, T *sum2, T *sum3, T *sum4, T *share_mem) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...) keep the data
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_INPUT;
    share_mem[offset] = sum1[0];
    share_mem[offset + 1] = sum2[0];
    share_mem[offset + 2] = sum3[0];
    share_mem[offset + 3] = sum4[0];
  }
  __syncthreads();
  // NOTE(review): no per-iteration barrier; active threads stay within warp 0
  // for THREAD_PER_BLOCK=256 -- relies on implicit warp synchrony.
  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_INPUT;
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT] += share_mem[offset];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 1] += share_mem[offset + 1];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 2] += share_mem[offset + 2];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 3] += share_mem[offset + 3];
    }
  }
  __syncthreads();
}
// Second-pass per-thread reduction over the columns of `row`.  Consumes the
// block-reduced inner sums left in share_mem[1..3] by
// InputBlockReduceInnerMean, writes an intermediate per-element value into
// d_x, and accumulates the three outer partials sum5..sum7.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const T &epsilon, T *sum5, T *sum6, T *sum7, T *share_mem,
                                                  const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                                                  const T *grad_dx, const T *grad_dg, T *d_x) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }
      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;
      T v1 = x[pos] - mean[row];
      T v2 = my_pow(var[row] + epsilon, -0.5);  // reciprocal stddev
      T v3 = dy[pos] * gamma[gamma_offset];
      // share_mem[1..3] hold the block totals of the inner pass, scaled by 1/col_dim
      T v4 = v3 - share_mem[2] * (1.0 / col_dim) - v1 * v2 * share_mem[3] * (1.0 / col_dim);
      T v5 = v3 * share_mem[1] * (1.0 / col_dim);
      T v6 = grad_dx[pos] * v2 * share_mem[3] * (-1.0 / col_dim);
      T v7 = dy[pos] * grad_dg[gamma_offset];
      T v8 = v5 + v6 + v7;
      T part1 = v4 * grad_dx[pos];
      T part2 = v1 * v8;
      T part3 = v2 * v8;
      d_x[pos] = part3;  // intermediate; InputProp adds the mean-corrected terms later
      sum5[0] += part1;
      sum6[0] += part2;
      sum7[0] -= part3;
    }
  }
}
// half specialization of the outer-mean pass: identical dataflow to the
// generic version, but the 1/col_dim scale factors are computed in float and
// converted with __float2half before the half arithmetic.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <>
inline __device__ void InputThreadReduceOuterMean(const int &row, const int &col_dim, const int &param_dim,
                                                  const half &epsilon, half *sum5, half *sum6, half *sum7,
                                                  half *share_mem, const half *dy, const half *x, const half *mean,
                                                  const half *var, const half *gamma, const half *grad_dx,
                                                  const half *grad_dg, half *d_x) {
  int loop_num = (col_dim + NUM_PER_THREAD_REDUCE - 1) / NUM_PER_THREAD_REDUCE;
  for (int i = threadIdx.x; i < loop_num; i += blockDim.x) {
    for (int j = 0; j < NUM_PER_THREAD_REDUCE; j++) {
      int col = NUM_PER_THREAD_REDUCE * i + j;
      if (col >= col_dim) {
        return;
      }
      int pos = row * col_dim + col;
      int gamma_offset = pos % param_dim;
      half v1 = x[pos] - mean[row];
      half v2 = my_pow(var[row] + epsilon, -0.5);
      half v3 = dy[pos] * gamma[gamma_offset];
      half v4 = v3 - share_mem[2] * __float2half(1.0 / col_dim) - v1 * v2 * share_mem[3] * __float2half(1.0 / col_dim);
      half v5 = v3 * share_mem[1] * __float2half(1.0 / col_dim);
      half v6 = grad_dx[pos] * v2 * share_mem[3] * __float2half(-1.0 / col_dim);
      half v7 = dy[pos] * grad_dg[gamma_offset];
      half v8 = v5 + v6 + v7;
      half part1 = v4 * grad_dx[pos];
      half part2 = v1 * v8;
      half part3 = v2 * v8;
      d_x[pos] = part3;
      sum5[0] += part1;
      sum6[0] += part2;
      sum7[0] -= part3;
    }
  }
}
// Warp-level reduction of the three outer-mean partials down to lane 0.
template <typename T>
inline __device__ void InputWarpReduceOuterMean(T *sum5, T *sum6, T *sum7) {
  for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
    *sum5 += __shfl_down_sync(0xffffffff, *sum5, offset);
    *sum6 += __shfl_down_sync(0xffffffff, *sum6, offset);
    *sum7 += __shfl_down_sync(0xffffffff, *sum7, offset);
  }
}
// Block-level reduction of the three outer-mean partials into slots 4..6 of
// each warp's shared-memory record (slots 0..3 still hold the inner-pass
// totals and must not be overwritten).
template <typename T>
inline __device__ void InputBlockReduceOuterMean(const int &col_dim, T *sum5, T *sum6, T *sum7, T *share_mem) {
  // load data to share memory
  // thread(0, 32, 64, 96, ...) keep the data
  if (threadIdx.x % WARP_SIZE == 0) {
    int offset = threadIdx.x / WARP_SIZE * NUM_SHARED_SUM_INPUT;
    share_mem[offset + 4] = sum5[0];
    share_mem[offset + 5] = sum6[0];
    share_mem[offset + 6] = sum7[0];
  }
  __syncthreads();
  // NOTE(review): same implicit-warp-synchrony assumption as the inner-mean
  // block reduction.
  for (int stride = blockDim.x / WARP_SIZE / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride) {
      int offset = (threadIdx.x + stride) * NUM_SHARED_SUM_INPUT;
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 4] += share_mem[offset + 4];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 5] += share_mem[offset + 5];
      share_mem[threadIdx.x * NUM_SHARED_SUM_INPUT + 6] += share_mem[offset + 6];
    }
  }
  __syncthreads();
}
// Final per-element pass for `row`: writes d_dy, finalizes d_x with the
// mean-corrected terms (reading the block totals from share_mem[0..6]), and
// exports the 1/col_dim-scaled inner sums to global_sum1/global_sum2 for the
// subsequent gamma-gradient kernel.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
inline __device__ void InputProp(const int &row, const int &col_dim, const int &param_dim, const T &epsilon,
                                 const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                                 const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x,
                                 const T *share_mem, T *global_sum1, T *global_sum2) {
  for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
    int pos = (row * col_dim + col);
    int gamma_offset = pos % param_dim;
    T v1 = x[pos] - mean[row];
    T v2 = my_pow(var[row] + epsilon, -0.5);  // reciprocal stddev
    T v3 = v1 * v2;                           // normalized input
    T part1 = gamma[gamma_offset] * grad_dx[pos] * v2;
    T part2 = gamma[gamma_offset] * share_mem[0] * (1.0 / col_dim);
    T part3 = gamma[gamma_offset] * v3 * share_mem[1] * (1.0 / col_dim);
    T part4 = v3 * grad_dg[gamma_offset];
    d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset];
    // var^{-3/2}-weighted correction built from the outer-pass totals
    T part5 = v1 * (my_pow(var[row] + epsilon, -1.5) * ((share_mem[4] + share_mem[5]) * (-1.0 / col_dim)));
    d_x[pos] += part5 + share_mem[6] * (1.0 / col_dim);
    global_sum1[pos] = share_mem[0] * (1.0 / col_dim);
    global_sum2[pos] = share_mem[1] * (1.0 / col_dim);
  }
}
// half specialization of the final per-element pass; the 1/col_dim scale
// factors are computed in float and converted with __float2half.
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <>
inline __device__ void InputProp(const int &row, const int &col_dim, const int &param_dim, const half &epsilon,
                                 const half *dy, const half *x, const half *mean, const half *var, const half *gamma,
                                 const half *grad_dx, const half *grad_dg, const half *grad_db, half *d_dy, half *d_x,
                                 const half *share_mem, half *global_sum1, half *global_sum2) {
  for (int col = threadIdx.x; col < col_dim; col += blockDim.x) {
    int pos = (row * col_dim + col);
    int gamma_offset = pos % param_dim;
    half v1 = x[pos] - mean[row];
    half v2 = my_pow(var[row] + epsilon, -0.5);
    half v3 = v1 * v2;
    half part1 = gamma[gamma_offset] * grad_dx[pos] * v2;
    half part2 = gamma[gamma_offset] * share_mem[0] * __float2half(1.0 / col_dim);
    half part3 = gamma[gamma_offset] * v3 * share_mem[1] * __float2half(1.0 / col_dim);
    half part4 = v3 * grad_dg[gamma_offset];
    d_dy[pos] = part1 + part2 + part3 + part4 + grad_db[gamma_offset];
    half part5 = v1 * (my_pow(var[row] + epsilon, -1.5) *
                       ((share_mem[4] + share_mem[5]) * __float2half(-1.0 / col_dim)));
    d_x[pos] += part5 + share_mem[6] * __float2half(1.0 / col_dim);
    global_sum1[pos] = share_mem[0] * __float2half(1.0 / col_dim);
    global_sum2[pos] = share_mem[1] * __float2half(1.0 / col_dim);
  }
}
// Input-gradient kernel: one block per row (grid-stride over rows).  Runs two
// reduce phases (inner then outer) through the shared-memory record, then the
// final per-element pass.  Requires dynamic shared memory sized for
// blockDim/WARP_SIZE * NUM_SHARED_SUM_INPUT elements of T.
template <typename T>
__global__ void InputPropKernel(const int row_dim, const int col_dim, const int param_dim, const T epsilon,
                                const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                                const T *grad_dx, const T *grad_dg, const T *grad_db, T *d_dy, T *d_x, T *global_sum1,
                                T *global_sum2) {
  for (int row = blockIdx.x; row < row_dim; row += gridDim.x) {
    T sum1 = 0;
    T sum2 = 0;
    T sum3 = 0;
    T sum4 = 0;
    T sum5 = 0;
    T sum6 = 0;
    T sum7 = 0;
    DynamicSharedMem<T> share_mem;
    InputThreadReduceInnerMean(row, col_dim, param_dim, epsilon, &sum1, &sum2, &sum3, &sum4, dy, x, mean, var, gamma,
                               grad_dx);
    InputWarpReduceInnerMean(&sum1, &sum2, &sum3, &sum4);
    InputBlockReduceInnerMean(col_dim, &sum1, &sum2, &sum3, &sum4, share_mem.addr());
    InputThreadReduceOuterMean(row, col_dim, param_dim, epsilon, &sum5, &sum6, &sum7, share_mem.addr(), dy, x, mean,
                               var, gamma, grad_dx, grad_dg, d_x);
    InputWarpReduceOuterMean(&sum5, &sum6, &sum7);
    InputBlockReduceOuterMean(col_dim, &sum5, &sum6, &sum7, share_mem.addr());
    InputProp(row, col_dim, param_dim, epsilon, dy, x, mean, var, gamma, grad_dx, grad_dg, grad_db, d_dy, d_x,
              share_mem.addr(), global_sum1, global_sum2);
  }
}
// Host-side launcher for the LayerNorm second-order backward pass.
// First launches the input-side kernel (one block per row), then the
// parameter-side kernel (one block per gamma element).  Dynamic shared memory
// holds the per-warp partial-sum records (7 values per warp, then 3).
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template <typename T>
void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, T *global_sum1, T *global_sum2,
                       const T &epsilon, const T *dy, const T *x, const T *mean, const T *var, const T *gamma,
                       const T* grad_dx, const T* grad_dg, const T* grad_db, T *d_dy, T *d_x, T *d_gamma,
                       cudaStream_t stream) {
  int share_mem_size = THREAD_PER_BLOCK / WARP_SIZE * NUM_SHARED_SUM_INPUT * sizeof(T);
  InputPropKernel<<<row_dim, THREAD_PER_BLOCK, share_mem_size, stream>>>(row_dim, col_dim, param_dim, epsilon, dy, x,
                                                                         mean, var, gamma, grad_dx, grad_dg, grad_db,
                                                                         d_dy, d_x, global_sum1, global_sum2);
  share_mem_size = THREAD_PER_BLOCK / WARP_SIZE * NUM_SHARED_SUM_GAMMA * sizeof(T);
  // number of rows seen by the gamma reduction (elements per gamma entry)
  int param_reduce_dim = row_dim * col_dim / param_dim;
  GammaAndBetaPropKernel<<<param_dim, THREAD_PER_BLOCK, share_mem_size, stream>>>(param_reduce_dim, param_dim,
                                                                                  col_dim, epsilon, dy, x, mean, var,
                                                                                  grad_dx, d_gamma, global_sum1,
                                                                                  global_sum2);
}
// Explicit instantiations for the supported dtypes (float32 / float16).
// Fix: restored the HTML-entity-corrupted parameter `&param_dim`.
template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, float *global_sum1,
                                float *global_sum2, const float &epsilon, const float *dy, const float *x,
                                const float *mean, const float *var, const float *gamma, const float *grad_dx,
                                const float *grad_dg, const float *grad_db, float *d_dy, float *d_x, float *d_gamma,
                                cudaStream_t stream);
template void LayerNormGradGrad(const int &row_dim, const int &col_dim, const int &param_dim, half *global_sum1,
                                half *global_sum2, const half &epsilon, const half *dy, const half *x, const half *mean,
                                const half *var, const half *gamma, const half *grad_dx, const half *grad_dg,
                                const half *grad_db, half *d_dy, half *d_x, half *d_gamma, cudaStream_t stream);
|
85dbcebfc733e3dba7ccf5b7b3d8d92bf265ba7d.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>

#include <chrono>
#include <cmath>
#include <iostream>
#include <vector>

#include <hip/hip_runtime.h>

#include <glm/glm.hpp>

#include "utilityCore.hpp"
#include "scanmatch.h"
#include "svd3.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) utilityCore::checkCUDAError(msg, __LINE__)
#define DEBUG false
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
/*! Size of the starting area in simulation space.
* FOR SINE TEST: 2.f
* FOR ELEPHANT OBJ:
* FOR BUDDHA OBJ: 1 << 2;
* FOR WAYMO DATASET: 1 << 5;
*/
#define scene_scale 1 << 4
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
glm::vec3 *dev_pos;
glm::vec3 *dev_rgb;
pointcloud* target_pc;
pointcloud* src_pc;
//OCTREE pointer (all octnodes lie in device memory)
Octree* octree;
OctNodeGPU *dev_octoNodes;
glm::vec3 *dev_octoCoords;
/******************
* initSimulation *
******************/
/**
* Initialize memory, update some globals
*/
/**
 * Initialize the CPU simulation: allocates and initializes the source and
 * target point clouds on the host.
 * NOTE(review): `coords` is accepted but never used here -- initCPU()
 * presumably generates its own points; confirm against pointcloud::initCPU.
 */
void ScanMatch::initSimulationCPU(int N, std::vector<glm::vec3> coords) {
  numObjects = N;
  //Setup and initialize source and target pointcloud
  src_pc = new pointcloud(false, numObjects, false);
  src_pc->initCPU();
  target_pc = new pointcloud(true, numObjects, false);
  target_pc->initCPU();
}
/**
 * Initialize the GPU simulation: allocates source and target point clouds in
 * device mode and uploads the same coordinate set to both.
 */
void ScanMatch::initSimulationGPU(int N , std::vector<glm::vec3> coords) {
  numObjects = N;
  //Setup and initialize source and target pointcloud
  src_pc = new pointcloud(false, numObjects, true);
  src_pc->initGPU(coords);
  target_pc = new pointcloud(true, numObjects, true);
  target_pc->initGPU(coords);
}
/**
 * Initialize the GPU simulation with an octree acceleration structure:
 * builds and compacts the octree over `coords`, uploads the flattened node
 * pool to the device, then initializes both point clouds (the target uses the
 * octree-reordered coordinates).
 * Fix: removed the unused local `octoCoords` (it was read from
 * octree->gpuCoords but never referenced afterwards).
 */
void ScanMatch::initSimulationGPUOCTREE(int N, std::vector<glm::vec3> coords) {
  numObjects = N;
  //First create the Octree
  octree = new Octree(glm::vec3(0.f, 0.f, 0.f), 1 << 4, coords);
  octree->create();
  octree->compact();
  //Extract Final Data from Octree
  int numNodes = octree->gpuNodePool.size();
  OctNodeGPU* octoNodes = octree->gpuNodePool.data();
  //Send node pool to device
  hipMalloc((void**)&dev_octoNodes, numNodes * sizeof(OctNodeGPU));
  utilityCore::checkCUDAError("hipMalloc octor failed", __LINE__);
  hipMemcpy(dev_octoNodes, octoNodes, numNodes * sizeof(OctNodeGPU), hipMemcpyHostToDevice);
  utilityCore::checkCUDAError("hipMemcpy octoNodes failed", __LINE__);
  // NOTE(review): dev_octoCoords is never allocated or filled anywhere in this
  // path -- confirm whether the reordered coords were meant to be uploaded too.
  src_pc = new pointcloud(false, numObjects, true);
  src_pc->initGPU(coords);
  target_pc = new pointcloud(true, numObjects, true);
  target_pc->initGPU(octree->gpuCoords);
}
/******************
* copyPointCloudToVBO *
******************/
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
/**
 * Copy both point clouds into the rendering VBOs.  The source cloud writes at
 * the start of each buffer; the target cloud is offset by 4 floats per point
 * (the VBO appears to store 4 floats per entry -- TODO confirm layout).
 */
void ScanMatch::copyPointCloudToVBO(float *vbodptr_positions, float *vbodptr_rgb, bool usecpu) {
  float *tgt_positions = vbodptr_positions + 4 * numObjects;
  float *tgt_rgb = vbodptr_rgb + 4 * numObjects;
  if (usecpu) {  // host-resident clouds
    src_pc->pointCloudToVBOCPU(vbodptr_positions, vbodptr_rgb, scene_scale);
    target_pc->pointCloudToVBOCPU(tgt_positions, tgt_rgb, scene_scale);
    return;
  }
  // device-resident clouds
  src_pc->pointCloudToVBOGPU(vbodptr_positions, vbodptr_rgb, scene_scale);
  target_pc->pointCloudToVBOGPU(tgt_positions, tgt_rgb, scene_scale);
}
/******************
* stepSimulation *
******************/
void ScanMatch::endSimulation() {
	// Fix: use `delete` rather than an explicit destructor call. The old
	// code ran ~pointcloud() but never released the object storage from
	// `new`, leaking both objects and leaving dangling pointers.
	delete src_pc;
	src_pc = NULL;
	delete target_pc;
	target_pc = NULL;
}
/******************
* CPU SCANMATCHING *
******************/
/**
* Main Algorithm for Running ICP on the CPU
* Finds homogenous transform between src_pc and target_pc
*/
void ScanMatch::stepICPCPU() {
	// One ICP iteration on the host: NN search, gather, best-fit rigid
	// transform, then apply the transform to every source point.
	//1: Find Nearest Neigbors and Reshuffle
	float* dist = new float[numObjects];
	int* indicies = new int[numObjects];
#if DEBUG
	printf("NEAREST NEIGHBORS \n");
#endif // DEBUG
	// Time the NN search; the duration is printed in microseconds.
	auto start = std::chrono::high_resolution_clock::now();
	ScanMatch::findNNCPU(src_pc, target_pc, dist, indicies, numObjects);
	auto end = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << duration.count() << std::endl;
#if DEBUG
	printf("RESHUFFLE\n");
#endif // DEBUG
	ScanMatch::reshuffleCPU(target_pc, indicies, numObjects);
	//2: Find Best Fit Transformation
	glm::mat3 R;
	glm::vec3 t;
	ScanMatch::bestFitTransform(src_pc, target_pc, numObjects, R, t);
	//3: Update each src_point (dev_pos is host memory on the CPU path -
	// it is dereferenced directly below).
	// NOTE(review): applies transpose(R) while the GPU path applies R
	// directly - confirm which orientation is intended.
	glm::vec3* src_dev_pos = src_pc->dev_pos;
	for (int i = 0; i < numObjects; ++i) {
		src_dev_pos[i] = glm::transpose(R) * src_dev_pos[i] + t;
	}
	// Fix: release the scratch arrays (previously leaked every iteration).
	delete[] dist;
	delete[] indicies;
}
/**
* Finds Nearest Neighbors of target pc in src pc
* @args: src, target -> PointClouds w/ filled dev_pos
* @returns:
* dist -> N array -> ith index = dist(src[i], closest_point in target)
* indicies -> N array w/ ith index = index of the closest point in target to src[i]
*/
void ScanMatch::findNNCPU(pointcloud* src, pointcloud* target, float* dist, int *indicies, int N) {
	// Brute-force O(N^2) nearest-neighbor search on the host: for every
	// source point, record the distance to and index of its closest
	// target point.
	glm::vec3* srcPts = src->dev_pos;
	glm::vec3* tgtPts = target->dev_pos;
	for (int i = 0; i < N; ++i) {
		const glm::vec3 p = srcPts[i];
		float best = INFINITY;
		int bestIdx = -1;
		for (int j = 0; j < N; ++j) {
			const float d = glm::distance(p, tgtPts[j]);
			if (d < best) {
				best = d;
				bestIdx = j;
			}
		}
		//Update dist and indicies
#if DEBUG
		printf("IDX: %d - MINDIST %f\n", i, best);
		printf("IDX: %d - indicies %d\n", i, bestIdx);
#endif // DEBUG
		dist[i] = best;
		indicies[i] = bestIdx;
	}
}
/**
* Reshuffles pointcloud a as per indicies, puts these in dev_matches
* NOT ONE TO ONE SO NEED TO MAKE A COPY!
*/
void ScanMatch::reshuffleCPU(pointcloud* a, int* indicies, int N) {
	// Gather pass: dev_matches[i] = dev_pos[indicies[i]]. The index map
	// is not one-to-one, so results go into a separate buffer rather
	// than shuffling dev_pos in place.
	glm::vec3* matches = a->dev_matches;
	glm::vec3* points = a->dev_pos;
	for (int i = 0; i < N; ++i) {
		matches[i] = points[indicies[i]];
#if DEBUG
		printf("DEV MATCHES\n");
		utilityCore::printVec3(a->dev_matches[i]);
		printf("DEV POS\n");
		utilityCore::printVec3(points[i]);
#endif // DEBUG
	}
}
/**
* Calculates transform T that maps from src to target
* Assumes dev_matches is filled for target
*/
void ScanMatch::bestFitTransform(pointcloud* src, pointcloud* target, int N, glm::mat3 &R, glm::vec3 &t){
	// Kabsch-style rigid alignment on the host: compute centroids, center
	// both sets, accumulate the 3x3 cross-covariance H, take its SVD, and
	// derive R and t. Assumes target->dev_matches is already filled.
	glm::vec3* src_norm = new glm::vec3[N];
	glm::vec3* target_norm = new glm::vec3[N];
	glm::vec3 src_centroid(0.f);
	glm::vec3 target_centroid(0.f);
	glm::vec3* src_pos = src->dev_pos;
	glm::vec3* target_matches = target->dev_matches;
	//1:Calculate centroids and norm src and target
	for (int i = 0; i < N; ++i) {
		src_centroid += src_pos[i];
		target_centroid += target_matches[i];
	}
	src_centroid = src_centroid / glm::vec3(N);
	target_centroid = target_centroid / glm::vec3(N);
#if DEBUG
	printf("SRC CENTROID\n");
	utilityCore::printVec3(src_centroid);
	printf("TARGET CENTROID\n");
	utilityCore::printVec3(target_centroid);
#endif // DEBUG
	for (int j = 0; j < N; ++j) {
		src_norm[j] = src_pos[j] - src_centroid;
		target_norm[j] = target_matches[j] - target_centroid;
#if DEBUG
		printf("SRC NORM IDX %d\n", j);
		utilityCore::printVec3(src_norm[j]);
		printf("TARGET NORM IDX %d\n", j);
		utilityCore::printVec3(target_norm[j]);
#endif // DEBUG
	}
	//1:Multiply src.T (3 x N) by target (N x 3) = H (3 x 3)
	float H[3][3] = { 0 };
	for (int i = 0; i < N; ++i) { //3 x N by N x 3 matmul
		for (int out_row = 0; out_row < 3; out_row++) {
			for (int out_col = 0; out_col < 3; out_col++) {
				H[out_row][out_col] += src_norm[i][out_row] * target_norm[i][out_col];
			}
		}
	}
#if DEBUG
	printf("H MATRIX ======================================================\n");
	std::cout << H[0][0] << " " << H[1][0] << " " << H[2][0] << " " << std::endl;
	std::cout << H[0][1] << " " << H[1][1] << " " << H[2][1] << " " << std::endl;
	std::cout << H[0][2] << " " << H[1][2] << " " << H[2][2] << " " << std::endl;
	printf("======================================================\n");
#endif // DEBUG
	//2:calculate SVD of H to get U, S & V
	float U[3][3] = { 0 };
	float S[3][3] = { 0 };
	float V[3][3] = { 0 };
	svd(H[0][0], H[0][1], H[0][2], H[1][0], H[1][1], H[1][2], H[2][0], H[2][1], H[2][2],
		U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
		S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
		V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
	);
	glm::mat3 matU(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
	glm::mat3 matV(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
#if DEBUG
	printf("U MATRIX\n");
	utilityCore::printMat3(matU);
	printf("V MATRIX\n");
	utilityCore::printMat3(matV);
#endif // DEBUG
	//2:Rotation Matrix and Translation Vector
	R = (matU * matV);
	t = target_centroid - R * (src_centroid);
#if DEBUG
	printf("ROTATION\n");
	utilityCore::printMat3(R);
	printf("TRANSLATION\n");
	utilityCore::printVec3(t);
#endif // DEBUG
	// Fix: release the scratch arrays (previously leaked on every call).
	delete[] src_norm;
	delete[] target_norm;
}
/******************
* GPU NAIVE SCANMATCHING *
******************/
__global__ void kernUpdatePositions(glm::vec3* src_pos, glm::mat3 R, glm::vec3 t, int N) {
	// One thread per point: apply the rigid transform x' = R*x + t in place.
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= N) {
		return;
	}
	src_pos[i] = R * src_pos[i] + t;
}
/**
* Main Algorithm for Running ICP on the GPU
* Finds homogenous transform between src_pc and target_pc
*/
void ScanMatch::stepICPGPU_NAIVE() {
	// One ICP iteration, fully on the device with brute-force NN search.
	// Device scratch buffers for the NN results.
	//hipMalloc dist and indicies
	float* dist;
	int* indicies;
	hipMalloc((void**)&dist, numObjects * sizeof(float));
	utilityCore::checkCUDAError("hipMalloc dist failed", __LINE__);
	hipMalloc((void**)&indicies, numObjects * sizeof(int));
	utilityCore::checkCUDAError("hipMalloc indicies failed", __LINE__);
	// Byte-wise memset: all-0 floats and all-0xFF ints (i.e. -1 markers).
	hipMemset(dist, 0, numObjects * sizeof(float));
	hipMemset(indicies, -1, numObjects * sizeof(int));
	//1: Find Nearest Neigbors and Reshuffle
	// Synchronize before reading the clock so the timing covers the
	// asynchronous kernel launch; duration printed in microseconds.
	auto start = std::chrono::high_resolution_clock::now();
	ScanMatch::findNNGPU_NAIVE(src_pc, target_pc, dist, indicies, numObjects);
	hipDeviceSynchronize();
	auto end = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << duration.count() << std::endl;
	ScanMatch::reshuffleGPU(target_pc, indicies, numObjects);
	hipDeviceSynchronize();
	//2: Find Best Fit Transformation (R, t computed on host from device sums)
	glm::mat3 R;
	glm::vec3 t;
	ScanMatch::bestFitTransformGPU(src_pc, target_pc, numObjects, R, t);
	hipDeviceSynchronize();
	//3: Update each src_point via Kernel Call
	dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
	hipLaunchKernelGGL(( kernUpdatePositions), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, src_pc->dev_pos, R, t, numObjects);
	//hipFree dist and indicies
	hipFree(dist);
	hipFree(indicies);
}
/**
* Main Algorithm for Running ICP on the GPU w/Octree
* Finds homogenous transform between src_pc and target_pc
*/
void ScanMatch::stepICPGPU_OCTREE() {
	// One ICP iteration on the device, with the NN search restricted to
	// the leaf octant of each source point (see kernNNGPU_OCTREE).
	//hipMalloc dist and indicies
	float* dist;
	int* indicies;
	hipMalloc((void**)&dist, numObjects * sizeof(float));
	utilityCore::checkCUDAError("hipMalloc dist failed", __LINE__);
	hipMalloc((void**)&indicies, numObjects * sizeof(int));
	utilityCore::checkCUDAError("hipMalloc indicies failed", __LINE__);
	// Byte-wise memset: all-0 floats and all-0xFF ints (i.e. -1 markers).
	hipMemset(dist, 0, numObjects * sizeof(float));
	hipMemset(indicies, -1, numObjects * sizeof(int));
	//1: Find Nearest Neigbors and Reshuffle
	// Timed like the naive path (microseconds), for comparison runs.
	auto start = std::chrono::high_resolution_clock::now();
	ScanMatch::findNNGPU_OCTREE(src_pc, target_pc, dist, indicies, numObjects, dev_octoNodes);
	//ScanMatch::findNNGPU_NAIVE(src_pc, target_pc, dist, indicies, numObjects);
	hipDeviceSynchronize();
	auto end = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << duration.count() << std::endl;
	ScanMatch::reshuffleGPU(target_pc, indicies, numObjects);
	hipDeviceSynchronize();
	//2: Find Best Fit Transformation
	glm::mat3 R;
	glm::vec3 t;
	ScanMatch::bestFitTransformGPU(src_pc, target_pc, numObjects, R, t);
	hipDeviceSynchronize();
	//3: Update each src_point via Kernel Call
	dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
	hipLaunchKernelGGL(( kernUpdatePositions), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, src_pc->dev_pos, R, t, numObjects);
	//hipFree dist and indicies
	hipFree(dist);
	hipFree(indicies);
}
/*
* Parallely compute NN for each point in the pointcloud
*/
__global__ void kernNNGPU_NAIVE(glm::vec3* src_pos, glm::vec3* target_pos, float* dist, int* indicies, int N) {
	// One thread per source point; brute-force scan over all N targets.
	int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (idx < N) {
		float minDist = INFINITY;
		// Fix: the index was declared `float`, which cannot represent
		// indices above 2^24 exactly and forced a float->int conversion
		// on the store below.
		int idx_minDist = -1;
		glm::vec3 src_pt = src_pos[idx];
		for (int tgt_idx = 0; tgt_idx < N; ++tgt_idx) { //Iterate through each tgt & find closest
			glm::vec3 tgt_pt = target_pos[tgt_idx];
			float d = glm::distance(src_pt, tgt_pt);
			if (d < minDist) {
				minDist = d;
				idx_minDist = tgt_idx;
			}
		}
		dist[idx] = minDist;
		indicies[idx] = idx_minDist;
	}
}
__device__ OctNodeGPU findLeafOctant(glm::vec3 src_pos, OctNodeGPU* octoNodes) {
	// Walk from the root (key 0) down to the leaf octant containing
	// src_pos. (Fix: removed the dead local `parentNode`, which was
	// updated every step but never read.)
	octKey currKey = 0;
	OctNodeGPU currNode = octoNodes[currKey];
	while (!currNode.isLeaf) {
		//Determine which octant the point lies in (0 is bottom-back-left)
		glm::vec3 center = currNode.center;
		uint8_t x = src_pos.x > center.x;
		uint8_t y = src_pos.y > center.y;
		uint8_t z = src_pos.z > center.z;
		// Children are stored contiguously starting at firstChildIdx;
		// the 3-bit octant code selects among the eight.
		currKey = currNode.firstChildIdx + (x + 2 * y + 4 * z);
		currNode = octoNodes[currKey];
	}
	return currNode;
}
/*
* Parallely compute NN for each point in the pointcloud
*/
__global__ void kernNNGPU_OCTREE(glm::vec3* src_pos, glm::vec3* target_pos, float* dist, int* indicies, int N, OctNodeGPU* octoNodes) {
	// One thread per source point; only candidates inside the point's
	// leaf octant are scanned (approximate NN near octant boundaries).
	int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (idx < N) {
		float minDist = INFINITY;
		// Fix: the index was declared `float`; use int to avoid rounding
		// and implicit conversion on the store below.
		int idx_minDist = -1;
		glm::vec3 src_pt = src_pos[idx];
		//Find our leaf node and extract tgt_start and tgt_end from it
		OctNodeGPU currLeafOctant = findLeafOctant(src_pt, octoNodes);
		int tgt_start = currLeafOctant.data_startIdx;
		int tgt_end = currLeafOctant.data_startIdx + currLeafOctant.count;
		for (int tgt_idx = tgt_start; tgt_idx < tgt_end; ++tgt_idx) { //Iterate through each tgt & find closest
			glm::vec3 tgt_pt = target_pos[tgt_idx];
			float d = glm::distance(src_pt, tgt_pt);
			if (d < minDist) {
				minDist = d;
				idx_minDist = tgt_idx;
			}
		}
		// NOTE(review): if the leaf octant is empty, idx_minDist stays -1
		// and the later gather reads pos[-1] - confirm leaves are non-empty.
		dist[idx] = minDist;
		indicies[idx] = idx_minDist;
	}
}
/**
* Finds Nearest Neighbors of target pc in src pc
* @args: src, target -> PointClouds w/ filled dev_pos IN GPU
* @returns:
* dist -> N array -> ith index = dist(src[i], closest_point in target) (on GPU)
* indicies -> N array w/ ith index = index of the closest point in target to src[i] (on GPU)
*/
void ScanMatch::findNNGPU_NAIVE(pointcloud* src, pointcloud* target, float* dist, int *indicies, int N) {
	//Launch a kernel (paralellely compute NN for each point)
	// Ceil-divide so the grid covers all N points; the kernel bounds-checks.
	// Asynchronous: callers synchronize before reading dist/indicies.
	dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
	hipLaunchKernelGGL(( kernNNGPU_NAIVE), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, src->dev_pos, target->dev_pos, dist, indicies, N);
}
/**
* Finds Nearest Neighbors of target pc in src pc
* @args: src, target -> PointClouds w/ filled dev_pos IN GPU
* @returns:
* dist -> N array -> ith index = dist(src[i], closest_point in target) (on GPU)
* indicies -> N array w/ ith index = index of the closest point in target to src[i] (on GPU)
*/
void ScanMatch::findNNGPU_OCTREE(pointcloud* src, pointcloud* target, float* dist, int *indicies, int N, OctNodeGPU* octoNodes) {
	//Launch a kernel (paralellely compute NN for each point)
	// octoNodes must be the device-resident compacted node pool
	// (dev_octoNodes uploaded in initSimulationGPUOCTREE).
	dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
	hipLaunchKernelGGL(( kernNNGPU_OCTREE), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, src->dev_pos, target->dev_pos, dist, indicies, N, octoNodes);
}
/*
* Parallely reshuffle pos by indicies and fill matches
*/
__global__ void kernReshuffleGPU(glm::vec3* pos, glm::vec3* matches, int *indicies, int N) {
	// Gather: matches[i] = pos[indicies[i]]; one thread per element.
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= N) {
		return;
	}
	matches[i] = pos[indicies[i]];
}
/**
* Reshuffles pointcloud a as per indicies, puts these in dev_matches
* NOT ONE TO ONE SO NEED TO MAKE A COPY!
*/
void ScanMatch::reshuffleGPU(pointcloud* a, int* indicies, int N) {
	// Device-side gather of a->dev_pos by indicies into a->dev_matches.
	// Asynchronous: callers synchronize before reading dev_matches.
	dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
	hipLaunchKernelGGL(( kernReshuffleGPU), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, a->dev_pos, a->dev_matches, indicies, N);
}
__global__ void kernComputeNorms(glm::vec3* src_norm, glm::vec3* target_norm, glm::vec3* pos, glm::vec3* matches, glm::vec3 pos_centroid, glm::vec3 matches_centroid, int N) {
	// Center both point sets about their centroids; one thread per point.
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= N) {
		return;
	}
	src_norm[i] = pos[i] - pos_centroid;
	target_norm[i] = matches[i] - matches_centroid;
}
__global__ void kernComputeHarray(glm::mat3* Harray, glm::vec3* src_norm, glm::vec3* target_norm, int N) {
	// Per-point contribution to the cross-covariance: the outer product
	// s * t^T, built column by column (glm::mat3 takes columns).
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= N) {
		return;
	}
	const glm::vec3 s = src_norm[i];
	const glm::vec3 tn = target_norm[i];
	Harray[i] = glm::mat3(s * tn.x, s * tn.y, s * tn.z);
}
/**
* Calculates transform T that maps from src to target
* Assumes dev_matches is filled for target
*/
void ScanMatch::bestFitTransformGPU(pointcloud* src, pointcloud* target, int N, glm::mat3 &R, glm::vec3 &t){
	// Best-fit rigid transform with device-side sums: centroids via
	// thrust::reduce, centering and per-point outer products via kernels,
	// final 3x3 reduction and SVD on the host.
	glm::vec3* src_norm;
	glm::vec3* target_norm;
	glm::mat3* Harray;
	//hipMalloc Norms and Harray
	hipMalloc((void**)&src_norm, N * sizeof(glm::vec3));
	hipMalloc((void**)&target_norm, N * sizeof(glm::vec3));
	hipMalloc((void**)&Harray, N * sizeof(glm::mat3));
	hipMemset(Harray, 0, N * sizeof(glm::mat3));
	//Thrust device pointers for calculating centroids
	// (Fix: removed the unused `harray_thrust` pointer left over from a
	// disabled thrust::reduce of Harray.)
	thrust::device_ptr<glm::vec3> src_thrustpos(src->dev_pos);
	thrust::device_ptr<glm::vec3> target_thrustmatches(target->dev_matches);
	//1: Calculate centroids (device sum, host divide)
	glm::vec3 src_centroid(0.f);
	glm::vec3 target_centroid(0.f);
	src_centroid = glm::vec3(thrust::reduce(src_thrustpos, src_thrustpos + N, glm::vec3(0.f), thrust::plus<glm::vec3>()));
	hipDeviceSynchronize();
	target_centroid = glm::vec3(thrust::reduce(target_thrustmatches, target_thrustmatches + N, glm::vec3(0.f), thrust::plus<glm::vec3>()));
	hipDeviceSynchronize();
	src_centroid /= glm::vec3(N);
	target_centroid /= glm::vec3(N);
#if DEBUG
	printf("SRC CENTROID\n");
	utilityCore::printVec3(src_centroid);
	printf("TARGET CENTROID\n");
	utilityCore::printVec3(target_centroid);
#endif // DEBUG
	//2: Compute Norm via Kernel Call
	dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
	hipLaunchKernelGGL(( kernComputeNorms), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, src_norm, target_norm, src->dev_pos, target->dev_matches, src_centroid, target_centroid, N);
	hipDeviceSynchronize();
	utilityCore::checkCUDAError("Compute Norms Failed", __LINE__);
	//3:Multiply src.T (3 x N) by target (N x 3) = H (3 x 3) via a kernel call
	hipLaunchKernelGGL(( kernComputeHarray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, Harray, src_norm, target_norm, N);
	hipDeviceSynchronize();
	utilityCore::checkCUDAError("Compute HARRAY Failed", __LINE__);
	// Reduce the per-point matrices on the host (a thrust::reduce over
	// Harray was disabled upstream - TODO confirm why before restoring).
	glm::mat3* Hcpu = new glm::mat3[N];
	hipMemcpy(Hcpu, Harray, N * sizeof(glm::mat3), hipMemcpyDeviceToHost);
	utilityCore::checkCUDAError("REDUCE HARRAY Failed", __LINE__);
	hipDeviceSynchronize();
	glm::mat3 H(0.f);
	for (int i = 0; i < N; ++i) {
		H += Hcpu[i];
	}
	// Fix: release the host copy (previously leaked on every call).
	delete[] Hcpu;
	//4:Calculate SVD of H to get U, S & V
	float U[3][3] = { 0 };
	float S[3][3] = { 0 };
	float V[3][3] = { 0 };
	svd(H[0][0], H[0][1], H[0][2], H[1][0], H[1][1], H[1][2], H[2][0], H[2][1], H[2][2],
		U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
		S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
		V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
	);
	glm::mat3 matU(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
	glm::mat3 matV(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
	//5:Rotation Matrix and Translation Vector
	R = (matU * matV);
	t = target_centroid - (R) * (src_centroid);
	//hipFree Norms and Harray
	hipFree(src_norm);
	hipFree(target_norm);
	hipFree(Harray);
}
| 85dbcebfc733e3dba7ccf5b7b3d8d92bf265ba7d.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "scanmatch.h"
#include "svd3.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) utilityCore::checkCUDAError(msg, __LINE__)
#define DEBUG false
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
/*! Size of the starting area in simulation space.
* FOR SINE TEST: 2.f
* FOR ELEPHANT OBJ:
* FOR BUDDHA OBJ: 1 << 2;
* FOR WAYMO DATASET: 1 << 5;
*/
#define scene_scale 1 << 4
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
// Number of points per cloud; set by the init* functions.
int numObjects;
dim3 threadsPerBlock(blockSize);
// NOTE(review): dev_pos/dev_rgb/dev_octoCoords are not referenced in the
// visible code of this file - confirm whether they are still needed.
glm::vec3 *dev_pos;
glm::vec3 *dev_rgb;
// Target (reference) cloud and source cloud moved toward it each ICP step.
pointcloud* target_pc;
pointcloud* src_pc;
//OCTREE pointer (all octnodes lie in device memory)
Octree* octree;
// Compacted octree node pool, uploaded to the device in initSimulationGPUOCTREE.
OctNodeGPU *dev_octoNodes;
glm::vec3 *dev_octoCoords;
/******************
* initSimulation *
******************/
/**
* Initialize memory, update some globals
*/
void ScanMatch::initSimulationCPU(int N, std::vector<glm::vec3> coords) {
	// CPU-path setup: allocate host-resident source and target clouds.
	// NOTE(review): `coords` is unused here - initCPU() presumably
	// generates its own data; confirm against pointcloud::initCPU.
	numObjects = N;
	//Setup and initialize source and target pointcloud
	src_pc = new pointcloud(false, numObjects, false);
	src_pc->initCPU();
	target_pc = new pointcloud(true, numObjects, false);
	target_pc->initCPU();
}
void ScanMatch::initSimulationGPU(int N , std::vector<glm::vec3> coords) {
	// GPU-path setup: allocate both clouds on the device from the same
	// host coordinate list.
	numObjects = N;
	//Setup and initialize source and target pointcloud
	// NOTE(review): ctor args appear to be (isTarget, count, onGPU) -
	// verify against the pointcloud constructor declaration.
	src_pc = new pointcloud(false, numObjects, true);
	src_pc->initGPU(coords);
	target_pc = new pointcloud(true, numObjects, true);
	target_pc->initGPU(coords);
}
void ScanMatch::initSimulationGPUOCTREE(int N , std::vector<glm::vec3> coords) {
	// Octree-accelerated GPU setup: build and compact the octree on the
	// host, upload its node pool, and initialize both device clouds.
	numObjects = N;
	//First create the Octree
	octree = new Octree(glm::vec3(0.f, 0.f, 0.f), 1 << 4, coords);
	octree->create();
	octree->compact();
	//Extract Final Data from Octree
	// (Fix: removed the unused local `octoCoords`; target_pc is fed
	// octree->gpuCoords directly below.)
	int numNodes = octree->gpuNodePool.size();
	OctNodeGPU* octoNodes = octree->gpuNodePool.data();
	//Send stuff to device
	cudaMalloc((void**)&dev_octoNodes, numNodes * sizeof(OctNodeGPU));
	utilityCore::checkCUDAError("cudaMalloc octor failed", __LINE__);
	cudaMemcpy(dev_octoNodes, octoNodes, numNodes * sizeof(OctNodeGPU), cudaMemcpyHostToDevice);
	utilityCore::checkCUDAError("cudaMemcpy octoNodes failed", __LINE__);
	// Source keeps the caller's ordering; the target uses the
	// octree-reordered coordinates so leaf nodes index contiguous ranges.
	src_pc = new pointcloud(false, numObjects, true);
	src_pc->initGPU(coords);
	target_pc = new pointcloud(true, numObjects, true);
	target_pc->initGPU(octree->gpuCoords);
}
/******************
* copyPointCloudToVBO *
******************/
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void ScanMatch::copyPointCloudToVBO(float *vbodptr_positions, float *vbodptr_rgb, bool usecpu) {
	// Writes both clouds into one interleaved VBO: source occupies the
	// first numObjects entries, target the next (offsets of 4*numObjects
	// floats imply 4 floats per point in the VBO layout).
	if (usecpu) { //IF CPU
		src_pc->pointCloudToVBOCPU(vbodptr_positions, vbodptr_rgb, scene_scale);
		target_pc->pointCloudToVBOCPU(vbodptr_positions + 4*numObjects, vbodptr_rgb + 4*numObjects, scene_scale);
	}
	else { //IF GPU
		src_pc->pointCloudToVBOGPU(vbodptr_positions, vbodptr_rgb, scene_scale);
		target_pc->pointCloudToVBOGPU(vbodptr_positions + 4*numObjects, vbodptr_rgb + 4*numObjects, scene_scale);
	}
}
/******************
* stepSimulation *
******************/
void ScanMatch::endSimulation() {
	// Fix: use `delete` rather than an explicit destructor call. The old
	// code ran ~pointcloud() but never released the object storage from
	// `new`, leaking both objects and leaving dangling pointers.
	delete src_pc;
	src_pc = NULL;
	delete target_pc;
	target_pc = NULL;
}
/******************
* CPU SCANMATCHING *
******************/
/**
* Main Algorithm for Running ICP on the CPU
* Finds homogenous transform between src_pc and target_pc
*/
void ScanMatch::stepICPCPU() {
	// One ICP iteration on the host: NN search, gather, best-fit rigid
	// transform, then apply the transform to every source point.
	//1: Find Nearest Neigbors and Reshuffle
	float* dist = new float[numObjects];
	int* indicies = new int[numObjects];
#if DEBUG
	printf("NEAREST NEIGHBORS \n");
#endif // DEBUG
	// Time the NN search; the duration is printed in microseconds.
	auto start = std::chrono::high_resolution_clock::now();
	ScanMatch::findNNCPU(src_pc, target_pc, dist, indicies, numObjects);
	auto end = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << duration.count() << std::endl;
#if DEBUG
	printf("RESHUFFLE\n");
#endif // DEBUG
	ScanMatch::reshuffleCPU(target_pc, indicies, numObjects);
	//2: Find Best Fit Transformation
	glm::mat3 R;
	glm::vec3 t;
	ScanMatch::bestFitTransform(src_pc, target_pc, numObjects, R, t);
	//3: Update each src_point (dev_pos is host memory on the CPU path -
	// it is dereferenced directly below).
	// NOTE(review): applies transpose(R) while the GPU path applies R
	// directly - confirm which orientation is intended.
	glm::vec3* src_dev_pos = src_pc->dev_pos;
	for (int i = 0; i < numObjects; ++i) {
		src_dev_pos[i] = glm::transpose(R) * src_dev_pos[i] + t;
	}
	// Fix: release the scratch arrays (previously leaked every iteration).
	delete[] dist;
	delete[] indicies;
}
/**
* Finds Nearest Neighbors of target pc in src pc
* @args: src, target -> PointClouds w/ filled dev_pos
* @returns:
* dist -> N array -> ith index = dist(src[i], closest_point in target)
* indicies -> N array w/ ith index = index of the closest point in target to src[i]
*/
void ScanMatch::findNNCPU(pointcloud* src, pointcloud* target, float* dist, int *indicies, int N) {
	// Brute-force O(N^2) nearest-neighbor search on the host: for every
	// source point, record the distance to and index of its closest
	// target point.
	glm::vec3* srcPts = src->dev_pos;
	glm::vec3* tgtPts = target->dev_pos;
	for (int i = 0; i < N; ++i) {
		const glm::vec3 p = srcPts[i];
		float best = INFINITY;
		int bestIdx = -1;
		for (int j = 0; j < N; ++j) {
			const float d = glm::distance(p, tgtPts[j]);
			if (d < best) {
				best = d;
				bestIdx = j;
			}
		}
		//Update dist and indicies
#if DEBUG
		printf("IDX: %d - MINDIST %f\n", i, best);
		printf("IDX: %d - indicies %d\n", i, bestIdx);
#endif // DEBUG
		dist[i] = best;
		indicies[i] = bestIdx;
	}
}
/**
* Reshuffles pointcloud a as per indicies, puts these in dev_matches
* NOT ONE TO ONE SO NEED TO MAKE A COPY!
*/
void ScanMatch::reshuffleCPU(pointcloud* a, int* indicies, int N) {
	// Gather pass: dev_matches[i] = dev_pos[indicies[i]]. The index map
	// is not one-to-one, so results go into a separate buffer rather
	// than shuffling dev_pos in place.
	glm::vec3* matches = a->dev_matches;
	glm::vec3* points = a->dev_pos;
	for (int i = 0; i < N; ++i) {
		matches[i] = points[indicies[i]];
#if DEBUG
		printf("DEV MATCHES\n");
		utilityCore::printVec3(a->dev_matches[i]);
		printf("DEV POS\n");
		utilityCore::printVec3(points[i]);
#endif // DEBUG
	}
}
/**
* Calculates transform T that maps from src to target
* Assumes dev_matches is filled for target
*/
void ScanMatch::bestFitTransform(pointcloud* src, pointcloud* target, int N, glm::mat3 &R, glm::vec3 &t){
	// Kabsch-style rigid alignment on the host: compute centroids, center
	// both sets, accumulate the 3x3 cross-covariance H, take its SVD, and
	// derive R and t. Assumes target->dev_matches is already filled.
	glm::vec3* src_norm = new glm::vec3[N];
	glm::vec3* target_norm = new glm::vec3[N];
	glm::vec3 src_centroid(0.f);
	glm::vec3 target_centroid(0.f);
	glm::vec3* src_pos = src->dev_pos;
	glm::vec3* target_matches = target->dev_matches;
	//1:Calculate centroids and norm src and target
	for (int i = 0; i < N; ++i) {
		src_centroid += src_pos[i];
		target_centroid += target_matches[i];
	}
	src_centroid = src_centroid / glm::vec3(N);
	target_centroid = target_centroid / glm::vec3(N);
#if DEBUG
	printf("SRC CENTROID\n");
	utilityCore::printVec3(src_centroid);
	printf("TARGET CENTROID\n");
	utilityCore::printVec3(target_centroid);
#endif // DEBUG
	for (int j = 0; j < N; ++j) {
		src_norm[j] = src_pos[j] - src_centroid;
		target_norm[j] = target_matches[j] - target_centroid;
#if DEBUG
		printf("SRC NORM IDX %d\n", j);
		utilityCore::printVec3(src_norm[j]);
		printf("TARGET NORM IDX %d\n", j);
		utilityCore::printVec3(target_norm[j]);
#endif // DEBUG
	}
	//1:Multiply src.T (3 x N) by target (N x 3) = H (3 x 3)
	float H[3][3] = { 0 };
	for (int i = 0; i < N; ++i) { //3 x N by N x 3 matmul
		for (int out_row = 0; out_row < 3; out_row++) {
			for (int out_col = 0; out_col < 3; out_col++) {
				H[out_row][out_col] += src_norm[i][out_row] * target_norm[i][out_col];
			}
		}
	}
#if DEBUG
	printf("H MATRIX ======================================================\n");
	std::cout << H[0][0] << " " << H[1][0] << " " << H[2][0] << " " << std::endl;
	std::cout << H[0][1] << " " << H[1][1] << " " << H[2][1] << " " << std::endl;
	std::cout << H[0][2] << " " << H[1][2] << " " << H[2][2] << " " << std::endl;
	printf("======================================================\n");
#endif // DEBUG
	//2:calculate SVD of H to get U, S & V
	float U[3][3] = { 0 };
	float S[3][3] = { 0 };
	float V[3][3] = { 0 };
	svd(H[0][0], H[0][1], H[0][2], H[1][0], H[1][1], H[1][2], H[2][0], H[2][1], H[2][2],
		U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
		S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
		V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
	);
	glm::mat3 matU(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
	glm::mat3 matV(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
#if DEBUG
	printf("U MATRIX\n");
	utilityCore::printMat3(matU);
	printf("V MATRIX\n");
	utilityCore::printMat3(matV);
#endif // DEBUG
	//2:Rotation Matrix and Translation Vector
	R = (matU * matV);
	t = target_centroid - R * (src_centroid);
#if DEBUG
	printf("ROTATION\n");
	utilityCore::printMat3(R);
	printf("TRANSLATION\n");
	utilityCore::printVec3(t);
#endif // DEBUG
	// Fix: release the scratch arrays (previously leaked on every call).
	delete[] src_norm;
	delete[] target_norm;
}
/******************
* GPU NAIVE SCANMATCHING *
******************/
__global__ void kernUpdatePositions(glm::vec3* src_pos, glm::mat3 R, glm::vec3 t, int N) {
	// One thread per point: apply the rigid transform x' = R*x + t in place.
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= N) {
		return;
	}
	src_pos[i] = R * src_pos[i] + t;
}
/**
* Main Algorithm for Running ICP on the GPU
* Finds homogenous transform between src_pc and target_pc
*/
void ScanMatch::stepICPGPU_NAIVE() {
	// One ICP iteration, fully on the device with brute-force NN search.
	// Device scratch buffers for the NN results.
	//cudaMalloc dist and indicies
	float* dist;
	int* indicies;
	cudaMalloc((void**)&dist, numObjects * sizeof(float));
	utilityCore::checkCUDAError("cudaMalloc dist failed", __LINE__);
	cudaMalloc((void**)&indicies, numObjects * sizeof(int));
	utilityCore::checkCUDAError("cudaMalloc indicies failed", __LINE__);
	// Byte-wise memset: all-0 floats and all-0xFF ints (i.e. -1 markers).
	cudaMemset(dist, 0, numObjects * sizeof(float));
	cudaMemset(indicies, -1, numObjects * sizeof(int));
	//1: Find Nearest Neigbors and Reshuffle
	// Synchronize before reading the clock so the timing covers the
	// asynchronous kernel launch; duration printed in microseconds.
	auto start = std::chrono::high_resolution_clock::now();
	ScanMatch::findNNGPU_NAIVE(src_pc, target_pc, dist, indicies, numObjects);
	cudaDeviceSynchronize();
	auto end = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << duration.count() << std::endl;
	ScanMatch::reshuffleGPU(target_pc, indicies, numObjects);
	cudaDeviceSynchronize();
	//2: Find Best Fit Transformation (R, t computed on host from device sums)
	glm::mat3 R;
	glm::vec3 t;
	ScanMatch::bestFitTransformGPU(src_pc, target_pc, numObjects, R, t);
	cudaDeviceSynchronize();
	//3: Update each src_point via Kernel Call
	dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
	kernUpdatePositions<<<fullBlocksPerGrid, blockSize>>>(src_pc->dev_pos, R, t, numObjects);
	//cudaFree dist and indicies
	cudaFree(dist);
	cudaFree(indicies);
}
/**
* Main Algorithm for Running ICP on the GPU w/Octree
* Finds homogenous transform between src_pc and target_pc
*/
void ScanMatch::stepICPGPU_OCTREE() {
	// One ICP iteration on the device, with the NN search restricted to
	// the leaf octant of each source point (see kernNNGPU_OCTREE).
	//cudaMalloc dist and indicies
	float* dist;
	int* indicies;
	cudaMalloc((void**)&dist, numObjects * sizeof(float));
	utilityCore::checkCUDAError("cudaMalloc dist failed", __LINE__);
	cudaMalloc((void**)&indicies, numObjects * sizeof(int));
	utilityCore::checkCUDAError("cudaMalloc indicies failed", __LINE__);
	// Byte-wise memset: all-0 floats and all-0xFF ints (i.e. -1 markers).
	cudaMemset(dist, 0, numObjects * sizeof(float));
	cudaMemset(indicies, -1, numObjects * sizeof(int));
	//1: Find Nearest Neigbors and Reshuffle
	// Timed like the naive path (microseconds), for comparison runs.
	auto start = std::chrono::high_resolution_clock::now();
	ScanMatch::findNNGPU_OCTREE(src_pc, target_pc, dist, indicies, numObjects, dev_octoNodes);
	//ScanMatch::findNNGPU_NAIVE(src_pc, target_pc, dist, indicies, numObjects);
	cudaDeviceSynchronize();
	auto end = std::chrono::high_resolution_clock::now();
	auto duration = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
	std::cout << duration.count() << std::endl;
	ScanMatch::reshuffleGPU(target_pc, indicies, numObjects);
	cudaDeviceSynchronize();
	//2: Find Best Fit Transformation
	glm::mat3 R;
	glm::vec3 t;
	ScanMatch::bestFitTransformGPU(src_pc, target_pc, numObjects, R, t);
	cudaDeviceSynchronize();
	//3: Update each src_point via Kernel Call
	dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
	kernUpdatePositions<<<fullBlocksPerGrid, blockSize>>>(src_pc->dev_pos, R, t, numObjects);
	//cudaFree dist and indicies
	cudaFree(dist);
	cudaFree(indicies);
}
/*
* Parallely compute NN for each point in the pointcloud
*/
__global__ void kernNNGPU_NAIVE(glm::vec3* src_pos, glm::vec3* target_pos, float* dist, int* indicies, int N) {
	// One thread per source point; brute-force scan over all N targets.
	int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (idx < N) {
		float minDist = INFINITY;
		// Fix: the index was declared `float`, which cannot represent
		// indices above 2^24 exactly and forced a float->int conversion
		// on the store below.
		int idx_minDist = -1;
		glm::vec3 src_pt = src_pos[idx];
		for (int tgt_idx = 0; tgt_idx < N; ++tgt_idx) { //Iterate through each tgt & find closest
			glm::vec3 tgt_pt = target_pos[tgt_idx];
			float d = glm::distance(src_pt, tgt_pt);
			if (d < minDist) {
				minDist = d;
				idx_minDist = tgt_idx;
			}
		}
		dist[idx] = minDist;
		indicies[idx] = idx_minDist;
	}
}
__device__ OctNodeGPU findLeafOctant(glm::vec3 src_pos, OctNodeGPU* octoNodes) {
	// Walk from the root (key 0) down to the leaf octant containing
	// src_pos. (Fix: removed the dead local `parentNode`, which was
	// updated every step but never read.)
	octKey currKey = 0;
	OctNodeGPU currNode = octoNodes[currKey];
	while (!currNode.isLeaf) {
		//Determine which octant the point lies in (0 is bottom-back-left)
		glm::vec3 center = currNode.center;
		uint8_t x = src_pos.x > center.x;
		uint8_t y = src_pos.y > center.y;
		uint8_t z = src_pos.z > center.z;
		// Children are stored contiguously starting at firstChildIdx;
		// the 3-bit octant code selects among the eight.
		currKey = currNode.firstChildIdx + (x + 2 * y + 4 * z);
		currNode = octoNodes[currKey];
	}
	return currNode;
}
/*
* Parallely compute NN for each point in the pointcloud
*/
__global__ void kernNNGPU_OCTREE(glm::vec3* src_pos, glm::vec3* target_pos, float* dist, int* indicies, int N, OctNodeGPU* octoNodes) {
	// One thread per source point; only candidates inside the point's
	// leaf octant are scanned (approximate NN near octant boundaries).
	int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (idx < N) {
		float minDist = INFINITY;
		// Fix: the index was declared `float`; use int to avoid rounding
		// and implicit conversion on the store below.
		int idx_minDist = -1;
		glm::vec3 src_pt = src_pos[idx];
		//Find our leaf node and extract tgt_start and tgt_end from it
		OctNodeGPU currLeafOctant = findLeafOctant(src_pt, octoNodes);
		int tgt_start = currLeafOctant.data_startIdx;
		int tgt_end = currLeafOctant.data_startIdx + currLeafOctant.count;
		for (int tgt_idx = tgt_start; tgt_idx < tgt_end; ++tgt_idx) { //Iterate through each tgt & find closest
			glm::vec3 tgt_pt = target_pos[tgt_idx];
			float d = glm::distance(src_pt, tgt_pt);
			if (d < minDist) {
				minDist = d;
				idx_minDist = tgt_idx;
			}
		}
		// NOTE(review): if the leaf octant is empty, idx_minDist stays -1
		// and the later gather reads pos[-1] - confirm leaves are non-empty.
		dist[idx] = minDist;
		indicies[idx] = idx_minDist;
	}
}
/**
* Finds Nearest Neighbors of target pc in src pc
* @args: src, target -> PointClouds w/ filled dev_pos IN GPU
* @returns:
* dist -> N array -> ith index = dist(src[i], closest_point in target) (on GPU)
* indicies -> N array w/ ith index = index of the closest point in target to src[i] (on GPU)
*/
void ScanMatch::findNNGPU_NAIVE(pointcloud* src, pointcloud* target, float* dist, int *indicies, int N) {
//Launch a kernel (paralellely compute NN for each point)
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernNNGPU_NAIVE<<<fullBlocksPerGrid, blockSize>>>(src->dev_pos, target->dev_pos, dist, indicies, N);
}
/**
* Finds Nearest Neighbors of target pc in src pc
* @args: src, target -> PointClouds w/ filled dev_pos IN GPU
* @returns:
* dist -> N array -> ith index = dist(src[i], closest_point in target) (on GPU)
* indicies -> N array w/ ith index = index of the closest point in target to src[i] (on GPU)
*/
void ScanMatch::findNNGPU_OCTREE(pointcloud* src, pointcloud* target, float* dist, int *indicies, int N, OctNodeGPU* octoNodes) {
//Launch a kernel (paralellely compute NN for each point)
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernNNGPU_OCTREE<<<fullBlocksPerGrid, blockSize>>>(src->dev_pos, target->dev_pos, dist, indicies, N, octoNodes);
}
/*
* Parallely reshuffle pos by indicies and fill matches
*/
__global__ void kernReshuffleGPU(glm::vec3* pos, glm::vec3* matches, int *indicies, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
//matches[idx] = pos[idx];
matches[idx] = pos[indicies[idx]];
}
}
/**
* Reshuffles pointcloud a as per indicies, puts these in dev_matches
* NOT ONE TO ONE SO NEED TO MAKE A COPY!
*/
void ScanMatch::reshuffleGPU(pointcloud* a, int* indicies, int N) {
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernReshuffleGPU<<<fullBlocksPerGrid, blockSize>>>(a->dev_pos, a->dev_matches, indicies, N);
}
__global__ void kernComputeNorms(glm::vec3* src_norm, glm::vec3* target_norm, glm::vec3* pos, glm::vec3* matches, glm::vec3 pos_centroid, glm::vec3 matches_centroid, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
src_norm[idx] = pos[idx] - pos_centroid;
target_norm[idx] = matches[idx] - matches_centroid;
}
}
__global__ void kernComputeHarray(glm::mat3* Harray, glm::vec3* src_norm, glm::vec3* target_norm, int N) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx < N) {
Harray[idx] = glm::mat3(glm::vec3(src_norm[idx]) * target_norm[idx].x,
glm::vec3(src_norm[idx]) * target_norm[idx].y,
glm::vec3(src_norm[idx]) * target_norm[idx].z);
}
}
/**
* Calculates transform T that maps from src to target
* Assumes dev_matches is filled for target
*/
void ScanMatch::bestFitTransformGPU(pointcloud* src, pointcloud* target, int N, glm::mat3 &R, glm::vec3 &t){
glm::vec3* src_norm;
glm::vec3* target_norm;
glm::mat3* Harray;
//cudaMalloc Norms and Harray
cudaMalloc((void**)&src_norm, N * sizeof(glm::vec3));
cudaMalloc((void**)&target_norm, N * sizeof(glm::vec3));
cudaMalloc((void**)&Harray, N * sizeof(glm::mat3));
cudaMemset(Harray, 0, N * sizeof(glm::mat3));
//Thrust device pointers for calculating centroids
thrust::device_ptr<glm::vec3> src_thrustpos(src->dev_pos);
thrust::device_ptr<glm::vec3> target_thrustmatches(target->dev_matches);
thrust::device_ptr<glm::mat3> harray_thrust = thrust::device_pointer_cast(Harray);
//1: Calculate centroids
glm::vec3 src_centroid(0.f);
glm::vec3 target_centroid(0.f);
src_centroid = glm::vec3(thrust::reduce(src_thrustpos, src_thrustpos + N, glm::vec3(0.f), thrust::plus<glm::vec3>()));
cudaDeviceSynchronize();
target_centroid = glm::vec3(thrust::reduce(target_thrustmatches, target_thrustmatches + N, glm::vec3(0.f), thrust::plus<glm::vec3>()));
cudaDeviceSynchronize();
src_centroid /= glm::vec3(N);
target_centroid /= glm::vec3(N);
#if DEBUG
printf("SRC CENTROID\n");
utilityCore::printVec3(src_centroid);
printf("TARGET CENTROID\n");
utilityCore::printVec3(target_centroid);
#endif // DEBUG
//2: Compute Norm via Kernel Call
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
kernComputeNorms<<<fullBlocksPerGrid, blockSize>>>(src_norm, target_norm, src->dev_pos, target->dev_matches, src_centroid, target_centroid, N);
cudaDeviceSynchronize();
utilityCore::checkCUDAError("Compute Norms Failed", __LINE__);
//3:Multiply src.T (3 x N) by target (N x 3) = H (3 x 3) via a kernel call
kernComputeHarray<<<fullBlocksPerGrid, blockSize>>>(Harray, src_norm, target_norm, N);
cudaDeviceSynchronize();
utilityCore::checkCUDAError("Compute HARRAY Failed", __LINE__);
/*
glm::mat3 H = thrust::reduce(harray_thrust, harray_thrust + N, glm::mat3(0.f), thrust::plus<glm::mat3>());
cudaThreadSynchronize();
*/
glm::mat3* Hcpu = new glm::mat3[N];
cudaMemcpy(Hcpu, Harray, N * sizeof(glm::mat3), cudaMemcpyDeviceToHost);
utilityCore::checkCUDAError("REDUCE HARRAY Failed", __LINE__);
cudaDeviceSynchronize();
glm::mat3 H(0.f);
for (int i = 0; i < N; ++i) {
H += Hcpu[i];
}
//4:Calculate SVD of H to get U, S & V
float U[3][3] = { 0 };
float S[3][3] = { 0 };
float V[3][3] = { 0 };
svd(H[0][0], H[0][1], H[0][2], H[1][0], H[1][1], H[1][2], H[2][0], H[2][1], H[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 matU(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 matV(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
//5:Rotation Matrix and Translation Vector
R = (matU * matV);
t = target_centroid - (R) * (src_centroid);
//cudaMalloc Norms and Harray
cudaFree(src_norm);
cudaFree(target_norm);
cudaFree(Harray);
}
|
438288cf0d36cdaf0d15d8cf304f8c416147c7e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// u = grad(f)
// u is a vector field and f a scalar field.
__global__ void
grad_kernel_default(const real* __restrict__ f, real * __restrict__ u, const real xfactor,
const real yfactor, const real zfactor)
{
__shared__ real fs[NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Load x-y tile to shared memory
__syncthreads();
fs[yli][xli] = current;
if (threadIdx.x < NGHOST) {
fs[yli][xli - NGHOST] = f[vfidx(xi - NGHOST, yi, zi)];
fs[yli][xli + NX_TILE] = f[vfidx(xi + NX_TILE, yi, zi)];
}
if (threadIdx.y < NGHOST) {
fs[yli - NGHOST][xli] = f[vfidx(xi, yi - NGHOST, zi)];
fs[yli + NY_TILE][xli] = f[vfidx(xi, yi + NY_TILE, zi)];
}
__syncthreads();
// Compute the gradient
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1, forward1, forward2, forward3);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
fs[yli - 3][xli], fs[yli - 2][xli], fs[yli - 1][xli],
fs[yli + 1][xli], fs[yli + 2][xli], fs[yli + 3][xli]);
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
fs[yli][xli - 3], fs[yli][xli - 2], fs[yli][xli - 1],
fs[yli][xli + 1], fs[yli][xli + 2], fs[yli][xli + 3]);
}
}
void
grad_default(vf3dgpu &f, vf3dgpu &u)
{
hipLaunchKernelGGL(( grad_kernel_default), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0, f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
| 438288cf0d36cdaf0d15d8cf304f8c416147c7e5.cu | // u = grad(f)
// u is a vector field and f a scalar field.
__global__ void
grad_kernel_default(const real* __restrict__ f, real * __restrict__ u, const real xfactor,
const real yfactor, const real zfactor)
{
__shared__ real fs[NY_TILE + 2 * NGHOST][NX_TILE + 2 * NGHOST];
// Local indices
const int xli = threadIdx.x + NGHOST;
const int yli = threadIdx.y + NGHOST;
// Global indices
const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;
// Z-wise iteration values
real behind3,
behind2 = f[vfidx(xi, yi, 0)],
behind1 = f[vfidx(xi, yi, 1)],
current = f[vfidx(xi, yi, 2)],
forward1 = f[vfidx(xi, yi, 3)],
forward2 = f[vfidx(xi, yi, 4)],
forward3 = f[vfidx(xi, yi, 5)];
for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
// Iterate through z dimension in registers
behind3 = behind2;
behind2 = behind1;
behind1 = current;
current = forward1;
forward1 = forward2;
forward2 = forward3;
forward3 = f[vfidx(xi, yi, zi + 3)];
// Load x-y tile to shared memory
__syncthreads();
fs[yli][xli] = current;
if (threadIdx.x < NGHOST) {
fs[yli][xli - NGHOST] = f[vfidx(xi - NGHOST, yi, zi)];
fs[yli][xli + NX_TILE] = f[vfidx(xi + NX_TILE, yi, zi)];
}
if (threadIdx.y < NGHOST) {
fs[yli - NGHOST][xli] = f[vfidx(xi, yi - NGHOST, zi)];
fs[yli + NY_TILE][xli] = f[vfidx(xi, yi + NY_TILE, zi)];
}
__syncthreads();
// Compute the gradient
u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
behind3, behind2, behind1, forward1, forward2, forward3);
u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
fs[yli - 3][xli], fs[yli - 2][xli], fs[yli - 1][xli],
fs[yli + 1][xli], fs[yli + 2][xli], fs[yli + 3][xli]);
u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
fs[yli][xli - 3], fs[yli][xli - 2], fs[yli][xli - 1],
fs[yli][xli + 1], fs[yli][xli + 2], fs[yli][xli + 3]);
}
}
void
grad_default(vf3dgpu &f, vf3dgpu &u)
{
grad_kernel_default<<<xy_tile.nblocks, xy_tile.nthreads>>>(f.mem(), u.mem(),
1.0/dx, 1.0/dy, 1.0/dz);
}
|
22c473f66874c2bf1cb33e7cf2f2035e5a2298ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void hello() {
printf("Hello world from block (%d,%d), thread (%d,%d,%d).\n",
blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, threadIdx.z);
__syncthreads();
}
int main(void) {
int devID;
hipDeviceProp_t p;
hipGetDevice(&devID);
hipGetDeviceProperties(&p, devID);
printf("Running on device %d \"%s\" with capability %d.%d.\n",
devID, p.name, p.major, p.minor);
if (p.major < 2) {
printf("Program incompatible with existing architecture; terminating.\n");
return 1;
}
dim3 dimGrid(2,2);
dim3 dimBlock(2,2,2);
hipLaunchKernelGGL(( hello), dim3(dimGrid),dim3(dimBlock), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 22c473f66874c2bf1cb33e7cf2f2035e5a2298ff.cu | #include <stdio.h>
__global__ void hello() {
printf("Hello world from block (%d,%d), thread (%d,%d,%d).\n",
blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, threadIdx.z);
__syncthreads();
}
int main(void) {
int devID;
cudaDeviceProp p;
cudaGetDevice(&devID);
cudaGetDeviceProperties(&p, devID);
printf("Running on device %d \"%s\" with capability %d.%d.\n",
devID, p.name, p.major, p.minor);
if (p.major < 2) {
printf("Program incompatible with existing architecture; terminating.\n");
return 1;
}
dim3 dimGrid(2,2);
dim3 dimBlock(2,2,2);
hello<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize();
return 0;
}
|
8c9cd8cfba11c14f86b55a2c2cb8b8181d23193e.hip | // !!! This is a file automatically generated by hipify!!!
//put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <hip/hip_cooperative_groups.h>
#include <hip/hip_cooperative_groups.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
__host__
void makeLUT(int N, int* LUT){
int M = N;
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
int createMask(int n)
{
int r = 0;
for (int i=0; i<n; i++)
r |= 1 << i;
return r;
}
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Usage: %s <input.txt> <size>\n", argv[0]);
return 1;
}
std::ifstream file(argv[1], std::ios::binary);
if (!file) {
printf("Could not open input file\n");
return 1;
}
int N = atoi(argv[2]);
if (FILESIZE_INT<N)
N = FILESIZE_INT;
int blockSize = N/2;
int blocks = 2*log2((double)N)-1;
int b = 2*log2((double)N)-1;
int LUTsize = N*(log2((double)N)*2 - 2);
int numBlocks;
if (FILESIZE_INT <= N)
numBlocks = blocks;
else
numBlocks = 2*blocks;
char* network;
hipMallocManaged(&network,N*(numBlocks+1)*sizeof(char));
memset(network,0,N*(numBlocks+1)*sizeof(char));
int* LUT;
hipMallocManaged(&LUT,LUTsize*sizeof(int));
makeLUT(N,LUT);
int mask = createMask(log2((double)N));
int *valid;
hipMallocManaged(&valid,(N/2)*(numBlocks)*sizeof(int));
memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int));
for(int i = 0; i < N/2; i++)
valid[i] = 1;
char* data;
hipMallocManaged(&data,FILESIZE_CHAR*sizeof(char));
memset(data,0,FILESIZE_CHAR*sizeof(char));
file.read(data, FILESIZE_CHAR*sizeof(char));
file.close();
int* idata;
hipMallocManaged(&idata,FILESIZE_CHAR*sizeof(char));
memcpy(idata, data, FILESIZE_CHAR*sizeof(char));
char* output;
hipMallocManaged(&output,FILESIZE_CHAR*sizeof(char));
memset(output,0,FILESIZE_CHAR*sizeof(char));
hipLaunchKernelGGL(( benes), dim3(numBlocks),dim3(blockSize), 0, 0, N, blocks, network, LUT, valid, mask, idata, output);
hipDeviceSynchronize();
// printf("The input is:");
// for (int i = 0; i < FILESIZE_INT; i++){
// if (i%N == 0) printf("\n");
// printf("%d ", idata[i]);
// }
// printf("\n\n");
for (int i = 0; i < FILESIZE_INT-1; i++){
if ((i%N != N-1) && (output[i+1]!=0)) {
if((mask & output[i+1]) < (mask & output[i])){
printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] );
return 1;
}
}
}
printf("Routing was successful!\n");
hipFree(valid);
hipFree(LUT);
hipFree(network);
hipFree(data);
hipFree(idata);
hipFree(output);
}
| 8c9cd8cfba11c14f86b55a2c2cb8b8181d23193e.cu | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <cooperative_groups.h>
#include <cooperative_groups.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h>
using namespace cooperative_groups;
namespace cg = cooperative_groups;
// #define FILESIZE_CHAR 1048576
#define FILESIZE_CHAR 1048576
#define FILESIZE_INT FILESIZE_CHAR/4
__host__
void makeLUT(int N, int* LUT){
int M = N;
int even = 0;
int odd = 1;
int LUTsize = N*(log2((double)N)*2 - 2);
for (int i =0; i < LUTsize/2; i+=N){
for (int j=0; j<N; j+=M){
for (int k =0; k<M/2; k++){
LUT[i+j+k] = even;
even+=2;
}
for (int k =M/2; k<M; k++){
LUT[i+j+k] = odd;
odd+=2;
}
} even=0; odd=1; M = M/2;
}
for (int x=LUTsize-N, i=LUTsize/2; i<LUTsize;i+=N, x-=N){
for(int j=0; j<N; j++){
int newIndex = LUT[x+j-LUTsize/2];
LUT[newIndex + i] = j;
}
}
return;
}
int createMask(int n)
{
int r = 0;
for (int i=0; i<n; i++)
r |= 1 << i;
return r;
}
__global__
void benes(int N, int block, char* network, int* LUT, volatile int* valid, int mask, int* data, char* output){
int idx = threadIdx.x;
int in1, in2, in1_index, in2_index;
int readOffset=0;
int fileSize = FILESIZE_INT/2;
int readOffsetSecondNet=fileSize;
thread_group g = tiled_partition(this_thread_block(), 2); //stops working after 32?
if(blockIdx.x == 0){
while(readOffset < fileSize){
in1 = data[idx*2 + readOffset];
in2 = data[idx*2+1 + readOffset];
readOffset+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
g.sync();
// __syncthreads();
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
}
}
else if ( blockIdx.x < block) {
while(readOffset < fileSize){
while((valid[idx + (blockIdx.x)*(N/2)])==0);
in1_index = LUT[idx*2 + (blockIdx.x-1)*N];
in2_index = LUT[idx*2 + (blockIdx.x-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
valid[idx + (blockIdx.x)*(N/2)] = 0;// valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
if (blockIdx.x != gridDim.x - 1 && blockIdx.x != block-1){
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
g.sync();
// __syncthreads();
}
else {
output[idx*2 + readOffset] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffset] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffset += N;
}
}
else if(blockIdx.x == block){
while(readOffsetSecondNet < FILESIZE_INT){
in1 = data[idx*2 + readOffsetSecondNet];
in2 = data[idx*2+1 + readOffsetSecondNet];
readOffsetSecondNet+=N;
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
valid[idx + (blockIdx.x+1)*(N/2)]=1;// valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
}
}
else{
while(readOffsetSecondNet < FILESIZE_INT){
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
while((valid[idx + (blockIdx.x)*(N/2)])==0);
// printf("waiting for previous block %d to produce\n", blockIdx.x - 1);
in1_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N];
in2_index = LUT[idx*2 + ((blockIdx.x%block)-1)*N + 1];
in1 = network[in1_index+(blockIdx.x)*N];
in2 = network[in2_index+(blockIdx.x)*N];
// printf("Block %d thread %d consumed %d %d\n", blockIdx.x,threadIdx.x, in1, in2);
valid[idx + (blockIdx.x)*(N/2)] = 0; //valid[idx*2 + 1 + (blockIdx.x)*N] = 0;
//printf("waiting for next block %d to consume\n", blockIdx.x + 1);
while((valid[idx + (blockIdx.x+1)*(N/2)])==1);
if ((in1 & mask) < (in2 & mask)){
network[idx*2 + (blockIdx.x+1)*N] = in1;
network[idx*2 + (blockIdx.x+1)*N + 1] = in2;
// printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
}
else{
network[idx*2 + (blockIdx.x+1)*N] = in2;
network[idx*2 + (blockIdx.x+1)*N + 1] = in1;
}
//printf("Block %d produced %d %d\n", blockIdx.x, in1, in2);
if (blockIdx.x != gridDim.x - 1){
valid[idx + (blockIdx.x+1)*(N/2)]=1; //valid[idx*2 + 1 + (blockIdx.x+1)*N]=1;
// __syncthreads();
g.sync();
//printf("valid:%d index:%d\n",valid[idx + (blockIdx.x+1)*N],idx + (blockIdx.x+1)*N);
}
else {
output[idx*2 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N];
output[idx*2+1 + readOffsetSecondNet] = network[idx*2 + (blockIdx.x+1)*N + 1];
}
readOffsetSecondNet += N;
}
}
}
int main(int argc, char *argv[]){
if (argc != 3){
printf("Usage: %s <input.txt> <size>\n", argv[0]);
return 1;
}
std::ifstream file(argv[1], std::ios::binary);
if (!file) {
printf("Could not open input file\n");
return 1;
}
int N = atoi(argv[2]);
if (FILESIZE_INT<N)
N = FILESIZE_INT;
int blockSize = N/2;
int blocks = 2*log2((double)N)-1;
int b = 2*log2((double)N)-1;
int LUTsize = N*(log2((double)N)*2 - 2);
int numBlocks;
if (FILESIZE_INT <= N)
numBlocks = blocks;
else
numBlocks = 2*blocks;
char* network;
cudaMallocManaged(&network,N*(numBlocks+1)*sizeof(char));
memset(network,0,N*(numBlocks+1)*sizeof(char));
int* LUT;
cudaMallocManaged(&LUT,LUTsize*sizeof(int));
makeLUT(N,LUT);
int mask = createMask(log2((double)N));
int *valid;
cudaMallocManaged(&valid,(N/2)*(numBlocks)*sizeof(int));
memset(valid,0,(N/2)*(numBlocks+1)*sizeof(int));
for(int i = 0; i < N/2; i++)
valid[i] = 1;
char* data;
cudaMallocManaged(&data,FILESIZE_CHAR*sizeof(char));
memset(data,0,FILESIZE_CHAR*sizeof(char));
file.read(data, FILESIZE_CHAR*sizeof(char));
file.close();
int* idata;
cudaMallocManaged(&idata,FILESIZE_CHAR*sizeof(char));
memcpy(idata, data, FILESIZE_CHAR*sizeof(char));
char* output;
cudaMallocManaged(&output,FILESIZE_CHAR*sizeof(char));
memset(output,0,FILESIZE_CHAR*sizeof(char));
benes<<<numBlocks,blockSize>>>(N, blocks, network, LUT, valid, mask, idata, output);
cudaDeviceSynchronize();
// printf("The input is:");
// for (int i = 0; i < FILESIZE_INT; i++){
// if (i%N == 0) printf("\n");
// printf("%d ", idata[i]);
// }
// printf("\n\n");
for (int i = 0; i < FILESIZE_INT-1; i++){
if ((i%N != N-1) && (output[i+1]!=0)) {
if((mask & output[i+1]) < (mask & output[i])){
printf("ERROR in routing at output %d %d %d\n",i ,mask & output[i+1],mask &output[i] );
return 1;
}
}
}
printf("Routing was successful!\n");
cudaFree(valid);
cudaFree(LUT);
cudaFree(network);
cudaFree(data);
cudaFree(idata);
cudaFree(output);
}
|
diffuseProject_k.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=[5,8,1] --blockDim=[64,4,1]
#include "common.h"
__global__ void
diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt,
float visc, int lb)
{
__requires(dx == 512);
__requires(dy == 512);
__requires(lb == 16);
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
cData xterm, yterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0;
#define __vx_r_offset __read_offset_bytes(vx)/sizeof(cData)
#define __vx_w_offset __write_offset_bytes(vx)/sizeof(cData)
#define __vy_r_offset __read_offset_bytes(vy)/sizeof(cData)
#define __vy_w_offset __write_offset_bytes(vy)/sizeof(cData)
#if 0 // Generated by inference
__global_invariant(0 <= p),
__global_invariant(__write_implies(vx,
(__mod_pow2(__vx_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vx_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
__global_invariant(__read_implies(vx,
(__mod_pow2(__vx_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vx_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
__global_invariant(__write_implies(vy,
(__mod_pow2(__vy_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vy_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
__global_invariant(__read_implies(vy,
(__mod_pow2(__vy_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vy_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
#endif
#if SET == 1 // full access break (inference not required)
// __v{x,y}_{r,w}_offset % dx % blockDim.x == threadIdx.x
// __v{x,y}_{r,w}_offset / dx / lb % blockDim.y == threadIdx.y
// __v{x,y}_{r,w}_offset % dx / blockDim.x == blockIdx.x
// __v{x,y}_{r,w}_offset / dx / lb / blockDim.y == blockIdx.y
__global_invariant(__write_implies(vx, __mod_pow2(__mod_pow2(__vx_w_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__write_implies(vx, __mod_pow2(__vx_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vx, __mod_pow2(__vx_w_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__write_implies(vx, __vx_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vx, __mod_pow2(__mod_pow2(__vx_r_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__read_implies(vx, __mod_pow2(__vx_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vx, __mod_pow2(__vx_r_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__read_implies(vx, __vx_r_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__write_implies(vy, __mod_pow2(__mod_pow2(__vy_w_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__write_implies(vy, __mod_pow2(__vy_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vy, __mod_pow2(__vy_w_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__write_implies(vy, __vy_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vy, __mod_pow2(__mod_pow2(__vy_r_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__read_implies(vy, __mod_pow2(__vy_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vy, __mod_pow2(__vy_r_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__read_implies(vy, __vy_r_offset / dx / lb / blockDim.y == blockIdx.y)),
#elif SET == 2 // minimal set relying on inference is quicker
__global_invariant(__write_implies(vx, __mod_pow2(__vx_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vx, __vx_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vx, __mod_pow2(__vx_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vx, __vx_r_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__write_implies(vy, __mod_pow2(__vy_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vy, __vy_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vy, __mod_pow2(__vy_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vy, __vy_r_offset / dx / lb / blockDim.y == blockIdx.y)),
#elif SET == 3 // rewrite set
// [ __v{x,y}_{r,w}_offset - threadIdx.y*dx*lb - blockIdx.x*blockDim.x - blockIdx.y*dx*lb*blockDim.y ] % blockDim.x == threadIdx.x
// __v{x,y}_{r,w}_offset/dx/lb - threadIdx.x/dx/lb - blockIdx.x*blockDim.x/dx/lb - blockIdx.y*dx*lb*blockDim.y/dx/lb == threadIdx.y
// [ __v{x,y}_{r,w}_offset/blockDim.x - threadIdx.x/blockDim.x - threadIdx.y*dx*lb/blockDim.x - blockIdx.y*dx*lb*blockDim.y/blockDim.x ] % gridDim.x == blockIdx.x /*INVALID*/
// __v{x,y}_{r,w}_offset/dx/lb/blockDim.y - threadIdx.x/dx/lb/blockDim.y - threadIdx.y*dx*lb/dx/lb/blockDim.y - blockIdx.x*blockDim.x/dx/lb/blockDim.y == blockIdx.y
__global_invariant(__write_implies(vx, threadIdx.x == (__vx_w_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__write_implies(vx, threadIdx.y == (__vx_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__write_implies(vx, blockIdx.y == (__vx_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vx, threadIdx.x == (__vx_r_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__read_implies( vx, threadIdx.y == (__vx_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__read_implies( vx, blockIdx.y == (__vx_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__write_implies(vy, threadIdx.x == (__vy_w_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__write_implies(vy, threadIdx.y == (__vy_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__write_implies(vy, blockIdx.y == (__vy_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vy, threadIdx.x == (__vy_r_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__read_implies( vy, threadIdx.y == (__vy_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__read_implies( vy, blockIdx.y == (__vy_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
#elif SET == 4 // minimal rewrite set needs inference
__global_invariant(__write_implies(vx, threadIdx.y == (__vx_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__write_implies(vx, blockIdx.y == (__vx_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vx, threadIdx.y == (__vx_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__read_implies( vx, blockIdx.y == (__vx_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__write_implies(vy, threadIdx.y == (__vy_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__write_implies(vy, blockIdx.y == (__vy_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vy, threadIdx.y == (__vy_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__read_implies( vy, blockIdx.y == (__vy_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
#endif
p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
int fj = fi * dx + gtidx;
xterm = vx[fj];
yterm = vy[fj];
// Compute the index of the wavenumber based on the
// data order produced by a standard NN FFT.
int iix = gtidx;
int iiy = (fi>dy/2)?(fi-(dy)):fi;
// Velocity diffusion
float kk = (float)(iix * iix + iiy * iiy); // k^2
float diff = 1.f / (1.f + visc * dt * kk);
xterm.x *= diff;
xterm.y *= diff;
yterm.x *= diff;
yterm.y *= diff;
// Velocity projection
if (kk > 0.f)
{
float rkk = 1.f / kk;
// Real portion of velocity projection
float rkp = (iix * xterm.x + iiy * yterm.x);
// Imaginary portion of velocity projection
float ikp = (iix * xterm.y + iiy * yterm.y);
xterm.x -= rkk * rkp * iix;
xterm.y -= rkk * ikp * iix;
yterm.x -= rkk * rkp * iiy;
yterm.y -= rkk * ikp * iiy;
}
vx[fj] = xterm;
vy[fj] = yterm;
}
}
}
}
| diffuseProject_k.cu | //pass
//--gridDim=[5,8,1] --blockDim=[64,4,1]
#include "common.h"
// Spectral-space velocity diffusion + projection step of a stable-fluids
// solver. vx/vy hold the FFT'd velocity field (cData = complex pair per
// wavenumber). Each thread owns one x column (gtidx) and a strip of `lb`
// consecutive rows starting at gtidy.
// NOTE(review): the __requires / __global_invariant annotations below are
// GPUVerify verification contracts selected by the SET macro — they are not
// executed at runtime. The //--gridDim/--blockDim pragma above pins the
// launch to gridDim=[5,8,1], blockDim=[64,4,1].
__global__ void
diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt,
float visc, int lb)
{
__requires(dx == 512);
__requires(dy == 512);
__requires(lb == 16);
// gtidx: global x index; gtidy: first row of this thread's lb-row strip.
int gtidx = blockIdx.x * blockDim.x + threadIdx.x;
int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb;
int p;
cData xterm, yterm;
// gtidx is the domain location in x for this thread
if (gtidx < dx)
{
for (p = 0;
// Byte offsets of the most recent read/write to vx/vy, expressed in cData
// elements; used only by the verifier invariants below.
#define __vx_r_offset __read_offset_bytes(vx)/sizeof(cData)
#define __vx_w_offset __write_offset_bytes(vx)/sizeof(cData)
#define __vy_r_offset __read_offset_bytes(vy)/sizeof(cData)
#define __vy_w_offset __write_offset_bytes(vy)/sizeof(cData)
#if 0 // Generated by inference
__global_invariant(0 <= p),
__global_invariant(__write_implies(vx,
(__mod_pow2(__vx_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vx_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
__global_invariant(__read_implies(vx,
(__mod_pow2(__vx_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vx_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
__global_invariant(__write_implies(vy,
(__mod_pow2(__vy_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vy_w_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
__global_invariant(__read_implies(vy,
(__mod_pow2(__vy_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2, dx*2)) |
(__mod_pow2(__vy_r_offset*2, dx*2) == __mod_pow2((blockIdx.y*lb*blockDim.y*dx + threadIdx.y*lb*dx + blockIdx.x*blockDim.x + threadIdx.x)*2+1, dx*2))
)
),
#endif
#if SET == 1 // full access break (inference not required)
// __v{x,y}_{r,w}_offset % dx % blockDim.x == threadIdx.x
// __v{x,y}_{r,w}_offset / dx / lb % blockDim.y == threadIdx.y
// __v{x,y}_{r,w}_offset % dx / blockDim.x == blockIdx.x
// __v{x,y}_{r,w}_offset / dx / lb / blockDim.y == blockIdx.y
__global_invariant(__write_implies(vx, __mod_pow2(__mod_pow2(__vx_w_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__write_implies(vx, __mod_pow2(__vx_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vx, __mod_pow2(__vx_w_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__write_implies(vx, __vx_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vx, __mod_pow2(__mod_pow2(__vx_r_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__read_implies(vx, __mod_pow2(__vx_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vx, __mod_pow2(__vx_r_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__read_implies(vx, __vx_r_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__write_implies(vy, __mod_pow2(__mod_pow2(__vy_w_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__write_implies(vy, __mod_pow2(__vy_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vy, __mod_pow2(__vy_w_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__write_implies(vy, __vy_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vy, __mod_pow2(__mod_pow2(__vy_r_offset, dx), blockDim.x) == threadIdx.x)),
__global_invariant(__read_implies(vy, __mod_pow2(__vy_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vy, __mod_pow2(__vy_r_offset, dx) / blockDim.x == blockIdx.x)),
__global_invariant(__read_implies(vy, __vy_r_offset / dx / lb / blockDim.y == blockIdx.y)),
#elif SET == 2 // minimal set relying on inference is quicker
__global_invariant(__write_implies(vx, __mod_pow2(__vx_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vx, __vx_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vx, __mod_pow2(__vx_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vx, __vx_r_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__write_implies(vy, __mod_pow2(__vy_w_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__write_implies(vy, __vy_w_offset / dx / lb / blockDim.y == blockIdx.y)),
__global_invariant(__read_implies(vy, __mod_pow2(__vy_r_offset / dx / lb, blockDim.y) == threadIdx.y)),
__global_invariant(__read_implies(vy, __vy_r_offset / dx / lb / blockDim.y == blockIdx.y)),
#elif SET == 3 // rewrite set
// [ __v{x,y}_{r,w}_offset - threadIdx.y*dx*lb - blockIdx.x*blockDim.x - blockIdx.y*dx*lb*blockDim.y ] % blockDim.x == threadIdx.x
// __v{x,y}_{r,w}_offset/dx/lb - threadIdx.x/dx/lb - blockIdx.x*blockDim.x/dx/lb - blockIdx.y*dx*lb*blockDim.y/dx/lb == threadIdx.y
// [ __v{x,y}_{r,w}_offset/blockDim.x - threadIdx.x/blockDim.x - threadIdx.y*dx*lb/blockDim.x - blockIdx.y*dx*lb*blockDim.y/blockDim.x ] % gridDim.x == blockIdx.x /*INVALID*/
// __v{x,y}_{r,w}_offset/dx/lb/blockDim.y - threadIdx.x/dx/lb/blockDim.y - threadIdx.y*dx*lb/dx/lb/blockDim.y - blockIdx.x*blockDim.x/dx/lb/blockDim.y == blockIdx.y
__global_invariant(__write_implies(vx, threadIdx.x == (__vx_w_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__write_implies(vx, threadIdx.y == (__vx_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__write_implies(vx, blockIdx.y == (__vx_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vx, threadIdx.x == (__vx_r_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__read_implies( vx, threadIdx.y == (__vx_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__read_implies( vx, blockIdx.y == (__vx_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__write_implies(vy, threadIdx.x == (__vy_w_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__write_implies(vy, threadIdx.y == (__vy_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__write_implies(vy, blockIdx.y == (__vy_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vy, threadIdx.x == (__vy_r_offset - (threadIdx.y*dx*lb) - (blockIdx.x*blockDim.x) - (blockIdx.y*dx*lb*blockDim.y)) % blockDim.x)),
__global_invariant(__read_implies( vy, threadIdx.y == (__vy_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y*dx*lb*blockDim.y/dx/lb))),
__global_invariant(__read_implies( vy, blockIdx.y == (__vy_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y*dx*lb/dx/lb/blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
#elif SET == 4 // minimal rewrite set needs inference
__global_invariant(__write_implies(vx, threadIdx.y == (__vx_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__write_implies(vx, blockIdx.y == (__vx_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vx, threadIdx.y == (__vx_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__read_implies( vx, blockIdx.y == (__vx_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__write_implies(vy, threadIdx.y == (__vy_w_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__write_implies(vy, blockIdx.y == (__vy_w_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
__global_invariant(__read_implies( vy, threadIdx.y == (__vy_r_offset/dx/lb) - (threadIdx.x/dx/lb) - (blockIdx.x*blockDim.x/dx/lb) - (blockIdx.y *blockDim.y ))),
__global_invariant(__read_implies( vy, blockIdx.y == (__vy_r_offset/dx/lb/blockDim.y) - (threadIdx.x/dx/lb/blockDim.y) - (threadIdx.y /blockDim.y) - (blockIdx.x*blockDim.x/dx/lb/blockDim.y))),
#endif
p < lb; p++)
{
// fi is the domain location in y for this thread
int fi = gtidy + p;
if (fi < dy)
{
// fj: linear index of wavenumber (fi, gtidx) in the dx-wide row-major grid.
int fj = fi * dx + gtidx;
xterm = vx[fj];
yterm = vy[fj];
// Compute the index of the wavenumber based on the
// data order produced by a standard NN FFT.
int iix = gtidx;
int iiy = (fi>dy/2)?(fi-(dy)):fi;
// Velocity diffusion
float kk = (float)(iix * iix + iiy * iiy); // k^2
// Implicit diffusion: scale each mode by 1 / (1 + nu*dt*k^2).
float diff = 1.f / (1.f + visc * dt * kk);
xterm.x *= diff;
xterm.y *= diff;
yterm.x *= diff;
yterm.y *= diff;
// Velocity projection
if (kk > 0.f)
{
// Subtract the component of velocity parallel to k, leaving a
// divergence-free field (kk == 0 is the mean mode; nothing to project).
float rkk = 1.f / kk;
// Real portion of velocity projection
float rkp = (iix * xterm.x + iiy * yterm.x);
// Imaginary portion of velocity projection
float ikp = (iix * xterm.y + iiy * yterm.y);
xterm.x -= rkk * rkp * iix;
xterm.y -= rkk * ikp * iix;
yterm.x -= rkk * rkp * iiy;
yterm.y -= rkk * ikp * iiy;
}
vx[fj] = xterm;
vy[fj] = yterm;
}
}
}
}
|
4169f79f1c6780b0b8e3a5dfd72595146d744aea.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <chrono>
#include <iostream>
#include "matrix.h"
// Abort the process with a diagnostic when a HIP runtime call fails.
// `file` and `line` identify the call site (supplied by HANDLE_ERROR).
static void handle_error(hipError_t err, const char* file, int line) {
  if (err == hipSuccess) {
    return;
  }
  printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
  exit(-1);
}
// Wrap every HIP runtime call so failures report their source location.
#define HANDLE_ERROR(err) (handle_error(err, __FILE__, __LINE__))
// Fill ptr[0..length) with `val` using a grid-stride loop, so the kernel is
// correct for any launch configuration.
__global__ void fill_kernel(double* ptr, int64_t length, double val) {
  int64_t step = (int64_t)gridDim.x * blockDim.x;
  for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x; i < length;
       i += step) {
    ptr[i] = val;
  }
}
// For every nonzero (rid, cid) of a binary l x m COO sparse matrix, add row
// cid of the m x n dense matrix `data` into row rid of the l x n output:
// out[rid, :] += data[cid, :]. atomicAdd is needed because several nonzeros
// can target the same output row. Grid-stride in both dimensions: y walks the
// nnz entries, x walks the n columns.
__global__ void coo_spmm_kernel(int64_t* rows, int64_t* cols, double* data,
                                double* out, int64_t l, int64_t m, int64_t n,
                                int64_t nnz) {
  int64_t ty = blockIdx.y * blockDim.y + threadIdx.y;
  int64_t tx0 = blockIdx.x * blockDim.x + threadIdx.x;
  int64_t stride_y = gridDim.y * blockDim.y, stride_x = gridDim.x * blockDim.x;
  while (ty < nnz) {
    int64_t rid = rows[ty], cid = cols[ty];
    double* out_off = out + rid * n;
    // BUG FIX: restart the column index for each nonzero. The old code kept
    // advancing a single `tx` across outer iterations, so after the first
    // nonzero a thread processed, every later nonzero saw tx >= n and its
    // columns were silently skipped whenever stride_y < nnz.
    for (int64_t tx = tx0; tx < n; tx += stride_x) {
      atomicAdd(out_off + tx, data[cid * n + tx]);
    }
    ty += stride_y;
  }
}
// SpMM for a binary l x m CSR sparse matrix against an m x n dense matrix:
// out[row, col] = sum over the row's column indices cid of data[cid, col].
// One output row per y index, one output column per x index; grid-stride in
// both dimensions. No atomics needed — each (row, col) is owned by one thread.
__global__ void csr_spmm_kernel(int64_t* indptr, int64_t* indices, double* data,
                                double* out, int64_t l, int64_t m, int64_t n) {
  int64_t ty = blockIdx.y * blockDim.y + threadIdx.y;
  int64_t tx0 = blockIdx.x * blockDim.x + threadIdx.x;
  int64_t stride_y = gridDim.y * blockDim.y, stride_x = gridDim.x * blockDim.x;
  while (ty < l) {
    int64_t row_start = indptr[ty], row_end = indptr[ty + 1];
    double* out_off = out + ty * n;
    // BUG FIX: restart the column index for each output row. Previously a
    // single `tx` kept growing across outer iterations, so any row after the
    // first one handled by a thread was never written when stride_y < l.
    for (int64_t tx = tx0; tx < n; tx += stride_x) {
      double acc = 0;
      for (int64_t i = row_start; i < row_end; ++i) {
        int64_t cid = indices[i];
        acc += data[cid * n + tx];
      }
      out_off[tx] = acc;
    }
    ty += stride_y;
  }
}
// Host driver for out = coo * dense (binary COO sparse times dense; see
// coo_spmm_kernel). Allocates device buffers, zeroes the output, launches the
// kernel, prints the kernel time and copies the result back into `out`.
void coo_spmm(const COOMatrix& coo, const DenseMatrix& dense,
              DenseMatrix& out) {
  assert(coo.num_cols == dense.num_rows);
  assert(coo.num_rows == out.num_rows);
  assert(dense.num_cols == out.num_cols);
  int64_t *coo_row, *coo_col;
  double *dense_data, *out_data;
  int64_t coo_bytes = coo.nnz * sizeof(int64_t),
          dense_bytes = dense.num_rows * dense.num_cols * sizeof(double),
          out_bytes = out.num_rows * out.num_cols * sizeof(double);
  // Check every runtime call: a failed allocation or copy otherwise surfaces
  // much later as a baffling kernel failure (previously unchecked).
  HANDLE_ERROR(hipMalloc((void**)&coo_row, coo_bytes));
  HANDLE_ERROR(hipMalloc((void**)&coo_col, coo_bytes));
  HANDLE_ERROR(hipMalloc((void**)&dense_data, dense_bytes));
  HANDLE_ERROR(hipMalloc((void**)&out_data, out_bytes));
  HANDLE_ERROR(hipMemcpy((void*)coo_row, (void*)(coo.row), coo_bytes,
                         hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy((void*)coo_col, (void*)(coo.col), coo_bytes,
                         hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy((void*)dense_data, (void*)(dense.data), dense_bytes,
                         hipMemcpyHostToDevice));
  hipLaunchKernelGGL(( fill_kernel), dim3((out.num_rows * out.num_cols + 511) / 512), dim3(512), 0, 0,
      out_data, out.num_rows * out.num_cols, 0);
  // Kernel launches don't return a status; pick up launch errors explicitly.
  HANDLE_ERROR(hipGetLastError());
  dim3 block_size(32, 32);
  // NOTE(review): gridDim.y is hardware-limited to 65535; for very large nnz
  // this launch would fail, which the hipGetLastError() below now reports
  // instead of silently leaving `out` all zeros.
  dim3 grid_size((out.num_cols + block_size.x - 1) / block_size.x,
                 (coo.nnz + block_size.y - 1) / block_size.y);
  float time;
  hipEvent_t start, stop;
  HANDLE_ERROR(hipEventCreate(&start));
  HANDLE_ERROR(hipEventCreate(&stop));
  HANDLE_ERROR(hipEventRecord(start, 0));
  hipLaunchKernelGGL(( coo_spmm_kernel), dim3(grid_size), dim3(block_size), 0, 0,
      coo_row, coo_col, dense_data, out_data, coo.num_rows, coo.num_cols,
      dense.num_cols, coo.nnz);
  HANDLE_ERROR(hipGetLastError());
  HANDLE_ERROR(hipEventRecord(stop, 0));
  HANDLE_ERROR(hipEventSynchronize(stop));
  HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
  printf("coo cost time: %3.10f ms \n", time);
  HANDLE_ERROR(hipMemcpy((void*)out.data, (void*)out_data, out_bytes,
                         hipMemcpyDeviceToHost));
  // Release the timing events (previously leaked) and device buffers.
  HANDLE_ERROR(hipEventDestroy(start));
  HANDLE_ERROR(hipEventDestroy(stop));
  HANDLE_ERROR(hipFree(coo_row));
  HANDLE_ERROR(hipFree(coo_col));
  HANDLE_ERROR(hipFree(dense_data));
  HANDLE_ERROR(hipFree(out_data));
}
// Host driver for out = csr * dense (binary CSR sparse times dense; see
// csr_spmm_kernel). Allocates device buffers, launches the kernel, prints the
// kernel time and copies the result back into `out`. No zero-fill is needed:
// the kernel overwrites every output element it owns.
void csr_spmm(const CSRMatrix& csr, const DenseMatrix& dense,
              DenseMatrix& out) {
  assert(csr.num_cols == dense.num_rows);
  assert(csr.num_rows == out.num_rows);
  assert(dense.num_cols == out.num_cols);
  int64_t *csr_indptr, *csr_indices;
  double *dense_data, *out_data;
  int64_t indptr_bytes = (csr.num_rows + 1) * sizeof(int64_t),
          // indptr[num_rows] is the total nonzero count in CSR form.
          indices_bytes = csr.indptr[csr.num_rows] * sizeof(int64_t),
          dense_bytes = dense.num_rows * dense.num_cols * sizeof(double),
          out_bytes = out.num_rows * out.num_cols * sizeof(double);
  // Check every runtime call: a failed allocation or copy otherwise surfaces
  // much later as a baffling kernel failure (previously unchecked).
  HANDLE_ERROR(hipMalloc((void**)&csr_indptr, indptr_bytes));
  HANDLE_ERROR(hipMalloc((void**)&csr_indices, indices_bytes));
  HANDLE_ERROR(hipMalloc((void**)&dense_data, dense_bytes));
  HANDLE_ERROR(hipMalloc((void**)&out_data, out_bytes));
  HANDLE_ERROR(hipMemcpy((void*)csr_indptr, (void*)(csr.indptr), indptr_bytes,
                         hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy((void*)csr_indices, (void*)(csr.indices), indices_bytes,
                         hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy((void*)dense_data, (void*)(dense.data), dense_bytes,
                         hipMemcpyHostToDevice));
  dim3 block_size(32, 32);
  dim3 grid_size((out.num_cols + block_size.x - 1) / block_size.x,
                 (csr.num_rows + block_size.y - 1) / block_size.y);
  float time;
  hipEvent_t start, stop;
  HANDLE_ERROR(hipEventCreate(&start));
  HANDLE_ERROR(hipEventCreate(&stop));
  HANDLE_ERROR(hipEventRecord(start, 0));
  hipLaunchKernelGGL(( csr_spmm_kernel), dim3(grid_size), dim3(block_size), 0, 0, csr_indptr, csr_indices,
      dense_data, out_data, csr.num_rows,
      csr.num_cols, dense.num_cols);
  // Kernel launches don't return a status; pick up launch errors explicitly.
  HANDLE_ERROR(hipGetLastError());
  HANDLE_ERROR(hipEventRecord(stop, 0));
  HANDLE_ERROR(hipEventSynchronize(stop));
  HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
  printf("csr cost time: %3.10f ms \n", time);
  HANDLE_ERROR(hipMemcpy((void*)out.data, (void*)out_data, out_bytes,
                         hipMemcpyDeviceToHost));
  // Release the timing events (previously leaked) and device buffers.
  HANDLE_ERROR(hipEventDestroy(start));
  HANDLE_ERROR(hipEventDestroy(stop));
  HANDLE_ERROR(hipFree(csr_indptr));
  HANDLE_ERROR(hipFree(csr_indices));
  HANDLE_ERROR(hipFree(dense_data));
  HANDLE_ERROR(hipFree(out_data));
}
// Hand-checkable smoke test: 2x3 binary COO times a 3x3 dense matrix.
// Per the kernel's semantics, out row 0 is dense row 1, and out row 1 is the
// sum of dense rows 0 and 2. Result is printed, not asserted.
void test_small_coo_spmm() {
  const int64_t l = 2, m = 3, n = 3, nnz = 3;
  COOMatrix coo(l, m, nnz, vec2ptr({0, 1, 1}), vec2ptr({1, 0, 2}));
  DenseMatrix dense(m, n, vec2ptr({{3, 9, 2}, {4, 6, 7}, {5, 8, 1}}));
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  coo_spmm(coo, dense, out);
  out.dump();
}
// Hand-checkable smoke test: the same 2x3 sparse matrix as the COO test,
// expressed in CSR form, times the same 3x3 dense matrix. Result is printed.
void test_small_csr_spmm() {
  const int64_t l = 2, m = 3, n = 3;
  CSRMatrix csr(l, m, vec2ptr({0, 1, 3}), vec2ptr({1, 0, 2}));
  DenseMatrix dense(m, n, vec2ptr({{3, 9, 2}, {4, 6, 7}, {5, 8, 1}}));
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  csr_spmm(csr, dense, out);
  out.dump();
}
// Timing run: random l x m COO matrix times random m x n dense matrix.
// The kernel time itself is printed inside coo_spmm.
void test_large_coo_spmm(int64_t l, int64_t m, int64_t n) {
  COOMatrix coo = random_coo(l, m);
  DenseMatrix dense = random_dense(m, n);
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  std::cout << "nnz" << coo.nnz << std::endl;
  coo_spmm(coo, dense, out);
}
// Timing run: random l x m CSR matrix times random m x n dense matrix.
// The kernel time itself is printed inside csr_spmm.
void test_large_csr_spmm(int64_t l, int64_t m, int64_t n) {
  CSRMatrix csr = random_csr(l, m);
  DenseMatrix dense = random_dense(m, n);
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  csr_spmm(csr, dense, out);
}
// Run the tiny hand-checked cases first, then the large random timing runs.
int main() {
  test_small_coo_spmm();
  test_small_csr_spmm();
  const int64_t l = 5000, m = 5500, n = 5200;
  test_large_coo_spmm(l, m, n);
  test_large_csr_spmm(l, m, n);
  return 0;
}
| 4169f79f1c6780b0b8e3a5dfd72595146d744aea.cu | #include <assert.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <chrono>
#include <iostream>
#include "matrix.h"
// Abort the process with a diagnostic when a CUDA runtime call fails.
// `file` and `line` identify the call site (supplied by HANDLE_ERROR).
static void handle_error(cudaError_t err, const char* file, int line) {
  if (err == cudaSuccess) {
    return;
  }
  printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
  exit(-1);
}
// Wrap every CUDA runtime call so failures report their source location.
#define HANDLE_ERROR(err) (handle_error(err, __FILE__, __LINE__))
// Fill ptr[0..length) with `val` using a grid-stride loop, so the kernel is
// correct for any launch configuration.
__global__ void fill_kernel(double* ptr, int64_t length, double val) {
  int64_t step = (int64_t)gridDim.x * blockDim.x;
  for (int64_t i = (int64_t)blockIdx.x * blockDim.x + threadIdx.x; i < length;
       i += step) {
    ptr[i] = val;
  }
}
// For every nonzero (rid, cid) of a binary l x m COO sparse matrix, add row
// cid of the m x n dense matrix `data` into row rid of the l x n output:
// out[rid, :] += data[cid, :]. atomicAdd is needed because several nonzeros
// can target the same output row. Grid-stride in both dimensions: y walks the
// nnz entries, x walks the n columns. (atomicAdd(double*) requires SM60+.)
__global__ void coo_spmm_kernel(int64_t* rows, int64_t* cols, double* data,
                                double* out, int64_t l, int64_t m, int64_t n,
                                int64_t nnz) {
  int64_t ty = blockIdx.y * blockDim.y + threadIdx.y;
  int64_t tx0 = blockIdx.x * blockDim.x + threadIdx.x;
  int64_t stride_y = gridDim.y * blockDim.y, stride_x = gridDim.x * blockDim.x;
  while (ty < nnz) {
    int64_t rid = rows[ty], cid = cols[ty];
    double* out_off = out + rid * n;
    // BUG FIX: restart the column index for each nonzero. The old code kept
    // advancing a single `tx` across outer iterations, so after the first
    // nonzero a thread processed, every later nonzero saw tx >= n and its
    // columns were silently skipped whenever stride_y < nnz.
    for (int64_t tx = tx0; tx < n; tx += stride_x) {
      atomicAdd(out_off + tx, data[cid * n + tx]);
    }
    ty += stride_y;
  }
}
// SpMM for a binary l x m CSR sparse matrix against an m x n dense matrix:
// out[row, col] = sum over the row's column indices cid of data[cid, col].
// One output row per y index, one output column per x index; grid-stride in
// both dimensions. No atomics needed — each (row, col) is owned by one thread.
__global__ void csr_spmm_kernel(int64_t* indptr, int64_t* indices, double* data,
                                double* out, int64_t l, int64_t m, int64_t n) {
  int64_t ty = blockIdx.y * blockDim.y + threadIdx.y;
  int64_t tx0 = blockIdx.x * blockDim.x + threadIdx.x;
  int64_t stride_y = gridDim.y * blockDim.y, stride_x = gridDim.x * blockDim.x;
  while (ty < l) {
    int64_t row_start = indptr[ty], row_end = indptr[ty + 1];
    double* out_off = out + ty * n;
    // BUG FIX: restart the column index for each output row. Previously a
    // single `tx` kept growing across outer iterations, so any row after the
    // first one handled by a thread was never written when stride_y < l.
    for (int64_t tx = tx0; tx < n; tx += stride_x) {
      double acc = 0;
      for (int64_t i = row_start; i < row_end; ++i) {
        int64_t cid = indices[i];
        acc += data[cid * n + tx];
      }
      out_off[tx] = acc;
    }
    ty += stride_y;
  }
}
// Host driver for out = coo * dense (binary COO sparse times dense; see
// coo_spmm_kernel). Allocates device buffers, zeroes the output, launches the
// kernel, prints the kernel time and copies the result back into `out`.
void coo_spmm(const COOMatrix& coo, const DenseMatrix& dense,
              DenseMatrix& out) {
  assert(coo.num_cols == dense.num_rows);
  assert(coo.num_rows == out.num_rows);
  assert(dense.num_cols == out.num_cols);
  int64_t *coo_row, *coo_col;
  double *dense_data, *out_data;
  int64_t coo_bytes = coo.nnz * sizeof(int64_t),
          dense_bytes = dense.num_rows * dense.num_cols * sizeof(double),
          out_bytes = out.num_rows * out.num_cols * sizeof(double);
  // Check every runtime call: a failed allocation or copy otherwise surfaces
  // much later as a baffling kernel failure (previously unchecked).
  HANDLE_ERROR(cudaMalloc((void**)&coo_row, coo_bytes));
  HANDLE_ERROR(cudaMalloc((void**)&coo_col, coo_bytes));
  HANDLE_ERROR(cudaMalloc((void**)&dense_data, dense_bytes));
  HANDLE_ERROR(cudaMalloc((void**)&out_data, out_bytes));
  HANDLE_ERROR(cudaMemcpy((void*)coo_row, (void*)(coo.row), coo_bytes,
                          cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy((void*)coo_col, (void*)(coo.col), coo_bytes,
                          cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy((void*)dense_data, (void*)(dense.data), dense_bytes,
                          cudaMemcpyHostToDevice));
  fill_kernel<<<(out.num_rows * out.num_cols + 511) / 512, 512>>>(
      out_data, out.num_rows * out.num_cols, 0);
  // Kernel launches don't return a status; pick up launch errors explicitly.
  HANDLE_ERROR(cudaGetLastError());
  dim3 block_size(32, 32);
  // NOTE(review): gridDim.y is hardware-limited to 65535; for very large nnz
  // this launch would fail, which the cudaGetLastError() below now reports
  // instead of silently leaving `out` all zeros.
  dim3 grid_size((out.num_cols + block_size.x - 1) / block_size.x,
                 (coo.nnz + block_size.y - 1) / block_size.y);
  float time;
  cudaEvent_t start, stop;
  HANDLE_ERROR(cudaEventCreate(&start));
  HANDLE_ERROR(cudaEventCreate(&stop));
  HANDLE_ERROR(cudaEventRecord(start, 0));
  coo_spmm_kernel<<<grid_size, block_size>>>(
      coo_row, coo_col, dense_data, out_data, coo.num_rows, coo.num_cols,
      dense.num_cols, coo.nnz);
  HANDLE_ERROR(cudaGetLastError());
  HANDLE_ERROR(cudaEventRecord(stop, 0));
  HANDLE_ERROR(cudaEventSynchronize(stop));
  HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
  printf("coo cost time: %3.10f ms \n", time);
  HANDLE_ERROR(cudaMemcpy((void*)out.data, (void*)out_data, out_bytes,
                          cudaMemcpyDeviceToHost));
  // Release the timing events (previously leaked) and device buffers.
  HANDLE_ERROR(cudaEventDestroy(start));
  HANDLE_ERROR(cudaEventDestroy(stop));
  HANDLE_ERROR(cudaFree(coo_row));
  HANDLE_ERROR(cudaFree(coo_col));
  HANDLE_ERROR(cudaFree(dense_data));
  HANDLE_ERROR(cudaFree(out_data));
}
// Host driver for out = csr * dense (binary CSR sparse times dense; see
// csr_spmm_kernel). Allocates device buffers, launches the kernel, prints the
// kernel time and copies the result back into `out`. No zero-fill is needed:
// the kernel overwrites every output element it owns.
void csr_spmm(const CSRMatrix& csr, const DenseMatrix& dense,
              DenseMatrix& out) {
  assert(csr.num_cols == dense.num_rows);
  assert(csr.num_rows == out.num_rows);
  assert(dense.num_cols == out.num_cols);
  int64_t *csr_indptr, *csr_indices;
  double *dense_data, *out_data;
  int64_t indptr_bytes = (csr.num_rows + 1) * sizeof(int64_t),
          // indptr[num_rows] is the total nonzero count in CSR form.
          indices_bytes = csr.indptr[csr.num_rows] * sizeof(int64_t),
          dense_bytes = dense.num_rows * dense.num_cols * sizeof(double),
          out_bytes = out.num_rows * out.num_cols * sizeof(double);
  // Check every runtime call: a failed allocation or copy otherwise surfaces
  // much later as a baffling kernel failure (previously unchecked).
  HANDLE_ERROR(cudaMalloc((void**)&csr_indptr, indptr_bytes));
  HANDLE_ERROR(cudaMalloc((void**)&csr_indices, indices_bytes));
  HANDLE_ERROR(cudaMalloc((void**)&dense_data, dense_bytes));
  HANDLE_ERROR(cudaMalloc((void**)&out_data, out_bytes));
  HANDLE_ERROR(cudaMemcpy((void*)csr_indptr, (void*)(csr.indptr), indptr_bytes,
                          cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy((void*)csr_indices, (void*)(csr.indices), indices_bytes,
                          cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy((void*)dense_data, (void*)(dense.data), dense_bytes,
                          cudaMemcpyHostToDevice));
  dim3 block_size(32, 32);
  dim3 grid_size((out.num_cols + block_size.x - 1) / block_size.x,
                 (csr.num_rows + block_size.y - 1) / block_size.y);
  float time;
  cudaEvent_t start, stop;
  HANDLE_ERROR(cudaEventCreate(&start));
  HANDLE_ERROR(cudaEventCreate(&stop));
  HANDLE_ERROR(cudaEventRecord(start, 0));
  csr_spmm_kernel<<<grid_size, block_size>>>(csr_indptr, csr_indices,
                                             dense_data, out_data, csr.num_rows,
                                             csr.num_cols, dense.num_cols);
  // Kernel launches don't return a status; pick up launch errors explicitly.
  HANDLE_ERROR(cudaGetLastError());
  HANDLE_ERROR(cudaEventRecord(stop, 0));
  HANDLE_ERROR(cudaEventSynchronize(stop));
  HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
  printf("csr cost time: %3.10f ms \n", time);
  HANDLE_ERROR(cudaMemcpy((void*)out.data, (void*)out_data, out_bytes,
                          cudaMemcpyDeviceToHost));
  // Release the timing events (previously leaked) and device buffers.
  HANDLE_ERROR(cudaEventDestroy(start));
  HANDLE_ERROR(cudaEventDestroy(stop));
  HANDLE_ERROR(cudaFree(csr_indptr));
  HANDLE_ERROR(cudaFree(csr_indices));
  HANDLE_ERROR(cudaFree(dense_data));
  HANDLE_ERROR(cudaFree(out_data));
}
// Hand-checkable smoke test: 2x3 binary COO times a 3x3 dense matrix.
// Per the kernel's semantics, out row 0 is dense row 1, and out row 1 is the
// sum of dense rows 0 and 2. Result is printed, not asserted.
void test_small_coo_spmm() {
  const int64_t l = 2, m = 3, n = 3, nnz = 3;
  COOMatrix coo(l, m, nnz, vec2ptr({0, 1, 1}), vec2ptr({1, 0, 2}));
  DenseMatrix dense(m, n, vec2ptr({{3, 9, 2}, {4, 6, 7}, {5, 8, 1}}));
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  coo_spmm(coo, dense, out);
  out.dump();
}
// Hand-checkable smoke test: the same 2x3 sparse matrix as the COO test,
// expressed in CSR form, times the same 3x3 dense matrix. Result is printed.
void test_small_csr_spmm() {
  const int64_t l = 2, m = 3, n = 3;
  CSRMatrix csr(l, m, vec2ptr({0, 1, 3}), vec2ptr({1, 0, 2}));
  DenseMatrix dense(m, n, vec2ptr({{3, 9, 2}, {4, 6, 7}, {5, 8, 1}}));
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  csr_spmm(csr, dense, out);
  out.dump();
}
// Timing run: random l x m COO matrix times random m x n dense matrix.
// The kernel time itself is printed inside coo_spmm.
void test_large_coo_spmm(int64_t l, int64_t m, int64_t n) {
  COOMatrix coo = random_coo(l, m);
  DenseMatrix dense = random_dense(m, n);
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  std::cout << "nnz" << coo.nnz << std::endl;
  coo_spmm(coo, dense, out);
}
// Timing run: random l x m CSR matrix times random m x n dense matrix.
// The kernel time itself is printed inside csr_spmm.
void test_large_csr_spmm(int64_t l, int64_t m, int64_t n) {
  CSRMatrix csr = random_csr(l, m);
  DenseMatrix dense = random_dense(m, n);
  double* out_buf = new double[l * n]();
  DenseMatrix out(l, n, out_buf);
  csr_spmm(csr, dense, out);
}
// Run the tiny hand-checked cases first, then the large random timing runs.
int main() {
  test_small_coo_spmm();
  test_small_csr_spmm();
  const int64_t l = 5000, m = 5500, n = 5200;
  test_large_coo_spmm(l, m, n);
  test_large_csr_spmm(l, m, n);
  return 0;
}
|
5a780c402c1cebe1b385baab6e46a77ebf298c2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
* Copyright (C) 2013 x265 project
*
* Authors: Gopu Govindaswamy <gopu@govindaswamy.org>
* Mandar Gurav <mandar@multicorewareinc.com>
* Mahesh Pittala <mahesh@multicorewareinc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@multicorewareinc.com.
*****************************************************************************/
#include "ece408_competition.h"
#include "primitives.h"
#include "test/intrapredharness.h"
#include "cpu.h"
#include "TLibCommon/TComRom.h"
#include "TLibEncoder/TEncCfg.h"
#include "input/input.h"
#include "output/output.h"
#include "common.h"
#include "x265.h"
#include "getopt.h"
#include "PPA/ppa.h"
#include "encoder.h"
#include "TLibCommon/TComYuv.h"
#include "TLibCommon/TComPic.h"
#include "TLibCommon/TComPicYuv.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <fstream>
#include <algorithm>
#include <sys/time.h>
#include "kernel.hip"
//Define this to verify the student intra prediction against the reference version
#define VERIFY
//#define VERBOSE
//Define this to dump all reference results to file (to compare between versions)
//#define DUMP_TO_FILE
//This is the filename where all reference results will be dumped ifdef DUMP_TO_FILE
#define DUMP_FILE "dump.bin"
using namespace x265;
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames);
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames);
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames);
Pel *refAbove1, *refAbove2, *refLeft1, *refLeft2;
Pel* predBuf;
int predBufStride;
int predBufHeight;
TComYuv pred_yuv;
TComYuv orig_yuv;
TComSPS sps;
TComPPS pps;
x265_param *param;
ALIGN_VAR_32(Pel, tmp[33 * 32 * 32]);
ALIGN_VAR_32(Pel, buf_trans[32 * 32]);
// getopt short-option string; a trailing ':' marks options that take an argument.
static const char short_options[] = "o:f:F:r:i:b:s:q:m:hwV";
// Long-option table for getopt_long(). Entries with val == 0 are dispatched by
// name inside CLIOptions::parse(); entries with a letter share that short
// option's handling. NOTE(review): 'p' (preset) and 't' (tune) do not appear in
// short_options, so they are reachable only through their long forms.
static const struct option long_options[] =
{
#if HIGH_BIT_DEPTH
    { "depth", required_argument, NULL, 0 },
#endif
    { "help", no_argument, NULL, 'h' },
    { "version", no_argument, NULL, 'V' },
    { "cpuid", required_argument, NULL, 0 },
    { "threads", required_argument, NULL, 0 },
    { "preset", required_argument, NULL, 'p' },
    { "tune", required_argument, NULL, 't' },
    { "frame-threads", required_argument, NULL, 'F' },
    { "log", required_argument, NULL, 0 },
    { "csv", required_argument, NULL, 0 },
    { "y4m", no_argument, NULL, 0 },
    { "no-progress", no_argument, NULL, 0 },
    { "output", required_argument, NULL, 'o' },
    { "input", required_argument, NULL, 0 },
    { "input-depth", required_argument, NULL, 0 },
    { "input-res", required_argument, NULL, 0 },
    { "input-csp", required_argument, NULL, 0 },
    { "fps", required_argument, NULL, 0 },
    { "frame-skip", required_argument, NULL, 0 },
    { "frames", required_argument, NULL, 'f' },
    { "recon", required_argument, NULL, 'r' },
    { "recon-depth", required_argument, NULL, 0 },
    { "no-wpp", no_argument, NULL, 0 },
    { "wpp", no_argument, NULL, 0 },
    { "ctu", required_argument, NULL, 's' },
    { "tu-intra-depth", required_argument, NULL, 0 },
    { "tu-inter-depth", required_argument, NULL, 0 },
    { "me", required_argument, NULL, 0 },
    { "subme", required_argument, NULL, 'm' },
    { "merange", required_argument, NULL, 0 },
    { "max-merge", required_argument, NULL, 0 },
    { "rdpenalty", required_argument, NULL, 0 },
    { "no-rect", no_argument, NULL, 0 },
    { "rect", no_argument, NULL, 0 },
    { "no-amp", no_argument, NULL, 0 },
    { "amp", no_argument, NULL, 0 },
    { "no-early-skip", no_argument, NULL, 0 },
    { "early-skip", no_argument, NULL, 0 },
    { "no-fast-cbf", no_argument, NULL, 0 },
    { "fast-cbf", no_argument, NULL, 0 },
    { "no-tskip", no_argument, NULL, 0 },
    { "tskip", no_argument, NULL, 0 },
    { "no-tskip-fast", no_argument, NULL, 0 },
    { "tskip-fast", no_argument, NULL, 0 },
    { "no-constrained-intra", no_argument, NULL, 0 },
    { "constrained-intra", no_argument, NULL, 0 },
    { "refresh", required_argument, NULL, 0 },
    { "keyint", required_argument, NULL, 'i' },
    { "rc-lookahead", required_argument, NULL, 0 },
    { "bframes", required_argument, NULL, 'b' },
    { "bframe-bias", required_argument, NULL, 0 },
    { "b-adapt", required_argument, NULL, 0 },
    { "no-b-pyramid", no_argument, NULL, 0 },
    { "b-pyramid", no_argument, NULL, 0 },
    { "ref", required_argument, NULL, 0 },
    { "no-weightp", no_argument, NULL, 0 },
    { "weightp", no_argument, NULL, 'w' },
    { "crf", required_argument, NULL, 0 },
    { "vbv-maxrate", required_argument, NULL, 0 },
    { "vbv-bufsize", required_argument, NULL, 0 },
    { "vbv-init", required_argument, NULL, 0 },
    { "bitrate", required_argument, NULL, 0 },
    { "qp", required_argument, NULL, 'q' },
    { "aq-mode", required_argument, NULL, 0 },
    { "aq-strength", required_argument, NULL, 0 },
    { "cbqpoffs", required_argument, NULL, 0 },
    { "crqpoffs", required_argument, NULL, 0 },
    { "rd", required_argument, NULL, 0 },
    { "no-signhide", no_argument, NULL, 0 },
    { "signhide", no_argument, NULL, 0 },
    { "no-lft", no_argument, NULL, 0 },
    { "lft", no_argument, NULL, 0 },
    { "no-sao", no_argument, NULL, 0 },
    { "sao", no_argument, NULL, 0 },
    { "sao-lcu-bounds", required_argument, NULL, 0 },
    { "sao-lcu-opt", required_argument, NULL, 0 },
    { "no-ssim", no_argument, NULL, 0 },
    { "ssim", no_argument, NULL, 0 },
    { "no-psnr", no_argument, NULL, 0 },
    { "psnr", no_argument, NULL, 0 },
    { "hash", required_argument, NULL, 0 },
    { "no-strong-intra-smoothing", no_argument, NULL, 0 },
    { "strong-intra-smoothing", no_argument, NULL, 0 },
    { 0, 0, 0, 0 } // getopt_long terminator
};
// Command-line state for the encoder driver: parsed options, open input/output
// handles, and progress/byte counters. parse() populates it; destroy() releases it.
struct CLIOptions
{
    Input* input;               // raw YUV / Y4M reader (NULL until parse() opens it)
    Output* recon;              // optional reconstructed-frame writer (may stay NULL)
    std::fstream bitstreamFile; // output HEVC bitstream
    bool bProgress;             // emit CLI progress reports
    bool bForceY4m;             // treat input as Y4M regardless of extension (--y4m)
    uint32_t totalbytes;        // running count of bitstream bytes written
    uint32_t frameSkip; // number of frames to skip from the beginning
    uint32_t framesToBeEncoded; // number of frames to encode
    int64_t startTime;          // wall-clock time at construction
    int64_t prevUpdateTime;     // timestamp of the last progress update
    /* in microseconds */
    static const int UPDATE_INTERVAL = 250000;

    // Initialize every member to a safe default and record the start time.
    CLIOptions()
    {
        input = NULL;
        recon = NULL;
        framesToBeEncoded = frameSkip = totalbytes = 0;
        bProgress = true;
        bForceY4m = false;
        startTime = x265_mdate();
        prevUpdateTime = 0;
    }

    void destroy();                                         // release input/recon handles
    void writeNALs(const x265_nal* nal, uint32_t nalcount); // append NAL payloads to bitstreamFile
    void printVersion(x265_param *par);                     // print version/build info to stderr
    void showHelp(x265_param *par);                         // print usage text and exit(0)
    bool parse(int argc, char **argv, x265_param* par);     // returns true on error
};
// Release the input reader and the reconstruction writer, if they were opened,
// and reset both handles to NULL so destroy() is safe to call more than once.
void CLIOptions::destroy()
{
    if (input)
    {
        input->release();
    }
    input = NULL;

    if (recon)
    {
        recon->release();
    }
    recon = NULL;
}
void CLIOptions::writeNALs(const x265_nal* nal, uint32_t nalcount)
{
PPAScopeEvent(bitstream_write);
for (uint32_t i = 0; i < nalcount; i++)
{
bitstreamFile.write((const char*)nal->payload, nal->sizeBytes);
totalbytes += nal->sizeBytes;
nal++;
}
}
// Print the encoder version and build information to stderr, then let
// x265_setup_primitives() report the primitive (SIMD) configuration for par.
void CLIOptions::printVersion(x265_param *par)
{
    const char *versionText = x265_version_str;
    const char *buildText = x265_build_info_str;
    fprintf(stderr, "x265 [info]: HEVC encoder version %s\n", versionText);
    fprintf(stderr, "x265 [info]: build info %s\n", buildText);
    x265_setup_primitives(par, -1);
}
// Print the full command-line usage text to stdout and exit(0).
// Resets *par to library defaults first so the printed default values are accurate.
void CLIOptions::showHelp(x265_param *par)
{
    x265_param_default(par);
    printVersion(par);
// H0 prints a help line; OPT renders a boolean default as "enabled"/"disabled".
#define H0 printf
#define OPT(value) (value ? "enabled" : "disabled")
    H0("\nSyntax: x265 [options] infile [-o] outfile\n");
    H0("    infile can be YUV or Y4M\n");
    H0("    outfile is raw HEVC bitstream\n");
    H0("\nExecutable Options:\n");
    H0("-h/--h                           Show this help text and exit\n");
    H0("-V/--version                     Show version info and exit\n");
    H0("   --cpuid                       Limit SIMD capability bitmap 0:auto 1:None. Default:0\n");
    H0("   --threads                     Number of threads for thread pool (0: detect CPU core count, default)\n");
    H0("-p/--preset                      ultrafast, veryfast, faster, fast, medium, slow, slower, veryslow, or placebo\n");
    H0("-t/--tune                        Tune the settings for a particular type of source or situation\n");
    H0("-F/--frame-threads               Number of concurrently encoded frames. Default %d\n", par->frameNumThreads);
    H0("   --log                         Logging level 0:ERROR 1:WARNING 2:INFO 3:DEBUG -1:NONE. Default %d\n", par->logLevel);
    H0("   --csv                         Comma separated log file, log level >= 3 frame log, else one line per run\n");
    H0("   --y4m                         Parse input stream as YUV4MPEG2 regardless of file extension\n");
    H0("   --no-progress                 Disable CLI progress reports\n");
    H0("-o/--output                      Bitstream output file name\n");
    H0("\nInput Options:\n");
    H0("   --input                       Raw YUV or Y4M input file name\n");
    H0("   --input-depth                 Bit-depth of input file (YUV only) Default %d\n", par->inputBitDepth);
    H0("   --input-res                   Source picture size [w x h], auto-detected if Y4M\n");
    H0("   --input-csp                   Source color space parameter, auto-detected if Y4M\n");
    H0("   --fps                         Source frame rate, auto-detected if Y4M\n");
    H0("   --frame-skip                  Number of frames to skip at start of input file\n");
    H0("-f/--frames                      Number of frames to be encoded. Default all\n");
    H0("\nQuad-Tree analysis:\n");
    H0("   --[no-]wpp                    Enable Wavefront Parallel Processing. Default %s\n", OPT(par->bEnableWavefront));
    H0("-s/--ctu                         Maximum CU size. Default %dx%d\n", par->maxCUSize, par->maxCUSize);
    H0("   --tu-intra-depth              Max TU recursive depth for intra CUs. Default %d\n", par->tuQTMaxIntraDepth);
    H0("   --tu-inter-depth              Max TU recursive depth for inter CUs. Default %d\n", par->tuQTMaxInterDepth);
    H0("\nTemporal / motion search options:\n");
    H0("   --me                          Motion search method 0:dia 1:hex 2:umh 3:star 4:full. Default %d\n", par->searchMethod);
    H0("-m/--subme                       Amount of subpel refinement to perform (0:least .. 7:most). Default %d \n", par->subpelRefine);
    H0("   --merange                     Motion search range. Default %d\n", par->searchRange);
    H0("   --[no-]rect                   Enable rectangular motion partitions Nx2N and 2NxN. Default %s\n", OPT(par->bEnableRectInter));
    H0("   --[no-]amp                    Enable asymmetric motion partitions, requires --rect. Default %s\n", OPT(par->bEnableAMP));
    H0("   --max-merge                   Maximum number of merge candidates. Default %d\n", par->maxNumMergeCand);
    H0("   --[no-]early-skip             Enable early SKIP detection. Default %s\n", OPT(par->bEnableEarlySkip));
    H0("   --[no-]fast-cbf               Enable Cbf fast mode \n \t\t\t\t Default : %s\n", OPT(par->bEnableCbfFastMode));
    H0("\nSpatial / intra options:\n");
    H0("   --rdpenalty                   penalty for 32x32 intra TU in non-I slices. 0:disabled 1:RD-penalty 2:maximum. Default %d\n", par->rdPenalty);
    H0("   --[no-]tskip                  Enable intra transform skipping. Default %s\n", OPT(par->bEnableTransformSkip));
    H0("   --[no-]tskip-fast             Enable fast intra transform skipping. Default %s\n", OPT(par->bEnableTSkipFast));
    H0("   --[no-]strong-intra-smoothing Enable strong intra smoothing for 32x32 blocks. Default %s\n", OPT(par->bEnableStrongIntraSmoothing));
    H0("   --[no-]constrained-intra      Constrained intra prediction (use only intra coded reference pixels) Default %s\n", OPT(par->bEnableConstrainedIntra));
    H0("\nSlice decision options:\n");
    H0("   --refresh                     Intra refresh type - 0:none, 1:CDR, 2:IDR (default: CDR) Default %d\n", par->decodingRefreshType);
    H0("-i/--keyint                      Max intra period in frames. Default %d\n", par->keyframeMax);
    H0("   --rc-lookahead                Number of frames for frame-type lookahead (determines encoder latency) Default %d\n", par->lookaheadDepth);
    H0("   --bframes                     Maximum number of consecutive b-frames (now it only enables B GOP structure) Default %d\n", par->bframes);
    H0("   --bframe-bias                 Bias towards B frame decisions. Default %d\n", par->bFrameBias);
    H0("   --b-adapt                     0 - none, 1 - fast, 2 - full (trellis) adaptive B frame scheduling. Default %d\n", par->bFrameAdaptive);
    H0("   --[no-]b-pyramid              Use B-frames as references. Default %s\n", OPT(par->bBPyramid));
    H0("   --ref                         max number of L0 references to be allowed (1 .. 16) Default %d\n", par->maxNumReferences);
    H0("-w/--[no-]weightp                Enable weighted prediction in P slices. Default %s\n", OPT(par->bEnableWeightedPred));
    H0("\nQP, rate control and rate distortion options:\n");
    H0("   --bitrate                     Target bitrate (kbps), implies ABR. Default %d\n", par->rc.bitrate);
    H0("   --crf                         Quality-based VBR (0-51). Default %f\n", par->rc.rfConstant);
    H0("   --vbv-maxrate                 Max local bitrate (kbit/s). Default %d\n", par->rc.vbvMaxBitrate);
    H0("   --vbv-bufsize                 Set size of the VBV buffer (kbit). Default %d\n", par->rc.vbvBufferSize);
    H0("   --vbv-init                    Initial VBV buffer occupancy. Default %f\n", par->rc.vbvBufferInit);
    H0("-q/--qp                          Base QP for CQP mode. Default %d\n", par->rc.qp);
    H0("   --aq-mode                     Mode for Adaptive Quantization - 0:none 1:aqVariance Default %d\n", par->rc.aqMode);
    H0("   --aq-strength                 Reduces blocking and blurring in flat and textured areas.(0 to 3.0)<double> . Default %f\n", par->rc.aqStrength);
    H0("   --cbqpoffs                    Chroma Cb QP Offset. Default %d\n", par->cbQpOffset);
    H0("   --crqpoffs                    Chroma Cr QP Offset. Default %d\n", par->crQpOffset);
    H0("   --rd                          Level of RD in mode decision 0:least....2:full RDO. Default %d\n", par->rdLevel);
    H0("   --[no-]signhide               Hide sign bit of one coeff per TU (rdo). Default %s\n", OPT(par->bEnableSignHiding));
    H0("\nLoop filter:\n");
    H0("   --[no-]lft                    Enable Loop Filter. Default %s\n", OPT(par->bEnableLoopFilter));
    H0("\nSample Adaptive Offset loop filter:\n");
    H0("   --[no-]sao                    Enable Sample Adaptive Offset. Default %s\n", OPT(par->bEnableSAO));
    H0("   --sao-lcu-bounds              0: right/bottom boundary areas skipped 1: non-deblocked pixels are used. Default %d\n", par->saoLcuBoundary);
    H0("   --sao-lcu-opt                 0: SAO picture-based optimization, 1: SAO LCU-based optimization. Default %d\n", par->saoLcuBasedOptimization);
    H0("\nQuality reporting metrics:\n");
    H0("   --[no-]ssim                   Enable reporting SSIM metric scores. Default %s\n", OPT(par->bEnableSsim));
    H0("   --[no-]psnr                   Enable reporting PSNR metric scores. Default %s\n", OPT(par->bEnablePsnr));
    H0("\nReconstructed video options (debugging):\n");
    H0("-r/--recon                       Reconstructed raw image YUV or Y4M output file name\n");
    H0("   --recon-depth                 Bit-depth of reconstructed raw image file. Default 8\n");
    H0("\nSEI options:\n");
    H0("   --hash                        Decoded Picture Hash SEI 0: disabled, 1: MD5, 2: CRC, 3: Checksum. Default %d\n", par->decodedPictureHashSEI);
#undef OPT
#undef H0
    exit(0);
}
// Parse command-line arguments into *par, open the input reader, the optional
// reconstruction writer, and the output bitstream file.
// Returns true on any error (caller should clean up and exit), false on success.
bool CLIOptions::parse(int argc, char **argv, x265_param* par)
{
    int berror = 0;
    // NOTE(review): 'help' is never set after initialization, so the later
    // "argc <= 1 || help" check effectively depends on argc only.
    int help = 0;
    int cpuid = 0;
    int reconFileBitDepth = 0;
    const char *inputfn = NULL;
    const char *reconfn = NULL;
    const char *bitstreamfn = NULL;
    const char *inputRes = NULL;
    const char *preset = "medium";
    const char *tune = "psnr";

    /* Presets are applied before all other options. */
    // First pass over argv: pick up only --preset/--tune so they can seed defaults.
    for (optind = 0;; )
    {
        int c = getopt_long(argc, argv, short_options, long_options, NULL);
        if (c == -1)
            break;
        if (c == 'p')
            preset = optarg;
        // NOTE(review): this 'if' was probably intended to be 'else if'; behavior
        // is unchanged since c can only match one of 'p'/'t'/'?' per iteration.
        if (c == 't')
            tune = optarg;
        else if (c == '?')
            return true;
    }

    // NOTE(review): this uses the global 'param' rather than the 'par' argument;
    // it is correct only because main() passes the same global in. Confirm before
    // reusing this function with a different x265_param.
    if (x265_param_default_preset(param, preset, tune) < 0)
    {
        x265_log(NULL, X265_LOG_WARNING, "preset or tune unrecognized\n");
        return true;
    }
    //MRJ Set max CU size to 32x32 so that frames are padded in Encoder::configure() to a multiple of 4x4, not a multiple of 8x8.
    par->maxCUSize = 32;

    // Second pass: handle every option for real.
    for (optind = 0;; )
    {
        int long_options_index = -1;
        int c = getopt_long(argc, argv, short_options, long_options, &long_options_index);
        if (c == -1)
        {
            break;
        }
        switch (c)
        {
        case 'h':
            showHelp(par); // does not return (calls exit(0))
            break;
        case 'V':
            printVersion(par);
            exit(0);
        default:
            // Short option: map it back to its long-option table entry so the
            // name-based dispatch below can handle both spellings uniformly.
            if (long_options_index < 0 && c > 0)
            {
                for (size_t i = 0; i < sizeof(long_options) / sizeof(long_options[0]); i++)
                {
                    if (long_options[i].val == c)
                    {
                        long_options_index = (int)i;
                        break;
                    }
                }

                if (long_options_index < 0)
                {
                    /* getopt_long might have already printed an error message */
                    if (c != 63) // 63 == '?' (getopt's "unknown option" code)
                        x265_log(NULL, X265_LOG_WARNING, "internal error: short option '%c' has no long option\n", c);
                    return true;
                }
            }
            if (long_options_index < 0)
            {
                x265_log(NULL, X265_LOG_WARNING, "short option '%c' unrecognized\n", c);
                return true;
            }
// OPT() chains an else-if comparing the matched long-option name; the "if (0) ;"
// below anchors the chain. Options not listed fall through to x265_param_parse().
#define OPT(longname) \
    else if (!strcmp(long_options[long_options_index].name, longname))
            if (0) ;
            OPT("cpuid") cpuid = atoi(optarg);
            OPT("frames") this->framesToBeEncoded = (uint32_t)atoi(optarg);
            OPT("preset") preset = optarg;
            OPT("tune") tune = optarg;
            OPT("no-progress") this->bProgress = false;
            OPT("frame-skip") this->frameSkip = (uint32_t)atoi(optarg);
            OPT("output") bitstreamfn = optarg;
            OPT("input") inputfn = optarg;
            OPT("recon") reconfn = optarg;
            OPT("input-depth") par->inputBitDepth = (uint32_t)atoi(optarg);
            OPT("recon-depth") reconFileBitDepth = (uint32_t)atoi(optarg);
            OPT("input-res") inputRes = optarg;
            OPT("y4m") bForceY4m = true;
            else
                berror |= x265_param_parse(par, long_options[long_options_index].name, optarg);

            if (berror)
            {
                // NOTE(review): 'long_options_index > 0' looks like it should be
                // '>= 0'; index 0 is a valid table entry.
                const char *name = long_options_index > 0 ? long_options[long_options_index].name : argv[optind - 2];
                x265_log(NULL, X265_LOG_ERROR, "invalid argument: %s = %s\n", name, optarg);
                return true;
            }
#undef OPT
        }
    }

    // Positional arguments: first is the input file, second the output bitstream.
    if (optind < argc && !inputfn)
        inputfn = argv[optind++];
    if (optind < argc && !bitstreamfn)
        bitstreamfn = argv[optind++];
    if (optind < argc)
    {
        x265_log(par, X265_LOG_WARNING, "extra unused command arguments given <%s>\n", argv[optind]);
        return true;
    }

    if (argc <= 1 || help)
        showHelp(par);

    if (inputfn == NULL || bitstreamfn == NULL)
    {
        x265_log(par, X265_LOG_ERROR, "input or output file not specified, try -V for help\n");
        return true;
    }
    this->input = Input::open(inputfn, par->inputBitDepth, bForceY4m);
    if (!this->input || this->input->isFail())
    {
        x265_log(par, X265_LOG_ERROR, "unable to open input file <%s>\n", inputfn);
        return true;
    }
    if (this->input->getWidth())
    {
        /* parse the width, height, frame rate from the y4m file */
        par->internalCsp = this->input->getColorSpace();
        par->sourceWidth = this->input->getWidth();
        par->sourceHeight = this->input->getHeight();
        par->frameRate = (int)this->input->getRate();
    }
    else if (inputRes)
    {
        // Raw YUV with explicit --input-res: push the CLI geometry into the reader.
        this->input->setColorSpace(par->internalCsp);
        sscanf(inputRes, "%dx%d", &par->sourceWidth, &par->sourceHeight);
        this->input->setDimensions(par->sourceWidth, par->sourceHeight);
        this->input->setBitDepth(par->inputBitDepth);
    }
    else if (par->sourceHeight <= 0 || par->sourceWidth <= 0 || par->frameRate <= 0)
    {
        x265_log(par, X265_LOG_ERROR, "YUV input requires source width, height, and rate to be specified\n");
        return true;
    }
    else
    {
        this->input->setDimensions(par->sourceWidth, par->sourceHeight);
        this->input->setBitDepth(par->inputBitDepth);
    }

    // Clamp the frame count to what the file actually contains (when known).
    int guess = this->input->guessFrameCount();
    if (this->frameSkip)
    {
        this->input->skipFrames(this->frameSkip);
    }

    uint32_t fileFrameCount = guess < 0 ? 0 : (uint32_t)guess;
    if (this->framesToBeEncoded && fileFrameCount)
        this->framesToBeEncoded = X265_MIN(this->framesToBeEncoded, fileFrameCount - this->frameSkip);
    else if (fileFrameCount)
        this->framesToBeEncoded = fileFrameCount - this->frameSkip;

    if (par->logLevel >= X265_LOG_INFO)
    {
        if (this->framesToBeEncoded == 0)
            fprintf(stderr, "%s  [info]: %dx%d %dHz %s, unknown frame count\n", input->getName(),
                    par->sourceWidth, par->sourceHeight, par->frameRate,
                    (par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420");
        else
            fprintf(stderr, "%s  [info]: %dx%d %dHz %s, frames %u - %d of %d\n", input->getName(),
                    par->sourceWidth, par->sourceHeight, par->frameRate,
                    (par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420",
                    this->frameSkip, this->frameSkip + this->framesToBeEncoded - 1, fileFrameCount);
    }

    this->input->startReader();

    if (reconfn)
    {
        if (reconFileBitDepth == 0)
            reconFileBitDepth = par->inputBitDepth;
        this->recon = Output::open(reconfn, par->sourceWidth, par->sourceHeight, reconFileBitDepth, par->frameRate, par->internalCsp);
        if (this->recon->isFail())
        {
            x265_log(par, X265_LOG_WARNING, "unable to write reconstruction file\n");
            this->recon->release();
            this->recon = 0;
        }
    }

#if HIGH_BIT_DEPTH
    if (par->inputBitDepth != 12 && par->inputBitDepth != 10 && par->inputBitDepth != 8)
    {
        x265_log(par, X265_LOG_ERROR, "Only bit depths of 8, 10, or 12 are supported\n");
        return true;
    }
#else
    if (par->inputBitDepth != 8)
    {
        x265_log(par, X265_LOG_ERROR, "not compiled for bit depths greater than 8\n");
        return true;
    }
#endif // if HIGH_BIT_DEPTH

    this->bitstreamFile.open(bitstreamfn, std::fstream::binary | std::fstream::out);
    if (!this->bitstreamFile)
    {
        x265_log(NULL, X265_LOG_ERROR, "failed to open bitstream file <%s> for writing\n", bitstreamfn);
        return true;
    }

    x265_setup_primitives(par, cpuid);
    printVersion(par);
    return false;
}
// Driver: parse options, configure a minimal TEncCfg (padding to a multiple of
// 4 rather than the encoder's usual CU-derived padding), then for each input
// frame run the reference intra prediction and the student implementation,
// optionally comparing and/or dumping the reference results.
int main(int argc, char *argv[])
{
    CLIOptions cliopt;
    param = x265_param_alloc();
    struct timeval timer;
    gettimeofday(&timer,NULL);
    printf("START TIMER IN SERIAL CODE in secs :%ld\n",timer.tv_sec);
    printf("START TIMER in SERIAL CODE in usecs:%ld\n",timer.tv_usec);
    if (cliopt.parse(argc, argv, param))
    {
        cliopt.destroy();
        exit(1);
    }
    param->bEnableStrongIntraSmoothing = false; //No strong intra smoothing for competition
    TEncCfg *encoder = new TEncCfg();
    if (!encoder)
    {
        x265_log(param, X265_LOG_ERROR, "failed to open encoder\n");
        cliopt.destroy();
        x265_cleanup();
        exit(1);
    }

    // save a copy of final parameters in TEncCfg
    memcpy(&encoder->param, param, sizeof(*param));

    encoder->m_pad[0] = encoder->m_pad[1] = 0;
    //MRJ the above (original) line always computes 8, let's set it to 4 instead to get the correct padding.
    // NOTE(review): despite the name, minCUDepth holds a pixel size (4), not a depth.
    uint32_t minCUDepth = 4;
    // Pad width up to the next multiple of 4 and record the conformance window.
    if ((param->sourceWidth % minCUDepth) != 0)
    {
        uint32_t padsize = 0;
        uint32_t rem = param->sourceWidth % minCUDepth;
        padsize = minCUDepth - rem;
        param->sourceWidth += padsize;
        encoder->m_pad[0] = padsize; //pad width

        /* set the confirmation window offsets  */
        encoder->m_conformanceWindow.m_enabledFlag = true;
        encoder->m_conformanceWindow.m_winRightOffset = encoder->m_pad[0];
    }

    //======== set pad size if height is not multiple of the minimum CU size =========
    if ((param->sourceHeight % minCUDepth) != 0)
    {
        uint32_t padsize = 0;
        uint32_t rem = param->sourceHeight % minCUDepth;
        padsize = minCUDepth - rem;
        param->sourceHeight += padsize;
        encoder->m_pad[1] = padsize; //pad height

        /* set the confirmation window offsets  */
        encoder->m_conformanceWindow.m_enabledFlag = true;
        encoder->m_conformanceWindow.m_winBottomOffset = encoder->m_pad[1];
    }

    //Encoder *encoder_c = static_cast<Encoder*>(encoder);

    //Initialize arrays for storing neighboring pixel values
    refAbove1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
    refAbove2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
    refLeft1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
    refLeft2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);

    //Save globals so we can restore them at the end
    //We need to restore the original values before destroy()ing data structures because many of the destroy() functions
    //use these globals to determine the size of their arrays
    int g_maxCUDepth_bak = g_maxCUDepth;
    int g_addCUDepth_bak = g_addCUDepth;
    int g_maxCUWidth_bak = g_maxCUWidth;
    int g_maxCUHeight_bak = g_maxCUHeight;
    g_maxCUDepth = 0; //Disallow recursion to decompose frames into a regular grid of equal size CUs.
    g_addCUDepth = 0;
    //NOTE: has to be after x265_encoder_open() call, since that calls x265_set_globals(), which resets g_maxCUDepth.

    x265_picture pic_orig;
    x265_picture *pic_in = &pic_orig;
    x265_picture_init(param, pic_in);
    uint32_t inFrameCount = 0;

    //Several pieces of the reference code assume 4:2:0 subsampling, so assert that here
    if(param->internalCsp != X265_CSP_I420) {
        fprintf(stderr, "Error: Input must use i420 colorspace (4:2:0 subsampling)\n");
        exit(1);
    }

#ifdef DUMP_TO_FILE
    FILE *f = fopen(DUMP_FILE, "wb");
    if(!f) {
        fprintf(stderr, "Error opening dump file (" DUMP_FILE ")\n");
        exit(1);
    }
#endif

    // Per-frame loop: read a picture, run reference and student predictors,
    // (optionally) compare, then free the per-frame result arrays.
    while (1)
    {
        pic_orig.poc = inFrameCount;
        if (cliopt.framesToBeEncoded && inFrameCount >= cliopt.framesToBeEncoded)
            break;
        else if (cliopt.input->readPicture(pic_orig))
            inFrameCount++;
        else
            break;

        ece408_intra_pred_result *ref = ece408_competition_ref(encoder, pic_in, 1);
#ifdef DUMP_TO_FILE
        ref[0].write_to_file(f);
#endif
        ece408_frame frame(param->sourceWidth, param->sourceHeight, pic_in);
        //Uncomment this one to run the student version
        ece408_intra_pred_result *student = ece408_competition(&frame, 1);
        //Uncomment this one instead to run the reference version twice (to test the compare function)
        //ece408_intra_pred_result *student = ece408_competition_ref(encoder, pic_in, 1);
        gettimeofday(&timer,NULL);
        printf("IN SERIAL CODE END TIMER in secs :%ld\n",timer.tv_sec);
        printf("IN SERIAL CODE END TIMER in usecs:%ld\n",timer.tv_usec);
#ifdef VERIFY
        if(!ece408_compare(ref, student, 1)) {
            printf("Error in frame %d\n", inFrameCount);
            exit(1);
        }
#endif
        // 4 results per frame (4x4, 8x8, 16x16, 32x32), 1 frame per batch here.
        for(int i = 0; i < 4*1; i++) {
            ref[i].destroy();
            student[i].destroy();
        }
        delete[] ref;
        delete[] student;
    }

#ifdef DUMP_TO_FILE
    fclose(f);
#endif
#ifdef VERIFY
    printf("Success!\n");
#endif

    //Restore globals
    g_maxCUDepth = g_maxCUDepth_bak;
    g_addCUDepth = g_addCUDepth_bak;
    g_maxCUWidth = g_maxCUWidth_bak;
    g_maxCUHeight = g_maxCUHeight_bak;

    delete encoder;
    X265_FREE(refAbove1);
    X265_FREE(refAbove2);
    X265_FREE(refLeft1);
    X265_FREE(refLeft2);
    orig_yuv.destroy();
    pred_yuv.destroy();
    x265_cleanup(); /* Free library singletons */
    cliopt.destroy();
    x265_param_free(param);
    //gettimeofday(&timer,NULL);
    //printf("IN SERIAL CODE END TIMER in secs :%ld\n",timer.tv_sec);
    //printf("IN SERIAL CODE END TIMER in usecs:%ld\n",timer.tv_usec);
    return 0;
}
//channel = 0 for luma, 1 for cb, 2 for cr
// Computes the SA8D (SATD-like) cost of all 35 intra prediction modes for one
// channel of the current block (already copied into orig_yuv, with reference
// samples staged in refAbove*/refLeft* by getReferencePixels). Writes 35
// costs to sad_ptr in mode order: planar (0), DC (1), angular 2..34.
void ece408_intra_pred_channel(int luma_size, int channel, int32_t *sad_ptr) {
//#define VERBOSE
#ifdef VERBOSE
    printf("refAbove1: ");
    for(int i = 0; i < 32*3; i++)
        printf("%d ", refAbove1[i]);
    printf("\n");
    printf("refAbove2: ");
    for(int i = 0; i < 32*3; i++)
        printf("%d ", refAbove2[i]);
    printf("\n");
    printf("refLeft1: ");
    for(int i = 0; i < 32*3; i++)
        printf("%d ", refLeft1[i]);
    printf("\n");
    printf("refLeft2: ");
    for(int i = 0; i < 32*3; i++)
        printf("%d ", refLeft2[i]);
    printf("\n");
#endif
    int chroma_size = luma_size >> 1; // 4:2:0 chroma is half the luma dimension
    bool luma = (channel == 0);
    bool cb = (channel == 1);
    bool cr = (channel == 2);
    int size = luma ? luma_size : chroma_size;
    Pel* orig_pel = luma ? orig_yuv.getLumaAddr(0, size) : (cb ? orig_yuv.getCbAddr(0, size) : orig_yuv.getCrAddr(0, size));
    Pel* pred_pel = luma ? pred_yuv.getLumaAddr(0, size) : (cb ? pred_yuv.getCbAddr(0, size) : pred_yuv.getCrAddr(0, size));
    uint32_t stride = luma ? pred_yuv.getStride() : pred_yuv.getCStride();
    // Reference-sample pointers: for luma the *1 arrays hold unfiltered and the
    // *2 arrays hold filtered samples; for chroma, Cb lives in *1 and Cr in *2
    // (both unfiltered). The "+ size - 1" skips the slack getReferencePixels
    // left at the front of the array for angular-mode extension.
    Pel *pAboveUnfilt = (cr ? refAbove2 : refAbove1) + size - 1;
    Pel *pAboveFilt = luma ? (refAbove2 + size - 1) : pAboveUnfilt;
    Pel *pLeftUnfilt = (cr ? refLeft2 : refLeft1) + size - 1;
    Pel *pLeftFilt = luma ? (refLeft2 + size - 1) : pLeftUnfilt;
    int nLog2SizeMinus2 = g_convertToBit[size];
    pixelcmp_t sa8d = primitives.sa8d[nLog2SizeMinus2];
#ifdef VERBOSE
    printf("Channel %d Orig:\n", channel);
    for(int row = 0; row < size; row++) {
        for(int col = 0; col < size; col++) {
            printf("%02X ", orig_pel[row*size + col]);
        }
        printf("\n");
    }
#endif
    int sad;
    // Planar (mode 0): luma 8x8 and larger uses filtered reference samples.
    Pel *above = (luma && size >= 8) ? pAboveFilt : pAboveUnfilt;
    Pel *left = (luma && size >= 8) ? pLeftFilt : pLeftUnfilt;
    //TODO check to make sure we're filtering in all the right conditions
    primitives.intra_pred[nLog2SizeMinus2][0](pred_pel, stride, left, above, /*dummy dirMode argument*/ 0, /*dummy filter argument*/ 0);
    sad = sa8d(orig_pel, stride, pred_pel, stride);
    *(sad_ptr++) = sad;
#ifdef VERBOSE
    printf("Planar SATD = %d\n", sad);
#endif
    //TODO check to make sure we're filtering in all the right conditions
    //DC (mode 1)
    primitives.intra_pred[nLog2SizeMinus2][1](pred_pel, stride, pLeftUnfilt, pAboveUnfilt, /*dummy dirMode argument*/ 1, (luma && size <= 16));
    sad = sa8d(orig_pel, stride, pred_pel, stride);
    *(sad_ptr++) = sad;
#ifdef VERBOSE
    printf("Size = %d, stride = %d, DC:\n", size, stride);
    for(int row = 0; row < size; row++) {
        for(int col = 0; col < size; col++) {
            printf("%02X ", pred_pel[row*size+col]);
        }
        printf("\n");
    }
    printf("SATD = %d\n", sad);
#endif
    // Angular modes 2..34: predict all 33 angles at once into tmp. Horizontal-ish
    // modes (< 18) are produced transposed, so compare them against the
    // transposed original (buf_trans) instead.
    primitives.transpose[nLog2SizeMinus2](buf_trans, orig_pel, stride);
    //TODO check to make sure we're filtering in all the right conditions
    primitives.intra_pred_allangs[nLog2SizeMinus2](tmp, pAboveUnfilt, pLeftUnfilt, pAboveFilt, pLeftFilt, (luma && (size <= 16)));
#ifdef VERBOSE
    printf("Angular SATD = ", channel);
#endif
    for (int mode = 2; mode < 35; mode++)
    {
        bool modeHor = (mode < 18);
        Pel *cmp = (modeHor ? buf_trans : orig_pel);
        intptr_t srcStride = (modeHor ? size : stride);
#ifdef VERBOSE
        printf("Pred mode %d\n", mode);
        for(int r = 0; r < size; r++) {
            for(int c = 0; c < size; c++)
                printf("%02X ", tmp[(mode-2) * (size * size) + r * size + c]);
            printf("\n");
        }
#endif
        sad = sa8d(cmp, srcStride, &tmp[(mode - 2) * (size * size)], size);
        *(sad_ptr++) = sad;
#ifdef VERBOSE
        printf("%d, ", sad);
#endif
    }
#ifdef VERBOSE
    printf("\n");
#endif
}
//#undef VERBOSE
// True when pixel coordinate (r, c) falls inside a frameWidth x frameHeight frame.
inline bool isAvailable(int frameWidth, int frameHeight, int r, int c) {
    if (r < 0 || c < 0)
        return false;
    return r < frameHeight && c < frameWidth;
}
//Channel is 0 for luma, 1 for Cb, 2 for Cr
// Gather the 2N+1 above and 2N+1 left neighboring reference samples for CU
// number cu_index (row-major over an (width/N) x (height/N) grid of NxN CUs,
// N = cuWidth). Unavailable samples are substituted following the H.265
// reference-sample substitution order (bottom-left upward, then left to right
// along the top); refAbove[0] and refLeft[0] both hold the top-left corner.
// For luma (channel 0) a [1 2 1]-filtered copy is also written to
// refAboveFlt/refLeftFlt (callers pass NULL for chroma).
void getReferencePixels(x265_picture *pic, unsigned int width, unsigned int height, unsigned int luma_size, unsigned int cu_index, Pel* refAbove, Pel* refLeft, Pel* refAboveFlt, Pel* refLeftFlt, int channel) {
    uint32_t cuWidth = (channel == 0) ? luma_size : (luma_size / 2);
    uint32_t cuWidth2 = cuWidth << 1; // reference run length: twice the CU width
    uint32_t frameWidth = (channel == 0) ? width : (width / 2);
    uint32_t frameHeight = (channel == 0) ? height : (height / 2);
    uint32_t frameStride = pic->stride[channel];
    //Base address of the array containing the required color component of the reconstructed image (equivalent to the original image for the ECE408 competition)
    Pel *baseAddress = (Pel *)pic->planes[channel];
    int32_t topLeftR = (cu_index / (frameWidth / cuWidth)) * cuWidth;
    int32_t topLeftC = (cu_index % (frameWidth / cuWidth)) * cuWidth;
    //Find value for bottom-left neighbor
    //Search left from bottom to top
    bool bottomLeftFound = false;
    for(int32_t neighborR = (topLeftR + cuWidth2 - 1), neighborC = (topLeftC - 1); neighborR >= (topLeftR - 1); neighborR--)
        if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
            bottomLeftFound = true;
            refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
            //printf("Bottom left found on left (%d, %d) %d\n", neighborR, neighborC, refLeft[cuWidth2+1]);
            break;
        }
    //If not found, search top from left to right
    if(!bottomLeftFound) {
        for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++) {
            if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
                bottomLeftFound = true;
                refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
                //printf("Bottom left found on top (%d, %d) %d \n", neighborR, neighborC, refLeft[cuWidth2+1]);
                break;
            }
        }
    }
    //If still not found, no reference samples are available, so assign 50% value to all neighbors
    if(!bottomLeftFound) {
        refLeft[cuWidth2] = 1 << (BIT_DEPTH - 1);
        //printf("Bottom left not found, using DC value %d\n", refLeft[cuWidth2]);
    }
    //Traverse bottom-left to top-left to top-right. If a pixel is not available, use the one before it (one below or to the left)
    for(int32_t neighborR = (topLeftR + cuWidth2 - 2), neighborC = (topLeftC - 1), idx = cuWidth2 - 1; neighborR >= (topLeftR - 1); neighborR--, idx--) {
        if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
            refLeft[idx] = baseAddress[neighborR*frameStride + neighborC];
            //printf("Left[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
        }
        else {
            refLeft[idx] = refLeft[idx+1]; // substitute from the sample below
            //printf("Left[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
        }
    }
    //Include the top-left corner in both refLeft and refAbove
    refAbove[0] = refLeft[0];
    for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC, idx = 1; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++, idx++) {
        if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
            refAbove[idx] = baseAddress[neighborR*frameStride + neighborC];
            //printf("Above[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
        }
        else {
            refAbove[idx] = refAbove[idx-1]; // substitute from the sample to the left
            //printf("Above[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
        }
    }
    //Make filtered version (for luma only)
    if(channel == 0) {
        //Special cases for the corner, bottom, and right pixels, [1 2 1] FIR filter for the rest
        //pF[ 1 ][ 1 ] = ( p[ 1 ][ 0 ] + 2 * p[ 1 ][ 1 ] + p[ 0 ][ 1 ] + 2 )  >>  2
        refLeftFlt[0] = refAboveFlt[0] = (refLeft[1] + 2 * refLeft[0] + refAbove[1] + 2) >> 2;
        for(uint32_t idx = 1; idx < cuWidth2; idx++) {
            refLeftFlt[idx] = (refLeft[idx-1] + 2 * refLeft[idx] + refLeft[idx+1] + 2) >> 2;
            refAboveFlt[idx] = (refAbove[idx-1] + 2 * refAbove[idx] + refAbove[idx+1] + 2) >> 2;
        }
        // The last (bottom-left / top-right) sample is copied unfiltered.
        refLeftFlt[cuWidth2] = refLeft[cuWidth2];
        refAboveFlt[cuWidth2] = refAbove[cuWidth2];
    }
}
//luma_size is the (square) block size of luma blocks, chroma blocks are assumed (luma_size/2)x(luma_size/2)
// Run intra prediction for one CU: copy the original pixels into orig_yuv,
// stage the reference samples, and fill y_ptr/cb_ptr/cr_ptr with 35 per-mode
// SA8D costs each (chroma only when luma_size > 4).
void ece408_intra_pred(x265_picture *pic, int width, int height, int luma_size, unsigned int cu_index, int32_t *y_ptr, int32_t *cb_ptr, int32_t *cr_ptr) {
    unsigned int luma_r = (cu_index / (width / luma_size)) * luma_size;
    unsigned int luma_c = (cu_index % (width / luma_size)) * luma_size;
    //Copy luma bytes into orig_yuv
    Pel *walker = orig_yuv.getLumaAddr();
    for(int i = 0; i < luma_size; i++) {
        memcpy(walker, ((Pel *)pic->planes[0]) + (((luma_r + i)*pic->stride[0]) + luma_c), luma_size*sizeof(*walker));
        walker += luma_size;
    }
    if(luma_size > 4) {
        //Copy chroma bytes into orig_yuv
        unsigned int chroma_r = luma_r / 2;
        unsigned int chroma_c = luma_c / 2;
        unsigned int chroma_size = luma_size / 2;
        walker = orig_yuv.getCbAddr();
        for(unsigned int i = 0; i < chroma_size; i++) {
            memcpy(walker, ((Pel *)pic->planes[1]) + (((chroma_r + i)*pic->stride[1]) + chroma_c), chroma_size*sizeof(*walker));
            walker += chroma_size;
        }
        walker = orig_yuv.getCrAddr();
        for(unsigned int i = 0; i < chroma_size; i++) {
            memcpy(walker, ((Pel *)pic->planes[2]) + (((chroma_r + i)*pic->stride[2]) + chroma_c), chroma_size*sizeof(*walker));
            walker += chroma_size;
        }
    }
    //Get the unfiltered and filtered reference pixels. Position them (cuWidth-1) elements into their respective arrays so that the
    //angular prediction function can use the unused space at the beginning of the array to extend the reference pixels as described
    //in equations 8-48 and 8-56 in Section 8.4.4.2.6 of the H.265 standard.
    getReferencePixels(pic, width, height, luma_size, cu_index, refAbove1+luma_size-1, refLeft1+luma_size-1, refAbove2+luma_size-1, refLeft2+luma_size-1, /*channel*/ 0);
#ifdef VERBOSE
    printf("Above     ");
    for(int i = 0; i < (2*luma_size+1); i++)
        printf("%3d ", refAbove1[i+luma_size-1]);
    printf("\nLeft      ");
    for(int i = 0; i < (2*luma_size+1); i++)
        printf("%3d ", refLeft1[i+luma_size-1]);
    printf("\nAboveFilt ");
    for(int i = 0; i < (2*luma_size+1); i++)
        printf("%3d ", refAbove2[i+luma_size-1]);
    printf("\nLeftFilt  ");
    for(int i = 0; i < (2*luma_size+1); i++)
        printf("%3d ", refLeft2[i+luma_size-1]);
    printf("\n");
#endif
    ece408_intra_pred_channel(luma_size, 0, y_ptr);
    if(luma_size > 4) { //No 2x2 chroma blocks, and 4x4 chroma blocks are covered with 8x8 luma
        // Chroma reference samples: Cb is staged in the *1 arrays, Cr in the *2
        // arrays (no filtered variants), matching ece408_intra_pred_channel.
        getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove1+luma_size/2)-1, refLeft1+(luma_size/2)-1, NULL, NULL, /*channel*/ 1);
        ece408_intra_pred_channel(luma_size, 1, cb_ptr);
        getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove2+luma_size/2)-1, refLeft2+(luma_size/2)-1, NULL, NULL, /*channel*/ 2);
        ece408_intra_pred_channel(luma_size, 2, cr_ptr);
    }
}
// Reference (CPU) implementation: for each frame and each luma PB size
// (4x4, 8x8, 16x16, 32x32), computes per-block SATD costs for all 35 intra
// modes, then sorts each block's costs ascending while keeping the winning
// mode numbers alongside. Returns 4*num_frames results (caller owns).
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames) {
ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
ece408_intra_pred_result *cur_result = ret;
for(int i = 0; i < num_frames; i++) {
for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
// NOTE(review): dimensions come from the global 'param' here but from
// encoder->param in the loop below -- assumed to be the same
// configuration; confirm where the global is assigned.
cur_result->create(param->sourceWidth, param->sourceHeight, luma_size);
int32_t *y_satd_results = cur_result->y_satd_results;
uint8_t *y_modes = cur_result->y_modes;
int32_t *cb_satd_results = cur_result->cb_satd_results;
uint8_t *cb_modes = cur_result->cb_modes;
int32_t *cr_satd_results = cur_result->cr_satd_results;
uint8_t *cr_modes = cur_result->cr_modes;
// Re-size the per-CU scratch YUV buffers for this PB size.
orig_yuv.destroy();
orig_yuv.create(luma_size, luma_size, X265_CSP_I420);
pred_yuv.destroy();
pred_yuv.create(luma_size, luma_size, X265_CSP_I420);
for(unsigned int cuIndex = 0; cuIndex < (unsigned int)((encoder->param.sourceWidth/luma_size)*(encoder->param.sourceHeight/luma_size)); cuIndex++) {
ece408_intra_pred(&(pics_in[i]),
encoder->param.sourceWidth,
encoder->param.sourceHeight,
luma_size,
cuIndex,
&(y_satd_results[35*cuIndex]),
&(cb_satd_results[35*cuIndex]),
&(cr_satd_results[35*cuIndex]));
//printf("SATD results: ");
//for(int l = 0; l < 35; l++) {
//	printf("(%d, %d, %d, %d) ", l, y_satd_results[35*cuIndex+l], cb_satd_results[35*cuIndex+l], cr_satd_results[35*cuIndex+l]);
//}
//printf("\n");
// Pack the mode number into the low 8 bits of each cost so a plain
// integer sort keeps (cost, mode) pairs together.
// Assumes each SATD cost fits in 23 bits so the <<8 cannot overflow
// the int32_t -- TODO confirm for the largest block sizes.
for(int mode = 0; mode < 35; mode++) {
y_satd_results[35*cuIndex + mode] = (y_satd_results[35*cuIndex + mode] << 8) | mode;
if(luma_size > 4) {
cb_satd_results[35*cuIndex + mode] = (cb_satd_results[35*cuIndex + mode] << 8) | mode;
cr_satd_results[35*cuIndex + mode] = (cr_satd_results[35*cuIndex + mode] << 8) | mode;
}
}
// Ascending sort of the packed values = sort by cost, ties by mode number.
std::sort(&(y_satd_results[35*cuIndex]), &(y_satd_results[35*cuIndex+35]));
if(luma_size > 4) {
std::sort(&(cb_satd_results[35*cuIndex]), &(cb_satd_results[35*cuIndex+35]));
std::sort(&(cr_satd_results[35*cuIndex]), &(cr_satd_results[35*cuIndex+35]));
}
// Unpack: modes[] receives the ranked mode numbers, satd_results[] the
// sorted raw costs.
for(int mode = 0; mode < 35; mode++) {
y_modes[35*cuIndex+mode] = (y_satd_results[35*cuIndex+mode] & 0xFF);
y_satd_results[35*cuIndex+mode] >>= 8;
if(luma_size > 4) {
cb_modes[35*cuIndex+mode] = (cb_satd_results[35*cuIndex+mode] & 0xFF);
cb_satd_results[35*cuIndex+mode] >>= 8;
cr_modes[35*cuIndex+mode] = (cr_satd_results[35*cuIndex+mode] & 0xFF);
cr_satd_results[35*cuIndex+mode] >>= 8;
}
}
}
#ifdef MODE_HIST
int ymode_hist[35], cbmode_hist[35], crmode_hist[35];
for(int l = 0; l < 35; l++) {
ymode_hist[l] = cbmode_hist[l] = crmode_hist[l] = 0;
}
for(int l = 0; l < (35*((param->sourceWidth/luma_size)*(param->sourceHeight/luma_size))); l += 35) { //+= 1 to make sure all modes are accounted for, += 35 for histogram of best modes
ymode_hist[y_modes[l]]++;
if(luma_size > 4) {
cbmode_hist[cb_modes[l]]++;
crmode_hist[cr_modes[l]]++;
}
}
printf("ymode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", ymode_hist[l]);
if(luma_size > 4) {
printf("\ncbmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", cbmode_hist[l]);
printf("\ncrmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", crmode_hist[l]);
}
printf("\n");
#endif
cur_result++;
}
}
return ret;
}
//TODO sort student results by satd result *and* mode number to make sure we have *exactly* the same bytes in
//both arrays, even if several modes have the same SATD value.
//We want to do the sort here so that students are not required to (it's not necessary in a real x265 use case).
// Compares reference and student results for num_frames frames (4 results
// per frame, one per luma block size). Returns true iff every field of
// every result matches exactly; on the first mismatch, prints which field
// differed and returns false.
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames) {
    if(student == NULL) {
        printf("Student result array pointer is NULL\n");
        return false;
    }
    for(int i = 0; i < (4*num_frames); i++) {
        // (Removed a leftover debug loop that printed ref[0].y_satd_results[35..69]
        // on every iteration: it ignored the loop index and read out of bounds
        // whenever the first result held fewer than two blocks.)
        if(ref[i].luma_block_size != student[i].luma_block_size) {
            printf("Ref result %d luma block size = %d, student = %d\n", i, ref[i].luma_block_size, student[i].luma_block_size);
            return false;
        }
        if(ref[i].num_blocks != student[i].num_blocks) {
            printf("Ref result %d num_blocks = %d, student = %d\n", i, ref[i].num_blocks, student[i].num_blocks);
            return false;
        }
        // Each block stores 35 (mode, satd) entries; compare the full arrays.
        if(memcmp(ref[i].y_modes, student[i].y_modes, 35*ref[i].num_blocks*sizeof(*ref[i].y_modes))) {
            printf("Result %d, ref and student y_modes mismatched\n", i);
            return false;
        }
        if(memcmp(ref[i].y_satd_results, student[i].y_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].y_satd_results))) {
            printf("Result %d, ref and student y_satd_results mismatched\n", i);
            return false;
        }
        if(ref[i].luma_block_size > 4) {
            // Chroma results exist only for luma sizes > 4 (no 2x2 chroma blocks).
            if(memcmp(ref[i].cb_modes, student[i].cb_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cb_modes))) {
                printf("Result %d, ref and student cb_modes mismatched\n", i);
                return false;
            }
            if(memcmp(ref[i].cb_satd_results, student[i].cb_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cb_satd_results))) {
                printf("Result %d, ref and student cb_satd_results mismatched\n", i);
                return false;
            }
            if(memcmp(ref[i].cr_modes, student[i].cr_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cr_modes))) {
                printf("Result %d, ref and student cr_modes mismatched\n", i);
                return false;
            }
            if(memcmp(ref[i].cr_satd_results, student[i].cr_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cr_satd_results))) {
                printf("Result %d, ref and student cr_satd_results mismatched\n", i);
                return false;
            }
        }
    }
    return true;
}
// Aborts with a diagnostic if a HIP runtime call failed. Preserves the
// original error behavior: print the error string plus call-site file/line,
// then exit(EXIT_FAILURE).
static void ece408_check_hip(hipError_t err, const char *file, int line)
{
    if (err != hipSuccess)
    {
        printf("\n%s in %s at line %d\n", hipGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define ECE408_HIP_CHECK(call) ece408_check_hip((call), __FILE__, __LINE__)

// GPU implementation of the competition entry point. Uploads the frame's
// Y/Cr/Cb planes once, then for each luma PB size (4x4, 8x8, 16x16 -- the
// 32x32 pass is currently disabled) launches hevcPredictionKernel with one
// thread block per PB and copies the per-block SATD results and mode lists
// back into an ece408_intra_pred_result. Returns an array of 4*num_frames
// results (caller frees with delete[]); only 3 entries per frame are filled
// while the 32x32 pass stays disabled, matching the original code.
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames) {
    ece408_frame *imgs1 = (ece408_frame *)imgs;
    ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //8x8,16x16,32x32,64x64
    ece408_intra_pred_result *cur_result = ret;
    unsigned int debug_print = ((imgs->height+4-1)/4)*((imgs->width+4-1)/4);
    printf("debug print : %d\n",debug_print );
    // Device copies of the frame planes (freed at the end; previously leaked).
    uint8_t *d_y = NULL,
            *d_cr = NULL,
            *d_cb = NULL;
    unsigned int y_size = ((imgs->width) * (imgs->height)) * sizeof(uint8_t);
    printf("\n Y SIZE : %u\n", y_size);
    // NOTE(review): for 4:2:0 each chroma plane is y_size/4; y_size/2 per plane
    // over-allocates but is safe, so it is kept as-is.
    unsigned int cr_size,
                 cb_size;
    cr_size = cb_size = (y_size/2);
    ///////// timer code ////////////////
    struct timeval timer;
    gettimeofday(&timer,NULL);
    printf("PARALLEL CODE START TIMER in secs :%ld\n",timer.tv_sec);
    printf("PARALLEL CODE START TIMER in usecs:%ld\n",timer.tv_usec);
    // Allocate global memory for the y, cr, cb planes of the frame.
    ECE408_HIP_CHECK(hipMalloc((void **) &d_y, y_size));
    ECE408_HIP_CHECK(hipMalloc((void **) &d_cr, cr_size));
    ECE408_HIP_CHECK(hipMalloc((void **) &d_cb, cb_size));
    ECE408_HIP_CHECK(hipDeviceSynchronize());
    // NOTE(review): only the first frame's planes are uploaded -- the per-frame
    // loop below never re-copies. Confirm the ece408_frame layout before relying
    // on num_frames > 1.
    ECE408_HIP_CHECK(hipMemcpy(d_y, imgs1->y, y_size, hipMemcpyHostToDevice));
    ECE408_HIP_CHECK(hipMemcpy(d_cr, imgs1->cr, cr_size, hipMemcpyHostToDevice));
    ECE408_HIP_CHECK(hipMemcpy(d_cb, imgs1->cb, cb_size, hipMemcpyHostToDevice));
    printf("I AM AT THE END CUDA MEMCPY STAGE 1\n");
    ECE408_HIP_CHECK(hipDeviceSynchronize());
    for(int i = 0; i < num_frames; i++) {
        int res_count = 0;
        //for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
        for(int luma_size_shift = 2; luma_size_shift <= 4; luma_size_shift++) {
            int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
            cur_result->create(imgs1->width, imgs1->height, luma_size);
            // Per-size device buffers. Initialized to NULL so the cleanup and the
            // chroma copies below are safe when the chroma branch is skipped
            // (the originals were left uninitialized, then tested with if(ptr)).
            int32_t *d_res_y = NULL;
            int32_t *d_res_cr = NULL;
            int32_t *d_res_cb = NULL;
            uint8_t *d_y_modes = NULL;
            uint8_t *d_cr_modes = NULL;
            uint8_t *d_cb_modes = NULL;
            unsigned int num_blocks = ((imgs->height+luma_size-1)/luma_size)*((imgs->width+luma_size-1)/luma_size);
            unsigned int y_res_size = 35*num_blocks*sizeof(int32_t); // bytes: 35 modes per PB
            unsigned int mode_size = 35*num_blocks*sizeof(uint8_t);  // bytes
            unsigned int cr_res_size,
                         cb_res_size;
            printf("No.of blocks launched:%u\n",y_res_size/sizeof(int32_t));
            cr_res_size = cb_res_size = y_res_size;
            // Chroma exists for 4 <= luma_size < 32 in this loop (no 2x2 chroma
            // blocks; 32x32 pass disabled). One predicate used consistently for
            // allocation, copy-back, and release (was two different conditions).
            bool has_chroma = (luma_size >= 4 && luma_size < 32);
            // Allocate the per-size result buffers on the device.
            ECE408_HIP_CHECK(hipMalloc((void **) &d_res_y, y_res_size));
            ECE408_HIP_CHECK(hipMalloc((void **) &d_y_modes, mode_size));
            if (has_chroma)
            {
                // Re-size the host-side chroma mode arrays for this PB size.
                if(cur_result->cr_modes){
                    free(cur_result->cr_modes);
                    cur_result->cr_modes = NULL;
                }
                if(cur_result->cb_modes){
                    free(cur_result->cb_modes);
                    cur_result->cb_modes = NULL; // was "cr_modes = NULL" (copy/paste bug)
                }
                cur_result->cr_modes = (uint8_t *)malloc(mode_size);
                cur_result->cb_modes = (uint8_t *)malloc(mode_size);
                ECE408_HIP_CHECK(hipMalloc((void **) &d_res_cr, cr_res_size));
                ECE408_HIP_CHECK(hipMalloc((void **) &d_res_cb, cb_res_size));
                ECE408_HIP_CHECK(hipMalloc((void **) &d_cr_modes, mode_size));
                ECE408_HIP_CHECK(hipMalloc((void **) &d_cb_modes, mode_size));
            }
            ECE408_HIP_CHECK(hipDeviceSynchronize());
            // One thread block per PB, one thread per pixel of the PB.
            dim3 dimGrid = dim3((int)ceil((imgs->width)/(float)luma_size), (int)ceil((imgs->height)/(float)luma_size), 1);
            dim3 dimBlock = dim3(luma_size, luma_size, 1);
            printf("\n KERNEL CONFIG: %d %d %d %d\n", dimGrid.x, dimGrid.y, dimBlock.x, dimBlock.y);
            hipLaunchKernelGGL(( hevcPredictionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_y, d_cr, d_cb, d_res_y, d_res_cr, d_res_cb, d_y_modes, d_cr_modes, d_cb_modes, imgs->height, imgs->width);
            ECE408_HIP_CHECK(hipGetLastError());      // catch launch-configuration errors
            ECE408_HIP_CHECK(hipDeviceSynchronize()); // catch asynchronous execution errors
            printf("current result num_block_size is %d\n", num_blocks);
            printf("from serial code num_block is %d\n",cur_result->num_blocks);
            // Copy results back to the host.
            ECE408_HIP_CHECK(hipMemcpy(cur_result->y_satd_results, d_res_y, y_res_size, hipMemcpyDeviceToHost));
            ECE408_HIP_CHECK(hipMemcpy(cur_result->y_modes, d_y_modes, mode_size, hipMemcpyDeviceToHost));
            if (has_chroma)
            {
                // Chroma copies only when the buffers exist (the SATD copies were
                // previously unconditional, reading through unallocated pointers
                // whenever the chroma branch had been skipped).
                ECE408_HIP_CHECK(hipMemcpy(cur_result->cr_satd_results, d_res_cr, cr_res_size, hipMemcpyDeviceToHost));
                ECE408_HIP_CHECK(hipMemcpy(cur_result->cb_satd_results, d_res_cb, cb_res_size, hipMemcpyDeviceToHost));
                ECE408_HIP_CHECK(hipMemcpy(cur_result->cr_modes, d_cr_modes, mode_size, hipMemcpyDeviceToHost));
                ECE408_HIP_CHECK(hipMemcpy(cur_result->cb_modes, d_cb_modes, mode_size, hipMemcpyDeviceToHost));
            }
            // Release the per-size device buffers (NULL-safe: pointers start NULL).
            if(d_res_y)    { hipFree(d_res_y);    d_res_y = NULL; }
            if(d_y_modes)  { hipFree(d_y_modes);  d_y_modes = NULL; }
            if(d_res_cr)   { hipFree(d_res_cr);   d_res_cr = NULL; }
            if(d_res_cb)   { hipFree(d_res_cb);   d_res_cb = NULL; }
            if(d_cr_modes) { hipFree(d_cr_modes); d_cr_modes = NULL; }
            if(d_cb_modes) { hipFree(d_cb_modes); d_cb_modes = NULL; }
            cur_result++;
            res_count++;
        }
        (void)res_count;
        gettimeofday(&timer,NULL);
        printf("PARALLEL CODE END TIMER in secs :%ld\n",timer.tv_sec);
        printf("PARALLEL CODE END TIMER in usecs:%ld\n",timer.tv_usec);
    }
    // Release the frame planes (previously leaked).
    hipFree(d_y);
    hipFree(d_cr);
    hipFree(d_cb);
    return ret;
}
| 5a780c402c1cebe1b385baab6e46a77ebf298c2c.cu | /*****************************************************************************
* Copyright (C) 2013 x265 project
*
* Authors: Gopu Govindaswamy <gopu@govindaswamy.org>
* Mandar Gurav <mandar@multicorewareinc.com>
* Mahesh Pittala <mahesh@multicorewareinc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*
* This program is also available under a commercial proprietary license.
* For more information, contact us at licensing@multicorewareinc.com.
*****************************************************************************/
#include "ece408_competition.h"
#include "primitives.h"
#include "test/intrapredharness.h"
#include "cpu.h"
#include "TLibCommon/TComRom.h"
#include "TLibEncoder/TEncCfg.h"
#include "input/input.h"
#include "output/output.h"
#include "common.h"
#include "x265.h"
#include "getopt.h"
#include "PPA/ppa.h"
#include "encoder.h"
#include "TLibCommon/TComYuv.h"
#include "TLibCommon/TComPic.h"
#include "TLibCommon/TComPicYuv.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <fstream>
#include <algorithm>
#include <sys/time.h>
#include "kernel.cu"
//Define this to verify the student intra prediction against the reference version
#define VERIFY
//#define VERBOSE
//Define this to dump all reference results to file (to compare between versions)
//#define DUMP_TO_FILE
//This is the filename where all reference results will be dumped ifdef DUMP_TO_FILE
#define DUMP_FILE "dump.bin"
using namespace x265;
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames);
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames);
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames);
// Shared scratch state for the reference intra-prediction path.
// Unfiltered (1) and filtered (2) above/left reference-pixel buffers,
// filled by getReferencePixels() per CU.
Pel *refAbove1, *refAbove2, *refLeft1, *refLeft2;
// Prediction output buffer and its geometry -- set elsewhere; TODO confirm owner.
Pel* predBuf;
int predBufStride;
int predBufHeight;
TComYuv pred_yuv; // per-CU predicted samples
TComYuv orig_yuv; // per-CU original samples copied from the source picture
TComSPS sps;
TComPPS pps;
// Global encoder parameters; presumably mirrors encoder->param -- verify.
x265_param *param;
// 32-byte-aligned scratch buffers (all 33 angular modes x 32x32 samples,
// and one transposed 32x32 block).
ALIGN_VAR_32(Pel, tmp[33 * 32 * 32]);
ALIGN_VAR_32(Pel, buf_trans[32 * 32]);
// getopt short-option spec; each letter pairs with an entry in long_options.
static const char short_options[] = "o:f:F:r:i:b:s:q:m:hwV";
// getopt_long option table. Entries with val == 0 are dispatched by name in
// CLIOptions::parse(); non-zero vals alias the short options above.
static const struct option long_options[] =
{
#if HIGH_BIT_DEPTH
    { "depth",          required_argument, NULL, 0 },
#endif
    { "help",                 no_argument, NULL, 'h' },
    { "version",              no_argument, NULL, 'V' },
    { "cpuid",          required_argument, NULL, 0 },
    { "threads",        required_argument, NULL, 0 },
    { "preset",         required_argument, NULL, 'p' },
    { "tune",           required_argument, NULL, 't' },
    { "frame-threads",  required_argument, NULL, 'F' },
    { "log",            required_argument, NULL, 0 },
    { "csv",            required_argument, NULL, 0 },
    { "y4m",                  no_argument, NULL, 0 },
    { "no-progress",          no_argument, NULL, 0 },
    { "output",         required_argument, NULL, 'o' },
    { "input",          required_argument, NULL, 0 },
    { "input-depth",    required_argument, NULL, 0 },
    { "input-res",      required_argument, NULL, 0 },
    { "input-csp",      required_argument, NULL, 0 },
    { "fps",            required_argument, NULL, 0 },
    { "frame-skip",     required_argument, NULL, 0 },
    { "frames",         required_argument, NULL, 'f' },
    { "recon",          required_argument, NULL, 'r' },
    { "recon-depth",    required_argument, NULL, 0 },
    { "no-wpp",               no_argument, NULL, 0 },
    { "wpp",                  no_argument, NULL, 0 },
    { "ctu",            required_argument, NULL, 's' },
    { "tu-intra-depth", required_argument, NULL, 0 },
    { "tu-inter-depth", required_argument, NULL, 0 },
    { "me",             required_argument, NULL, 0 },
    { "subme",          required_argument, NULL, 'm' },
    { "merange",        required_argument, NULL, 0 },
    { "max-merge",      required_argument, NULL, 0 },
    { "rdpenalty",      required_argument, NULL, 0 },
    { "no-rect",              no_argument, NULL, 0 },
    { "rect",                 no_argument, NULL, 0 },
    { "no-amp",               no_argument, NULL, 0 },
    { "amp",                  no_argument, NULL, 0 },
    { "no-early-skip",        no_argument, NULL, 0 },
    { "early-skip",           no_argument, NULL, 0 },
    { "no-fast-cbf",          no_argument, NULL, 0 },
    { "fast-cbf",             no_argument, NULL, 0 },
    { "no-tskip",             no_argument, NULL, 0 },
    { "tskip",                no_argument, NULL, 0 },
    { "no-tskip-fast",        no_argument, NULL, 0 },
    { "tskip-fast",           no_argument, NULL, 0 },
    { "no-constrained-intra", no_argument, NULL, 0 },
    { "constrained-intra",    no_argument, NULL, 0 },
    { "refresh",        required_argument, NULL, 0 },
    { "keyint",         required_argument, NULL, 'i' },
    { "rc-lookahead",   required_argument, NULL, 0 },
    { "bframes",        required_argument, NULL, 'b' },
    { "bframe-bias",    required_argument, NULL, 0 },
    { "b-adapt",        required_argument, NULL, 0 },
    { "no-b-pyramid",         no_argument, NULL, 0 },
    { "b-pyramid",            no_argument, NULL, 0 },
    { "ref",            required_argument, NULL, 0 },
    { "no-weightp",           no_argument, NULL, 0 },
    { "weightp",              no_argument, NULL, 'w' },
    { "crf",            required_argument, NULL, 0 },
    { "vbv-maxrate",    required_argument, NULL, 0 },
    { "vbv-bufsize",    required_argument, NULL, 0 },
    { "vbv-init",       required_argument, NULL, 0 },
    { "bitrate",        required_argument, NULL, 0 },
    { "qp",             required_argument, NULL, 'q' },
    { "aq-mode",        required_argument, NULL, 0 },
    { "aq-strength",    required_argument, NULL, 0 },
    { "cbqpoffs",       required_argument, NULL, 0 },
    { "crqpoffs",       required_argument, NULL, 0 },
    { "rd",             required_argument, NULL, 0 },
    { "no-signhide",          no_argument, NULL, 0 },
    { "signhide",             no_argument, NULL, 0 },
    { "no-lft",               no_argument, NULL, 0 },
    { "lft",                  no_argument, NULL, 0 },
    { "no-sao",               no_argument, NULL, 0 },
    { "sao",                  no_argument, NULL, 0 },
    { "sao-lcu-bounds", required_argument, NULL, 0 },
    { "sao-lcu-opt",    required_argument, NULL, 0 },
    { "no-ssim",              no_argument, NULL, 0 },
    { "ssim",                 no_argument, NULL, 0 },
    { "no-psnr",              no_argument, NULL, 0 },
    { "psnr",                 no_argument, NULL, 0 },
    { "hash",           required_argument, NULL, 0 },
    { "no-strong-intra-smoothing", no_argument, NULL, 0 },
    { "strong-intra-smoothing",    no_argument, NULL, 0 },
    { 0, 0, 0, 0 }
};
// Aggregates all state parsed from the command line for one encode run:
// the opened input reader, optional reconstruction writer, the output
// bitstream, and frame-selection / progress options.
struct CLIOptions
{
    Input* input;               // raw YUV / Y4M reader (NULL until parse() opens it)
    Output* recon;              // optional reconstructed-frame writer (may stay NULL)
    std::fstream bitstreamFile; // raw HEVC bitstream output
    bool bProgress;             // enable periodic CLI progress reports
    bool bForceY4m;             // treat input as Y4M regardless of file extension
    uint32_t totalbytes;        // bytes written to bitstreamFile so far
    uint32_t frameSkip;         // number of frames to skip from the beginning
    uint32_t framesToBeEncoded; // number of frames to encode
    int64_t startTime;          // run start time (x265_mdate units)
    int64_t prevUpdateTime;     // timestamp of the last progress update
    /* in microseconds */
    static const int UPDATE_INTERVAL = 250000;
    // Default-construct with no files open and progress reporting enabled.
    CLIOptions()
    {
        input = NULL;
        recon = NULL;
        framesToBeEncoded = frameSkip = totalbytes = 0;
        bProgress = true;
        bForceY4m = false;
        startTime = x265_mdate();
        prevUpdateTime = 0;
    }
    void destroy();
    void writeNALs(const x265_nal* nal, uint32_t nalcount);
    void printVersion(x265_param *par);
    void showHelp(x265_param *par);
    bool parse(int argc, char **argv, x265_param* par);
};
// Release the input reader and recon writer (if open) and clear the handles.
void CLIOptions::destroy()
{
    if (input != NULL)
        input->release();
    if (recon != NULL)
        recon->release();
    input = NULL;
    recon = NULL;
}
// Append each NAL payload to the output bitstream, accumulating the byte count.
void CLIOptions::writeNALs(const x265_nal* nal, uint32_t nalcount)
{
    PPAScopeEvent(bitstream_write);
    for (uint32_t i = 0; i < nalcount; i++)
    {
        bitstreamFile.write((const char*)nal[i].payload, nal[i].sizeBytes);
        totalbytes += nal[i].sizeBytes;
    }
}
void CLIOptions::printVersion(x265_param *par)
{
fprintf(stderr, "x265 [info]: HEVC encoder version %s\n", x265_version_str);
fprintf(stderr, "x265 [info]: build info %s\n", x265_build_info_str);
x265_setup_primitives(par, -1);
}
// Print full CLI usage to stdout (defaults taken from a freshly-defaulted
// x265_param) and exit(0). Never returns.
void CLIOptions::showHelp(x265_param *par)
{
    x265_param_default(par);
    printVersion(par);
#define H0 printf
#define OPT(value) (value ? "enabled" : "disabled")
    H0("\nSyntax: x265 [options] infile [-o] outfile\n");
    H0("    infile can be YUV or Y4M\n");
    H0("    outfile is raw HEVC bitstream\n");
    H0("\nExecutable Options:\n");
    H0("-h/--h                           Show this help text and exit\n");
    H0("-V/--version                     Show version info and exit\n");
    H0("   --cpuid                       Limit SIMD capability bitmap 0:auto 1:None. Default:0\n");
    H0("   --threads                     Number of threads for thread pool (0: detect CPU core count, default)\n");
    H0("-p/--preset                      ultrafast, veryfast, faster, fast, medium, slow, slower, veryslow, or placebo\n");
    H0("-t/--tune                        Tune the settings for a particular type of source or situation\n");
    H0("-F/--frame-threads               Number of concurrently encoded frames. Default %d\n", par->frameNumThreads);
    H0("   --log                         Logging level 0:ERROR 1:WARNING 2:INFO 3:DEBUG -1:NONE. Default %d\n", par->logLevel);
    H0("   --csv                         Comma separated log file, log level >= 3 frame log, else one line per run\n");
    H0("   --y4m                         Parse input stream as YUV4MPEG2 regardless of file extension\n");
    H0("   --no-progress                 Disable CLI progress reports\n");
    H0("-o/--output                      Bitstream output file name\n");
    H0("\nInput Options:\n");
    H0("   --input                       Raw YUV or Y4M input file name\n");
    H0("   --input-depth                 Bit-depth of input file (YUV only) Default %d\n", par->inputBitDepth);
    H0("   --input-res                   Source picture size [w x h], auto-detected if Y4M\n");
    H0("   --input-csp                   Source color space parameter, auto-detected if Y4M\n");
    H0("   --fps                         Source frame rate, auto-detected if Y4M\n");
    H0("   --frame-skip                  Number of frames to skip at start of input file\n");
    H0("-f/--frames                      Number of frames to be encoded. Default all\n");
    H0("\nQuad-Tree analysis:\n");
    H0("   --[no-]wpp                    Enable Wavefront Parallel Processing. Default %s\n", OPT(par->bEnableWavefront));
    H0("-s/--ctu                         Maximum CU size. Default %dx%d\n", par->maxCUSize, par->maxCUSize);
    H0("   --tu-intra-depth              Max TU recursive depth for intra CUs. Default %d\n", par->tuQTMaxIntraDepth);
    H0("   --tu-inter-depth              Max TU recursive depth for inter CUs. Default %d\n", par->tuQTMaxInterDepth);
    H0("\nTemporal / motion search options:\n");
    H0("   --me                          Motion search method 0:dia 1:hex 2:umh 3:star 4:full. Default %d\n", par->searchMethod);
    H0("-m/--subme                       Amount of subpel refinement to perform (0:least .. 7:most). Default %d \n", par->subpelRefine);
    H0("   --merange                     Motion search range. Default %d\n", par->searchRange);
    H0("   --[no-]rect                   Enable rectangular motion partitions Nx2N and 2NxN. Default %s\n", OPT(par->bEnableRectInter));
    H0("   --[no-]amp                    Enable asymmetric motion partitions, requires --rect. Default %s\n", OPT(par->bEnableAMP));
    H0("   --max-merge                   Maximum number of merge candidates. Default %d\n", par->maxNumMergeCand);
    H0("   --[no-]early-skip             Enable early SKIP detection. Default %s\n", OPT(par->bEnableEarlySkip));
    H0("   --[no-]fast-cbf               Enable Cbf fast mode \n \t\t\t\t Default : %s\n", OPT(par->bEnableCbfFastMode));
    H0("\nSpatial / intra options:\n");
    H0("   --rdpenalty                   penalty for 32x32 intra TU in non-I slices. 0:disabled 1:RD-penalty 2:maximum. Default %d\n", par->rdPenalty);
    H0("   --[no-]tskip                  Enable intra transform skipping. Default %s\n", OPT(par->bEnableTransformSkip));
    H0("   --[no-]tskip-fast             Enable fast intra transform skipping. Default %s\n", OPT(par->bEnableTSkipFast));
    H0("   --[no-]strong-intra-smoothing Enable strong intra smoothing for 32x32 blocks. Default %s\n", OPT(par->bEnableStrongIntraSmoothing));
    H0("   --[no-]constrained-intra      Constrained intra prediction (use only intra coded reference pixels) Default %s\n", OPT(par->bEnableConstrainedIntra));
    H0("\nSlice decision options:\n");
    H0("   --refresh                     Intra refresh type - 0:none, 1:CDR, 2:IDR (default: CDR) Default %d\n", par->decodingRefreshType);
    H0("-i/--keyint                      Max intra period in frames. Default %d\n", par->keyframeMax);
    H0("   --rc-lookahead                Number of frames for frame-type lookahead (determines encoder latency) Default %d\n", par->lookaheadDepth);
    H0("   --bframes                     Maximum number of consecutive b-frames (now it only enables B GOP structure) Default %d\n", par->bframes);
    H0("   --bframe-bias                 Bias towards B frame decisions. Default %d\n", par->bFrameBias);
    H0("   --b-adapt                     0 - none, 1 - fast, 2 - full (trellis) adaptive B frame scheduling. Default %d\n", par->bFrameAdaptive);
    H0("   --[no-]b-pyramid              Use B-frames as references. Default %s\n", OPT(par->bBPyramid));
    H0("   --ref                         max number of L0 references to be allowed (1 .. 16) Default %d\n", par->maxNumReferences);
    H0("-w/--[no-]weightp                Enable weighted prediction in P slices. Default %s\n", OPT(par->bEnableWeightedPred));
    H0("\nQP, rate control and rate distortion options:\n");
    H0("   --bitrate                     Target bitrate (kbps), implies ABR. Default %d\n", par->rc.bitrate);
    H0("   --crf                         Quality-based VBR (0-51). Default %f\n", par->rc.rfConstant);
    H0("   --vbv-maxrate                 Max local bitrate (kbit/s). Default %d\n", par->rc.vbvMaxBitrate);
    H0("   --vbv-bufsize                 Set size of the VBV buffer (kbit). Default %d\n", par->rc.vbvBufferSize);
    H0("   --vbv-init                    Initial VBV buffer occupancy. Default %f\n", par->rc.vbvBufferInit);
    H0("-q/--qp                          Base QP for CQP mode. Default %d\n", par->rc.qp);
    H0("   --aq-mode                     Mode for Adaptive Quantization - 0:none 1:aqVariance Default %d\n", par->rc.aqMode);
    H0("   --aq-strength                 Reduces blocking and blurring in flat and textured areas.(0 to 3.0)<double> . Default %f\n", par->rc.aqStrength);
    H0("   --cbqpoffs                    Chroma Cb QP Offset. Default %d\n", par->cbQpOffset);
    H0("   --crqpoffs                    Chroma Cr QP Offset. Default %d\n", par->crQpOffset);
    H0("   --rd                          Level of RD in mode decision 0:least....2:full RDO. Default %d\n", par->rdLevel);
    H0("   --[no-]signhide               Hide sign bit of one coeff per TU (rdo). Default %s\n", OPT(par->bEnableSignHiding));
    H0("\nLoop filter:\n");
    H0("   --[no-]lft                    Enable Loop Filter. Default %s\n", OPT(par->bEnableLoopFilter));
    H0("\nSample Adaptive Offset loop filter:\n");
    H0("   --[no-]sao                    Enable Sample Adaptive Offset. Default %s\n", OPT(par->bEnableSAO));
    H0("   --sao-lcu-bounds              0: right/bottom boundary areas skipped  1: non-deblocked pixels are used. Default %d\n", par->saoLcuBoundary);
    H0("   --sao-lcu-opt                 0: SAO picture-based optimization, 1: SAO LCU-based optimization. Default %d\n", par->saoLcuBasedOptimization);
    H0("\nQuality reporting metrics:\n");
    H0("   --[no-]ssim                   Enable reporting SSIM metric scores. Default %s\n", OPT(par->bEnableSsim));
    H0("   --[no-]psnr                   Enable reporting PSNR metric scores. Default %s\n", OPT(par->bEnablePsnr));
    H0("\nReconstructed video options (debugging):\n");
    H0("-r/--recon                       Reconstructed raw image YUV or Y4M output file name\n");
    H0("   --recon-depth                 Bit-depth of reconstructed raw image file. Default 8\n");
    H0("\nSEI options:\n");
    H0("   --hash                        Decoded Picture Hash SEI 0: disabled, 1: MD5, 2: CRC, 3: Checksum. Default %d\n", par->decodedPictureHashSEI);
#undef OPT
#undef H0
    exit(0);
}
bool CLIOptions::parse(int argc, char **argv, x265_param* par)
{
int berror = 0;
int help = 0;
int cpuid = 0;
int reconFileBitDepth = 0;
const char *inputfn = NULL;
const char *reconfn = NULL;
const char *bitstreamfn = NULL;
const char *inputRes = NULL;
const char *preset = "medium";
const char *tune = "psnr";
/* Presets are applied before all other options. */
for (optind = 0;; )
{
int c = getopt_long(argc, argv, short_options, long_options, NULL);
if (c == -1)
break;
if (c == 'p')
preset = optarg;
if (c == 't')
tune = optarg;
else if (c == '?')
return true;
}
if (x265_param_default_preset(param, preset, tune) < 0)
{
x265_log(NULL, X265_LOG_WARNING, "preset or tune unrecognized\n");
return true;
}
//MRJ Set max CU size to 32x32 so that frames are padded in Encoder::configure() to a multiple of 4x4, not a multiple of 8x8.
par->maxCUSize = 32;
for (optind = 0;; )
{
int long_options_index = -1;
int c = getopt_long(argc, argv, short_options, long_options, &long_options_index);
if (c == -1)
{
break;
}
switch (c)
{
case 'h':
showHelp(par);
break;
case 'V':
printVersion(par);
exit(0);
default:
if (long_options_index < 0 && c > 0)
{
for (size_t i = 0; i < sizeof(long_options) / sizeof(long_options[0]); i++)
{
if (long_options[i].val == c)
{
long_options_index = (int)i;
break;
}
}
if (long_options_index < 0)
{
/* getopt_long might have already printed an error message */
if (c != 63)
x265_log(NULL, X265_LOG_WARNING, "internal error: short option '%c' has no long option\n", c);
return true;
}
}
if (long_options_index < 0)
{
x265_log(NULL, X265_LOG_WARNING, "short option '%c' unrecognized\n", c);
return true;
}
#define OPT(longname) \
else if (!strcmp(long_options[long_options_index].name, longname))
if (0) ;
OPT("cpuid") cpuid = atoi(optarg);
OPT("frames") this->framesToBeEncoded = (uint32_t)atoi(optarg);
OPT("preset") preset = optarg;
OPT("tune") tune = optarg;
OPT("no-progress") this->bProgress = false;
OPT("frame-skip") this->frameSkip = (uint32_t)atoi(optarg);
OPT("output") bitstreamfn = optarg;
OPT("input") inputfn = optarg;
OPT("recon") reconfn = optarg;
OPT("input-depth") par->inputBitDepth = (uint32_t)atoi(optarg);
OPT("recon-depth") reconFileBitDepth = (uint32_t)atoi(optarg);
OPT("input-res") inputRes = optarg;
OPT("y4m") bForceY4m = true;
else
berror |= x265_param_parse(par, long_options[long_options_index].name, optarg);
if (berror)
{
const char *name = long_options_index > 0 ? long_options[long_options_index].name : argv[optind - 2];
x265_log(NULL, X265_LOG_ERROR, "invalid argument: %s = %s\n", name, optarg);
return true;
}
#undef OPT
}
}
if (optind < argc && !inputfn)
inputfn = argv[optind++];
if (optind < argc && !bitstreamfn)
bitstreamfn = argv[optind++];
if (optind < argc)
{
x265_log(par, X265_LOG_WARNING, "extra unused command arguments given <%s>\n", argv[optind]);
return true;
}
if (argc <= 1 || help)
showHelp(par);
if (inputfn == NULL || bitstreamfn == NULL)
{
x265_log(par, X265_LOG_ERROR, "input or output file not specified, try -V for help\n");
return true;
}
this->input = Input::open(inputfn, par->inputBitDepth, bForceY4m);
if (!this->input || this->input->isFail())
{
x265_log(par, X265_LOG_ERROR, "unable to open input file <%s>\n", inputfn);
return true;
}
if (this->input->getWidth())
{
/* parse the width, height, frame rate from the y4m file */
par->internalCsp = this->input->getColorSpace();
par->sourceWidth = this->input->getWidth();
par->sourceHeight = this->input->getHeight();
par->frameRate = (int)this->input->getRate();
}
else if (inputRes)
{
this->input->setColorSpace(par->internalCsp);
sscanf(inputRes, "%dx%d", &par->sourceWidth, &par->sourceHeight);
this->input->setDimensions(par->sourceWidth, par->sourceHeight);
this->input->setBitDepth(par->inputBitDepth);
}
else if (par->sourceHeight <= 0 || par->sourceWidth <= 0 || par->frameRate <= 0)
{
x265_log(par, X265_LOG_ERROR, "YUV input requires source width, height, and rate to be specified\n");
return true;
}
else
{
this->input->setDimensions(par->sourceWidth, par->sourceHeight);
this->input->setBitDepth(par->inputBitDepth);
}
int guess = this->input->guessFrameCount();
if (this->frameSkip)
{
this->input->skipFrames(this->frameSkip);
}
uint32_t fileFrameCount = guess < 0 ? 0 : (uint32_t)guess;
if (this->framesToBeEncoded && fileFrameCount)
this->framesToBeEncoded = X265_MIN(this->framesToBeEncoded, fileFrameCount - this->frameSkip);
else if (fileFrameCount)
this->framesToBeEncoded = fileFrameCount - this->frameSkip;
if (par->logLevel >= X265_LOG_INFO)
{
if (this->framesToBeEncoded == 0)
fprintf(stderr, "%s [info]: %dx%d %dHz %s, unknown frame count\n", input->getName(),
par->sourceWidth, par->sourceHeight, par->frameRate,
(par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420");
else
fprintf(stderr, "%s [info]: %dx%d %dHz %s, frames %u - %d of %d\n", input->getName(),
par->sourceWidth, par->sourceHeight, par->frameRate,
(par->internalCsp >= X265_CSP_I444) ? "C444" : (par->internalCsp >= X265_CSP_I422) ? "C422" : "C420",
this->frameSkip, this->frameSkip + this->framesToBeEncoded - 1, fileFrameCount);
}
this->input->startReader();
if (reconfn)
{
if (reconFileBitDepth == 0)
reconFileBitDepth = par->inputBitDepth;
this->recon = Output::open(reconfn, par->sourceWidth, par->sourceHeight, reconFileBitDepth, par->frameRate, par->internalCsp);
if (this->recon->isFail())
{
x265_log(par, X265_LOG_WARNING, "unable to write reconstruction file\n");
this->recon->release();
this->recon = 0;
}
}
#if HIGH_BIT_DEPTH
if (par->inputBitDepth != 12 && par->inputBitDepth != 10 && par->inputBitDepth != 8)
{
x265_log(par, X265_LOG_ERROR, "Only bit depths of 8, 10, or 12 are supported\n");
return true;
}
#else
if (par->inputBitDepth != 8)
{
x265_log(par, X265_LOG_ERROR, "not compiled for bit depths greater than 8\n");
return true;
}
#endif // if HIGH_BIT_DEPTH
this->bitstreamFile.open(bitstreamfn, std::fstream::binary | std::fstream::out);
if (!this->bitstreamFile)
{
x265_log(NULL, X265_LOG_ERROR, "failed to open bitstream file <%s> for writing\n", bitstreamfn);
return true;
}
x265_setup_primitives(par, cpuid);
printVersion(par);
return false;
}
/*
 * Competition driver entry point.
 * Parses the command line, reads frames from the input file, runs the CPU
 * reference intra prediction (ece408_competition_ref) and the student GPU
 * implementation (ece408_competition) on each frame, optionally compares
 * and/or dumps the results, then restores globals and tears everything down.
 * Returns 0 on success; exits with status 1 on any setup/verification error.
 */
int main(int argc, char *argv[])
{
CLIOptions cliopt;
param = x265_param_alloc();
struct timeval timer;
gettimeofday(&timer,NULL);
printf("START TIMER IN SERIAL CODE in secs :%ld\n",timer.tv_sec);
printf("START TIMER in SERIAL CODE in usecs:%ld\n",timer.tv_usec);
// parse() returns true on error
if (cliopt.parse(argc, argv, param))
{
cliopt.destroy();
exit(1);
}
param->bEnableStrongIntraSmoothing = false; //No strong intra smoothing for competition
TEncCfg *encoder = new TEncCfg();
// NOTE(review): plain `new` throws on failure rather than returning NULL,
// so this check is effectively dead; kept as-is.
if (!encoder)
{
x265_log(param, X265_LOG_ERROR, "failed to open encoder\n");
cliopt.destroy();
x265_cleanup();
exit(1);
}
// save a copy of final parameters in TEncCfg
memcpy(&encoder->param, param, sizeof(*param));
encoder->m_pad[0] = encoder->m_pad[1] = 0;
//MRJ the above (original) line always computes 8, let's set it to 4 instead to get the correct padding.
// Despite the name, this is used as the minimum CU *size* in pixels that
// width/height must be a multiple of, not a depth.
uint32_t minCUDepth = 4;
// Pad the frame width up to a multiple of minCUDepth and record the pad in
// the conformance window. NOTE(review): this modifies param->sourceWidth
// *after* param was memcpy'd into encoder->param, so the two copies can
// disagree on dimensions — presumably intentional; confirm.
if ((param->sourceWidth % minCUDepth) != 0)
{
uint32_t padsize = 0;
uint32_t rem = param->sourceWidth % minCUDepth;
padsize = minCUDepth - rem;
param->sourceWidth += padsize;
encoder->m_pad[0] = padsize; //pad width
/* set the confirmation window offsets */
encoder->m_conformanceWindow.m_enabledFlag = true;
encoder->m_conformanceWindow.m_winRightOffset = encoder->m_pad[0];
}
//======== set pad size if height is not multiple of the minimum CU size =========
if ((param->sourceHeight % minCUDepth) != 0)
{
uint32_t padsize = 0;
uint32_t rem = param->sourceHeight % minCUDepth;
padsize = minCUDepth - rem;
param->sourceHeight += padsize;
encoder->m_pad[1] = padsize; //pad height
/* set the confirmation window offsets */
encoder->m_conformanceWindow.m_enabledFlag = true;
encoder->m_conformanceWindow.m_winBottomOffset = encoder->m_pad[1];
}
//Encoder *encoder_c = static_cast<Encoder*>(encoder);
//Initialize arrays for storing neighboring pixel values
// Each array holds up to (size-1) slack elements plus 2*size+1 reference
// samples (see ece408_intra_pred/getReferencePixels), hence 3 * MAX_CU_SIZE.
refAbove1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refAbove2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refLeft1 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
refLeft2 = (Pel*)X265_MALLOC(Pel, 3 * MAX_CU_SIZE);
//Save globals so we can restore them at the end
//We need to restore the original values before destroy()ing data structures because many of the destroy() functions
//use these globals to determine the size of their arrays
int g_maxCUDepth_bak = g_maxCUDepth;
int g_addCUDepth_bak = g_addCUDepth;
int g_maxCUWidth_bak = g_maxCUWidth;
int g_maxCUHeight_bak = g_maxCUHeight;
g_maxCUDepth = 0; //Disallow recursion to decompose frames into a regular grid of equal size CUs.
g_addCUDepth = 0;
//NOTE: has to be after x265_encoder_open() call, since that calls x265_set_globals(), which resets g_maxCUDepth.
x265_picture pic_orig;
x265_picture *pic_in = &pic_orig;
x265_picture_init(param, pic_in);
uint32_t inFrameCount = 0;
//Several pieces of the reference code assume 4:2:0 subsampling, so assert that here
if(param->internalCsp != X265_CSP_I420) {
fprintf(stderr, "Error: Input must use i420 colorspace (4:2:0 subsampling)\n");
exit(1);
}
#ifdef DUMP_TO_FILE
FILE *f = fopen(DUMP_FILE, "wb");
if(!f) {
fprintf(stderr, "Error opening dump file (" DUMP_FILE ")\n");
exit(1);
}
#endif
// Per-frame loop: read a picture, run reference and student versions,
// optionally verify, then release the per-frame result arrays.
while (1)
{
pic_orig.poc = inFrameCount;
if (cliopt.framesToBeEncoded && inFrameCount >= cliopt.framesToBeEncoded)
break;
else if (cliopt.input->readPicture(pic_orig))
inFrameCount++;
else
break;
ece408_intra_pred_result *ref = ece408_competition_ref(encoder, pic_in, 1);
#ifdef DUMP_TO_FILE
ref[0].write_to_file(f);
#endif
ece408_frame frame(param->sourceWidth, param->sourceHeight, pic_in);
//Uncomment this one to run the student version
ece408_intra_pred_result *student = ece408_competition(&frame, 1);
//Uncomment this one instead to run the reference version twice (to test the compare function)
//ece408_intra_pred_result *student = ece408_competition_ref(encoder, pic_in, 1);
gettimeofday(&timer,NULL);
printf("IN SERIAL CODE END TIMER in secs :%ld\n",timer.tv_sec);
printf("IN SERIAL CODE END TIMER in usecs:%ld\n",timer.tv_usec);
#ifdef VERIFY
if(!ece408_compare(ref, student, 1)) {
printf("Error in frame %d\n", inFrameCount);
exit(1);
}
#endif
// 4 results per frame (one per luma block size), 1 frame per call
for(int i = 0; i < 4*1; i++) {
ref[i].destroy();
student[i].destroy();
}
delete[] ref;
delete[] student;
}
#ifdef DUMP_TO_FILE
fclose(f);
#endif
#ifdef VERIFY
printf("Success!\n");
#endif
//Restore globals
g_maxCUDepth = g_maxCUDepth_bak;
g_addCUDepth = g_addCUDepth_bak;
g_maxCUWidth = g_maxCUWidth_bak;
g_maxCUHeight = g_maxCUHeight_bak;
delete encoder;
X265_FREE(refAbove1);
X265_FREE(refAbove2);
X265_FREE(refLeft1);
X265_FREE(refLeft2);
orig_yuv.destroy();
pred_yuv.destroy();
x265_cleanup(); /* Free library singletons */
cliopt.destroy();
x265_param_free(param);
//gettimeofday(&timer,NULL);
//printf("IN SERIAL CODE END TIMER in secs :%ld\n",timer.tv_sec);
//printf("IN SERIAL CODE END TIMER in usecs:%ld\n",timer.tv_usec);
return 0;
}
//channel = 0 for luma, 1 for cb, 2 for cr
// Compute intra-prediction SATD costs for one color channel of the current
// block (already copied into orig_yuv by ece408_intra_pred).
// channel: 0 = luma, 1 = Cb, 2 = Cr; chroma blocks are luma_size/2 square.
// Writes 35 costs through sad_ptr in mode order: planar (0), DC (1), then
// angular modes 2..34. Relies on globals: orig_yuv/pred_yuv, refAbove1/2,
// refLeft1/2, buf_trans, tmp, and the x265 primitives table.
void ece408_intra_pred_channel(int luma_size, int channel, int32_t *sad_ptr) {
//#define VERBOSE
#ifdef VERBOSE
printf("refAbove1: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refAbove1[i]);
printf("\n");
printf("refAbove2: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refAbove2[i]);
printf("\n");
printf("refLeft1: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refLeft1[i]);
printf("\n");
printf("refLeft2: ");
for(int i = 0; i < 32*3; i++)
printf("%d ", refLeft2[i]);
printf("\n");
#endif
int chroma_size = luma_size >> 1;
bool luma = (channel == 0);
bool cb = (channel == 1);
bool cr = (channel == 2);
int size = luma ? luma_size : chroma_size;
// Source and prediction buffers for the requested channel.
Pel* orig_pel = luma ? orig_yuv.getLumaAddr(0, size) : (cb ? orig_yuv.getCbAddr(0, size) : orig_yuv.getCrAddr(0, size));
Pel* pred_pel = luma ? pred_yuv.getLumaAddr(0, size) : (cb ? pred_yuv.getCbAddr(0, size) : pred_yuv.getCrAddr(0, size));
uint32_t stride = luma ? pred_yuv.getStride() : pred_yuv.getCStride();
// Reference samples were written (size-1) elements into the arrays (see
// ece408_intra_pred). Luma keeps unfiltered samples in refAbove1/refLeft1
// and filtered ones in refAbove2/refLeft2; Cb was loaded into
// refAbove1/refLeft1 and Cr into refAbove2/refLeft2 (chroma is unfiltered).
Pel *pAboveUnfilt = (cr ? refAbove2 : refAbove1) + size - 1;
Pel *pAboveFilt = luma ? (refAbove2 + size - 1) : pAboveUnfilt;
Pel *pLeftUnfilt = (cr ? refLeft2 : refLeft1) + size - 1;
Pel *pLeftFilt = luma ? (refLeft2 + size - 1) : pLeftUnfilt;
int nLog2SizeMinus2 = g_convertToBit[size];
// sa8d = SATD-style cost primitive for this block size.
pixelcmp_t sa8d = primitives.sa8d[nLog2SizeMinus2];
#ifdef VERBOSE
printf("Channel %d Orig:\n", channel);
for(int row = 0; row < size; row++) {
for(int col = 0; col < size; col++) {
printf("%02X ", orig_pel[row*size + col]);
}
printf("\n");
}
#endif
int sad;
// Planar (mode 0): luma blocks of 8x8 and larger use the filtered neighbors.
Pel *above = (luma && size >= 8) ? pAboveFilt : pAboveUnfilt;
Pel *left = (luma && size >= 8) ? pLeftFilt : pLeftUnfilt;
//TODO check to make sure we're filtering in all the right conditions
primitives.intra_pred[nLog2SizeMinus2][0](pred_pel, stride, left, above, /*dummy dirMode argument*/ 0, /*dummy filter argument*/ 0);
sad = sa8d(orig_pel, stride, pred_pel, stride);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("Planar SATD = %d\n", sad);
#endif
//TODO check to make sure we're filtering in all the right conditions
//DC (mode 1)
primitives.intra_pred[nLog2SizeMinus2][1](pred_pel, stride, pLeftUnfilt, pAboveUnfilt, /*dummy dirMode argument*/ 1, (luma && size <= 16));
sad = sa8d(orig_pel, stride, pred_pel, stride);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("Size = %d, stride = %d, DC:\n", size, stride);
for(int row = 0; row < size; row++) {
for(int col = 0; col < size; col++) {
printf("%02X ", pred_pel[row*size+col]);
}
printf("\n");
}
printf("SATD = %d\n", sad);
#endif
// Angular modes 2..34: predict all 33 angles at once into `tmp`, then
// compare. Horizontal modes (< 18) are compared against a transposed copy
// of the source so the same cost kernel can be reused.
primitives.transpose[nLog2SizeMinus2](buf_trans, orig_pel, stride);
//TODO check to make sure we're filtering in all the right conditions
primitives.intra_pred_allangs[nLog2SizeMinus2](tmp, pAboveUnfilt, pLeftUnfilt, pAboveFilt, pLeftFilt, (luma && (size <= 16)));
#ifdef VERBOSE
printf("Angular SATD = ", channel);
#endif
for (int mode = 2; mode < 35; mode++)
{
bool modeHor = (mode < 18);
Pel *cmp = (modeHor ? buf_trans : orig_pel);
intptr_t srcStride = (modeHor ? size : stride);
#ifdef VERBOSE
printf("Pred mode %d\n", mode);
for(int r = 0; r < size; r++) {
for(int c = 0; c < size; c++)
printf("%02X ", tmp[(mode-2) * (size * size) + r * size + c]);
printf("\n");
}
#endif
sad = sa8d(cmp, srcStride, &tmp[(mode - 2) * (size * size)], size);
*(sad_ptr++) = sad;
#ifdef VERBOSE
printf("%d, ", sad);
#endif
}
#ifdef VERBOSE
printf("\n");
#endif
}
//#undef VERBOSE
// True when pixel coordinate (r, c) lies inside a frame of
// frameWidth x frameHeight (r indexes rows, c indexes columns).
inline bool isAvailable(int frameWidth, int frameHeight, int r, int c) {
    const bool rowInRange = (0 <= r) && (r < frameHeight);
    const bool colInRange = (0 <= c) && (c < frameWidth);
    return rowInRange && colInRange;
}
//Channel is 0 for luma, 1 for Cb, 2 for Cr
// Gather the intra reference samples for CU number cu_index of the given
// channel (0 = luma, 1 = Cb, 2 = Cr). Fills refLeft[0..2*cuWidth] (top-left
// corner first, then down the left edge) and refAbove[0..2*cuWidth]
// (top-left corner first, then across the top), substituting unavailable
// neighbors per the HEVC reference-sample substitution rules. For luma only,
// also produces the [1 2 1]-filtered copies in refAboveFlt/refLeftFlt; for
// chroma those pointers may be NULL and are never touched.
void getReferencePixels(x265_picture *pic, unsigned int width, unsigned int height, unsigned int luma_size, unsigned int cu_index, Pel* refAbove, Pel* refLeft, Pel* refAboveFlt, Pel* refLeftFlt, int channel) {
// Chroma planes are half-resolution (4:2:0 is asserted by the caller).
uint32_t cuWidth = (channel == 0) ? luma_size : (luma_size / 2);
uint32_t cuWidth2 = cuWidth << 1;
uint32_t frameWidth = (channel == 0) ? width : (width / 2);
uint32_t frameHeight = (channel == 0) ? height : (height / 2);
uint32_t frameStride = pic->stride[channel];
//Base address of the array containing the required color component of the reconstructed image (equivalent to the original image for the ECE408 competition)
Pel *baseAddress = (Pel *)pic->planes[channel];
// CU's top-left pixel position, from its raster index in the CU grid.
int32_t topLeftR = (cu_index / (frameWidth / cuWidth)) * cuWidth;
int32_t topLeftC = (cu_index % (frameWidth / cuWidth)) * cuWidth;
//Find value for bottom-left neighbor
//Search left from bottom to top
bool bottomLeftFound = false;
for(int32_t neighborR = (topLeftR + cuWidth2 - 1), neighborC = (topLeftC - 1); neighborR >= (topLeftR - 1); neighborR--)
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
bottomLeftFound = true;
refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
//printf("Bottom left found on left (%d, %d) %d\n", neighborR, neighborC, refLeft[cuWidth2+1]);
break;
}
//If not found, search top from left to right
if(!bottomLeftFound) {
for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
bottomLeftFound = true;
refLeft[cuWidth2] = baseAddress[neighborR*frameStride + neighborC];
//printf("Bottom left found on top (%d, %d) %d \n", neighborR, neighborC, refLeft[cuWidth2+1]);
break;
}
}
}
//If still not found, no reference samples are available, so assign 50% value to all neighbors
if(!bottomLeftFound) {
refLeft[cuWidth2] = 1 << (BIT_DEPTH - 1);
//printf("Bottom left not found, using DC value %d\n", refLeft[cuWidth2]);
}
//Traverse bottom-left to top-left to top-right. If a pixel is not available, use the one before it (one below or to the left)
// refLeft is indexed top-down: idx 0 is the corner, idx cuWidth2 is the
// bottom-most sample (seeded above), so propagation runs upward.
for(int32_t neighborR = (topLeftR + cuWidth2 - 2), neighborC = (topLeftC - 1), idx = cuWidth2 - 1; neighborR >= (topLeftR - 1); neighborR--, idx--) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
refLeft[idx] = baseAddress[neighborR*frameStride + neighborC];
//printf("Left[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
}
else {
refLeft[idx] = refLeft[idx+1];
//printf("Left[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refLeft[idx]);
}
}
//Include the top-left corner in both refLeft and refAbove
refAbove[0] = refLeft[0];
for(int32_t neighborR = (topLeftR - 1), neighborC = topLeftC, idx = 1; neighborC <= (int32_t)(topLeftC + cuWidth2 - 1); neighborC++, idx++) {
if(isAvailable(frameWidth, frameHeight, neighborR, neighborC)) {
refAbove[idx] = baseAddress[neighborR*frameStride + neighborC];
//printf("Above[%d] (%d %d) available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
}
else {
refAbove[idx] = refAbove[idx-1];
//printf("Above[%d] (%d %d) not available: %d\n", idx, neighborR, neighborC, refAbove[idx]);
}
}
//Make filtered version (for luma only)
if(channel == 0) {
//Special cases for the corner, bottom, and right pixels, [1 2 1] FIR filter for the rest
//pF[ -1 ][ -1 ] = ( p[ -1 ][ 0 ] + 2 * p[ -1 ][ -1 ] + p[ 0 ][ -1 ] + 2 ) >> 2
refLeftFlt[0] = refAboveFlt[0] = (refLeft[1] + 2 * refLeft[0] + refAbove[1] + 2) >> 2;
for(uint32_t idx = 1; idx < cuWidth2; idx++) {
refLeftFlt[idx] = (refLeft[idx-1] + 2 * refLeft[idx] + refLeft[idx+1] + 2) >> 2;
refAboveFlt[idx] = (refAbove[idx-1] + 2 * refAbove[idx] + refAbove[idx+1] + 2) >> 2;
}
// End samples are copied unfiltered.
refLeftFlt[cuWidth2] = refLeft[cuWidth2];
refAboveFlt[cuWidth2] = refAbove[cuWidth2];
}
}
//luma_size is the (square) block size of luma blocks, chroma blocks are assumed (luma_size/2)x(luma_size/2)
// Run intra prediction for one CU: copy its pixels from the frame into the
// global orig_yuv, build the reference-sample arrays, and compute the 35
// per-mode SATD costs for luma (y_ptr) and — when luma_size > 4 — Cb
// (cb_ptr) and Cr (cr_ptr). cu_index enumerates CUs in raster order over a
// (width/luma_size) x (height/luma_size) grid.
void ece408_intra_pred(x265_picture *pic, int width, int height, int luma_size, unsigned int cu_index, int32_t *y_ptr, int32_t *cb_ptr, int32_t *cr_ptr) {
unsigned int luma_r = (cu_index / (width / luma_size)) * luma_size;
unsigned int luma_c = (cu_index % (width / luma_size)) * luma_size;
//Copy luma bytes into orig_yuv
Pel *walker = orig_yuv.getLumaAddr();
for(int i = 0; i < luma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[0]) + (((luma_r + i)*pic->stride[0]) + luma_c), luma_size*sizeof(*walker));
walker += luma_size;
}
if(luma_size > 4) {
//Copy chroma bytes into orig_yuv
unsigned int chroma_r = luma_r / 2;
unsigned int chroma_c = luma_c / 2;
unsigned int chroma_size = luma_size / 2;
walker = orig_yuv.getCbAddr();
for(unsigned int i = 0; i < chroma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[1]) + (((chroma_r + i)*pic->stride[1]) + chroma_c), chroma_size*sizeof(*walker));
walker += chroma_size;
}
walker = orig_yuv.getCrAddr();
for(unsigned int i = 0; i < chroma_size; i++) {
memcpy(walker, ((Pel *)pic->planes[2]) + (((chroma_r + i)*pic->stride[2]) + chroma_c), chroma_size*sizeof(*walker));
walker += chroma_size;
}
}
//Get the unfiltered and filtered reference pixels. Position them (cuWidth-1) elements into their respective arrays so that the
//angular prediction function can use the unused space at the beginning of the array to extend the reference pixels as described
//in equations 8-48 and 8-56 in Section 8.4.4.2.6 of the H.265 standard.
getReferencePixels(pic, width, height, luma_size, cu_index, refAbove1+luma_size-1, refLeft1+luma_size-1, refAbove2+luma_size-1, refLeft2+luma_size-1, /*channel*/ 0);
#ifdef VERBOSE
printf("Above ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refAbove1[i+luma_size-1]);
printf("\nLeft ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refLeft1[i+luma_size-1]);
printf("\nAboveFilt ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refAbove2[i+luma_size-1]);
printf("\nLeftFilt ");
for(int i = 0; i < (2*luma_size+1); i++)
printf("%3d ", refLeft2[i+luma_size-1]);
printf("\n");
#endif
ece408_intra_pred_channel(luma_size, 0, y_ptr);
if(luma_size > 4) { //No 2x2 chroma blocks, and 4x4 chroma blocks are covered with 8x8 luma
// Chroma reference samples are (luma_size/2 - 1) elements in; Cb reuses
// refAbove1/refLeft1 and Cr reuses refAbove2/refLeft2 (no filtered copies).
getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove1+luma_size/2)-1, refLeft1+(luma_size/2)-1, NULL, NULL, /*channel*/ 1);
ece408_intra_pred_channel(luma_size, 1, cb_ptr);
getReferencePixels(pic, width, height, luma_size, cu_index, (refAbove2+luma_size/2)-1, refLeft2+(luma_size/2)-1, NULL, NULL, /*channel*/ 2);
ece408_intra_pred_channel(luma_size, 2, cr_ptr);
}
}
// CPU reference implementation: for each frame and each luma PB size
// (4, 8, 16, 32) compute the 35-mode SATD costs of every CU, then sort each
// CU's modes by cost. The sort temporarily packs the mode number into the
// low 8 bits of the left-shifted SATD value. NOTE(review): this assumes the
// SATD fits in the remaining signed bits so `<< 8` does not overflow int32 —
// presumably true for these block sizes/bit depths; verify.
// Returns a new[]-allocated array of 4*num_frames results (caller frees).
ece408_intra_pred_result *ece408_competition_ref(TEncCfg *encoder, x265_picture *pics_in, int num_frames) {
ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
ece408_intra_pred_result *cur_result = ret;
for(int i = 0; i < num_frames; i++) {
for(int luma_size_shift = 2; luma_size_shift <= 5; luma_size_shift++) {
int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
cur_result->create(param->sourceWidth, param->sourceHeight, luma_size);
int32_t *y_satd_results = cur_result->y_satd_results;
uint8_t *y_modes = cur_result->y_modes;
int32_t *cb_satd_results = cur_result->cb_satd_results;
uint8_t *cb_modes = cur_result->cb_modes;
int32_t *cr_satd_results = cur_result->cr_satd_results;
uint8_t *cr_modes = cur_result->cr_modes;
// Recreate the scratch YUV buffers at this block size.
orig_yuv.destroy();
orig_yuv.create(luma_size, luma_size, X265_CSP_I420);
pred_yuv.destroy();
pred_yuv.create(luma_size, luma_size, X265_CSP_I420);
for(unsigned int cuIndex = 0; cuIndex < (unsigned int)((encoder->param.sourceWidth/luma_size)*(encoder->param.sourceHeight/luma_size)); cuIndex++) {
ece408_intra_pred(&(pics_in[i]),
encoder->param.sourceWidth,
encoder->param.sourceHeight,
luma_size,
cuIndex,
&(y_satd_results[35*cuIndex]),
&(cb_satd_results[35*cuIndex]),
&(cr_satd_results[35*cuIndex]));
//printf("SATD results: ");
//for(int l = 0; l < 35; l++) {
// printf("(%d, %d, %d, %d) ", l, y_satd_results[35*cuIndex+l], cb_satd_results[35*cuIndex+l], cr_satd_results[35*cuIndex+l]);
//}
//printf("\n");
// Pack mode index into the low byte so a plain sort orders by SATD
// while carrying the mode along (chroma exists only for sizes > 4).
for(int mode = 0; mode < 35; mode++) {
y_satd_results[35*cuIndex + mode] = (y_satd_results[35*cuIndex + mode] << 8) | mode;
if(luma_size > 4) {
cb_satd_results[35*cuIndex + mode] = (cb_satd_results[35*cuIndex + mode] << 8) | mode;
cr_satd_results[35*cuIndex + mode] = (cr_satd_results[35*cuIndex + mode] << 8) | mode;
}
}
std::sort(&(y_satd_results[35*cuIndex]), &(y_satd_results[35*cuIndex+35]));
if(luma_size > 4) {
std::sort(&(cb_satd_results[35*cuIndex]), &(cb_satd_results[35*cuIndex+35]));
std::sort(&(cr_satd_results[35*cuIndex]), &(cr_satd_results[35*cuIndex+35]));
}
// Unpack: mode order into *_modes, restore the raw SATD values.
for(int mode = 0; mode < 35; mode++) {
y_modes[35*cuIndex+mode] = (y_satd_results[35*cuIndex+mode] & 0xFF);
y_satd_results[35*cuIndex+mode] >>= 8;
if(luma_size > 4) {
cb_modes[35*cuIndex+mode] = (cb_satd_results[35*cuIndex+mode] & 0xFF);
cb_satd_results[35*cuIndex+mode] >>= 8;
cr_modes[35*cuIndex+mode] = (cr_satd_results[35*cuIndex+mode] & 0xFF);
cr_satd_results[35*cuIndex+mode] >>= 8;
}
}
}
#ifdef MODE_HIST
int ymode_hist[35], cbmode_hist[35], crmode_hist[35];
for(int l = 0; l < 35; l++) {
ymode_hist[l] = cbmode_hist[l] = crmode_hist[l] = 0;
}
for(int l = 0; l < (35*((param->sourceWidth/luma_size)*(param->sourceHeight/luma_size))); l += 35) { //+= 1 to make sure all modes are accounted for, += 35 for histogram of best modes
ymode_hist[y_modes[l]]++;
if(luma_size > 4) {
cbmode_hist[cb_modes[l]]++;
crmode_hist[cr_modes[l]]++;
}
}
printf("ymode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", ymode_hist[l]);
if(luma_size > 4) {
printf("\ncbmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", cbmode_hist[l]);
printf("\ncrmode hist: ");
for(int l = 0; l < 35; l++)
printf("%d ", crmode_hist[l]);
}
printf("\n");
#endif
cur_result++;
}
}
return ret;
}
//TODO sort student results by satd result *and* mode number to make sure we have *exactly* the same bytes in
//both arrays, even if several modes have the same SATD value.
//We want to do the sort here so that students are not required to (it's not necessary in a real x265 use case).
// Compare student intra-prediction results against the reference, field by
// field. ref and student each hold 4*num_frames entries (one per frame per
// luma block size). On the first mismatch prints a diagnostic and returns
// false; returns true when everything matches byte for byte.
// Chroma arrays are only compared for luma block sizes > 4 (smaller sizes
// carry no chroma data).
bool ece408_compare(ece408_intra_pred_result *ref, ece408_intra_pred_result *student, int num_frames) {
    if(student == NULL) {
        printf("Student result array pointer is NULL\n");
        return false;
    }
    for(int i = 0; i < (4*num_frames); i++) {
        // NOTE: a leftover debug loop that unconditionally printed
        // ref[0].y_modes[35..69] / ref[0].y_satd_results[35..69] was removed
        // here — it could read past the end of those arrays whenever a
        // result held fewer than two blocks.
        if(ref[i].luma_block_size != student[i].luma_block_size) {
            printf("Ref result %d luma block size = %d, student = %d\n", i, ref[i].luma_block_size, student[i].luma_block_size);
            return false;
        }
        if(ref[i].num_blocks != student[i].num_blocks) {
            printf("Ref result %d num_blocks = %d, student = %d\n", i, ref[i].num_blocks, student[i].num_blocks);
            return false;
        }
        if(memcmp(ref[i].y_modes, student[i].y_modes, 35*ref[i].num_blocks*sizeof(*ref[i].y_modes))) {
            printf("Result %d, ref and student y_modes mismatched\n", i);
            return false;
        }
        if(memcmp(ref[i].y_satd_results, student[i].y_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].y_satd_results))) {
            printf("Result %d, ref and student y_satd_results mismatched\n", i);
            return false;
        }
        if(ref[i].luma_block_size > 4) {
            if(memcmp(ref[i].cb_modes, student[i].cb_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cb_modes))) {
                printf("Result %d, ref and student cb_modes mismatched\n", i);
                return false;
            }
            if(memcmp(ref[i].cb_satd_results, student[i].cb_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cb_satd_results))) {
                printf("Result %d, ref and student cb_satd_results mismatched\n", i);
                return false;
            }
            if(memcmp(ref[i].cr_modes, student[i].cr_modes, 35*ref[i].num_blocks*sizeof(*ref[i].cr_modes))) {
                printf("Result %d, ref and student cr_modes mismatched\n", i);
                return false;
            }
            if(memcmp(ref[i].cr_satd_results, student[i].cr_satd_results, 35*ref[i].num_blocks*sizeof(*ref[i].cr_satd_results))) {
                printf("Result %d, ref and student cr_satd_results mismatched\n", i);
                return false;
            }
        }
    }
    return true;
}
// Check a CUDA runtime call: on failure print the error string with
// file/line context (same message format as the original inline checks)
// and terminate.
#define ECE408_CUDA_CHECK(call)                                               \
    do {                                                                      \
        cudaError_t ece408_err_ = (call);                                     \
        if (ece408_err_ != cudaSuccess)                                       \
        {                                                                     \
            printf("\n%s in %s at line %d\n", cudaGetErrorString(ece408_err_),\
                   __FILE__, __LINE__);                                       \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// GPU implementation of the competition entry point.
// Uploads the frame's Y/Cr/Cb planes to the device once, then for each luma
// PB size launches hevcPredictionKernel and copies the per-CU SATD results
// and mode orderings back into the returned array (4 entries per frame,
// caller frees). NOTE: only the 4x4/8x8/16x16 passes are run (shift 2..4);
// the 32x32 entry is left untouched, matching the original loop bounds.
// Fixes vs. the original: device result pointers are NULL-initialized (they
// were read uninitialized on paths that skipped allocation), cb_modes is
// correctly re-cleared after being freed (was re-clearing cr_modes, leaving
// cb_modes dangling), and the frame-plane device buffers are freed.
ece408_intra_pred_result *ece408_competition(ece408_frame *imgs, int num_frames) {
    ece408_frame *imgs1 = imgs;
    ece408_intra_pred_result *ret = new ece408_intra_pred_result[4*num_frames]; //4x4,8x8,16x16,32x32
    ece408_intra_pred_result *cur_result = ret;
    unsigned int debug_print = ((imgs->height+4-1)/4)*((imgs->width+4-1)/4);
    printf("debug print : %d\n",debug_print );
    uint8_t *d_y, *d_cr, *d_cb;
    // Plane sizes in bytes. NOTE(review): for 4:2:0 each chroma plane is
    // y_size/4; y_size/2 over-allocates (the original's "do we need a ceil?"
    // question) — kept as-is to avoid changing transfer sizes.
    unsigned int y_size = ((imgs->width) * (imgs->height)) * sizeof(uint8_t);
    printf("\n Y SIZE : %u\n", y_size);
    unsigned int cr_size, cb_size;
    cr_size = cb_size = (y_size/2);
    ///////// timer code ////////////////
    struct timeval timer;
    gettimeofday(&timer,NULL);
    printf("PARALLEL CODE START TIMER in secs :%ld\n",timer.tv_sec);
    printf("PARALLEL CODE START TIMER in usecs:%ld\n",timer.tv_usec);
    // Allocate and upload the frame planes once; reused across block sizes.
    ECE408_CUDA_CHECK(cudaMalloc((void **) &d_y, y_size));
    ECE408_CUDA_CHECK(cudaMalloc((void **) &d_cr, cr_size));
    ECE408_CUDA_CHECK(cudaMalloc((void **) &d_cb, cb_size));
    ECE408_CUDA_CHECK(cudaDeviceSynchronize());
    ECE408_CUDA_CHECK(cudaMemcpy(d_y, imgs1->y, y_size, cudaMemcpyHostToDevice));
    ECE408_CUDA_CHECK(cudaMemcpy(d_cr, imgs1->cr, cr_size, cudaMemcpyHostToDevice));
    ECE408_CUDA_CHECK(cudaMemcpy(d_cb, imgs1->cb, cb_size, cudaMemcpyHostToDevice));
    printf("I AM AT THE END CUDA MEMCPY STAGE 1\n");
    ECE408_CUDA_CHECK(cudaDeviceSynchronize());
    for(int i = 0; i < num_frames; i++) {
        int res_count = 0;
        for(int luma_size_shift = 2; luma_size_shift <= 4; luma_size_shift++) {
            int luma_size = 1 << luma_size_shift; // luma_size x luma_size luma PBs
            cur_result->create(imgs1->width, imgs1->height, luma_size);
            // Device buffers, NULL-initialized so the frees below are valid
            // even on paths where the chroma buffers were never allocated.
            int32_t *d_res_y = NULL;
            int32_t *d_res_cr = NULL;
            int32_t *d_res_cb = NULL;
            uint8_t *d_y_modes = NULL;
            uint8_t *d_cr_modes = NULL;
            uint8_t *d_cb_modes = NULL;
            unsigned int num_blocks = ((imgs->height+luma_size-1)/luma_size)*((imgs->width+luma_size-1)/luma_size);
            unsigned int y_res_size = 35*num_blocks*sizeof(int32_t); // bytes
            unsigned int mode_size = 35*num_blocks*sizeof(uint8_t);  // bytes
            unsigned int cr_res_size, cb_res_size;
            printf("No.of blocks launched:%u\n", (unsigned int)(y_res_size/sizeof(int32_t)));
            cr_res_size = cb_res_size = y_res_size;
            // Allocate result buffers on the device.
            ECE408_CUDA_CHECK(cudaMalloc((void **) &d_res_y, y_res_size));
            ECE408_CUDA_CHECK(cudaMalloc((void **) &d_y_modes, mode_size));
            // Chroma is only predicted for 4 <= luma_size < 32 (always true
            // for the 4/8/16 sizes this loop visits).
            if ( luma_size >= 4 && luma_size < 32 )
            {
                // Re-size the host chroma mode arrays for this block size.
                if(cur_result->cr_modes){
                    free(cur_result->cr_modes);
                    cur_result->cr_modes = NULL;
                }
                if(cur_result->cb_modes){
                    free(cur_result->cb_modes);
                    cur_result->cb_modes = NULL; // fixed: originally re-cleared cr_modes, leaving cb_modes dangling
                }
                cur_result->cr_modes = (uint8_t *)malloc(mode_size); // mode_size is already in bytes
                cur_result->cb_modes = (uint8_t *)malloc(mode_size);
                ECE408_CUDA_CHECK(cudaMalloc((void **) &d_res_cr, cr_res_size));
                ECE408_CUDA_CHECK(cudaMalloc((void **) &d_res_cb, cb_res_size));
                ECE408_CUDA_CHECK(cudaMalloc((void **) &d_cr_modes, mode_size));
                ECE408_CUDA_CHECK(cudaMalloc((void **) &d_cb_modes, mode_size));
            }
            ECE408_CUDA_CHECK(cudaDeviceSynchronize());
            // One block per CU, one thread per pixel of the CU.
            dim3 dimGrid = dim3((int)ceil((imgs->width)/(float)luma_size), (int)ceil((imgs->height)/(float)luma_size), 1);
            dim3 dimBlock = dim3(luma_size, luma_size, 1);
            printf("\n KERNEL CONFIG: %d %d %d %d\n", dimGrid.x, dimGrid.y, dimBlock.x, dimBlock.y);
            hevcPredictionKernel<<<dimGrid, dimBlock>>>(d_y, d_cr, d_cb, d_res_y, d_res_cr, d_res_cb, d_y_modes, d_cr_modes, d_cb_modes, imgs->height, imgs->width);
            ECE408_CUDA_CHECK(cudaDeviceSynchronize());
            printf("current result num_block_size is %d\n", num_blocks);
            printf("from serial code num_block is %d\n",cur_result->num_blocks);
            // Copy results back to the host-side result struct.
            ECE408_CUDA_CHECK(cudaMemcpy(cur_result->y_satd_results, d_res_y, y_res_size, cudaMemcpyDeviceToHost));
            ECE408_CUDA_CHECK(cudaMemcpy(cur_result->cr_satd_results, d_res_cr, cr_res_size, cudaMemcpyDeviceToHost));
            ECE408_CUDA_CHECK(cudaMemcpy(cur_result->cb_satd_results, d_res_cb, cb_res_size, cudaMemcpyDeviceToHost));
            ECE408_CUDA_CHECK(cudaMemcpy(cur_result->y_modes, d_y_modes, mode_size, cudaMemcpyDeviceToHost));
            if(luma_size < 32){
                ECE408_CUDA_CHECK(cudaMemcpy(cur_result->cr_modes, d_cr_modes, mode_size, cudaMemcpyDeviceToHost));
                ECE408_CUDA_CHECK(cudaMemcpy(cur_result->cb_modes, d_cb_modes, mode_size, cudaMemcpyDeviceToHost));
            }
            // Release this size's device buffers (pointers are NULL when the
            // corresponding allocation was skipped).
            if(d_res_cr)   { cudaFree(d_res_cr);   d_res_cr = NULL; }
            if(d_res_cb)   { cudaFree(d_res_cb);   d_res_cb = NULL; }
            if(d_res_y)    { cudaFree(d_res_y);    d_res_y = NULL; }
            if(d_y_modes)  { cudaFree(d_y_modes);  d_y_modes = NULL; }
            if(d_cr_modes) { cudaFree(d_cr_modes); d_cr_modes = NULL; }
            if(d_cb_modes) { cudaFree(d_cb_modes); d_cb_modes = NULL; }
            cur_result++;
            res_count++;
        }
        gettimeofday(&timer,NULL);
        printf("PARALLEL CODE END TIMER in secs :%ld\n",timer.tv_sec);
        printf("PARALLEL CODE END TIMER in usecs:%ld\n",timer.tv_usec);
    }
    // Free the frame planes (the original leaked these).
    cudaFree(d_y);
    cudaFree(d_cr);
    cudaFree(d_cb);
    return ret;
}
#undef ECE408_CUDA_CHECK
|
6fe97073c9f35312ab95fc5f2ccb089a90160a4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN
namespace mshadow {
namespace cuda {
// True iff value lies in the closed interval [lowerBound, upperBound].
// Written as two explicit comparisons so NaN inputs (for floating DType)
// compare false, exactly as in the original expression.
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
  const bool notBelow = (value >= lowerBound);
  const bool notAbove = (value <= upperBound);
  return notBelow && notAbove;
}
// Forward pass of the bilinear sampler.
// `data` is the input feature map in NCHW layout (o_n, i_c, i_h, i_w);
// `grid` holds normalized sampling coordinates laid out as (o_n, 2, o_h, o_w),
// x in channel 0 and y in channel 1, both mapped from [-1, 1] into pixel
// space below.  Each thread computes one element of `out` (o_n, o_c, o_h, o_w)
// via a 2-D grid-stride loop.  Sample points outside the input are clamped:
// corners replicate the nearest corner pixel, sides interpolate along the
// nearest edge (see the else-if ladder).
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
                                             const int i_w, const DType* data,
                                             const DType* grid, const int o_n,
                                             const int o_c, const int o_h,
                                             const int o_w, DType* out) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < o_n * o_c * o_h * o_w;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) is the element in out
    //printf("test");
    int w = index % o_w;
    int h = (index / o_w) % o_h;
    int c = (index / o_w / o_h) % o_c;
    int n = index / o_w / o_h / o_c;
    int out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
    int grid_index = n * o_h * o_w * 2 + h * o_w + w;
    // Map the normalized grid coordinates from [-1, 1] into [0, i_w] / [0, i_h].
    DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h ) / 2;
    DType x_real = (*(grid + grid_index) + 1) * (i_w ) / 2;
    // Shift by half a pixel so integer coordinates land on pixel centers.
    DType y_fake = y_real - 0.5;
    DType x_fake = x_real - 0.5;
    //printf("%d,%d,%d,%d",x_fake,i_w,y_fake,i_h);
    // Interior case (x real >= 0.5 and <= w - 0.5): ordinary 4-tap bilinear
    // interpolation around the top-left neighbour.
    if (between(x_fake, 0, i_w-1) && between(y_fake, 0, i_h-1)){
      int top_left_y = static_cast<int>(floor(y_fake));
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      DType top_left_v = 0;
      DType top_right_v = 0;
      DType bottom_left_v = 0;
      DType bottom_right_v = 0;
      // Fetch the four neighbours, leaving out-of-range taps at zero.
      if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
        top_left_v = *(data + data_index);
      if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
        top_right_v = *(data + data_index + 1);
      if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
        bottom_left_v = *(data + data_index + i_w);
      if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
        bottom_right_v = *(data + data_index + i_w + 1);
      *(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
                         top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
                         bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
                         bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
    }
    // Top-left corner: replicate pixel (row 0, col 0).
    else if (x_fake <= 0 && y_fake <= 0){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + 0;
      *(out+out_index) = *(data + data_index);
    }
    // Top-right corner: replicate pixel (row 0, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake <= 0){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + i_w -1 ;
      *(out+out_index) = *(data + data_index) ;
    }
    // Bottom-right corner: replicate pixel (row i_h - 1, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake >=(i_h -1)){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1) * i_w + i_w -1 ;
      *(out+out_index) = *(data + data_index) ;
    }
    // Bottom-left corner: replicate pixel (row i_h - 1, col 0).
    else if (x_fake <= 0 && y_fake >=(i_h - 1 )){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1 ) * i_w + 0;
      *(out+out_index) = *(data + data_index) ;
    }
    // Left edge: interpolate vertically along column 0.
    else if (x_fake <= 0 ){
      DType top_right_v = 0;
      DType bottom_right_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +
                       top_left_y * i_w ;
      top_right_v = *(data + data_index );
      bottom_right_v = *(data + data_index + i_w );
      *(out+out_index) = top_right_v * ( top_left_y_w) +bottom_right_v * (1.0 - top_left_y_w);
    }
    // Top edge: interpolate horizontally along row 0.
    else if (y_fake <= 0 ){
      DType bottom_left_v = 0;
      DType bottom_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_x;
      bottom_left_v = *(data + data_index );
      bottom_right_v = *(data + data_index + 1);
      *(out+out_index) = bottom_left_v*(top_left_x_w) + bottom_right_v *(1.0 - top_left_x_w);
    }
    // Right edge: interpolate vertically along the last column
    // (the right neighbour collapses onto the left one).
    else if (x_fake >= i_w -1 ){
      DType top_left_v = 0;
      DType bottom_left_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      //right = left
      //int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      top_left_v = *(data + data_index );
      bottom_left_v = *(data + data_index + i_w);
      *(out+out_index) = bottom_left_v*(1.0 - top_left_y_w) + top_left_v * ( top_left_y_w);
    }
    // Bottom edge: interpolate horizontally along the last row.
    else if (y_fake >= i_h -1 ){
      DType top_left_v = 0;
      DType top_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      //int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      top_left_v = *(data + data_index );
      top_right_v = *(data + data_index + 1);
      *(out+out_index) = top_left_v*( top_left_x_w) + top_right_v * (1.0 - top_left_x_w);
    }
  }
}
// Backward pass of the bilinear sampler.
// One thread handles one (n, h, w) output location and loops over all
// channels.  The gradient w.r.t. the input feature map is accumulated into
// `g_input` via atomicAdd (several output locations may sample the same
// input pixel); the gradient w.r.t. the sampling grid is written to
// `grad_grid`.  Req1/Req2 are mxnet::OpReqType values for the data and grid
// gradients; kNullOp suppresses the corresponding write.  The border
// branches mirror the forward kernel's clamping and contribute zero grid
// gradient.
template<typename DType, int Req1, int Req2>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
                                              const int i_w, const DType* grad,
                                              const DType* data, const int o_n,
                                              const int o_c, const int o_h,
                                              const int o_w, DType* g_input,
                                              const DType* grid_src,
                                              DType* grad_grid) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < o_n * o_h * o_w;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) is the element in grad
    int w = index % o_w;
    int h = (index / o_w) % o_h;
    int n = index / o_w / o_h;
    // Gradients of the top-left interpolation weights, accumulated over the
    // channel loop, then rescaled into grad_grid after it.
    DType top_left_y_gw = 0.0;
    DType top_left_x_gw = 0.0;
    int grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
    //DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h ) / 2;
    // Map the normalized grid coordinates into pixel space (same as forward).
    DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h ) / 2;
    DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w ) / 2;
    DType y_fake = y_real - 0.5;
    DType x_fake = x_real - 0.5;
    int top_left_y = static_cast<int>(floor(y_fake));
    int top_left_x = static_cast<int>(floor(x_fake));
    DType top_left_y_w = 1.0 - (y_fake - top_left_y);
    DType top_left_x_w = 1.0 - (x_fake - top_left_x);
    // Interior case: full 4-tap bilinear gradient w.r.t. data and grid.
    if (between(x_fake, 0, i_w-1) && between(y_fake, 0, i_h-1)){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        // calc 4 vertex value in input data
        DType top_left_v = 0;
        DType top_right_v = 0;
        DType bottom_left_v = 0;
        DType bottom_right_v = 0;
        // calc input grad
        if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd (&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
          }
          top_left_v = *(data + data_index);
        }
        if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd(&g_input[data_index + 1],
                      *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w));
          }
          top_right_v = *(data + data_index + 1);
        }
        if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd(&g_input[data_index+ i_w],
                      *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w);
          }
          bottom_left_v = *(data + data_index + i_w);
        }
        if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd(&g_input[data_index+ i_w + 1],*(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w));
          }
          bottom_right_v = *(data + data_index + i_w + 1);
        }
        // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
        top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
                          (top_left_v - top_right_v - bottom_left_v + bottom_right_v)
                          * top_left_x_w);
        top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
                          (top_left_v - top_right_v - bottom_left_v + bottom_right_v)
                          * top_left_y_w);
      }
      if (Req2 != mxnet::kNullOp) {
        // calc grad of grid
        *(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h ) / 2;
        *(grad_grid + grid_src_index) += top_left_x_gw * (i_w ) / 2;
      }
    }
    // Top-left corner: the forward pass replicated pixel (0, 0), so the whole
    // gradient flows to that pixel and none to the grid.
    else if (x_fake <= 0 && y_fake <= 0){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + 0;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index], *(grad + grad_index));
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Top-right corner: gradient flows to pixel (row 0, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake <= 0){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + i_w -1 ;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index], *(grad + grad_index) );
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Bottom-right corner: gradient flows to pixel (row i_h - 1, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake >=(i_h -1)){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1) * i_w + i_w -1 ;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[ data_index] , *(grad + grad_index)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Bottom-left corner: gradient flows to pixel (row i_h - 1, col 0).
    else if (x_fake <= 0 && y_fake >=(i_h - 1 )){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1 ) * i_w + 0;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index] , *(grad + grad_index)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Left edge: forward interpolated vertically along column 0, so split
    // the gradient between the two vertical neighbours.
    else if (x_fake <= 0 ){
      // NOTE(review): top_right_v / bottom_right_v are declared but never
      // read in this branch.
      DType top_right_v = 0;
      DType bottom_right_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w ;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index], *(grad + grad_index)*( top_left_y_w)) ;
          atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index)*( 1.0 - top_left_y_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Top edge: split the gradient between the two horizontal neighbours
    // in row 0.
    else if (y_fake <= 0 ){
      // NOTE(review): bottom_left_v / bottom_right_v are declared but never
      // read in this branch.
      DType bottom_left_v = 0;
      DType bottom_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index] , *(grad + grad_index)*( top_left_x_w)) ;
          atomicAdd(&g_input[data_index+ 1] , *(grad + grad_index)*( 1.0 - top_left_x_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Right edge: split the gradient between the two vertical neighbours in
    // the last column.
    else if (x_fake >= i_w -1 ){
      // NOTE(review): top_left_v / bottom_left_v are declared but never read
      // in this branch.
      DType top_left_v = 0;
      DType bottom_left_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      //right = left
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index] , *(grad + grad_index)*( top_left_y_w) );
          atomicAdd(&g_input[data_index+ i_w] ,*(grad + grad_index)*( 1.0 - top_left_y_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Bottom edge: split the gradient between the two horizontal neighbours
    // in the last row.
    else if (y_fake >= i_h -1 ){
      // NOTE(review): top_left_v / top_right_v are declared but never read
      // in this branch.
      DType top_left_v = 0;
      DType top_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      //int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index],*(grad + grad_index)*( top_left_x_w)) ;
          atomicAdd(&g_input[data_index+ 1] ,*(grad + grad_index)*( 1.0 - top_left_x_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
  }
}
} // namespace cuda
// Launches BilinearSamplerForwardKernel with one thread per output element.
// `output` is (n, c, h, w); `input` supplies (i_c, i_h, i_w); `grid_src`
// holds the normalized sampling coordinates.  All tensors must live on the
// same device; the launch is asynchronous on `output`'s stream.
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
                                   const Tensor<gpu, 4, DType> &input,
                                   const Tensor<gpu, 4, DType> &grid_src) {
  DType *out = output.dptr_;
  const DType *data = input.dptr_;
  const DType *grid = grid_src.dptr_;
  int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3);
  int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3);
  using namespace cuda;
  // Spill excess blocks into grid.y once the x-dimension limit is reached.
  const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
  const int grid_dim_y =
    (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
  dim3 num_blocks(grid_dim_x, grid_dim_y);
  dim3 threads_per_block(kMaxThreadsPerBlock);
  CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
  hipStream_t stream = Stream<gpu>::GetStream(output.stream_);
  // Use hipLaunchKernelGGL for consistency with BilinearSamplerBackward;
  // the raw triple-chevron launch here was a leftover of incomplete
  // hipification of this file.
  hipLaunchKernelGGL((cuda::BilinearSamplerForwardKernel<DType>),
                     dim3(num_blocks), dim3(threads_per_block), 0, stream,
                     i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out);
  // post kernel check: surfaces launch-configuration errors without
  // clearing the sticky error state.
  hipError_t err = hipPeekAtLastError();
  CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
// Launches BilinearSamplerBackwardKernel to compute the gradients w.r.t.
// the input feature map (`input_grad`) and the sampling grid (`ggrid`) from
// the output gradient.  data_req / grid_req select the write behaviour for
// each gradient (kNullOp skips it).  Asynchronous on `input_grad`'s stream.
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
                                    const Tensor<gpu, 4, DType> &ggrid,
                                    const Tensor<gpu, 4, DType> &output_grad,
                                    const Tensor<gpu, 4, DType> &input_data,
                                    const Tensor<gpu, 4, DType> &grid,
                                    const mxnet::OpReqType data_req,
                                    const mxnet::OpReqType grid_req) {
  using namespace mxnet;
  // Raw device pointers for the gradient buffers and saved forward inputs.
  DType *g_input = input_grad.dptr_;
  DType *grad_grid = ggrid.dptr_;
  const DType *grid_src = grid.dptr_;
  const DType *grad = output_grad.dptr_;
  const DType *data = input_data.dptr_;
  int o_n = output_grad.size(0), o_c = output_grad.size(1),
      o_h = output_grad.size(2), o_w = output_grad.size(3);
  int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3);
  using namespace cuda;
  // One thread per (n, h, w) location (channels are looped inside the
  // kernel), spilling extra blocks into grid.y when grid.x is exhausted.
  const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
                        / kMaxThreadsPerBlock;
  const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
  const int grid_dim_y =
    (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
  dim3 num_blocks(grid_dim_x, grid_dim_y);
  dim3 threads_per_block(kMaxThreadsPerBlock);
  CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
  hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
  // Instantiate the kernel for the concrete (data_req, grid_req) pair.
  MXNET_REQ_TYPE_SWITCH(data_req, Req1, {
    MXNET_REQ_TYPE_SWITCH(grid_req, Req2, {
      hipLaunchKernelGGL(( cuda::BilinearSamplerBackwardKernel<DType, Req1, Req2>)
        , dim3(num_blocks), dim3(threads_per_block), 0, stream ,
        i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
    });
  });
  // post kernel check
  hipError_t err = hipPeekAtLastError();
  CHECK_EQ(err, hipSuccess) << hipGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory for BilinearSampler: prefers the cuDNN
// implementation when built with cuDNN support, unless the user explicitly
// disabled it via param.cudnn_off.  `dtype` selects the DType instantiation.
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
  Operator *op = nullptr;
#if MXNET_USE_CUDNN == 1
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    if (param.cudnn_off.has_value() && param.cudnn_off.value()) {
      op = new BilinearSamplerOp<gpu, DType>(param);
    } else {
      op = new CuDNNBilinearSamplerOp<DType>(param);
    }
  })
#else
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new BilinearSamplerOp<gpu, DType>(param);
  })
#endif  // MXNET_USE_CUDNN
  return op;
}
} // namespace op
} // namespace mxnet
| 6fe97073c9f35312ab95fc5f2ccb089a90160a4c.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file bilinear_sampler.cu
* \brief
* \author Xu Dong
*/
#include "./bilinear_sampler-inl.h"
#include <algorithm>
#include "../common/cuda_utils.h"
#if MXNET_USE_CUDNN == 1
#include "./cudnn_bilinear_sampler-inl.h"
#endif // MXNET_USE_CUDNN
namespace mshadow {
namespace cuda {
// Closed-interval membership test: true iff value lies in
// [lowerBound, upperBound]. Written as two guard checks; a NaN value
// fails both comparisons and yields false, matching the conjunction form.
template<typename DType>
__device__ bool between(DType value, int lowerBound, int upperBound) {
  if (value < lowerBound) return false;
  return value <= upperBound;
}
// Forward pass of the bilinear sampler.
// `data` is the input feature map in NCHW layout (o_n, i_c, i_h, i_w);
// `grid` holds normalized sampling coordinates laid out as (o_n, 2, o_h, o_w),
// x in channel 0 and y in channel 1, both mapped from [-1, 1] into pixel
// space below.  Each thread computes one element of `out` (o_n, o_c, o_h, o_w)
// via a 2-D grid-stride loop.  Sample points outside the input are clamped:
// corners replicate the nearest corner pixel, sides interpolate along the
// nearest edge (see the else-if ladder).
template<typename DType>
__global__ void BilinearSamplerForwardKernel(const int i_c, const int i_h,
                                             const int i_w, const DType* data,
                                             const DType* grid, const int o_n,
                                             const int o_c, const int o_h,
                                             const int o_w, DType* out) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < o_n * o_c * o_h * o_w;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) is the element in out
    //printf("test");
    int w = index % o_w;
    int h = (index / o_w) % o_h;
    int c = (index / o_w / o_h) % o_c;
    int n = index / o_w / o_h / o_c;
    int out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
    int grid_index = n * o_h * o_w * 2 + h * o_w + w;
    // Map the normalized grid coordinates from [-1, 1] into [0, i_w] / [0, i_h].
    DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h ) / 2;
    DType x_real = (*(grid + grid_index) + 1) * (i_w ) / 2;
    // Shift by half a pixel so integer coordinates land on pixel centers.
    DType y_fake = y_real - 0.5;
    DType x_fake = x_real - 0.5;
    //printf("%d,%d,%d,%d",x_fake,i_w,y_fake,i_h);
    // Interior case (x real >= 0.5 and <= w - 0.5): ordinary 4-tap bilinear
    // interpolation around the top-left neighbour.
    if (between(x_fake, 0, i_w-1) && between(y_fake, 0, i_h-1)){
      int top_left_y = static_cast<int>(floor(y_fake));
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      DType top_left_v = 0;
      DType top_right_v = 0;
      DType bottom_left_v = 0;
      DType bottom_right_v = 0;
      // Fetch the four neighbours, leaving out-of-range taps at zero.
      if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1))
        top_left_v = *(data + data_index);
      if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1))
        top_right_v = *(data + data_index + 1);
      if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
        bottom_left_v = *(data + data_index + i_w);
      if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1))
        bottom_right_v = *(data + data_index + i_w + 1);
      *(out+out_index) = top_left_v * top_left_y_w * top_left_x_w +
                         top_right_v * top_left_y_w * (1.0 - top_left_x_w) +
                         bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w +
                         bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w);
    }
    // Top-left corner: replicate pixel (row 0, col 0).
    else if (x_fake <= 0 && y_fake <= 0){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + 0;
      *(out+out_index) = *(data + data_index);
    }
    // Top-right corner: replicate pixel (row 0, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake <= 0){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + i_w -1 ;
      *(out+out_index) = *(data + data_index) ;
    }
    // Bottom-right corner: replicate pixel (row i_h - 1, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake >=(i_h -1)){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1) * i_w + i_w -1 ;
      *(out+out_index) = *(data + data_index) ;
    }
    // Bottom-left corner: replicate pixel (row i_h - 1, col 0).
    else if (x_fake <= 0 && y_fake >=(i_h - 1 )){
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1 ) * i_w + 0;
      *(out+out_index) = *(data + data_index) ;
    }
    // Left edge: interpolate vertically along column 0.
    else if (x_fake <= 0 ){
      DType top_right_v = 0;
      DType bottom_right_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +
                       top_left_y * i_w ;
      top_right_v = *(data + data_index );
      bottom_right_v = *(data + data_index + i_w );
      *(out+out_index) = top_right_v * ( top_left_y_w) +bottom_right_v * (1.0 - top_left_y_w);
    }
    // Top edge: interpolate horizontally along row 0.
    else if (y_fake <= 0 ){
      DType bottom_left_v = 0;
      DType bottom_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_x;
      bottom_left_v = *(data + data_index );
      bottom_right_v = *(data + data_index + 1);
      *(out+out_index) = bottom_left_v*(top_left_x_w) + bottom_right_v *(1.0 - top_left_x_w);
    }
    // Right edge: interpolate vertically along the last column
    // (the right neighbour collapses onto the left one).
    else if (x_fake >= i_w -1 ){
      DType top_left_v = 0;
      DType bottom_left_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      //right = left
      //int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      top_left_v = *(data + data_index );
      bottom_left_v = *(data + data_index + i_w);
      *(out+out_index) = bottom_left_v*(1.0 - top_left_y_w) + top_left_v * ( top_left_y_w);
    }
    // Bottom edge: interpolate horizontally along the last row.
    else if (y_fake >= i_h -1 ){
      DType top_left_v = 0;
      DType top_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      //int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      top_left_v = *(data + data_index );
      top_right_v = *(data + data_index + 1);
      *(out+out_index) = top_left_v*( top_left_x_w) + top_right_v * (1.0 - top_left_x_w);
    }
  }
}
// Backward pass of the bilinear sampler.
// One thread handles one (n, h, w) output location and loops over all
// channels.  The gradient w.r.t. the input feature map is accumulated into
// `g_input` via atomicAdd (several output locations may sample the same
// input pixel); the gradient w.r.t. the sampling grid is written to
// `grad_grid`.  Req1/Req2 are mxnet::OpReqType values for the data and grid
// gradients; kNullOp suppresses the corresponding write.  The border
// branches mirror the forward kernel's clamping and contribute zero grid
// gradient.
template<typename DType, int Req1, int Req2>
__global__ void BilinearSamplerBackwardKernel(const int i_c, const int i_h,
                                              const int i_w, const DType* grad,
                                              const DType* data, const int o_n,
                                              const int o_c, const int o_h,
                                              const int o_w, DType* g_input,
                                              const DType* grid_src,
                                              DType* grad_grid) {
  for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
       index < o_n * o_h * o_w;
       index += blockDim.x * gridDim.x * gridDim.y) {
    // (n, c, h, w) is the element in grad
    int w = index % o_w;
    int h = (index / o_w) % o_h;
    int n = index / o_w / o_h;
    // Gradients of the top-left interpolation weights, accumulated over the
    // channel loop, then rescaled into grad_grid after it.
    DType top_left_y_gw = 0.0;
    DType top_left_x_gw = 0.0;
    int grid_src_index = n * o_h * o_w * 2 + h * o_w + w;
    //DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h ) / 2;
    // Map the normalized grid coordinates into pixel space (same as forward).
    DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h ) / 2;
    DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w ) / 2;
    DType y_fake = y_real - 0.5;
    DType x_fake = x_real - 0.5;
    int top_left_y = static_cast<int>(floor(y_fake));
    int top_left_x = static_cast<int>(floor(x_fake));
    DType top_left_y_w = 1.0 - (y_fake - top_left_y);
    DType top_left_x_w = 1.0 - (x_fake - top_left_x);
    // Interior case: full 4-tap bilinear gradient w.r.t. data and grid.
    if (between(x_fake, 0, i_w-1) && between(y_fake, 0, i_h-1)){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        // calc 4 vertex value in input data
        DType top_left_v = 0;
        DType top_right_v = 0;
        DType bottom_left_v = 0;
        DType bottom_right_v = 0;
        // calc input grad
        if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd (&g_input[data_index], *(grad + grad_index) * top_left_y_w * top_left_x_w);
          }
          top_left_v = *(data + data_index);
        }
        if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd(&g_input[data_index + 1],
                      *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w));
          }
          top_right_v = *(data + data_index + 1);
        }
        if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd(&g_input[data_index+ i_w],
                      *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w);
          }
          bottom_left_v = *(data + data_index + i_w);
        }
        if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) {
          if (Req1 != mxnet::kNullOp) {
            atomicAdd(&g_input[data_index+ i_w + 1],*(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w));
          }
          bottom_right_v = *(data + data_index + i_w + 1);
        }
        // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src
        top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v +
                          (top_left_v - top_right_v - bottom_left_v + bottom_right_v)
                          * top_left_x_w);
        top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v +
                          (top_left_v - top_right_v - bottom_left_v + bottom_right_v)
                          * top_left_y_w);
      }
      if (Req2 != mxnet::kNullOp) {
        // calc grad of grid
        *(grad_grid + grid_src_index + o_h * o_w) += top_left_y_gw * (i_h ) / 2;
        *(grad_grid + grid_src_index) += top_left_x_gw * (i_w ) / 2;
      }
    }
    // Top-left corner: the forward pass replicated pixel (0, 0), so the whole
    // gradient flows to that pixel and none to the grid.
    else if (x_fake <= 0 && y_fake <= 0){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + 0;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index], *(grad + grad_index));
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Top-right corner: gradient flows to pixel (row 0, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake <= 0){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +0 * i_w + i_w -1 ;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index], *(grad + grad_index) );
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Bottom-right corner: gradient flows to pixel (row i_h - 1, col i_w - 1).
    else if (x_fake >= (i_w -1) && y_fake >=(i_h -1)){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1) * i_w + i_w -1 ;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[ data_index] , *(grad + grad_index)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Bottom-left corner: gradient flows to pixel (row i_h - 1, col 0).
    else if (x_fake <= 0 && y_fake >=(i_h - 1 )){
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        //int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x;
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +(i_h - 1 ) * i_w + 0;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        // calc input grad
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index] , *(grad + grad_index)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Left edge: forward interpolated vertically along column 0, so split
    // the gradient between the two vertical neighbours.
    else if (x_fake <= 0 ){
      // NOTE(review): top_right_v / bottom_right_v are declared but never
      // read in this branch.
      DType top_right_v = 0;
      DType bottom_right_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w ;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index], *(grad + grad_index)*( top_left_y_w)) ;
          atomicAdd(&g_input[data_index+ i_w], *(grad + grad_index)*( 1.0 - top_left_y_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Top edge: split the gradient between the two horizontal neighbours
    // in row 0.
    else if (y_fake <= 0 ){
      // NOTE(review): bottom_left_v / bottom_right_v are declared but never
      // read in this branch.
      DType bottom_left_v = 0;
      DType bottom_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index] , *(grad + grad_index)*( top_left_x_w)) ;
          atomicAdd(&g_input[data_index+ 1] , *(grad + grad_index)*( 1.0 - top_left_x_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Right edge: split the gradient between the two vertical neighbours in
    // the last column.
    else if (x_fake >= i_w -1 ){
      // NOTE(review): top_left_v / bottom_left_v are declared but never read
      // in this branch.
      DType top_left_v = 0;
      DType bottom_left_v = 0;
      int top_left_y = static_cast<int>(floor(y_fake));
      int top_left_x = static_cast<int>(floor(x_fake));
      DType top_left_y_w = 1.0 - (y_fake - top_left_y);
      //right = left
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index] , *(grad + grad_index)*( top_left_y_w) );
          atomicAdd(&g_input[data_index+ i_w] ,*(grad + grad_index)*( 1.0 - top_left_y_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
    // Bottom edge: split the gradient between the two horizontal neighbours
    // in the last row.
    else if (y_fake >= i_h -1 ){
      // NOTE(review): top_left_v / top_right_v are declared but never read
      // in this branch.
      DType top_left_v = 0;
      DType top_right_v = 0;
      int top_left_x = static_cast<int>(floor(x_fake));
      int top_left_y = static_cast<int>(floor(y_fake));
      DType top_left_x_w = 1.0 - (x_fake - top_left_x);
      //int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
      for (int c = 0; c < static_cast<int>(o_c); ++c) {
        int data_index = n * i_c * i_h * i_w + c * i_h * i_w +top_left_y * i_w + top_left_x;
        int grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w;
        if (Req1 != mxnet::kNullOp) {
          atomicAdd(&g_input[data_index],*(grad + grad_index)*( top_left_x_w)) ;
          atomicAdd(&g_input[data_index+ 1] ,*(grad + grad_index)*( 1.0 - top_left_x_w)) ;
        }
        if (Req2 != mxnet::kNullOp) {
          // ignore grad of grid
          *(grad_grid + grid_src_index + o_h * o_w) += 0;
          *(grad_grid + grid_src_index) += 0;
        }
      }
    }
  }
}
} // namespace cuda
// Launches BilinearSamplerForwardKernel with one thread per output element.
// `output` is (n, c, h, w); `input` supplies (i_c, i_h, i_w); `grid_src`
// holds the normalized sampling coordinates.  Asynchronous on `output`'s
// stream; only launch-configuration errors are surfaced here.
template<typename DType>
inline void BilinearSamplerForward(const Tensor<gpu, 4, DType> &output,
                                   const Tensor<gpu, 4, DType> &input,
                                   const Tensor<gpu, 4, DType> &grid_src) {
  using namespace cuda;
  // Raw device pointers into the tensors.
  DType *out_ptr = output.dptr_;
  const DType *data_ptr = input.dptr_;
  const DType *grid_ptr = grid_src.dptr_;
  // Output geometry drives the launch; input supplies the source extent.
  const int o_n = output.size(0);
  const int o_c = output.size(1);
  const int o_h = output.size(2);
  const int o_w = output.size(3);
  const int i_c = input.size(1);
  const int i_h = input.size(2);
  const int i_w = input.size(3);
  // Spill excess blocks into grid.y once the x-dimension limit is reached.
  const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
  const int grid_dim_x = (max_block > kMaxGridDim) ? kMaxGridDim : max_block;
  const int grid_dim_y =
      (max_block > kMaxGridDim) ? (max_block + kMaxGridDim - 1) / kMaxGridDim : 1;
  dim3 num_blocks(grid_dim_x, grid_dim_y);
  dim3 threads_per_block(kMaxThreadsPerBlock);
  CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler forward");
  cudaStream_t stream = Stream<gpu>::GetStream(output.stream_);
  cuda::BilinearSamplerForwardKernel<DType><<<num_blocks, threads_per_block, 0, stream>>>(
      i_c, i_h, i_w, data_ptr, grid_ptr, o_n, o_c, o_h, o_w, out_ptr);
  // post kernel check (peek keeps the sticky error state intact)
  cudaError_t err = cudaPeekAtLastError();
  CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
template<typename DType>
inline void BilinearSamplerBackward(const Tensor<gpu, 4, DType> &input_grad,
                                    const Tensor<gpu, 4, DType> &ggrid,
                                    const Tensor<gpu, 4, DType> &output_grad,
                                    const Tensor<gpu, 4, DType> &input_data,
                                    const Tensor<gpu, 4, DType> &grid,
                                    const mxnet::OpReqType data_req,
                                    const mxnet::OpReqType grid_req) {
  // Launches the bilinear-sampling backward kernel. One thread per output
  // spatial location (hence the division by o_c in the block count); the
  // kernel is specialized on the write-request type of each gradient.
  using namespace mxnet;
  using namespace cuda;
  DType *g_input = input_grad.dptr_;
  DType *grad_grid = ggrid.dptr_;
  const DType *grid_src = grid.dptr_;
  const DType *grad = output_grad.dptr_;
  const DType *data = input_data.dptr_;
  const int o_n = output_grad.size(0);
  const int o_c = output_grad.size(1);
  const int o_h = output_grad.size(2);
  const int o_w = output_grad.size(3);
  const int i_c = input_data.size(1);
  const int i_h = input_data.size(2);
  const int i_w = input_data.size(3);
  const int total_blocks = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1)
                           / kMaxThreadsPerBlock;
  // Fold the block count into a 2-D grid when it exceeds the 1-D limit.
  dim3 num_blocks;
  dim3 threads_per_block(kMaxThreadsPerBlock);
  if (total_blocks > kMaxGridDim) {
    num_blocks = dim3(kMaxGridDim, (total_blocks + kMaxGridDim - 1) / kMaxGridDim);
  } else {
    num_blocks = dim3(total_blocks, 1);
  }
  CheckLaunchParam(num_blocks, threads_per_block, "bilinear sampler backward");
  cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_);
  MXNET_REQ_TYPE_SWITCH(data_req, Req1, {
    MXNET_REQ_TYPE_SWITCH(grid_req, Req2, {
      cuda::BilinearSamplerBackwardKernel<DType, Req1, Req2>
          <<<num_blocks, threads_per_block, 0, stream>>>(
          i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src, grad_grid);
    });
  });
  // Kernel launches are asynchronous; surface launch-configuration errors here.
  cudaError err = cudaPeekAtLastError();
  CHECK_EQ(err, cudaSuccess) << cudaGetErrorString(err);
}
} // namespace mshadow
namespace mxnet {
namespace op {
// GPU operator factory for BilinearSampler: uses the cuDNN implementation
// when it is compiled in and not explicitly disabled via param.cudnn_off.
template<>
Operator* CreateOp<gpu>(BilinearSamplerParam param, int dtype) {
  Operator *op = nullptr;
#if MXNET_USE_CUDNN == 1
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    const bool cudnn_disabled = param.cudnn_off.has_value() && param.cudnn_off.value();
    if (cudnn_disabled) {
      op = new BilinearSamplerOp<gpu, DType>(param);
    } else {
      op = new CuDNNBilinearSamplerOp<DType>(param);
    }
  })
#else
  MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
    op = new BilinearSamplerOp<gpu, DType>(param);
  })
#endif  // MXNET_USE_CUDNN
  return op;
}
} // namespace op
} // namespace mxnet
|
5dd4c838f0ffcc2e719b6d5e64f16693b6d6fc09.hip | // !!! This is a file automatically generated by hipify!!!
/** A mixed-precision implicit Particle-in-Cell simulator for heterogeneous systems **/
// Allocator for 2D, 3D and 4D array: chain of pointers
#include "Alloc.h"
// Precision: fix precision for different quantities
#include "PrecisionTypes.h"
// Simulation Parameter - structure
#include "Parameters.h"
// Grid structure
#include "Grid.h"
// Interpolated Quantities Structures
#include "InterpDensSpecies.h"
#include "InterpDensNet.h"
// Field structure
#include "EMfield.h" // Just E and Bn
#include "EMfield_aux.h" // Bc, Phi, Eth, D
// Particles structure
#include "Particles.h"
#include "Particles_aux.h" // Needed only if dointerpolation on GPU - avoid reduction on GPU
// Initial Condition
#include "IC.h"
// Boundary Conditions
#include "BC.h"
// timing
#include "Timing.h"
// Read and output operations
#include "RW_IO.h"
#include <stdio.h>
// Cuda
#include <hip/hip_runtime.h>
/** Simulation driver: reads the parameters, allocates host and device
 *  buffers, runs the PIC cycles (GPU particle mover + GPU particle-to-grid
 *  interpolation), writes VTK output, releases all resources and prints
 *  timing. Fixes vs. the original: the mojibake "&para;m" tokens are restored
 *  to "&param", and the device buffers part_q_gpu, rhon_gpu, Jx/Jy/Jz_gpu and
 *  the pressure-tensor buffers (plus the new[]-allocated host arrays) are now
 *  actually freed. */
int main(int argc, char **argv){
    // Read the inputfile and fill the param structure
    parameters param;
    // Read the input file name from command line
    readInputFile(&param,argc,argv);
    printParameters(&param);
    saveParameters(&param);
    // Timing variables
    double iStart = cpuSecond();
    double iMover, iInterp, eMover = 0.0, eInterp= 0.0;
    // Set-up the grid information
    grid grd;
    setGrid(&param, &grd);
    // Allocate Fields
    EMfield field;
    field_allocate(&grd,&field);
    EMfield_aux field_aux;
    field_aux_allocate(&grd,&field_aux);
    // Allocate Interpolated Quantities
    // per species
    interpDensSpecies *ids = new interpDensSpecies[param.ns];
    for (int is=0; is < param.ns; is++)
        interp_dens_species_allocate(&grd,&ids[is],is);
    // Net densities
    interpDensNet idn;
    interp_dens_net_allocate(&grd,&idn);
    // Allocate Particles
    particles *part = new particles[param.ns];
    // allocation
    for (int is=0; is < param.ns; is++){
        particle_allocate(&param,&part[is],is);
    }
    // Initialization
    initGEM(&param,&grd,&field,&field_aux,part,ids);
    // **********************************************************//
    // ********************** GPU allocation ********************//
    // **********************************************************//
    // Particle vars: one set of buffers reused for every species.
    // NOTE(review): sized with part->npmax (species 0) — assumes npmax is the
    // same for all species; confirm against particle_allocate.
    FPpart *part_x_gpu; FPpart *part_y_gpu; FPpart *part_z_gpu;
    FPpart *part_u_gpu; FPpart *part_v_gpu; FPpart *part_w_gpu;
    FPinterp* part_q_gpu;
    // EMField vars
    FPfield *Ex_gpu ; FPfield *Ey_gpu ; FPfield *Ez_gpu;
    FPfield *Bxn_gpu; FPfield *Byn_gpu; FPfield *Bzn_gpu;
    // Grd vars (node coordinates)
    FPfield *XN_gpu; FPfield *YN_gpu; FPfield *ZN_gpu;
    // is vars (per-species interpolated moments)
    FPinterp *rhon_gpu;
    FPinterp *Jx_gpu , *Jy_gpu , *Jz_gpu;
    FPinterp *pxx_gpu, *pxy_gpu, *pxz_gpu;
    FPinterp *pyy_gpu, *pyz_gpu, *pzz_gpu;
    long field_size = grd.nxn * grd.nyn * grd.nzn;
    long grd_size = grd.nxn * grd.nyn * grd.nzn;
    // Allocate GPU memory
    // NOTE(review): hipMalloc return codes are not checked; a failed
    // allocation only surfaces later as a kernel/copy error.
    hipMalloc(&part_x_gpu, part->npmax * sizeof(FPpart));
    hipMalloc(&part_y_gpu, part->npmax * sizeof(FPpart));
    hipMalloc(&part_z_gpu, part->npmax * sizeof(FPpart));
    hipMalloc(&part_u_gpu, part->npmax * sizeof(FPpart));
    hipMalloc(&part_v_gpu, part->npmax * sizeof(FPpart));
    hipMalloc(&part_w_gpu, part->npmax * sizeof(FPpart));
    hipMalloc(&part_q_gpu, part->npmax * sizeof(FPinterp));
    hipMalloc(&Ex_gpu , field_size * sizeof(FPfield));
    hipMalloc(&Ey_gpu , field_size * sizeof(FPfield));
    hipMalloc(&Ez_gpu , field_size * sizeof(FPfield));
    hipMalloc(&Bxn_gpu, field_size * sizeof(FPfield));
    hipMalloc(&Byn_gpu, field_size * sizeof(FPfield));
    hipMalloc(&Bzn_gpu, field_size * sizeof(FPfield));
    hipMalloc(&XN_gpu, grd_size * sizeof(FPfield));
    hipMalloc(&YN_gpu, grd_size * sizeof(FPfield));
    hipMalloc(&ZN_gpu, grd_size * sizeof(FPfield));
    hipMalloc(&rhon_gpu , grd_size * sizeof(FPinterp));
    hipMalloc(&Jx_gpu , grd_size * sizeof(FPinterp));
    hipMalloc(&Jy_gpu , grd_size * sizeof(FPinterp));
    hipMalloc(&Jz_gpu , grd_size * sizeof(FPinterp));
    hipMalloc(&pxx_gpu, grd_size * sizeof(FPinterp));
    hipMalloc(&pxy_gpu, grd_size * sizeof(FPinterp));
    hipMalloc(&pxz_gpu, grd_size * sizeof(FPinterp));
    hipMalloc(&pyy_gpu, grd_size * sizeof(FPinterp));
    hipMalloc(&pyz_gpu, grd_size * sizeof(FPinterp));
    hipMalloc(&pzz_gpu, grd_size * sizeof(FPinterp));
    // **********************************************************//
    // **********************************************************//
    // **** Start the Simulation! Cycle index start from 1 *** //
    // **********************************************************//
    for (int cycle = param.first_cycle_n; cycle < (param.first_cycle_n + param.ncycles); cycle++) {
        std::cout << std::endl;
        std::cout << "***********************" << std::endl;
        std::cout << " cycle = " << cycle << std::endl;
        std::cout << "***********************" << std::endl;
        // set to zero the densities - needed for interpolation
        setZeroDensities(&idn,ids,&grd,param.ns);
        // implicit mover
        iMover = cpuSecond(); // start timer for mover
        // **********************************************************//
        // *********************** GPU Version **********************//
        // **********************************************************//
        for (int is=0; is < param.ns; is++)
            // mover_PC_cpu(&part[is],&field,&grd,&param);
            mover_PC_gpu(&part[is] , &field , &grd , &param , part_x_gpu, part_y_gpu,
                         part_z_gpu, part_u_gpu , part_v_gpu, part_w_gpu, Ex_gpu , Ey_gpu ,
                         Ez_gpu , Bxn_gpu , Byn_gpu , Bzn_gpu , XN_gpu , YN_gpu ,
                         ZN_gpu , field_size , grd_size);
        // **********************************************************//
        eMover += (cpuSecond() - iMover); // stop timer for mover
        // interpolation particle to grid
        iInterp = cpuSecond(); // start timer for the interpolation step
        // interpolate species
        for (int is=0; is < param.ns; is++)
            // interpP2G_cpu(&part[is],&ids[is],&grd);
            interpP2G_gpu(&part[is] , &ids[is] , &grd , part_x_gpu , part_y_gpu , part_z_gpu ,
                          part_u_gpu , part_v_gpu , part_w_gpu , part_q_gpu , Jx_gpu , Jy_gpu ,
                          Jz_gpu , pxx_gpu , pxy_gpu , pxz_gpu , pyy_gpu , pyz_gpu ,
                          pzz_gpu , rhon_gpu , XN_gpu , YN_gpu , ZN_gpu , grd_size);
        // apply BC to interpolated densities
        for (int is=0; is < param.ns; is++)
            applyBCids(&ids[is],&grd,&param);
        // sum over species
        sumOverSpecies(&idn,ids,&grd,param.ns);
        // interpolate charge density from center to node
        applyBCscalarDensN(idn.rhon,&grd,&param);
        // write E, B, rho to disk
        if (cycle%param.FieldOutputCycle==0){
            VTK_Write_Vectors(cycle, &grd,&field);
            VTK_Write_Scalars(cycle, &grd,ids,&idn);
        }
        eInterp += (cpuSecond() - iInterp); // stop timer for interpolation
    } // end of one PIC cycle
    /// Release the resources
    // deallocate field
    grid_deallocate(&grd);
    field_deallocate(&grd,&field);
    // interp
    interp_dens_net_deallocate(&grd,&idn);
    // Deallocate interpolated densities and particles
    for (int is=0; is < param.ns; is++){
        interp_dens_species_deallocate(&grd,&ids[is]);
        particle_deallocate(&part[is]);
    }
    // Host arrays allocated with new[] (previously leaked)
    delete[] part;
    delete[] ids;
    // GPU deallocate memory (now including part_q_gpu, rhon_gpu, the
    // currents and the pressure-tensor buffers, previously leaked)
    hipFree(part_x_gpu);
    hipFree(part_y_gpu);
    hipFree(part_z_gpu);
    hipFree(part_u_gpu);
    hipFree(part_v_gpu);
    hipFree(part_w_gpu);
    hipFree(part_q_gpu);
    hipFree(Ex_gpu);
    hipFree(Ey_gpu);
    hipFree(Ez_gpu);
    hipFree(Bxn_gpu);
    hipFree(Byn_gpu);
    hipFree(Bzn_gpu);
    hipFree(XN_gpu);
    hipFree(YN_gpu);
    hipFree(ZN_gpu);
    hipFree(rhon_gpu);
    hipFree(Jx_gpu);
    hipFree(Jy_gpu);
    hipFree(Jz_gpu);
    hipFree(pxx_gpu);
    hipFree(pxy_gpu);
    hipFree(pxz_gpu);
    hipFree(pyy_gpu);
    hipFree(pyz_gpu);
    hipFree(pzz_gpu);
    // stop timer
    double iElaps = cpuSecond() - iStart;
    // Print timing of simulation
    std::cout << std::endl;
    std::cout << "**************************************" << std::endl;
    std::cout << " Tot. Simulation Time (s) = " << iElaps << std::endl;
    std::cout << " Mover Time / Cycle (s) = " << eMover/param.ncycles << std::endl;
    std::cout << " Interp. Time / Cycle (s) = " << eInterp/param.ncycles << std::endl;
    std::cout << "**************************************" << std::endl;
    // exit
    return 0;
}
| 5dd4c838f0ffcc2e719b6d5e64f16693b6d6fc09.cu | /** A mixed-precision implicit Particle-in-Cell simulator for heterogeneous systems **/
// Allocator for 2D, 3D and 4D array: chain of pointers
#include "Alloc.h"
// Precision: fix precision for different quantities
#include "PrecisionTypes.h"
// Simulation Parameter - structure
#include "Parameters.h"
// Grid structure
#include "Grid.h"
// Interpolated Quantities Structures
#include "InterpDensSpecies.h"
#include "InterpDensNet.h"
// Field structure
#include "EMfield.h" // Just E and Bn
#include "EMfield_aux.h" // Bc, Phi, Eth, D
// Particles structure
#include "Particles.h"
#include "Particles_aux.h" // Needed only if dointerpolation on GPU - avoid reduction on GPU
// Initial Condition
#include "IC.h"
// Boundary Conditions
#include "BC.h"
// timing
#include "Timing.h"
// Read and output operations
#include "RW_IO.h"
#include <stdio.h>
// Cuda
#include <cuda.h>
/** Simulation driver: reads the parameters, allocates host and device
 *  buffers, runs the PIC cycles (GPU particle mover + GPU particle-to-grid
 *  interpolation), writes VTK output, releases all resources and prints
 *  timing. Fixes vs. the original: the mojibake "&para;m" tokens are restored
 *  to "&param", and the device buffers part_q_gpu, rhon_gpu, Jx/Jy/Jz_gpu and
 *  the pressure-tensor buffers (plus the new[]-allocated host arrays) are now
 *  actually freed. */
int main(int argc, char **argv){
    // Read the inputfile and fill the param structure
    parameters param;
    // Read the input file name from command line
    readInputFile(&param,argc,argv);
    printParameters(&param);
    saveParameters(&param);
    // Timing variables
    double iStart = cpuSecond();
    double iMover, iInterp, eMover = 0.0, eInterp= 0.0;
    // Set-up the grid information
    grid grd;
    setGrid(&param, &grd);
    // Allocate Fields
    EMfield field;
    field_allocate(&grd,&field);
    EMfield_aux field_aux;
    field_aux_allocate(&grd,&field_aux);
    // Allocate Interpolated Quantities
    // per species
    interpDensSpecies *ids = new interpDensSpecies[param.ns];
    for (int is=0; is < param.ns; is++)
        interp_dens_species_allocate(&grd,&ids[is],is);
    // Net densities
    interpDensNet idn;
    interp_dens_net_allocate(&grd,&idn);
    // Allocate Particles
    particles *part = new particles[param.ns];
    // allocation
    for (int is=0; is < param.ns; is++){
        particle_allocate(&param,&part[is],is);
    }
    // Initialization
    initGEM(&param,&grd,&field,&field_aux,part,ids);
    // **********************************************************//
    // ********************** GPU allocation ********************//
    // **********************************************************//
    // Particle vars: one set of buffers reused for every species.
    // NOTE(review): sized with part->npmax (species 0) — assumes npmax is the
    // same for all species; confirm against particle_allocate.
    FPpart *part_x_gpu; FPpart *part_y_gpu; FPpart *part_z_gpu;
    FPpart *part_u_gpu; FPpart *part_v_gpu; FPpart *part_w_gpu;
    FPinterp* part_q_gpu;
    // EMField vars
    FPfield *Ex_gpu ; FPfield *Ey_gpu ; FPfield *Ez_gpu;
    FPfield *Bxn_gpu; FPfield *Byn_gpu; FPfield *Bzn_gpu;
    // Grd vars (node coordinates)
    FPfield *XN_gpu; FPfield *YN_gpu; FPfield *ZN_gpu;
    // is vars (per-species interpolated moments)
    FPinterp *rhon_gpu;
    FPinterp *Jx_gpu , *Jy_gpu , *Jz_gpu;
    FPinterp *pxx_gpu, *pxy_gpu, *pxz_gpu;
    FPinterp *pyy_gpu, *pyz_gpu, *pzz_gpu;
    long field_size = grd.nxn * grd.nyn * grd.nzn;
    long grd_size = grd.nxn * grd.nyn * grd.nzn;
    // Allocate GPU memory
    // NOTE(review): cudaMalloc return codes are not checked; a failed
    // allocation only surfaces later as a kernel/copy error.
    cudaMalloc(&part_x_gpu, part->npmax * sizeof(FPpart));
    cudaMalloc(&part_y_gpu, part->npmax * sizeof(FPpart));
    cudaMalloc(&part_z_gpu, part->npmax * sizeof(FPpart));
    cudaMalloc(&part_u_gpu, part->npmax * sizeof(FPpart));
    cudaMalloc(&part_v_gpu, part->npmax * sizeof(FPpart));
    cudaMalloc(&part_w_gpu, part->npmax * sizeof(FPpart));
    cudaMalloc(&part_q_gpu, part->npmax * sizeof(FPinterp));
    cudaMalloc(&Ex_gpu , field_size * sizeof(FPfield));
    cudaMalloc(&Ey_gpu , field_size * sizeof(FPfield));
    cudaMalloc(&Ez_gpu , field_size * sizeof(FPfield));
    cudaMalloc(&Bxn_gpu, field_size * sizeof(FPfield));
    cudaMalloc(&Byn_gpu, field_size * sizeof(FPfield));
    cudaMalloc(&Bzn_gpu, field_size * sizeof(FPfield));
    cudaMalloc(&XN_gpu, grd_size * sizeof(FPfield));
    cudaMalloc(&YN_gpu, grd_size * sizeof(FPfield));
    cudaMalloc(&ZN_gpu, grd_size * sizeof(FPfield));
    cudaMalloc(&rhon_gpu , grd_size * sizeof(FPinterp));
    cudaMalloc(&Jx_gpu , grd_size * sizeof(FPinterp));
    cudaMalloc(&Jy_gpu , grd_size * sizeof(FPinterp));
    cudaMalloc(&Jz_gpu , grd_size * sizeof(FPinterp));
    cudaMalloc(&pxx_gpu, grd_size * sizeof(FPinterp));
    cudaMalloc(&pxy_gpu, grd_size * sizeof(FPinterp));
    cudaMalloc(&pxz_gpu, grd_size * sizeof(FPinterp));
    cudaMalloc(&pyy_gpu, grd_size * sizeof(FPinterp));
    cudaMalloc(&pyz_gpu, grd_size * sizeof(FPinterp));
    cudaMalloc(&pzz_gpu, grd_size * sizeof(FPinterp));
    // **********************************************************//
    // **********************************************************//
    // **** Start the Simulation! Cycle index start from 1 *** //
    // **********************************************************//
    for (int cycle = param.first_cycle_n; cycle < (param.first_cycle_n + param.ncycles); cycle++) {
        std::cout << std::endl;
        std::cout << "***********************" << std::endl;
        std::cout << " cycle = " << cycle << std::endl;
        std::cout << "***********************" << std::endl;
        // set to zero the densities - needed for interpolation
        setZeroDensities(&idn,ids,&grd,param.ns);
        // implicit mover
        iMover = cpuSecond(); // start timer for mover
        // **********************************************************//
        // *********************** GPU Version **********************//
        // **********************************************************//
        for (int is=0; is < param.ns; is++)
            // mover_PC_cpu(&part[is],&field,&grd,&param);
            mover_PC_gpu(&part[is] , &field , &grd , &param , part_x_gpu, part_y_gpu,
                         part_z_gpu, part_u_gpu , part_v_gpu, part_w_gpu, Ex_gpu , Ey_gpu ,
                         Ez_gpu , Bxn_gpu , Byn_gpu , Bzn_gpu , XN_gpu , YN_gpu ,
                         ZN_gpu , field_size , grd_size);
        // **********************************************************//
        eMover += (cpuSecond() - iMover); // stop timer for mover
        // interpolation particle to grid
        iInterp = cpuSecond(); // start timer for the interpolation step
        // interpolate species
        for (int is=0; is < param.ns; is++)
            // interpP2G_cpu(&part[is],&ids[is],&grd);
            interpP2G_gpu(&part[is] , &ids[is] , &grd , part_x_gpu , part_y_gpu , part_z_gpu ,
                          part_u_gpu , part_v_gpu , part_w_gpu , part_q_gpu , Jx_gpu , Jy_gpu ,
                          Jz_gpu , pxx_gpu , pxy_gpu , pxz_gpu , pyy_gpu , pyz_gpu ,
                          pzz_gpu , rhon_gpu , XN_gpu , YN_gpu , ZN_gpu , grd_size);
        // apply BC to interpolated densities
        for (int is=0; is < param.ns; is++)
            applyBCids(&ids[is],&grd,&param);
        // sum over species
        sumOverSpecies(&idn,ids,&grd,param.ns);
        // interpolate charge density from center to node
        applyBCscalarDensN(idn.rhon,&grd,&param);
        // write E, B, rho to disk
        if (cycle%param.FieldOutputCycle==0){
            VTK_Write_Vectors(cycle, &grd,&field);
            VTK_Write_Scalars(cycle, &grd,ids,&idn);
        }
        eInterp += (cpuSecond() - iInterp); // stop timer for interpolation
    } // end of one PIC cycle
    /// Release the resources
    // deallocate field
    grid_deallocate(&grd);
    field_deallocate(&grd,&field);
    // interp
    interp_dens_net_deallocate(&grd,&idn);
    // Deallocate interpolated densities and particles
    for (int is=0; is < param.ns; is++){
        interp_dens_species_deallocate(&grd,&ids[is]);
        particle_deallocate(&part[is]);
    }
    // Host arrays allocated with new[] (previously leaked)
    delete[] part;
    delete[] ids;
    // GPU deallocate memory (now including part_q_gpu, rhon_gpu, the
    // currents and the pressure-tensor buffers, previously leaked)
    cudaFree(part_x_gpu);
    cudaFree(part_y_gpu);
    cudaFree(part_z_gpu);
    cudaFree(part_u_gpu);
    cudaFree(part_v_gpu);
    cudaFree(part_w_gpu);
    cudaFree(part_q_gpu);
    cudaFree(Ex_gpu);
    cudaFree(Ey_gpu);
    cudaFree(Ez_gpu);
    cudaFree(Bxn_gpu);
    cudaFree(Byn_gpu);
    cudaFree(Bzn_gpu);
    cudaFree(XN_gpu);
    cudaFree(YN_gpu);
    cudaFree(ZN_gpu);
    cudaFree(rhon_gpu);
    cudaFree(Jx_gpu);
    cudaFree(Jy_gpu);
    cudaFree(Jz_gpu);
    cudaFree(pxx_gpu);
    cudaFree(pxy_gpu);
    cudaFree(pxz_gpu);
    cudaFree(pyy_gpu);
    cudaFree(pyz_gpu);
    cudaFree(pzz_gpu);
    // stop timer
    double iElaps = cpuSecond() - iStart;
    // Print timing of simulation
    std::cout << std::endl;
    std::cout << "**************************************" << std::endl;
    std::cout << " Tot. Simulation Time (s) = " << iElaps << std::endl;
    std::cout << " Mover Time / Cycle (s) = " << eMover/param.ncycles << std::endl;
    std::cout << " Interp. Time / Cycle (s) = " << eInterp/param.ncycles << std::endl;
    std::cout << "**************************************" << std::endl;
    // exit
    return 0;
}
|
8f27bd2da9a4ec3f024116b5f1b91bbeaafe8171.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cudaWorker.h"
#include "cudaWorkerKernel.cuh"
/* Per-network table of device-memory addresses. cudaWorkerKernell receives an
 * array of these and selects one entry per block via blockIdx.x, so each block
 * evaluates one network. */
typedef struct {
	float* dev_interconnects;  // stage weights, concatenated stage by stage
	float* dev_memories;       // per-neuron memory (feedback) coefficients
	float* dev_constants;      // per-neuron bias constants
	float* dev_inputs;         // network input vector (N_INPUTS)
	float* dev_lvl1;           // stage-1 neuron outputs (N_NLVL1)
	float* dev_lvl2;           // stage-2 neuron outputs (N_NLVL2)
	float* dev_lvl3;           // stage-3 neuron outputs (N_NLVL3)
	float* dev_outputs;        // final network outputs (N_OUTPUTS)
} cudaNetworkGPUAdresses_kernel;
/*
 * Evaluates one stage of every network: block blockIdx.x selects a network
 * through the address table, thread threadIdx.x computes one output neuron.
 *
 * Assumptions inherited from the original code (confirm against the host
 * launch in cudaWorkerKernellCall):
 *  - blockDim.x equals the stage's output count, so threadIdx.x indexes
 *    outputs directly;
 *  - the stage's input count never exceeds MAX_NLVL (shared-cache size).
 */
__global__ void cudaWorkerKernell(void * index_, int step) {
    cudaNetworkGPUAdresses_kernel *index = (cudaNetworkGPUAdresses_kernel*)index_;
    float* networkInterconnects;
    float* networkConstants;
    float* networkMemories;
    float* inputs;
    float* outputs;
    int nInputs;
    int nOutputs;
    /* Select the weight/constant/memory slices and I/O buffers of the stage. */
    switch (step) {
    case 0: // Inputs to stage 1
        networkInterconnects = index[blockIdx.x].dev_interconnects;
        networkConstants = index[blockIdx.x].dev_constants;
        networkMemories = index[blockIdx.x].dev_memories;
        inputs = index[blockIdx.x].dev_inputs;
        outputs = index[blockIdx.x].dev_lvl1;
        nInputs = N_INPUTS;
        nOutputs = N_NLVL1;
        break;
    case 1: // Stage 1 to stage 2
        networkInterconnects = index[blockIdx.x].dev_interconnects + N_NLVL1 * N_INPUTS;
        networkConstants = index[blockIdx.x].dev_constants + N_NLVL1;
        networkMemories = index[blockIdx.x].dev_memories + N_NLVL1;
        inputs = index[blockIdx.x].dev_lvl1;
        outputs = index[blockIdx.x].dev_lvl2;
        nInputs = N_NLVL1;
        nOutputs = N_NLVL2;
        break;
    case 2: // Stage 2 to stage 3
        networkInterconnects = index[blockIdx.x].dev_interconnects + N_NLVL1 * N_INPUTS + N_NLVL2 * N_NLVL1;
        networkConstants = index[blockIdx.x].dev_constants + N_NLVL1 + N_NLVL2;
        networkMemories = index[blockIdx.x].dev_memories + N_NLVL1 + N_NLVL2;
        inputs = index[blockIdx.x].dev_lvl2;
        outputs = index[blockIdx.x].dev_lvl3;
        nInputs = N_NLVL2;
        nOutputs = N_NLVL3;
        break;
    case 3: // Stage 3 to output
        networkInterconnects = index[blockIdx.x].dev_interconnects + N_NLVL1 * N_INPUTS + N_NLVL2 * N_NLVL1 + N_NLVL3 * N_NLVL2;
        networkConstants = index[blockIdx.x].dev_constants + N_NLVL1 + N_NLVL2 + N_NLVL3;
        networkMemories = index[blockIdx.x].dev_memories + N_NLVL1 + N_NLVL2 + N_NLVL3;
        inputs = index[blockIdx.x].dev_lvl3;
        outputs = index[blockIdx.x].dev_outputs;
        nInputs = N_NLVL3;
        nOutputs = N_OUTPUTS;
        break;
    default :
        printf("Error in kernel, received incorrect stage id.\n");
        return;
    }
    int threadID = threadIdx.x;
    /* Shared memory */
    __shared__ float cached_inputs[MAX_NLVL];
    float sum;
    /* Cache the inputs in shared memory, because every thread reads all of
       them. Block-stride loop: thread t copies elements t, t+nOutputs, ...,
       which covers every input exactly once for ANY nInputs/nOutputs ratio.
       (The previous "io_ratio * i + threadID" indexing both skipped inputs
       and wrote past nInputs whenever nInputs/nOutputs != nOutputs, and
       copied nothing at all when nInputs < nOutputs.) */
    for (int i = threadID; i < nInputs; i += nOutputs) {
        cached_inputs[i] = inputs[i];
    }
    __syncthreads();
    /* Doing old_output * memCoefficient + constant in a single operation
       Global memory accesses are coalesced */
    sum = fmaf(outputs[threadID], networkMemories[threadID], networkConstants[threadID]);
    __syncthreads();
    /* Accumulate the weighted inputs; global memory accesses are coalesced */
    for (int i = 0; i < nInputs; i++) {
        sum = fmaf(cached_inputs[i], networkInterconnects[nOutputs * i + threadID], sum);
    }
    /* Write everything (coalesced write of 1 float per thread) */
    outputs[threadID] = sum;
    /*if (blockIdx.x == 0)
        printf("Kernel thread %d, step %d, output %f\n", threadID, step, sum);*/
}
/* Launches cudaWorkerKernell for one stage and waits for completion.
 * nblocks  - number of networks (one block per network)
 * nthreads - number of output neurons of the stage (must match blockDim
 *            assumption inside the kernel)
 * index    - device pointer to the cudaNetworkGPUAdresses_kernel table
 * step     - stage id (0..3) */
void cudaWorkerKernellCall(int nblocks, int nthreads, void* index, int step) {
    hipLaunchKernelGGL(( cudaWorkerKernell), dim3(nblocks), dim3(nthreads), 0, 0, index, step);
    /* Launch-configuration errors are only reported via hipGetLastError(),
       not by the synchronize below — check both. */
    hipError_t cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
        return;
    }
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching Kernel!\n",
                cudaStatus, hipGetErrorString(cudaStatus));
        return;
    }
}
| 8f27bd2da9a4ec3f024116b5f1b91bbeaafe8171.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include "cudaWorker.h"
#include "cudaWorkerKernel.cuh"
/* Per-network table of device-memory addresses. cudaWorkerKernell receives an
 * array of these and selects one entry per block via blockIdx.x, so each block
 * evaluates one network. */
typedef struct {
	float* dev_interconnects;  // stage weights, concatenated stage by stage
	float* dev_memories;       // per-neuron memory (feedback) coefficients
	float* dev_constants;      // per-neuron bias constants
	float* dev_inputs;         // network input vector (N_INPUTS)
	float* dev_lvl1;           // stage-1 neuron outputs (N_NLVL1)
	float* dev_lvl2;           // stage-2 neuron outputs (N_NLVL2)
	float* dev_lvl3;           // stage-3 neuron outputs (N_NLVL3)
	float* dev_outputs;        // final network outputs (N_OUTPUTS)
} cudaNetworkGPUAdresses_kernel;
/*
 * Evaluates one stage of every network: block blockIdx.x selects a network
 * through the address table, thread threadIdx.x computes one output neuron.
 *
 * Assumptions inherited from the original code (confirm against the host
 * launch in cudaWorkerKernellCall):
 *  - blockDim.x equals the stage's output count, so threadIdx.x indexes
 *    outputs directly;
 *  - the stage's input count never exceeds MAX_NLVL (shared-cache size).
 */
__global__ void cudaWorkerKernell(void * index_, int step) {
    cudaNetworkGPUAdresses_kernel *index = (cudaNetworkGPUAdresses_kernel*)index_;
    float* networkInterconnects;
    float* networkConstants;
    float* networkMemories;
    float* inputs;
    float* outputs;
    int nInputs;
    int nOutputs;
    /* Select the weight/constant/memory slices and I/O buffers of the stage. */
    switch (step) {
    case 0: // Inputs to stage 1
        networkInterconnects = index[blockIdx.x].dev_interconnects;
        networkConstants = index[blockIdx.x].dev_constants;
        networkMemories = index[blockIdx.x].dev_memories;
        inputs = index[blockIdx.x].dev_inputs;
        outputs = index[blockIdx.x].dev_lvl1;
        nInputs = N_INPUTS;
        nOutputs = N_NLVL1;
        break;
    case 1: // Stage 1 to stage 2
        networkInterconnects = index[blockIdx.x].dev_interconnects + N_NLVL1 * N_INPUTS;
        networkConstants = index[blockIdx.x].dev_constants + N_NLVL1;
        networkMemories = index[blockIdx.x].dev_memories + N_NLVL1;
        inputs = index[blockIdx.x].dev_lvl1;
        outputs = index[blockIdx.x].dev_lvl2;
        nInputs = N_NLVL1;
        nOutputs = N_NLVL2;
        break;
    case 2: // Stage 2 to stage 3
        networkInterconnects = index[blockIdx.x].dev_interconnects + N_NLVL1 * N_INPUTS + N_NLVL2 * N_NLVL1;
        networkConstants = index[blockIdx.x].dev_constants + N_NLVL1 + N_NLVL2;
        networkMemories = index[blockIdx.x].dev_memories + N_NLVL1 + N_NLVL2;
        inputs = index[blockIdx.x].dev_lvl2;
        outputs = index[blockIdx.x].dev_lvl3;
        nInputs = N_NLVL2;
        nOutputs = N_NLVL3;
        break;
    case 3: // Stage 3 to output
        networkInterconnects = index[blockIdx.x].dev_interconnects + N_NLVL1 * N_INPUTS + N_NLVL2 * N_NLVL1 + N_NLVL3 * N_NLVL2;
        networkConstants = index[blockIdx.x].dev_constants + N_NLVL1 + N_NLVL2 + N_NLVL3;
        networkMemories = index[blockIdx.x].dev_memories + N_NLVL1 + N_NLVL2 + N_NLVL3;
        inputs = index[blockIdx.x].dev_lvl3;
        outputs = index[blockIdx.x].dev_outputs;
        nInputs = N_NLVL3;
        nOutputs = N_OUTPUTS;
        break;
    default :
        printf("Error in kernel, received incorrect stage id.\n");
        return;
    }
    int threadID = threadIdx.x;
    /* Shared memory */
    __shared__ float cached_inputs[MAX_NLVL];
    float sum;
    /* Cache the inputs in shared memory, because every thread reads all of
       them. Block-stride loop: thread t copies elements t, t+nOutputs, ...,
       which covers every input exactly once for ANY nInputs/nOutputs ratio.
       (The previous "io_ratio * i + threadID" indexing both skipped inputs
       and wrote past nInputs whenever nInputs/nOutputs != nOutputs, and
       copied nothing at all when nInputs < nOutputs.) */
    for (int i = threadID; i < nInputs; i += nOutputs) {
        cached_inputs[i] = inputs[i];
    }
    __syncthreads();
    /* Doing old_output * memCoefficient + constant in a single operation
       Global memory accesses are coalesced */
    sum = fmaf(outputs[threadID], networkMemories[threadID], networkConstants[threadID]);
    __syncthreads();
    /* Accumulate the weighted inputs; global memory accesses are coalesced */
    for (int i = 0; i < nInputs; i++) {
        sum = fmaf(cached_inputs[i], networkInterconnects[nOutputs * i + threadID], sum);
    }
    /* Write everything (coalesced write of 1 float per thread) */
    outputs[threadID] = sum;
    /*if (blockIdx.x == 0)
        printf("Kernel thread %d, step %d, output %f\n", threadID, step, sum);*/
}
/* Launches cudaWorkerKernell for one stage and waits for completion.
 * nblocks  - number of networks (one block per network)
 * nthreads - number of output neurons of the stage (must match blockDim
 *            assumption inside the kernel)
 * index    - device pointer to the cudaNetworkGPUAdresses_kernel table
 * step     - stage id (0..3) */
void cudaWorkerKernellCall(int nblocks, int nthreads, void* index, int step) {
    cudaWorkerKernell<<<nblocks, nthreads>>>(index, step);
    /* Launch-configuration errors are only reported via cudaGetLastError(),
       not by the synchronize below — check both. */
    cudaError_t cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        return;
    }
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching Kernel!\n",
                cudaStatus, cudaGetErrorString(cudaStatus));
        return;
    }
}
|
8666fb3c57c238a16cfd926a5c65a4702645ebf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* Per-pixel gradient magnitude and direction from the Gx/Gy Sobel responses.
 * One thread per pixel; there is no bounds check, so the caller must launch
 * exactly gridDim.x * blockDim.x == number of pixels.
 * NOTE(review): sqrtf(af*af + bf*bf) can exceed 255 and wraps on the cast to
 * unsigned char — confirm whether saturation was intended. */
__global__ void pythagoras(unsigned char* Gx, unsigned char* Gy, unsigned char* G, unsigned char* theta)
{
	int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float af = float(Gx[idx]);
	float bf = float(Gy[idx]);
	/* Gradient magnitude */
	G[idx] = (unsigned char)sqrtf(af * af + bf * bf);
	/* Gradient direction scaled into byte range. The original code was
	   "(unsigned char)atan2f(af, bf)*63.994": the cast binds tighter than
	   '*', so the angle was truncated to 0 or 1 BEFORE scaling and theta
	   could only ever be 0 or 63. Parenthesize so the angle is scaled first
	   (and use a float literal to avoid a double-precision round trip). */
	theta[idx] = (unsigned char)(atan2f(af, bf) * 63.994f);
} | 8666fb3c57c238a16cfd926a5c65a4702645ebf8.cu | #include "includes.h"
/* Per-pixel gradient magnitude and direction from the Gx/Gy Sobel responses.
 * One thread per pixel; there is no bounds check, so the caller must launch
 * exactly gridDim.x * blockDim.x == number of pixels.
 * NOTE(review): sqrtf(af*af + bf*bf) can exceed 255 and wraps on the cast to
 * unsigned char — confirm whether saturation was intended. */
__global__ void pythagoras(unsigned char* Gx, unsigned char* Gy, unsigned char* G, unsigned char* theta)
{
	int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float af = float(Gx[idx]);
	float bf = float(Gy[idx]);
	/* Gradient magnitude */
	G[idx] = (unsigned char)sqrtf(af * af + bf * bf);
	/* Gradient direction scaled into byte range. The original code was
	   "(unsigned char)atan2f(af, bf)*63.994": the cast binds tighter than
	   '*', so the angle was truncated to 0 or 1 BEFORE scaling and theta
	   could only ever be 0 or 63. Parenthesize so the angle is scaled first
	   (and use a float literal to avoid a double-precision round trip). */
	theta[idx] = (unsigned char)(atan2f(af, bf) * 63.994f);
} |
116756f5f493cf180babc6f3447cc5c0d267df16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
/* Macros for summing over a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
 * - vector of values of the first descriptive variable *v1s, 1 variable, all objects
 * - vector of values of the second descriptive variable *v2s, 1 variable, all objects
 * - vector of decision-variable values *ds
 * - number of objects num_objects
 */
/* Computes the Gain of Interaction Information (GIG) for one pair of
 * discretized variables:  GIG = IG(v1 u v2) - max(IG(v1), IG(v2)).
 *
 * Data layout (established by the bit manipulation below):
 *  - ds:   decision values, bit-packed, 1 bit per object;
 *  - vars: descriptive values, 2 bits per variable (values 0..2 used),
 *          4 variables per byte, one row of num_vars bytes per object
 *          (num_vars here is the padded row width in BYTES);
 *  - v1_p, v2_p: bit positions of the two variables inside a row;
 *  - p: presumably the a-priori positive-class probability, used as a
 *       Laplace-style pseudo-count inside H2 -- TODO confirm with caller.
 */
__device__ float compute_gig_1_2(int v1_p, int v2_p, char *vars, char *ds, int num_vars, int num_objects, float p)
{
	/* count[d][v1][v2]: contingency table over (decision, v1 value, v2 value) */
	int count[2][3][3] = { 0 };
	#pragma unroll 4
	for (int i = 0; i < num_objects; ++i) {
		char d = (ds[i / 8] >> (i % 8)) & 1;
		char v1 = (vars[i * num_vars + v1_p / 4] >> ((v1_p % 4) * 2)) & 3;
		char v2 = (vars[i * num_vars + v2_p / 4] >> ((v2_p % 4) * 2)) & 3;
		count[d][v1][v2]++;
	}
	float ig1, ig2, ig12, h_p;
	/* Entropy of the decision variable alone (with pseudo-count p) */
	h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
	/* Information gains of v1, v2 and of the pair.
	   NOTE(review): the conditional-entropy terms are weighted by raw counts
	   rather than frequencies (no division by num_objects) -- presumably a
	   uniform scaling that preserves the ranking; confirm this is intended. */
	ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
		SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
		SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
	ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
		SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
		SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
	ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
		SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
		SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
		SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
		SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
		SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
		SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
		SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
		SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
	//printf("  IG(v1) = %f\n", ig1);
	//printf("  IG(v2) = %f\n", ig2);
	//printf("  IG(v1 u v2) = %f\n", ig12);
	return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Data format:
 * - matrix of descriptive-variable values *vars, 1 row per variable
 * - vector of decision-variable values *ds
 * - number of objects num_objects
 * - number of variables num_vars
 * - resulting GIG values
 */
/* Computes GIG for every unordered variable pair (v1, v2) with v1 < v2 and
 * stores it in the strict upper triangle of the row-major num_vars x num_vars
 * result matrix r_gig. One thread per pair. */
__global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p)
{
	const int v1 = blockIdx.x * blockDim.x + threadIdx.x;
	const int v2 = blockIdx.y * blockDim.y + threadIdx.y;
	/* Only in-bounds pairs from the strict upper triangle are computed. */
	if (v1 >= v2 || v1 >= num_vars || v2 >= num_vars) return;
	/* Variables are packed 4 per byte (2 bits each); rows are byte-padded. */
	const int padded_row_bytes = (num_vars - 1) / 4 + 1;
	r_gig[v1 * num_vars + v2] = compute_gig_1_2(v1, v2, vars, ds, padded_row_bytes, num_objects, p);
}
/* One above-threshold result: the GIG value and the two variable indices. */
struct GigStruct {
	float gig;
	int v1, v2;
};
/* Like compute_gig_kernel, but instead of filling a dense matrix it appends
 * only the pairs whose GIG reaches `threshold` to the r_gig array, counting
 * them through the device counter *num_gig_structs.
 *
 * NOTE(review): *num_gig_structs keeps growing even after the output array is
 * full (overflow entries are silently dropped below), so the host must clamp
 * the returned count to max_num_gig_structs before reading r_gig. */
__global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars,
	struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
	float p, float threshold)
{
	int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
	int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
	/* Only in-bounds pairs from the strict upper triangle are computed. */
	if (v1_p >= v2_p) return;
	if (v1_p >= num_vars) return;
	if (v2_p >= num_vars) return;
	//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
	/* Variables are packed 4 per byte (2 bits each); rows are byte-padded. */
	const int num_v_padded = (num_vars - 1) / 4 + 1;
	float gig = compute_gig_1_2(v1_p, v2_p, vars, ds, num_v_padded, num_objects, p);
	if (gig < threshold) return;
	/* atomicAdd rather than atomicInc: atomicInc() wraps around to 0 */
	int num = atomicAdd(num_gig_structs, 1);
	if (num < max_num_gig_structs) {
		r_gig[num].gig = gig;
		r_gig[num].v1 = v1_p;
		r_gig[num].v2 = v2_p;
	}
	//printf("  GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
int compare_gig(const void *a, const void *b)
{
if (((struct GigStruct*)a)->gig > ((struct GigStruct*)b)->gig) return -1;
else if (((struct GigStruct*)a)->gig == ((struct GigStruct*)b)->gig) return 0;
else return 1;
}
/* qsort comparator: orders floats in descending order. */
int compare_float(const void *a, const void *b)
{
	float fa = *(const float*)a;
	float fb = *(const float*)b;
	if (fa > fb) return -1;
	if (fa == fb) return 0;
	return 1;
}
int main()
{
int num_objects, num_vars, result_size, real_result_size;
float a_priori, threshold;
float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
Timer timer;
timer.start();
scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
Sync2BitArray2D vars(num_objects, num_vars);
SyncBitArray ds(num_objects);
/* Read the input data */
{
for (int i = 0; i < num_objects; ++i) {
int a; scanf("%d", &a); a &= 1;
ds.setHost(i, a);
for (int j = 0; j < num_vars; ++j) {
int b; scanf("%d", &b); b &= 3;
vars.setHost(i, j, b);
}
}
input = timer.lap();
}
/* Copy the data to the GPU */
{
vars.syncToDevice();
ds.syncToDevice();
copy = timer.lap();
}
/* Run a randomized trial on the first 10% of the variables */
{
int random_trial_size = num_vars / 10;
/* Allocating memory for the resulting GIG values fails when the allocation
   exceeds roughly 400 MB.  XXX: the gig array would not have to be square. */
if (random_trial_size > 8192)
random_trial_size = 8192;
float percent = (float)random_trial_size / (float)num_vars ;
SyncArray2D<float> gig(random_trial_size, random_trial_size);
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
hipLaunchKernelGGL(( compute_gig_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(),
num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
CUDA_CALL(hipGetLastError());
hipDeviceSynchronize();
random_trial_kernel = timer.lap();
gig.syncToHost();
random_trial_copy = timer.lap();
/* Copy the computed GIG values into a contiguous chunk of memory,
   sort them, and pick the appropriate element as the threshold */
{
int num_gig = 0;
float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
qsort(gig_sorted, num_gig, sizeof(float), compare_float);
/* gig_sorted jest posortowany malejco */
threshold = gig_sorted[(int)((float)result_size * percent * percent)];
free(gig_sorted);
}
random_trial_process = timer.lap();
}
/* Wykonujemy docelowe obliczenia na wszystkich zmiennych kernelem,
ktry zapisuje tylko wartoci wiksze ni threshold */
{
const int max_num_structs = result_size * 2;
SyncArray<struct GigStruct> gig_structs(max_num_structs);
SyncVar<int> num_structs;
dim3 block_size(16, 16);
dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
padToMultipleOf(num_vars, block_size.y) / block_size.y);
hipLaunchKernelGGL(( compute_gig_wt_kernel), dim3(grid_size), dim3(block_size), 0, 0, (char*)vars.getDevice(), (char*)ds.getDevice(),
num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
max_num_structs, num_structs.getDevice(), a_priori, threshold);
CUDA_CALL(hipGetLastError());
hipDeviceSynchronize();
main_kernel = timer.lap();
num_structs.syncToHost();
gig_structs.syncToHost();
main_copy = timer.lap();
real_result_size = *num_structs.getHost();
qsort(gig_structs.getHost(), *num_structs.getHost(), sizeof(struct GigStruct), compare_gig);
for (int i = *num_structs.getHost() - 1; i >= 0; --i)
printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
main_process = timer.lap();
}
all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
return 0;
}
| 116756f5f493cf180babc6f3447cc5c0d267df16.cu | #include "util.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define H(a) (-a * log2f(a))
#define H2(a1, a2, p) (H(((float)(a1) + (p)) / ((float)(a1 + a2) + 1.0f)) + \
H(((float)(a2) + (1.0f - p)) / ((float)(a1 + a2) + 1.0f)))
/* Macros for summing slices of a 2 x 3 x 3 array */
#define SUM_N3(a, n1, n2) (a[n1][n2][0] + a[n1][n2][1] + a[n1][n2][2])
#define SUM_N2(a, n1, n3) (a[n1][0][n3] + a[n1][1][n3] + a[n1][2][n3])
#define SUM_N1(a, n2, n3) (a[0][n2][n3] + a[1][n2][n3])
#define SUM_N2_N3(a, n1) (SUM_N3(a, n1, 0) + SUM_N3(a, n1, 1) + SUM_N3(a, n1, 2))
#define SUM_N1_N3(a, n2) (SUM_N3(a, 0, n2) + SUM_N3(a, 1, n2))
#define SUM_N1_N2(a, n3) (SUM_N2(a, 0, n3) + SUM_N2(a, 1, n3))
/* Data format:
 * - vector of values of the first descriptive variable *v1s, 1 variable, all objects
 * - vector of values of the second descriptive variable *v2s, 1 variable, all objects
 * - vector of decision variable values *ds
 * - number of objects num_objects
 */
/* Computes the GIG score of the variable pair (v1_p, v2_p).
 *
 * vars        - variable values packed 4-per-byte (2 bits each); object i's
 *               row starts at vars + i*num_vars
 * ds          - decision bits packed 8-per-byte
 * num_vars    - row stride of vars in *bytes* (callers pass the padded byte
 *               count, see compute_gig_kernel)
 * num_objects - number of objects (rows)
 * p           - a priori probability used to smooth the H2 entropy terms
 *
 * Returns ig12 - max(ig1, ig2): the gain of the pair over the better of
 * the two single variables.
 *
 * NOTE(review): count[][3][3] has 3 bins per variable, but the 2-bit decode
 * yields values 0..3 — a stored value of 3 would index out of bounds.
 * Presumably inputs are restricted to 0..2; confirm at the data source.
 */
__device__ float compute_gig_1_2(int v1_p, int v2_p, char *vars, char *ds, int num_vars, int num_objects, float p)
{
	/* count[d][v1][v2]: joint occurrence counts (contingency table) */
	int count[2][3][3] = { 0 };
	#pragma unroll 4
	for (int i = 0; i < num_objects; ++i) {
		char d = (ds[i / 8] >> (i % 8)) & 1;
		char v1 = (vars[i * num_vars + v1_p / 4] >> ((v1_p % 4) * 2)) & 3;
		char v2 = (vars[i * num_vars + v2_p / 4] >> ((v2_p % 4) * 2)) & 3;
		count[d][v1][v2]++;
	}
	float ig1, ig2, ig12, h_p;
	/* smoothed entropy of the decision variable alone */
	h_p = H2(SUM_N2_N3(count, 0), SUM_N2_N3(count, 1), p);
	/* information-gain term of v1 alone */
	ig1 = h_p - SUM_N1_N3(count, 0) * H2(SUM_N3(count, 0, 0), SUM_N3(count, 1, 0), p) -
		SUM_N1_N3(count, 1) * H2(SUM_N3(count, 0, 1), SUM_N3(count, 1, 1), p) -
		SUM_N1_N3(count, 2) * H2(SUM_N3(count, 0, 2), SUM_N3(count, 1, 2), p);
	/* information-gain term of v2 alone */
	ig2 = h_p - SUM_N1_N2(count, 0) * H2(SUM_N2(count, 0, 0), SUM_N2(count, 1, 0), p) -
		SUM_N1_N2(count, 1) * H2(SUM_N2(count, 0, 1), SUM_N2(count, 1, 1), p) -
		SUM_N1_N2(count, 2) * H2(SUM_N2(count, 0, 2), SUM_N2(count, 1, 2), p);
	/* information-gain term of the joint pair (v1, v2) */
	ig12 = h_p - SUM_N1(count, 0, 0) * H2(count[0][0][0], count[1][0][0], p) -
		SUM_N1(count, 1, 0) * H2(count[0][1][0], count[1][1][0], p) -
		SUM_N1(count, 2, 0) * H2(count[0][2][0], count[1][2][0], p) -
		SUM_N1(count, 0, 1) * H2(count[0][0][1], count[1][0][1], p) -
		SUM_N1(count, 1, 1) * H2(count[0][1][1], count[1][1][1], p) -
		SUM_N1(count, 2, 1) * H2(count[0][2][1], count[1][2][1], p) -
		SUM_N1(count, 0, 2) * H2(count[0][0][2], count[1][0][2], p) -
		SUM_N1(count, 1, 2) * H2(count[0][1][2], count[1][1][2], p) -
		SUM_N1(count, 2, 2) * H2(count[0][2][2], count[1][2][2], p);
	//printf("	IG(v1) = %f\n", ig1);
	//printf("	IG(v2) = %f\n", ig2);
	//printf("	IG(v1 u v2) = %f\n", ig12);
	return ig12 - ((ig1 > ig2) ? ig1 : ig2);
}
/* Data format:
 * - matrix of descriptive variable values *vars, 1 row - 1 variable
 * - vector of decision variable values *ds
 * - number of objects num_objects
 * - number of variables num_vars
 * - resulting GIG matrix
 */
/* One thread per variable pair: thread (x, y) scores the pair (x, y) and
   stores it into the row-major num_vars x num_vars matrix r_gig.  Only the
   strict upper triangle (x < y) is computed; all other threads exit. */
__global__ void compute_gig_kernel(char *vars, char *ds, int num_objects, int num_vars, float *r_gig, float p)
{
	const int first = blockIdx.x * blockDim.x + threadIdx.x;
	const int second = blockIdx.y * blockDim.y + threadIdx.y;
	/* keep only in-range pairs from the strict upper triangle
	   (first < second < num_vars implies first < num_vars) */
	if (first >= second || second >= num_vars)
		return;
	/* vars rows are packed 4 values per byte */
	const int row_bytes = (num_vars - 1) / 4 + 1;
	r_gig[first * num_vars + second] = compute_gig_1_2(first, second, vars, ds, row_bytes, num_objects, p);
}
/* One above-threshold result record emitted by compute_gig_wt_kernel. */
struct GigStruct {
	float gig;   // GIG score of the pair
	int v1, v2;  // variable indices; the kernel only emits pairs with v1 < v2
};
/* Thresholded variant: one thread per variable pair (strict upper triangle).
 * Scores the pair and, when the score reaches `threshold`, appends a
 * GigStruct record to r_gig through the shared atomic counter.
 *
 * num_gig_structs - device counter of *attempted* appends; it may end up
 *                   larger than max_num_gig_structs, in which case the
 *                   overflowing records are silently dropped and the host
 *                   must clamp before reading r_gig.
 */
__global__ void compute_gig_wt_kernel(char *vars, char *ds, int num_objects, int num_vars,
		struct GigStruct *r_gig, int max_num_gig_structs, int* num_gig_structs,
		float p, float threshold)
{
	int v1_p = blockIdx.x * blockDim.x + threadIdx.x;
	int v2_p = blockIdx.y * blockDim.y + threadIdx.y;
	if (v1_p >= v2_p) return;
	if (v1_p >= num_vars) return;
	if (v2_p >= num_vars) return;
	//printf("compute_gig(%d, %d) %d\n", v1_p, v2_p, blockIdx.y);
	/* bytes per packed row of vars (4 values per byte) */
	const int num_v_padded = (num_vars - 1) / 4 + 1;
	float gig = compute_gig_1_2(v1_p, v2_p, vars, ds, num_v_padded, num_objects, p);
	if (gig < threshold) return;
	/* atomicInc() wraps around to 0, hence atomicAdd + explicit bound check */
	int num = atomicAdd(num_gig_structs, 1);
	if (num < max_num_gig_structs) {
		r_gig[num].gig = gig;
		r_gig[num].v1 = v1_p;
		r_gig[num].v2 = v2_p;
	}
	//printf("	GIG = %f\n", r_gig[v1_p * num_vars + v2_p]);
}
/* Comparators for sorting in _descending_ order */
/* qsort() comparator: orders GigStruct records by descending gig score. */
int compare_gig(const void *a, const void *b)
{
	const struct GigStruct *lhs = (const struct GigStruct *)a;
	const struct GigStruct *rhs = (const struct GigStruct *)b;
	if (lhs->gig > rhs->gig)
		return -1;
	if (lhs->gig == rhs->gig)
		return 0;
	return 1;
}
/* qsort() comparator: orders floats in descending order. */
int compare_float(const void *a, const void *b)
{
	float x = *(const float *)a;
	float y = *(const float *)b;
	return (x > y) ? -1 : ((x == y) ? 0 : 1);
}
/*
 * Entry point of the GIG computation (CUDA build).
 *
 * stdin format: num_objects num_vars result_size a_priori, followed, per
 * object, by one decision bit and num_vars 2-bit variable values.
 * A randomized trial on the first chunk of variables picks a score
 * threshold; the full run then stores only pairs above that threshold.
 * All per-phase wall-clock times are reported on stderr.
 */
int main()
{
	int num_objects, num_vars, result_size, real_result_size;
	float a_priori, threshold;
	/* per-phase timings (ms) */
	float input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all;
	Timer timer;
	timer.start();
	scanf("%d %d %d %f", &num_objects, &num_vars, &result_size, &a_priori);
	Sync2BitArray2D vars(num_objects, num_vars);
	SyncBitArray ds(num_objects);
	/* Read the input data */
	{
		for (int i = 0; i < num_objects; ++i) {
			int a; scanf("%d", &a); a &= 1;
			ds.setHost(i, a);
			for (int j = 0; j < num_vars; ++j) {
				int b; scanf("%d", &b); b &= 3;
				vars.setHost(i, j, b);
			}
		}
		input = timer.lap();
	}
	/* Copy the data to the device */
	{
		vars.syncToDevice();
		ds.syncToDevice();
		copy = timer.lap();
	}
	/* Run a randomized trial on the first 10% of the variables */
	{
		int random_trial_size = num_vars / 10;
		/* Allocating memory for the resulting GIG fails when it exceeds
		   ~400MB.  XXX: the gig array would not have to be square. */
		if (random_trial_size > 8192)
			random_trial_size = 8192;
		float percent = (float)random_trial_size / (float)num_vars ;
		SyncArray2D<float> gig(random_trial_size, random_trial_size);
		dim3 block_size(16, 16);
		dim3 grid_size(padToMultipleOf(random_trial_size, block_size.x) / block_size.x,
				padToMultipleOf(random_trial_size, block_size.y) / block_size.y);
		compute_gig_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(),
				num_objects, random_trial_size, (float*)gig.getDevice(), a_priori);
		CUDA_CALL(cudaGetLastError());
		cudaDeviceSynchronize();
		random_trial_kernel = timer.lap();
		gig.syncToHost();
		random_trial_copy = timer.lap();
		/* Copy the computed GIGs into one contiguous chunk of memory,
		   sort it and pick the appropriate element as the threshold */
		{
			int num_gig = 0;
			float *gig_sorted = (float*)malloc(sizeof(float) * random_trial_size * random_trial_size);
			for (int v1_p = 0; v1_p < random_trial_size; ++v1_p)
				for (int v2_p = v1_p + 1; v2_p < random_trial_size; ++v2_p)
					gig_sorted[num_gig++] = gig.getHostEl(v1_p, v2_p);
			qsort(gig_sorted, num_gig, sizeof(float), compare_float);
			/* gig_sorted is sorted in descending order; scale the wanted
			   rank by the sampled fraction of all pairs */
			threshold = gig_sorted[(int)((float)result_size * percent * percent)];
			free(gig_sorted);
		}
		random_trial_process = timer.lap();
	}
	/* Run the target computation on all variables with a kernel that
	   only stores values greater than the threshold */
	{
		const int max_num_structs = result_size * 2;
		SyncArray<struct GigStruct> gig_structs(max_num_structs);
		SyncVar<int> num_structs;
		dim3 block_size(16, 16);
		dim3 grid_size(padToMultipleOf(num_vars, block_size.x) / block_size.x,
				padToMultipleOf(num_vars, block_size.y) / block_size.y);
		compute_gig_wt_kernel<<<grid_size, block_size>>>((char*)vars.getDevice(), (char*)ds.getDevice(),
				num_objects, num_vars, (struct GigStruct*)gig_structs.getDevice(),
				max_num_structs, num_structs.getDevice(), a_priori, threshold);
		CUDA_CALL(cudaGetLastError());
		cudaDeviceSynchronize();
		main_kernel = timer.lap();
		num_structs.syncToHost();
		gig_structs.syncToHost();
		main_copy = timer.lap();
		real_result_size = *num_structs.getHost();
		/* BUGFIX: the kernel keeps incrementing the counter even once the
		   output array is full, so the counter may exceed max_num_structs.
		   Clamp to the number of records actually stored before sorting,
		   otherwise qsort() and the print loop read past the buffer. */
		int num_stored = real_result_size < max_num_structs ? real_result_size : max_num_structs;
		qsort(gig_structs.getHost(), num_stored, sizeof(struct GigStruct), compare_gig);
		for (int i = num_stored - 1; i >= 0; --i)
			printf("%f %d %d\n", gig_structs.getHostEl(i).gig, gig_structs.getHostEl(i).v1, gig_structs.getHostEl(i).v2);
		main_process = timer.lap();
	}
	all = input + copy + random_trial_kernel + random_trial_copy + random_trial_process + main_kernel + main_copy + main_process;
	fprintf(stderr, "data: variables, objects, result_size, true result size, threshold\n");
	fprintf(stderr, "%d, %d, %d, %d, %f\n", num_vars, num_objects, result_size, real_result_size, threshold);
	fprintf(stderr, "times: input, copy, random_trial_kernel, random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all\n");
	fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input, copy, random_trial_kernel,
			random_trial_copy, random_trial_process, main_kernel, main_copy, main_process, all);
	fprintf(stderr, "%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f\n", input / all * 100.0f, copy / all * 100.0f,
			random_trial_kernel / all * 100.0f, random_trial_copy / all * 100.0f, random_trial_process / all * 100.0f,
			main_kernel / all * 100.0f, main_copy / all * 100.0f, main_process / all * 100.0f);
	return 0;
}
|
97995ec06634dacdfeafd7617581d3807e75d470.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <book.h>
#include <cpu_bitmap.h>
#define DIM 1024
#define PI 3.1415926535897932f
/*
 * Fills an RGBA bitmap with a green 2D sine interference pattern
 * (shared-memory demo from "CUDA by Example", ch. 5).
 *
 * Launch layout: one thread per pixel over a DIM x DIM image; blockDim
 * must be exactly (16, 16) to match the shared tile below.
 */
__global__ void kernel(unsigned char *ptr)
{
	// map from threadIdx/blockIdx to pixel position
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	int offset = x + y * blockDim.x * gridDim.x;
	// calculate the value at that position; the second factor must use y —
	// the original used sinf(x...) twice, collapsing the 2D pattern into
	// vertical stripes
	__shared__ float shared[16][16];
	const float period = 128.0f;
	shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *
		(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f;
	// every thread must finish writing its tile element before any thread
	// reads the mirrored element below
	__syncthreads();
	ptr[offset * 4 + 0] = 0;                                           // R
	ptr[offset * 4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y];  // G
	ptr[offset * 4 + 2] = 0;                                           // B
	ptr[offset * 4 + 3] = 255;  // A — was "+ 0", which clobbered red and left alpha uninitialized
}
/* Allocates the device bitmap, renders the pattern, and copies it back.
   (The closing brace of main lives on the following line.) */
int main(void)
{
	CPUBitmap bitmap(DIM, DIM);
	unsigned char *d_bitmap;
	HANDLE_ERROR(hipMalloc((void**)&d_bitmap, bitmap.image_size()));
	dim3 grids(DIM / 16, DIM / 16);
	dim3 threads(16, 16);
	kernel<<<grids, threads>>>(d_bitmap);
	/* kernel launches do not report errors directly; query explicitly */
	HANDLE_ERROR(hipGetLastError());
	/* blocking copy — also synchronizes with the kernel */
	HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), d_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));
	/* NOTE(review): the bitmap is computed but never displayed or saved —
	   confirm whether bitmap.display_and_exit() was intended here */
	HANDLE_ERROR(hipFree(d_bitmap));
	return 0;
} | 97995ec06634dacdfeafd7617581d3807e75d470.cu | #include <cuda.h>
#include <book.h>
#include <cpu_bitmap.h>
#define DIM 1024
#define PI 3.1415926535897932f
/*
 * Fills an RGBA bitmap with a green 2D sine interference pattern
 * (shared-memory demo from "CUDA by Example", ch. 5).
 *
 * Launch layout: one thread per pixel over a DIM x DIM image; blockDim
 * must be exactly (16, 16) to match the shared tile below.
 */
__global__ void kernel(unsigned char *ptr)
{
	// map from threadIdx/blockIdx to pixel position
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	int offset = x + y * blockDim.x * gridDim.x;
	// calculate the value at that position; the second factor must use y —
	// the original used sinf(x...) twice, collapsing the 2D pattern into
	// vertical stripes
	__shared__ float shared[16][16];
	const float period = 128.0f;
	shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *
		(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f;
	// every thread must finish writing its tile element before any thread
	// reads the mirrored element below
	__syncthreads();
	ptr[offset * 4 + 0] = 0;                                           // R
	ptr[offset * 4 + 1] = shared[15 - threadIdx.x][15 - threadIdx.y];  // G
	ptr[offset * 4 + 2] = 0;                                           // B
	ptr[offset * 4 + 3] = 255;  // A — was "+ 0", which clobbered red and left alpha uninitialized
}
/* Allocates the device bitmap, renders the pattern, and copies it back. */
int main(void)
{
	CPUBitmap bitmap(DIM, DIM);
	unsigned char *d_bitmap;
	HANDLE_ERROR(cudaMalloc((void**)&d_bitmap, bitmap.image_size()));
	dim3 grids(DIM / 16, DIM / 16);
	dim3 threads(16, 16);
	kernel<<<grids, threads>>>(d_bitmap);
	/* kernel launches do not report errors directly; query explicitly */
	HANDLE_ERROR(cudaGetLastError());
	/* blocking copy — also synchronizes with the kernel */
	HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), d_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost));
	/* NOTE(review): the bitmap is computed but never displayed or saved —
	   confirm whether bitmap.display_and_exit() was intended here */
	HANDLE_ERROR(cudaFree(d_bitmap));
	return 0;
}
matmultran.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matmultran.hpp"
#include <cassert>
#include <helper_cuda.h>
#define K 16 // rozmiar "kafelka"
/* Tiled kernel computing C = A * A^T for a row-major m x n matrix A.
 *
 * Each 16x16 (K x K) thread block produces one K x K tile of the m x m
 * result.  Both operand tiles come from A: one read row-wise (As) and one
 * read transposed (ATs).  Assumes m and n are multiples of K — there are
 * no bounds checks (the host launcher uses a grid of exactly m/K x m/K). */
__global__
static void matmultran_kernel(float *C, float *A, int m, int n)
{
	int tx = threadIdx.x; // thread's column within the tile
	int ty = threadIdx.y; // thread's row within the tile
	int ix = blockIdx.x * K + tx; // thread's column in the grid
	int iy = blockIdx.y * K + ty; // thread's row in the grid
	int iAT = blockIdx.x * K * n; // start of the tile of A that is read transposed
	int iA = blockIdx.y * K * n; // start of this block's row tile in A
	float s = 0;
	__shared__ float As[K][K], ATs[K][K];
	for(int t = 0; t < n / K; t++, iA += K, iAT += K)
	{
		As [ty][tx] = A[iA + ty*n + tx];
		ATs[ty][tx] = A[iAT + tx*n + ty];
		/* the whole tile pair must be loaded before anyone multiplies */
		__syncthreads();
		#pragma unroll
		for (int k = 0; k < K; k++)
		{
			s += As[ty][k] * ATs[k][tx];
		}
		/* ...and fully consumed before the next iteration overwrites it */
		__syncthreads();
	}
	C[iy*m + ix] = s;
}
/* Host wrapper (HIP): allocates device buffers, uploads A, launches the
 * tiled C = A * A^T kernel, times it with events, and downloads C.
 *
 * C - output m x m buffer (host); A - input m x n matrix (host).
 * Requires m and n to be multiples of K (the kernel has no bounds checks). */
void matmultran(float *C, float *A, int m, int n)
{
	checkCudaErrors(hipSetDevice(0));
	float *dev_A, *dev_C;
	checkCudaErrors(hipMalloc(&dev_A, m*n*sizeof(float)));
	checkCudaErrors(hipMalloc(&dev_C, m*m*sizeof(float)));
	checkCudaErrors(hipMemcpy(dev_A, A, m*n*sizeof(float), hipMemcpyHostToDevice));
	dim3 dimGrid(m/K, m/K), dimBlock(K, K);
	hipEvent_t start, stop; // events measuring kernel execution time
	checkCudaErrors(hipEventCreate(&start));
	checkCudaErrors(hipEventCreate(&stop));
	checkCudaErrors(hipEventRecord(start, 0));
	hipLaunchKernelGGL(( matmultran_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_C, dev_A, m, n);
	checkCudaErrors(hipGetLastError());
	checkCudaErrors(hipEventRecord(stop, 0));
	checkCudaErrors(hipEventSynchronize(stop));
	float elapsedTime;
	checkCudaErrors(hipEventElapsedTime(&elapsedTime,start, stop));
	checkCudaErrors(hipEventDestroy(start));
	checkCudaErrors(hipEventDestroy(stop));
	checkCudaErrors(hipDeviceSynchronize());
	checkCudaErrors(hipMemcpy(C, dev_C, m*m*sizeof(float), hipMemcpyDeviceToHost));
	checkCudaErrors(hipFree(dev_C));
	checkCudaErrors(hipFree(dev_A));
	checkCudaErrors(hipDeviceReset()); // for the debugger
	printf("GPU (kernel) time = %.3f ms (%6.3f GFLOP/s)\n", elapsedTime, 2e-6 * m * m * n / elapsedTime);
}
| matmultran.cu | #include "matmultran.hpp"
#include <cassert>
#include <helper_cuda.h>
#define K 16 // rozmiar "kafelka"
/* Tiled kernel computing C = A * A^T for a row-major m x n matrix A.
 *
 * Each 16x16 (K x K) thread block produces one K x K tile of the m x m
 * result.  Both operand tiles come from A: one read row-wise (As) and one
 * read transposed (ATs).  Assumes m and n are multiples of K — there are
 * no bounds checks (the host launcher uses a grid of exactly m/K x m/K). */
__global__
static void matmultran_kernel(float *C, float *A, int m, int n)
{
	int tx = threadIdx.x; // thread's column within the tile
	int ty = threadIdx.y; // thread's row within the tile
	int ix = blockIdx.x * K + tx; // thread's column in the grid
	int iy = blockIdx.y * K + ty; // thread's row in the grid
	int iAT = blockIdx.x * K * n; // start of the tile of A that is read transposed
	int iA = blockIdx.y * K * n; // start of this block's row tile in A
	float s = 0;
	__shared__ float As[K][K], ATs[K][K];
	for(int t = 0; t < n / K; t++, iA += K, iAT += K)
	{
		As [ty][tx] = A[iA + ty*n + tx];
		ATs[ty][tx] = A[iAT + tx*n + ty];
		/* the whole tile pair must be loaded before anyone multiplies */
		__syncthreads();
		#pragma unroll
		for (int k = 0; k < K; k++)
		{
			s += As[ty][k] * ATs[k][tx];
		}
		/* ...and fully consumed before the next iteration overwrites it */
		__syncthreads();
	}
	C[iy*m + ix] = s;
}
/* Host wrapper (CUDA): allocates device buffers, uploads A, launches the
 * tiled C = A * A^T kernel, times it with events, and downloads C.
 *
 * C - output m x m buffer (host); A - input m x n matrix (host).
 * Requires m and n to be multiples of K (the kernel has no bounds checks). */
void matmultran(float *C, float *A, int m, int n)
{
	checkCudaErrors(cudaSetDevice(0));
	float *dev_A, *dev_C;
	checkCudaErrors(cudaMalloc(&dev_A, m*n*sizeof(float)));
	checkCudaErrors(cudaMalloc(&dev_C, m*m*sizeof(float)));
	checkCudaErrors(cudaMemcpy(dev_A, A, m*n*sizeof(float), cudaMemcpyHostToDevice));
	dim3 dimGrid(m/K, m/K), dimBlock(K, K);
	cudaEvent_t start, stop; // events measuring kernel execution time
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));
	checkCudaErrors(cudaEventRecord(start, 0));
	matmultran_kernel<<<dimGrid, dimBlock>>>(dev_C, dev_A, m, n);
	checkCudaErrors(cudaGetLastError());
	checkCudaErrors(cudaEventRecord(stop, 0));
	checkCudaErrors(cudaEventSynchronize(stop));
	float elapsedTime;
	checkCudaErrors(cudaEventElapsedTime(&elapsedTime,start, stop));
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
	checkCudaErrors(cudaDeviceSynchronize());
	checkCudaErrors(cudaMemcpy(C, dev_C, m*m*sizeof(float), cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(dev_C));
	checkCudaErrors(cudaFree(dev_A));
	checkCudaErrors(cudaDeviceReset()); // for the debugger
	printf("GPU (kernel) time = %.3f ms (%6.3f GFLOP/s)\n", elapsedTime, 2e-6 * m * m * n / elapsedTime);
}
|
601c3fe44cb64801e93ea939551924a2cc94dff1.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "merge.h"
/* MurmurHash64A as a thrust functor, with a strided output.
 *
 * operator()(i) hashes the i-th fixed-length key of the packed array `key`
 * (each key is *len bytes) and writes the result to
 * output[i*(*off) + (*off_count)].  The stride layout stores one hash per
 * group-by column side by side for every row, so a second pass (see
 * MurmurHash64D_F) can hash the concatenated column hashes into a single
 * 64-bit group key.
 *
 * All scalars are passed as device pointers so the functor can be used
 * with thrust::for_each on device data.
 *
 * NOTE(review): the main loop reads the key through uint64_t* — this
 * assumes 8-byte-aligned key storage; confirm for packed char columns. */
struct MurmurHash64D
{
	const void* key;               // packed keys, *len bytes each
	unsigned long long* output;    // strided hash output
	const int* len;                // key length in bytes
	const unsigned int* seed;      // hash seed
	const unsigned int* off;       // output stride (number of group-by columns)
	const unsigned int* off_count; // output slot within the stride
	MurmurHash64D(const void* _key, unsigned long long* _output, const int* _len, const unsigned int* _seed,
		const unsigned int* _off, const unsigned int* _off_count):
		key(_key), output(_output), len(_len), seed(_seed), off(_off), off_count(_off_count) {}
	template <typename IndexType>
	__host__ __device__
	void operator()(const IndexType & i) {
		// standard MurmurHash64A constants
		const uint64_t m = 0xc6a4a7935bd1e995;
		const int r = 47;
		uint64_t h = *seed ^ (*len * m);
		const uint64_t* data = (const uint64_t *)((char*)key + i*(*len));
		const uint64_t* end = data + (*len/8);
		// mix the key 8 bytes at a time
		while(data != end)
		{
			uint64_t k = *data++;
			k *= m;
			k ^= k >> r;
			k *= m;
			h ^= k;
			h *= m;
		}
		const unsigned char * data2 = (const unsigned char*)data;
		// intentional fall-through: folds in the trailing 1..7 bytes
		switch(*len & 7)
		{
		case 7: h ^= uint64_t(data2[6]) << 48;
		case 6: h ^= uint64_t(data2[5]) << 40;
		case 5: h ^= uint64_t(data2[4]) << 32;
		case 4: h ^= uint64_t(data2[3]) << 24;
		case 3: h ^= uint64_t(data2[2]) << 16;
		case 2: h ^= uint64_t(data2[1]) << 8;
		case 1: h ^= uint64_t(data2[0]);
			h *= m;
		};
		// final avalanche
		h ^= h >> r;
		h *= m;
		h ^= h >> r;
		//printf("WRITE TO OFFSET %d %d %lld \n", i , i*(*off) + (*off_count), h);
		output[i*(*off) + (*off_count)] = h;
	}
};
/* Same MurmurHash64A core as MurmurHash64D, but with a flat output:
 * operator()(i) hashes the i-th fixed-length key into output[i].
 * Used for the second pass that condenses a row's per-column hashes
 * into one 64-bit group key. */
struct MurmurHash64D_F
{
	const void* key;            // packed keys, *len bytes each
	unsigned long long* output; // output[i] = hash of key i
	const int* len;             // key length in bytes
	const unsigned int* seed;   // hash seed
	MurmurHash64D_F(const void* _key, unsigned long long* _output, const int* _len, const unsigned int* _seed):
		key(_key), output(_output), len(_len), seed(_seed) {}
	template <typename IndexType>
	__host__ __device__
	void operator()(const IndexType & i) {
		// standard MurmurHash64A constants
		const uint64_t m = 0xc6a4a7935bd1e995;
		const int r = 47;
		uint64_t h = *seed ^ (*len * m);
		const uint64_t* data = (const uint64_t *)((char*)key + i*(*len));
		const uint64_t* end = data + (*len/8);
		// mix the key 8 bytes at a time
		while(data != end)
		{
			uint64_t k = *data++;
			k *= m;
			k ^= k >> r;
			k *= m;
			h ^= k;
			h *= m;
		}
		const unsigned char * data2 = (const unsigned char*)data;
		// intentional fall-through: folds in the trailing 1..7 bytes
		switch(*len & 7)
		{
		case 7: h ^= uint64_t(data2[6]) << 48;
		case 6: h ^= uint64_t(data2[5]) << 40;
		case 5: h ^= uint64_t(data2[4]) << 32;
		case 4: h ^= uint64_t(data2[3]) << 24;
		case 3: h ^= uint64_t(data2[2]) << 16;
		case 2: h ^= uint64_t(data2[1]) << 8;
		case 1: h ^= uint64_t(data2[0]);
			h *= m;
		};
		// final avalanche
		h ^= h >> r;
		h *= m;
		h ^= h >> r;
		output[i] = h;
	}
};
/* Host-side binary functor: divides an already-computed float sum by an
   integer count to produce an average. */
struct float_avg
{
	__host__ float_type operator()(const float_type &lhs, const int_type &rhs) const { return lhs/rhs;}
};
/* Host-side binary functor: averages an integer sum over an integer count;
   lhs is cast to float_type before the division to avoid integer truncation. */
struct float_avg1
{
	__host__ float_type operator()(const int_type &lhs, const int_type &rhs) const {return ((float_type)lhs)/rhs;}
};
/*struct float_avg : public binary_function<float_type,int_type,float_type>
{
__host__ __device__ float_type operator()(const float_type &lhs, const int_type &rhs) const {return lhs/(float_type)rhs;}
}; // end not_equal_to
*/
// Iterator shorthands for zipping two int columns together (used by the
// distinct-aggregate code, currently commented out in add()).
typedef thrust::device_vector<int_type>::iterator IntIterator;
typedef thrust::tuple<IntIterator,IntIterator> IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
// Seed for the MurmurHash-based group-by key hashing.
unsigned int hash_seed = 100;
// Sorted list of 64-bit group hash keys accumulated across all segments
// merged so far by add(); reduced by key in count_avg().
thrust::host_vector<unsigned long long int> h_merge;
using namespace std;
using namespace thrust::placeholders;
/* Initializes the result set c as an uncompressed, single-segment shell
 * that mirrors b's schema: column names, per-column types and aggregate
 * (grp) types, plus empty host/device storage for every column.
 *
 * NOTE(review): c->grp_type is allocated with new[] and never visibly
 * freed here — confirm ownership/cleanup lives in CudaSet. */
void create_c(CudaSet* c, CudaSet* b)
{
	map<string,int>::iterator it;
	c->not_compressed = 1;
	c->segCount = 1;
	/* copy the column-name -> index mapping */
	for ( it=b->columnNames.begin() ; it != b->columnNames.end(); ++it ) {
		c->columnNames[(*it).first] = (*it).second;
	};
	c->grp_type = new unsigned int[c->mColumnCount];
	for(unsigned int i=0; i < b->mColumnCount; i++) {
		c->cols[i] = b->cols[i];
		c->type[i] = b->type[i];
		c->grp_type[i] = b->grp_type[i];
		if (b->type[i] == 0) {          // integer column
			c->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
			c->d_columns_int.push_back(thrust::device_vector<int_type>());
			c->type_index[i] = c->h_columns_int.size()-1;
		}
		else if (b->type[i] == 1) {     // float column
			c->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
			c->d_columns_float.push_back(thrust::device_vector<float_type>());
			c->type_index[i] = c->h_columns_float.size()-1;
		}
		else {                          // fixed-width char column
			c->h_columns_char.push_back(NULL);
			c->d_columns_char.push_back(NULL);
			c->char_size.push_back(b->char_size[b->type_index[i]]);
			c->type_index[i] = c->h_columns_char.size()-1;
		};
	};
}
/* Merges one pre-aggregated segment (b) into the running result (c).
 *
 * Pipeline: hash each group-by column of b per row (MurmurHash64D), hash
 * the concatenated per-column hashes into one 64-bit group key
 * (MurmurHash64D_F), sort b's rows by that key on the device, copy b back
 * to the host, and merge every column of b into c's host columns keyed by
 * the global sorted key list h_merge.  The actual reduction of equal keys
 * is performed later (see count_avg / count_simple).
 *
 * op_v3/aliases name the group-by columns; distinct_* are for the
 * COUNT(DISTINCT) path, which is currently commented out below. */
void add(CudaSet* c, CudaSet* b, queue<string> op_v3, map<string,string> aliases,
	 vector<thrust::device_vector<int_type> >& distinct_tmp, vector<thrust::device_vector<int_type> >& distinct_val,
	 vector<thrust::device_vector<int_type> >& distinct_hash, CudaSet* a)
{
	if (c->columnNames.empty()) {
		// first segment: create d_columns and h_columns of the result
		create_c(c,b);
	}
	//cout << endl << "start b and c " << b->mRecCount << " " << c->mRecCount << endl;
	unsigned int cycle_sz = op_v3.size();
	/* resolve the group-by column aliases to b's column indices */
	vector<unsigned int> opv;
	queue<string> ss;
	for(unsigned int z = 0; z < cycle_sz; z++) {
		opv.push_back(b->columnNames[aliases[op_v3.front()]]);
		ss.push(aliases[op_v3.front()]);
		op_v3.pop();
	};
	// create hashes of groupby columns
	thrust::device_vector<unsigned long long int> hashes(b->mRecCount);
	unsigned int idx;
	/* sum holds cycle_sz interleaved per-column hashes for every row */
	thrust::device_vector<unsigned long long int> sum(cycle_sz*b->mRecCount);
	/* scalar functor parameters are passed through 1-element device vectors */
	thrust::device_vector<unsigned int> seed(1);
	seed[0] = hash_seed;
	thrust::device_vector<int> len(1);
	thrust::device_vector<unsigned int> off(1);
	thrust::device_vector<unsigned int> off_count(1);
	thrust::counting_iterator<unsigned int> begin(0);
	for(unsigned int z = 0; z < cycle_sz; z++) {
		idx = opv[z];
		if(b->type[idx] == 0) { //int
			len[0] = 8;
			off[0] = cycle_sz;
			off_count[0] = z;
			MurmurHash64D ff((void*)(thrust::raw_pointer_cast(b->d_columns_int[b->type_index[idx]].data())),
				thrust::raw_pointer_cast(sum.data()),
				thrust::raw_pointer_cast(len.data()), thrust::raw_pointer_cast(seed.data()),
				thrust::raw_pointer_cast(off.data()), thrust::raw_pointer_cast(off_count.data()));
			thrust::for_each(begin, begin + b->mRecCount, ff);
		}
		else if(b->type[idx] == 2) { //string
			len[0] = b->char_size[b->type_index[idx]];
			off[0] = cycle_sz;
			off_count[0] = z;
			MurmurHash64D ff((void*)b->d_columns_char[b->type_index[idx]],
				thrust::raw_pointer_cast(sum.data()),
				thrust::raw_pointer_cast(len.data()), thrust::raw_pointer_cast(seed.data()),
				thrust::raw_pointer_cast(off.data()), thrust::raw_pointer_cast(off_count.data()));
			thrust::for_each(begin, begin + b->mRecCount, ff);
		}
		else { //float
			cout << "No group by on float/decimal columns " << endl;
			exit(0);
		};
	};
	//for(int i = 0; i < cycle_sz*b->mRecCount;i++)
	//cout << "SUM " << sum[i] << endl;
	/* second pass: hash each row's concatenated column hashes into one key */
	len[0] = 8*cycle_sz;
	MurmurHash64D_F ff(thrust::raw_pointer_cast(sum.data()),
		thrust::raw_pointer_cast(hashes.data()),
		thrust::raw_pointer_cast(len.data()), thrust::raw_pointer_cast(seed.data()));
	thrust::for_each(begin, begin + b->mRecCount, ff);
	//for(int i = 0; i < b->mRecCount;i++)
	//cout << "DEV HASH " << hashes[i] << endl;
	// sort the results by hash
	thrust::device_ptr<unsigned int> v = thrust::device_malloc<unsigned int>(b->mRecCount);
	thrust::sequence(v, v + b->mRecCount, 0, 1);
	/* scratch element size: the widest char column, but at least 8 bytes
	   so it can also hold int_type/float_type elements */
	unsigned int max_c = max_char(b);
	if(max_c < 8) {
		max_c = 8;
	};
	void* d;
	CUDA_SAFE_CALL(hipMalloc((void **) &d, b->mRecCount*max_c));
	thrust::sort_by_key(hashes.begin(), hashes.end(), v);
	/* reorder every column of b by the hash order (gather through the
	   permutation v, staging through the scratch buffer d) */
	for(unsigned int i = 0; i < b->mColumnCount; i++) {
		if(b->type[i] == 0) {
			thrust::device_ptr<int_type> d_tmp((int_type*)d);
			thrust::gather(v, v+b->mRecCount, b->d_columns_int[b->type_index[i]].begin(), d_tmp);
			thrust::copy(d_tmp, d_tmp + b->mRecCount, b->d_columns_int[b->type_index[i]].begin());
		}
		else if(b->type[i] == 1) {
			thrust::device_ptr<float_type> d_tmp((float_type*)d);
			thrust::gather(v, v+b->mRecCount, b->d_columns_float[b->type_index[i]].begin(), d_tmp);
			thrust::copy(d_tmp, d_tmp + b->mRecCount, b->d_columns_float[b->type_index[i]].begin());
		}
		else {
			thrust::device_ptr<char> d_tmp((char*)d);
			str_gather(thrust::raw_pointer_cast(v), b->mRecCount, (void*)b->d_columns_char[b->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), b->char_size[b->type_index[i]]);
			hipMemcpy( (void*)b->d_columns_char[b->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), b->mRecCount*b->char_size[b->type_index[i]], hipMemcpyDeviceToDevice);
		};
	};
	hipFree(d);
	thrust::device_free(v);
	b->CopyToHost(0, b->mRecCount);
	thrust::host_vector<unsigned long long int> hh = hashes;
	/* host scratch large enough for the merged keys or any merged column.
	   NOTE(review): the merges below assume c's host columns have capacity
	   h_merge.size() + b->mRecCount; presumably c->resize() grows them by
	   the segment size each call — confirm in CudaSet::resize. */
	char* tmp = new char[max_c*(c->mRecCount + b->mRecCount)];
	c->resize(b->mRecCount);
	//lets merge every column
	for(unsigned int i = 0; i < b->mColumnCount; i++) {
		if(b->type[i] == 0) {
			thrust::merge_by_key(h_merge.begin(), h_merge.end(),
					hh.begin(), hh.end(),
					c->h_columns_int[c->type_index[i]].begin(), b->h_columns_int[b->type_index[i]].begin(),
					thrust::make_discard_iterator(), (int_type*)tmp);
			thrust::copy((int_type*)tmp, (int_type*)tmp + h_merge.size() + b->mRecCount, c->h_columns_int[c->type_index[i]].begin());
		}
		else if(b->type[i] == 1) {
			thrust::merge_by_key(h_merge.begin(), h_merge.end(),
					hh.begin(), hh.end(),
					c->h_columns_float[c->type_index[i]].begin(), b->h_columns_float[b->type_index[i]].begin(),
					thrust::make_discard_iterator(), (float_type*)tmp);
			thrust::copy((float_type*)tmp, (float_type*)tmp + h_merge.size() + b->mRecCount, c->h_columns_float[c->type_index[i]].begin());
		}
		else {
			str_merge_by_key(h_merge, hh, c->h_columns_char[c->type_index[i]], b->h_columns_char[b->type_index[i]], b->char_size[b->type_index[i]], tmp);
			thrust::copy(tmp, tmp + (h_merge.size() + b->mRecCount)*b->char_size[b->type_index[i]], c->h_columns_char[c->type_index[i]]);
		};
	};
	//merge the keys
	thrust::merge(h_merge.begin(), h_merge.end(),
			hh.begin(), hh.end(), (unsigned long long int*)tmp);
	unsigned int cpy_sz = h_merge.size() + b->mRecCount;
	h_merge.resize(h_merge.size() + b->mRecCount);
	thrust::copy((unsigned long long int*)tmp, (unsigned long long int*)tmp + cpy_sz, h_merge.begin());
	delete [] tmp;
	//cout << endl << "end b and c " << b->mRecCount << " " << c->mRecCount << endl;
	//for(int i = 0; i < h_merge.size();i++)
	//cout << "H " << h_merge[i] << endl;
	/* Disabled COUNT(DISTINCT) path, kept for reference: */
/*	bool dis_exists = 0;
	for(unsigned int j=0; j < c->mColumnCount; j++) {
		if (c->grp_type[j] == 6)
			dis_exists = 1;
	};
	if (dis_exists) {
		bool grp_scanned = 0;
		thrust::device_ptr<bool> d_di(a->grp);
		thrust::device_ptr<unsigned int> d_dii = thrust::device_malloc<unsigned int>(a->mRecCount);
		thrust::identity<bool> op;
		thrust::transform(d_di, d_di+a->mRecCount, d_dii, op);
		thrust::device_ptr<int_type> tmp = thrust::device_malloc<int_type>(a->mRecCount);
		unsigned int dist_count = 0;
		for(unsigned int j=0; j < c->mColumnCount; j++) {
			if (c->grp_type[j] == 6) {
				if(!grp_scanned) {
					d_dii[a->mRecCount-1] = 0;
					thrust::inclusive_scan(d_dii, d_dii + a->mRecCount, d_dii);
					thrust::gather(d_dii, d_dii + a->mRecCount, hashes.begin(), tmp); // now hashes are in tmp
					grp_scanned = 1;
				};
				unsigned int offset = distinct_val[dist_count].size();
				distinct_val[dist_count].resize(distinct_val[dist_count].size() + a->mRecCount);
				distinct_hash[dist_count].resize(distinct_hash[dist_count].size() + a->mRecCount);
				thrust::copy(distinct_tmp[dist_count].begin(), distinct_tmp[dist_count].begin() + a->mRecCount, distinct_val[dist_count].begin() + offset);
				thrust::copy(tmp, tmp + a->mRecCount, distinct_hash[dist_count].begin() + offset);
				thrust::stable_sort_by_key(distinct_val[dist_count].begin(), distinct_val[dist_count].end(), distinct_hash[dist_count].begin());
				thrust::stable_sort_by_key(distinct_hash[dist_count].begin(), distinct_hash[dist_count].end(), distinct_val[dist_count].begin());
				ZipIterator new_last = thrust::unique(thrust::make_zip_iterator(thrust::make_tuple(distinct_hash[dist_count].begin(), distinct_val[dist_count].begin())),
								      thrust::make_zip_iterator(thrust::make_tuple(distinct_hash[dist_count].end(), distinct_val[dist_count].end())));
				IteratorTuple t = new_last.get_iterator_tuple();
				distinct_val[dist_count].resize(thrust::get<0>(t) - distinct_hash[dist_count].begin());
				distinct_hash[dist_count].resize(thrust::get<0>(t) - distinct_hash[dist_count].begin());
				dist_count++;
			};
		};
		thrust::device_free(tmp);
		thrust::device_free(d_dii);
	};
*/
}
/* Final aggregation for ungrouped (scalar) queries: collapses every
 * aggregate column of c to a single row.
 *
 * The COUNT column's total is computed first and reused as the divisor for
 * the AVG columns; SUM columns are reduced independently.  On return
 * c->mRecCount is 1 and row 0 of each aggregate column holds its result.
 */
void count_simple(CudaSet* c)
{
	/* BUGFIX: initialize the divisor — previously `count` was read
	   uninitialized (and divided by) when the query had no COUNT column. */
	int_type count = 0;
	for(unsigned int i = 0; i < c->mColumnCount; i++) {
		if(c->grp_type[i] == 0) { // COUNT
			count = thrust::reduce(c->h_columns_int[c->type_index[i]].begin(), c->h_columns_int[c->type_index[i]].begin() + c->mRecCount);
			c->h_columns_int[c->type_index[i]][0] = count;
		};
	};
	if (c->mRecCount != 0) {
		for(unsigned int k = 0; k < c->mColumnCount; k++) {
			if(c->grp_type[k] == 1) { // AVG
				if(count == 0) continue; // no COUNT column: avoid division by zero
				if(c->type[k] == 0) {
					int_type sum = thrust::reduce(c->h_columns_int[c->type_index[k]].begin(), c->h_columns_int[c->type_index[k]].begin() + c->mRecCount);
					c->h_columns_int[c->type_index[k]][0] = sum/count;
				}
				if(c->type[k] == 1) {
					float_type sum = thrust::reduce(c->h_columns_float[c->type_index[k]].begin(), c->h_columns_float[c->type_index[k]].begin() + c->mRecCount);
					c->h_columns_float[c->type_index[k]][0] = sum/count;
				};
			}
			else if(c->grp_type[k] == 2) { // SUM
				if(c->type[k] == 0) {
					int_type sum = thrust::reduce(c->h_columns_int[c->type_index[k]].begin(), c->h_columns_int[c->type_index[k]].begin() + c->mRecCount);
					c->h_columns_int[c->type_index[k]][0] = sum;
				}
				if(c->type[k] == 1) {
					float_type sum = thrust::reduce(c->h_columns_float[c->type_index[k]].begin(), c->h_columns_float[c->type_index[k]].begin() + c->mRecCount);
					c->h_columns_float[c->type_index[k]][0] = sum;
				};
			}
		};
	}
	c->mRecCount = 1;
};
// ---- helpers for count_avg -------------------------------------------------
// Key-equality and reduction functors for the host-side reduce_by_key calls
// below; defined locally so no additional thrust headers are required.
struct cavg_hash_eq {
    bool operator()(const unsigned long long int &a, const unsigned long long int &b) const { return a == b; }
};
template<typename T> struct cavg_sum_op {
    T operator()(const T &a, const T &b) const { return a + b; }
};
template<typename T> struct cavg_min_op {
    T operator()(const T &a, const T &b) const { return a < b ? a : b; }
};
template<typename T> struct cavg_max_op {
    T operator()(const T &a, const T &b) const { return a > b ? a : b; }
};
// Collapses int column k of c to one value per group (groups given by the
// sorted hash keys in h_merge), combining group members with `op`, then
// shrinks the column to res_count entries.
template<typename BinaryOp>
static void cavg_reduce_int(CudaSet* c, unsigned int k, unsigned int res_count, BinaryOp op)
{
    int_type* tmp = new int_type[res_count];
    thrust::reduce_by_key(h_merge.begin(), h_merge.end(),
                          c->h_columns_int[c->type_index[k]].begin(),
                          thrust::make_discard_iterator(), tmp,
                          cavg_hash_eq(), op);
    c->h_columns_int[c->type_index[k]].resize(res_count);
    thrust::copy(tmp, tmp + res_count, c->h_columns_int[c->type_index[k]].begin());
    delete [] tmp;
}
// Float-column counterpart of cavg_reduce_int.
template<typename BinaryOp>
static void cavg_reduce_float(CudaSet* c, unsigned int k, unsigned int res_count, BinaryOp op)
{
    float_type* tmp = new float_type[res_count];
    thrust::reduce_by_key(h_merge.begin(), h_merge.end(),
                          c->h_columns_float[c->type_index[k]].begin(),
                          thrust::make_discard_iterator(), tmp,
                          cavg_hash_eq(), op);
    c->h_columns_float[c->type_index[k]].resize(res_count);
    thrust::copy(tmp, tmp + res_count, c->h_columns_float[c->type_index[k]].begin());
    delete [] tmp;
}
// Final host-side pass of a GROUP BY: collapses every aggregated column of c
// to one entry per group (group boundaries come from the merged, sorted hash
// keys in h_merge) and turns accumulated AVG sums into sum/count.
// The distinct_hash parameter belongs to the disabled COUNT DISTINCT path and
// is currently unused; it is kept so callers are unaffected.
// BUG FIX: MIN (grp_type 4) and MAX (grp_type 5) columns were previously
// reduced with reduce_by_key's default plus operator, i.e. they produced
// per-group sums; they now use proper minimum/maximum reductions.
void count_avg(CudaSet* c, vector<thrust::device_vector<int_type> >& distinct_hash)
{
    // Locate the COUNT column; AVG columns are divided by it below. -1 means
    // "no COUNT column" (previously this index was read uninitialised then).
    int countIndex = -1;
    for(unsigned int i = 0; i < c->mColumnCount; i++) {
        if(c->grp_type[i] == 0) { // COUNT
            countIndex = i;
            break;
        };
    };
    // grp[i] != 0 marks the first row of each group in the sorted hash key
    // sequence; res_count is the number of distinct groups.
    thrust::host_vector<bool> grp;
    unsigned int res_count = 0;
    if(h_merge.size()) {
        grp.resize(h_merge.size());
        thrust::adjacent_difference(h_merge.begin(), h_merge.end(), grp.begin());
        res_count = h_merge.size() - thrust::count(grp.begin(), grp.end(), 0);
    };
    if (c->mRecCount != 0) {
        if (h_merge.size()) {
            // First pass: collapse every column to one entry per group.
            for(unsigned int k = 0; k < c->mColumnCount; k++) {
                if(c->grp_type[k] <= 2) { // COUNT, AVG and SUM all accumulate with +
                    if (c->type[k] == 0)
                        cavg_reduce_int(c, k, res_count, cavg_sum_op<int_type>());
                    else if (c->type[k] == 1)
                        cavg_reduce_float(c, k, res_count, cavg_sum_op<float_type>());
                }
                else if(c->grp_type[k] == 4) { // MIN
                    if (c->type[k] == 0)
                        cavg_reduce_int(c, k, res_count, cavg_min_op<int_type>());
                    else if (c->type[k] == 1)
                        cavg_reduce_float(c, k, res_count, cavg_min_op<float_type>());
                }
                else if(c->grp_type[k] == 5) { // MAX
                    if (c->type[k] == 0)
                        cavg_reduce_int(c, k, res_count, cavg_max_op<int_type>());
                    else if (c->type[k] == 1)
                        cavg_reduce_float(c, k, res_count, cavg_max_op<float_type>());
                }
                else if(c->grp_type[k] == 3) { // plain group-by column, no aggregate
                    // NOTE(review): numeric group-by columns are still combined
                    // with + (exactly as in the original code); the char branch
                    // below instead keeps the first row of each group. Confirm
                    // which behaviour is intended before changing this.
                    if (c->type[k] == 0)
                        cavg_reduce_int(c, k, res_count, cavg_sum_op<int_type>());
                    else if (c->type[k] == 1)
                        cavg_reduce_float(c, k, res_count, cavg_sum_op<float_type>());
                    else { // char: keep the group-leading row selected by grp
                        char* tmp = new char[res_count*c->char_size[c->type_index[k]]];
                        str_copy_if_host(c->h_columns_char[c->type_index[k]], c->mRecCount, tmp, grp, c->char_size[c->type_index[k]]);
                        thrust::copy(tmp, tmp + c->char_size[c->type_index[k]]*res_count, c->h_columns_char[c->type_index[k]]);
                        delete [] tmp;
                    };
                };
            };
            c->mRecCount = res_count;
        };
        // Second pass: finalise AVG columns (sum / count).
        for(unsigned int k = 0; k < c->mColumnCount; k++) {
            if(c->grp_type[k] == 1 && countIndex != -1) { // AVG
                if (c->type[k] == 0 ) { // int sums: materialise a new float column
                    c->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(c->mRecCount));
                    unsigned int idx = c->h_columns_float.size()-1;
                    thrust::transform(c->h_columns_int[c->type_index[k]].begin(), c->h_columns_int[c->type_index[k]].begin() + c->mRecCount,
                                      c->h_columns_int[c->type_index[countIndex]].begin(), c->h_columns_float[idx].begin(), float_avg1());
                    c->type[k] = 1; // the column is a float column from now on
                    c->h_columns_int[c->type_index[k]].resize(0);
                    c->h_columns_int[c->type_index[k]].shrink_to_fit();
                    c->type_index[k] = idx;
                    c->grp_type[k] = 3;
                }
                else { // float sums: divide in place
                    thrust::transform(c->h_columns_float[c->type_index[k]].begin(), c->h_columns_float[c->type_index[k]].begin() + c->mRecCount,
                                      c->h_columns_int[c->type_index[countIndex]].begin(), c->h_columns_float[c->type_index[k]].begin(), float_avg());
                };
            }
            else if(c->grp_type[k] == 6) {
                // COUNT DISTINCT: implementation (based on distinct_hash) was
                // disabled in the original source; intentionally a no-op.
            };
        };
    };
    c->segCount = 1;
    c->maxRecs = c->mRecCount;
};
| 601c3fe44cb64801e93ea939551924a2cc94dff1.cu | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "merge.h"
// Thrust functor: hashes fixed-length record i of `key` with the 64-bit
// MurmurHash64A algorithm and stores the result at
// output[i * (*off) + (*off_count)] — one strided slot per record, so several
// columns can be hashed into the same interleaved buffer (one for_each pass
// per column, distinguished by *off_count).
struct MurmurHash64D
{
    const void* key;                // base pointer of the packed records to hash
    unsigned long long* output;     // interleaved output buffer (stride *off)
    const int* len;                 // length of one record, in bytes
    const unsigned int* seed;       // hash seed
    const unsigned int* off;        // output stride (1-element device scalar)
    const unsigned int* off_count;  // slot within each stride (1-element device scalar)
    MurmurHash64D(const void* _key, unsigned long long* _output, const int* _len, const unsigned int* _seed,
    const unsigned int* _off, const unsigned int* _off_count):
    key(_key), output(_output), len(_len), seed(_seed), off(_off), off_count(_off_count) {}
    // Hash one record; i comes from thrust::for_each over a counting iterator.
    template <typename IndexType>
    __host__ __device__
    void operator()(const IndexType & i) {
    // MurmurHash64A constants (Austin Appleby's reference implementation).
    const uint64_t m = 0xc6a4a7935bd1e995;
    const int r = 47;
    uint64_t h = *seed ^ (*len * m);
    const uint64_t* data = (const uint64_t *)((char*)key + i*(*len));
    const uint64_t* end = data + (*len/8);
    // Mix whole 8-byte words of the record.
    while(data != end)
    {
    uint64_t k = *data++;
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
    }
    // Fold in the trailing 1-7 bytes; the cases intentionally fall through.
    const unsigned char * data2 = (const unsigned char*)data;
    switch(*len & 7)
    {
    case 7: h ^= uint64_t(data2[6]) << 48;
    case 6: h ^= uint64_t(data2[5]) << 40;
    case 5: h ^= uint64_t(data2[4]) << 32;
    case 4: h ^= uint64_t(data2[3]) << 24;
    case 3: h ^= uint64_t(data2[2]) << 16;
    case 2: h ^= uint64_t(data2[1]) << 8;
    case 1: h ^= uint64_t(data2[0]);
    h *= m;
    };
    // Final avalanche.
    h ^= h >> r;
    h *= m;
    h ^= h >> r;
    //printf("WRITE TO OFFSET %d %d %lld \n", i , i*(*off) + (*off_count), h);
    output[i*(*off) + (*off_count)] = h;
    }
};
// Thrust functor: flat-output variant of MurmurHash64D. Hashes fixed-length
// record i of `key` with MurmurHash64A and writes the digest to output[i]
// (no stride/offset); used to combine the per-column hash tuples into one
// 64-bit group key per row.
struct MurmurHash64D_F
{
    const void* key;             // base pointer of the packed records to hash
    unsigned long long* output;  // flat output array, one digest per record
    const int* len;              // length of one record, in bytes
    const unsigned int* seed;    // hash seed
    MurmurHash64D_F(const void* _key, unsigned long long* _output, const int* _len, const unsigned int* _seed):
    key(_key), output(_output), len(_len), seed(_seed) {}
    // Hash one record; i comes from thrust::for_each over a counting iterator.
    template <typename IndexType>
    __host__ __device__
    void operator()(const IndexType & i) {
    // MurmurHash64A constants (Austin Appleby's reference implementation).
    const uint64_t m = 0xc6a4a7935bd1e995;
    const int r = 47;
    uint64_t h = *seed ^ (*len * m);
    const uint64_t* data = (const uint64_t *)((char*)key + i*(*len));
    const uint64_t* end = data + (*len/8);
    // Mix whole 8-byte words of the record.
    while(data != end)
    {
    uint64_t k = *data++;
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
    }
    // Fold in the trailing 1-7 bytes; the cases intentionally fall through.
    const unsigned char * data2 = (const unsigned char*)data;
    switch(*len & 7)
    {
    case 7: h ^= uint64_t(data2[6]) << 48;
    case 6: h ^= uint64_t(data2[5]) << 40;
    case 5: h ^= uint64_t(data2[4]) << 32;
    case 4: h ^= uint64_t(data2[3]) << 24;
    case 3: h ^= uint64_t(data2[2]) << 16;
    case 2: h ^= uint64_t(data2[1]) << 8;
    case 1: h ^= uint64_t(data2[0]);
    h *= m;
    };
    // Final avalanche.
    h ^= h >> r;
    h *= m;
    h ^= h >> r;
    output[i] = h;
    }
};
// Finalises an AVG aggregate: divides an accumulated float sum by its group's
// row count. Used with thrust::transform over (sum column, count column).
struct float_avg
{
    __host__ float_type operator()(const float_type &accumulated, const int_type &rows) const
    {
        const float_type average = accumulated / rows;
        return average;
    }
};
// Finalises an AVG aggregate over an integer column: converts the accumulated
// integer sum to float before dividing by the group's row count.
struct float_avg1
{
    __host__ float_type operator()(const int_type &accumulated, const int_type &rows) const
    {
        const float_type as_float = (float_type)accumulated;
        return as_float / rows;
    }
};
/*struct float_avg : public binary_function<float_type,int_type,float_type>
{
__host__ __device__ float_type operator()(const float_type &lhs, const int_type &rhs) const {return lhs/(float_type)rhs;}
}; // end not_equal_to
*/
// Iterator helpers for de-duplicating (hash, value) pairs with thrust::unique
// over a zipped pair of int columns.
typedef thrust::device_vector<int_type>::iterator IntIterator;
typedef thrust::tuple<IntIterator,IntIterator> IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
// Seed used for all group-by hashing in this translation unit.
unsigned int hash_seed = 100;
// Running, sorted sequence of group hash keys accumulated across add() calls;
// count_avg() later uses it to delimit the groups.
thrust::host_vector<unsigned long long int> h_merge;
using namespace std;
using namespace thrust::placeholders;
// Initialises the (empty) result set c as an uncompressed, single-segment
// clone of b's schema: column names, per-column metadata, and one empty
// host/device column container per column, matching each column's type.
void create_c(CudaSet* c, CudaSet* b)
{
    c->not_compressed = 1;
    c->segCount = 1;
    // Copy the name -> index mapping.
    for (map<string,int>::iterator it = b->columnNames.begin(); it != b->columnNames.end(); ++it)
        c->columnNames[it->first] = it->second;
    c->grp_type = new unsigned int[c->mColumnCount];
    // Mirror per-column metadata and allocate empty storage of the right kind.
    for (unsigned int col = 0; col < b->mColumnCount; col++) {
        c->cols[col] = b->cols[col];
        c->type[col] = b->type[col];
        c->grp_type[col] = b->grp_type[col];
        switch (b->type[col]) {
        case 0: // integer column
            c->h_columns_int.push_back(thrust::host_vector<int_type, uninitialized_host_allocator<int_type> >());
            c->d_columns_int.push_back(thrust::device_vector<int_type>());
            c->type_index[col] = c->h_columns_int.size()-1;
            break;
        case 1: // float column
            c->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >());
            c->d_columns_float.push_back(thrust::device_vector<float_type>());
            c->type_index[col] = c->h_columns_float.size()-1;
            break;
        default: // char column: buffers are allocated lazily, record width now
            c->h_columns_char.push_back(NULL);
            c->d_columns_char.push_back(NULL);
            c->char_size.push_back(b->char_size[b->type_index[col]]);
            c->type_index[col] = c->h_columns_char.size()-1;
            break;
        }
    }
}
// Merges aggregation batch b into the running result set c:
//  1. hashes b's GROUP BY columns (named by op_v3 via aliases) on the GPU,
//  2. sorts b's rows by the combined group hash,
//  3. merges the sorted batch into c's host columns and appends the keys to
//     the global h_merge sequence (kept globally sorted by the merge).
// The distinct_* vectors and `a` are only used by the disabled COUNT DISTINCT
// code at the bottom and are currently ignored.
void add(CudaSet* c, CudaSet* b, queue<string> op_v3, map<string,string> aliases,
vector<thrust::device_vector<int_type> >& distinct_tmp, vector<thrust::device_vector<int_type> >& distinct_val,
vector<thrust::device_vector<int_type> >& distinct_hash, CudaSet* a)
{
    if (c->columnNames.empty()) {
        // first batch: create d_columns and h_columns of c from b's schema
        create_c(c,b);
    }
    //cout << endl << "start b and c " << b->mRecCount << " " << c->mRecCount << endl;
    // Resolve the group-by column names (through aliases) to column indexes in b.
    unsigned int cycle_sz = op_v3.size();
    vector<unsigned int> opv;
    queue<string> ss;   // resolved names; retained but not used further here
    for(unsigned int z = 0; z < cycle_sz; z++) {
        opv.push_back(b->columnNames[aliases[op_v3.front()]]);
        ss.push(aliases[op_v3.front()]);
        op_v3.pop();
    };
    // create hashes of groupby columns: per row, hash each group-by column into
    // the interleaved buffer `sum` (stride = number of group-by columns).
    thrust::device_vector<unsigned long long int> hashes(b->mRecCount);
    unsigned int idx;
    thrust::device_vector<unsigned long long int> sum(cycle_sz*b->mRecCount);
    // Scalar hashing parameters live in 1-element device vectors so the
    // functors can read them through raw device pointers.
    thrust::device_vector<unsigned int> seed(1);
    seed[0] = hash_seed;
    thrust::device_vector<int> len(1);
    thrust::device_vector<unsigned int> off(1);
    thrust::device_vector<unsigned int> off_count(1);
    thrust::counting_iterator<unsigned int> begin(0);
    for(unsigned int z = 0; z < cycle_sz; z++) {
        idx = opv[z];
        if(b->type[idx] == 0) { //int: hash 8-byte values
            len[0] = 8;
            off[0] = cycle_sz;
            off_count[0] = z;
            MurmurHash64D ff((void*)(thrust::raw_pointer_cast(b->d_columns_int[b->type_index[idx]].data())),
            thrust::raw_pointer_cast(sum.data()),
            thrust::raw_pointer_cast(len.data()), thrust::raw_pointer_cast(seed.data()),
            thrust::raw_pointer_cast(off.data()), thrust::raw_pointer_cast(off_count.data()));
            thrust::for_each(begin, begin + b->mRecCount, ff);
        }
        else if(b->type[idx] == 2) { //string: hash fixed-width char records
            len[0] = b->char_size[b->type_index[idx]];
            off[0] = cycle_sz;
            off_count[0] = z;
            MurmurHash64D ff((void*)b->d_columns_char[b->type_index[idx]],
            thrust::raw_pointer_cast(sum.data()),
            thrust::raw_pointer_cast(len.data()), thrust::raw_pointer_cast(seed.data()),
            thrust::raw_pointer_cast(off.data()), thrust::raw_pointer_cast(off_count.data()));
            thrust::for_each(begin, begin + b->mRecCount, ff);
        }
        else { //float: grouping on float/decimal is not supported
            cout << "No group by on float/decimal columns " << endl;
            exit(0);
        };
    };
    //for(int i = 0; i < cycle_sz*b->mRecCount;i++)
    //cout << "SUM " << sum[i] << endl;
    // Combine each row's tuple of column hashes into one 64-bit group key.
    len[0] = 8*cycle_sz;
    MurmurHash64D_F ff(thrust::raw_pointer_cast(sum.data()),
    thrust::raw_pointer_cast(hashes.data()),
    thrust::raw_pointer_cast(len.data()), thrust::raw_pointer_cast(seed.data()));
    thrust::for_each(begin, begin + b->mRecCount, ff);
    //for(int i = 0; i < b->mRecCount;i++)
    //cout << "DEV HASH " << hashes[i] << endl;
    // sort the results by hash: build a row permutation v, sort it by key,
    // then gather every column of b through it via the scratch buffer d.
    thrust::device_ptr<unsigned int> v = thrust::device_malloc<unsigned int>(b->mRecCount);
    thrust::sequence(v, v + b->mRecCount, 0, 1);
    // Scratch must hold the widest column; at least 8 bytes per row so it can
    // also hold int_type/float_type elements.
    unsigned int max_c = max_char(b);
    if(max_c < 8) {
        max_c = 8;
    };
    void* d;
    CUDA_SAFE_CALL(cudaMalloc((void **) &d, b->mRecCount*max_c));
    thrust::sort_by_key(hashes.begin(), hashes.end(), v);
    for(unsigned int i = 0; i < b->mColumnCount; i++) {
        if(b->type[i] == 0) {
            thrust::device_ptr<int_type> d_tmp((int_type*)d);
            thrust::gather(v, v+b->mRecCount, b->d_columns_int[b->type_index[i]].begin(), d_tmp);
            thrust::copy(d_tmp, d_tmp + b->mRecCount, b->d_columns_int[b->type_index[i]].begin());
        }
        else if(b->type[i] == 1) {
            thrust::device_ptr<float_type> d_tmp((float_type*)d);
            thrust::gather(v, v+b->mRecCount, b->d_columns_float[b->type_index[i]].begin(), d_tmp);
            thrust::copy(d_tmp, d_tmp + b->mRecCount, b->d_columns_float[b->type_index[i]].begin());
        }
        else {
            thrust::device_ptr<char> d_tmp((char*)d);
            str_gather(thrust::raw_pointer_cast(v), b->mRecCount, (void*)b->d_columns_char[b->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), b->char_size[b->type_index[i]]);
            cudaMemcpy( (void*)b->d_columns_char[b->type_index[i]], (void*) thrust::raw_pointer_cast(d_tmp), b->mRecCount*b->char_size[b->type_index[i]], cudaMemcpyDeviceToDevice);
        };
    };
    cudaFree(d);
    thrust::device_free(v);
    // The merge itself happens on the host from here on.
    b->CopyToHost(0, b->mRecCount);
    thrust::host_vector<unsigned long long int> hh = hashes;
    // Host scratch large enough for the widest column over the merged row count.
    char* tmp = new char[max_c*(c->mRecCount + b->mRecCount)];
    // Presumably grows c by b->mRecCount rows — the copies below write
    // h_merge.size() + b->mRecCount entries per column. TODO confirm against
    // CudaSet::resize.
    c->resize(b->mRecCount);
    //lets merge every column of the sorted batch into c, ordered by hash key
    for(unsigned int i = 0; i < b->mColumnCount; i++) {
        if(b->type[i] == 0) {
            thrust::merge_by_key(h_merge.begin(), h_merge.end(),
            hh.begin(), hh.end(),
            c->h_columns_int[c->type_index[i]].begin(), b->h_columns_int[b->type_index[i]].begin(),
            thrust::make_discard_iterator(), (int_type*)tmp);
            thrust::copy((int_type*)tmp, (int_type*)tmp + h_merge.size() + b->mRecCount, c->h_columns_int[c->type_index[i]].begin());
        }
        else if(b->type[i] == 1) {
            thrust::merge_by_key(h_merge.begin(), h_merge.end(),
            hh.begin(), hh.end(),
            c->h_columns_float[c->type_index[i]].begin(), b->h_columns_float[b->type_index[i]].begin(),
            thrust::make_discard_iterator(), (float_type*)tmp);
            thrust::copy((float_type*)tmp, (float_type*)tmp + h_merge.size() + b->mRecCount, c->h_columns_float[c->type_index[i]].begin());
        }
        else {
            str_merge_by_key(h_merge, hh, c->h_columns_char[c->type_index[i]], b->h_columns_char[b->type_index[i]], b->char_size[b->type_index[i]], tmp);
            thrust::copy(tmp, tmp + (h_merge.size() + b->mRecCount)*b->char_size[b->type_index[i]], c->h_columns_char[c->type_index[i]]);
        };
    };
    //merge the keys themselves and append the result to the global h_merge
    thrust::merge(h_merge.begin(), h_merge.end(),
    hh.begin(), hh.end(), (unsigned long long int*)tmp);
    unsigned int cpy_sz = h_merge.size() + b->mRecCount;
    h_merge.resize(h_merge.size() + b->mRecCount);
    thrust::copy((unsigned long long int*)tmp, (unsigned long long int*)tmp + cpy_sz, h_merge.begin());
    delete [] tmp;
    //cout << endl << "end b and c " << b->mRecCount << " " << c->mRecCount << endl;
    //for(int i = 0; i < h_merge.size();i++)
    //cout << "H " << h_merge[i] << endl;
    // Disabled COUNT DISTINCT accumulation, kept verbatim for reference:
    /* bool dis_exists = 0;
    for(unsigned int j=0; j < c->mColumnCount; j++) {
    if (c->grp_type[j] == 6)
    dis_exists = 1;
    };
    if (dis_exists) {
    bool grp_scanned = 0;
    thrust::device_ptr<bool> d_di(a->grp);
    thrust::device_ptr<unsigned int> d_dii = thrust::device_malloc<unsigned int>(a->mRecCount);
    thrust::identity<bool> op;
    thrust::transform(d_di, d_di+a->mRecCount, d_dii, op);
    thrust::device_ptr<int_type> tmp = thrust::device_malloc<int_type>(a->mRecCount);
    unsigned int dist_count = 0;
    for(unsigned int j=0; j < c->mColumnCount; j++) {
    if (c->grp_type[j] == 6) {
    if(!grp_scanned) {
    d_dii[a->mRecCount-1] = 0;
    thrust::inclusive_scan(d_dii, d_dii + a->mRecCount, d_dii);
    thrust::gather(d_dii, d_dii + a->mRecCount, hashes.begin(), tmp); // now hashes are in tmp
    grp_scanned = 1;
    };
    unsigned int offset = distinct_val[dist_count].size();
    distinct_val[dist_count].resize(distinct_val[dist_count].size() + a->mRecCount);
    distinct_hash[dist_count].resize(distinct_hash[dist_count].size() + a->mRecCount);
    thrust::copy(distinct_tmp[dist_count].begin(), distinct_tmp[dist_count].begin() + a->mRecCount, distinct_val[dist_count].begin() + offset);
    thrust::copy(tmp, tmp + a->mRecCount, distinct_hash[dist_count].begin() + offset);
    thrust::stable_sort_by_key(distinct_val[dist_count].begin(), distinct_val[dist_count].end(), distinct_hash[dist_count].begin());
    thrust::stable_sort_by_key(distinct_hash[dist_count].begin(), distinct_hash[dist_count].end(), distinct_val[dist_count].begin());
    ZipIterator new_last = thrust::unique(thrust::make_zip_iterator(thrust::make_tuple(distinct_hash[dist_count].begin(), distinct_val[dist_count].begin())),
    thrust::make_zip_iterator(thrust::make_tuple(distinct_hash[dist_count].end(), distinct_val[dist_count].end())));
    IteratorTuple t = new_last.get_iterator_tuple();
    distinct_val[dist_count].resize(thrust::get<0>(t) - distinct_hash[dist_count].begin());
    distinct_hash[dist_count].resize(thrust::get<0>(t) - distinct_hash[dist_count].begin());
    dist_count++;
    };
    };
    thrust::device_free(tmp);
    thrust::device_free(d_dii);
    };
    */
}
// Collapses a grouped query with no GROUP BY clause to a single record:
// COUNT and SUM columns are reduced over all rows, and AVG columns are
// reduced and divided by the total row count.
void count_simple(CudaSet* c)
{
    // Total row count taken from the COUNT column; stays 0 when the query has
    // no COUNT column (previously `count` was read uninitialised in that case).
    int_type count = 0;
    for(unsigned int i = 0; i < c->mColumnCount; i++) {
        if(c->grp_type[i] == 0) { // COUNT
            count = thrust::reduce(c->h_columns_int[c->type_index[i]].begin(), c->h_columns_int[c->type_index[i]].begin() + c->mRecCount);
            c->h_columns_int[c->type_index[i]][0] = count;
        };
    };
    if (c->mRecCount != 0) {
        for(unsigned int k = 0; k < c->mColumnCount; k++) {
            if(c->grp_type[k] == 1) { // AVG = SUM / COUNT
                if(c->type[k] == 0) {
                    int_type sum = thrust::reduce(c->h_columns_int[c->type_index[k]].begin(), c->h_columns_int[c->type_index[k]].begin() + c->mRecCount);
                    // Guard: integer division by a zero count would trap.
                    c->h_columns_int[c->type_index[k]][0] = (count != 0) ? sum/count : 0;
                }
                if(c->type[k] == 1) {
                    float_type sum = thrust::reduce(c->h_columns_float[c->type_index[k]].begin(), c->h_columns_float[c->type_index[k]].begin() + c->mRecCount);
                    c->h_columns_float[c->type_index[k]][0] = (count != 0) ? sum/count : 0;
                };
            }
            else if(c->grp_type[k] == 2) { // SUM
                if(c->type[k] == 0) {
                    int_type sum = thrust::reduce(c->h_columns_int[c->type_index[k]].begin(), c->h_columns_int[c->type_index[k]].begin() + c->mRecCount);
                    c->h_columns_int[c->type_index[k]][0] = sum;
                }
                if(c->type[k] == 1) {
                    float_type sum = thrust::reduce(c->h_columns_float[c->type_index[k]].begin(), c->h_columns_float[c->type_index[k]].begin() + c->mRecCount);
                    c->h_columns_float[c->type_index[k]][0] = sum;
                };
            };
        };
    }
    // The result set now holds exactly one record (slot 0 of every column).
    c->mRecCount = 1;
};
// ---- helpers for count_avg -------------------------------------------------
// Key-equality and reduction functors for the host-side reduce_by_key calls
// below; defined locally so no additional thrust headers are required.
struct grp_hash_eq {
    bool operator()(const unsigned long long int &a, const unsigned long long int &b) const { return a == b; }
};
template<typename T> struct grp_sum {
    T operator()(const T &a, const T &b) const { return a + b; }
};
template<typename T> struct grp_min {
    T operator()(const T &a, const T &b) const { return a < b ? a : b; }
};
template<typename T> struct grp_max {
    T operator()(const T &a, const T &b) const { return a > b ? a : b; }
};
// Collapses int column k of c to one value per group (groups given by the
// sorted hash keys in h_merge), combining group members with `op`, then
// shrinks the column to res_count entries.
template<typename BinaryOp>
static void reduce_int_by_group(CudaSet* c, unsigned int k, unsigned int res_count, BinaryOp op)
{
    int_type* tmp = new int_type[res_count];
    thrust::reduce_by_key(h_merge.begin(), h_merge.end(),
                          c->h_columns_int[c->type_index[k]].begin(),
                          thrust::make_discard_iterator(), tmp,
                          grp_hash_eq(), op);
    c->h_columns_int[c->type_index[k]].resize(res_count);
    thrust::copy(tmp, tmp + res_count, c->h_columns_int[c->type_index[k]].begin());
    delete [] tmp;
}
// Float-column counterpart of reduce_int_by_group.
template<typename BinaryOp>
static void reduce_float_by_group(CudaSet* c, unsigned int k, unsigned int res_count, BinaryOp op)
{
    float_type* tmp = new float_type[res_count];
    thrust::reduce_by_key(h_merge.begin(), h_merge.end(),
                          c->h_columns_float[c->type_index[k]].begin(),
                          thrust::make_discard_iterator(), tmp,
                          grp_hash_eq(), op);
    c->h_columns_float[c->type_index[k]].resize(res_count);
    thrust::copy(tmp, tmp + res_count, c->h_columns_float[c->type_index[k]].begin());
    delete [] tmp;
}
// Final host-side pass of a GROUP BY: collapses every aggregated column of c
// to one entry per group (group boundaries come from the merged, sorted hash
// keys in h_merge) and turns accumulated AVG sums into sum/count.
// The distinct_hash parameter belongs to the disabled COUNT DISTINCT path and
// is currently unused; it is kept so callers are unaffected.
// BUG FIX: MIN (grp_type 4) and MAX (grp_type 5) columns were previously
// reduced with reduce_by_key's default plus operator, i.e. they produced
// per-group sums; they now use proper minimum/maximum reductions.
void count_avg(CudaSet* c, vector<thrust::device_vector<int_type> >& distinct_hash)
{
    // Locate the COUNT column; AVG columns are divided by it below. -1 means
    // "no COUNT column" (previously this index was read uninitialised then).
    int countIndex = -1;
    for(unsigned int i = 0; i < c->mColumnCount; i++) {
        if(c->grp_type[i] == 0) { // COUNT
            countIndex = i;
            break;
        };
    };
    // grp[i] != 0 marks the first row of each group in the sorted hash key
    // sequence; res_count is the number of distinct groups.
    thrust::host_vector<bool> grp;
    unsigned int res_count = 0;
    if(h_merge.size()) {
        grp.resize(h_merge.size());
        thrust::adjacent_difference(h_merge.begin(), h_merge.end(), grp.begin());
        res_count = h_merge.size() - thrust::count(grp.begin(), grp.end(), 0);
    };
    if (c->mRecCount != 0) {
        if (h_merge.size()) {
            // First pass: collapse every column to one entry per group.
            for(unsigned int k = 0; k < c->mColumnCount; k++) {
                if(c->grp_type[k] <= 2) { // COUNT, AVG and SUM all accumulate with +
                    if (c->type[k] == 0)
                        reduce_int_by_group(c, k, res_count, grp_sum<int_type>());
                    else if (c->type[k] == 1)
                        reduce_float_by_group(c, k, res_count, grp_sum<float_type>());
                }
                else if(c->grp_type[k] == 4) { // MIN
                    if (c->type[k] == 0)
                        reduce_int_by_group(c, k, res_count, grp_min<int_type>());
                    else if (c->type[k] == 1)
                        reduce_float_by_group(c, k, res_count, grp_min<float_type>());
                }
                else if(c->grp_type[k] == 5) { // MAX
                    if (c->type[k] == 0)
                        reduce_int_by_group(c, k, res_count, grp_max<int_type>());
                    else if (c->type[k] == 1)
                        reduce_float_by_group(c, k, res_count, grp_max<float_type>());
                }
                else if(c->grp_type[k] == 3) { // plain group-by column, no aggregate
                    // NOTE(review): numeric group-by columns are still combined
                    // with + (exactly as in the original code); the char branch
                    // below instead keeps the first row of each group. Confirm
                    // which behaviour is intended before changing this.
                    if (c->type[k] == 0)
                        reduce_int_by_group(c, k, res_count, grp_sum<int_type>());
                    else if (c->type[k] == 1)
                        reduce_float_by_group(c, k, res_count, grp_sum<float_type>());
                    else { // char: keep the group-leading row selected by grp
                        char* tmp = new char[res_count*c->char_size[c->type_index[k]]];
                        str_copy_if_host(c->h_columns_char[c->type_index[k]], c->mRecCount, tmp, grp, c->char_size[c->type_index[k]]);
                        thrust::copy(tmp, tmp + c->char_size[c->type_index[k]]*res_count, c->h_columns_char[c->type_index[k]]);
                        delete [] tmp;
                    };
                };
            };
            c->mRecCount = res_count;
        };
        // Second pass: finalise AVG columns (sum / count).
        for(unsigned int k = 0; k < c->mColumnCount; k++) {
            if(c->grp_type[k] == 1 && countIndex != -1) { // AVG
                if (c->type[k] == 0 ) { // int sums: materialise a new float column
                    c->h_columns_float.push_back(thrust::host_vector<float_type, uninitialized_host_allocator<float_type> >(c->mRecCount));
                    unsigned int idx = c->h_columns_float.size()-1;
                    thrust::transform(c->h_columns_int[c->type_index[k]].begin(), c->h_columns_int[c->type_index[k]].begin() + c->mRecCount,
                                      c->h_columns_int[c->type_index[countIndex]].begin(), c->h_columns_float[idx].begin(), float_avg1());
                    c->type[k] = 1; // the column is a float column from now on
                    c->h_columns_int[c->type_index[k]].resize(0);
                    c->h_columns_int[c->type_index[k]].shrink_to_fit();
                    c->type_index[k] = idx;
                    c->grp_type[k] = 3;
                }
                else { // float sums: divide in place
                    thrust::transform(c->h_columns_float[c->type_index[k]].begin(), c->h_columns_float[c->type_index[k]].begin() + c->mRecCount,
                                      c->h_columns_int[c->type_index[countIndex]].begin(), c->h_columns_float[c->type_index[k]].begin(), float_avg());
                };
            }
            else if(c->grp_type[k] == 6) {
                // COUNT DISTINCT: implementation (based on distinct_hash) was
                // disabled in the original source; intentionally a no-op.
            };
        };
    };
    c->segCount = 1;
    c->maxRecs = c->mRecCount;
};
|
778754190bcc9ddd541d0355a7ed5e654bb0a39d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2009 NVIDIA Corporation. All rights reserved.
NOTICE TO LICENSEE:
This source code and/or documentation ("Licensed Deliverables") are subject
to NVIDIA intellectual property rights under U.S. and international Copyright
laws.
These Licensed Deliverables contained herein is PROPRIETARY and CONFIDENTIAL
to NVIDIA and is being provided under the terms and conditions of a form of
NVIDIA software license agreement by and between NVIDIA and Licensee ("License
Agreement") or electronically accepted by Licensee. Notwithstanding any terms
or conditions to the contrary in the License Agreement, reproduction or
disclosure of the Licensed Deliverables to any third party without the express
written consent of NVIDIA is prohibited.
NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT,
NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THESE LICENSED
DELIVERABLES FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED
WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE
LICENSED DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. NOTWITHSTANDING ANY
TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT, IN NO EVENT SHALL
NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THESE LICENSED DELIVERABLES.
U.S. Government End Users. These Licensed Deliverables are a "commercial item"
as that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
"commercial computer software" and "commercial computer software documentation"
as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) and is provided to the
U.S. Government only as a commercial end item. Consistent with 48 C.F.R.12.212
and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all U.S. Government
End Users acquire the Licensed Deliverables with only those rights set forth
herein.
Any use of the Licensed Deliverables in individual and commercial software must
include, in the user documentation and internal comments to the code, the above
Disclaimer and U.S. Government End Users Notice.
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a hipDeviceSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.hip"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf_hip.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_20_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
// Per-launch output filter: when a field is not CUPRINTF_UNRESTRICTED, only
// the matching thread/block is allowed to emit printf output.
typedef struct __align__(8) {
    int threadid;               // CUPRINTF_UNRESTRICTED for unrestricted
    int blockid;                // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
// Device-side state: buffer configuration (set up by the host-side API) and
// the shared write cursor into the circular output buffer.
__constant__ static char *globalPrintfBuffer = NULL;         // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0;              // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules;         // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL;     // Current atomically-incremented non-wrapped offset
// This is the header preceding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
// Per-record header stored in front of each printf entry in the buffer.
typedef struct __align__(8) {
    unsigned short magic;       // Magic number says we're valid
    unsigned short fmtoffset;   // Offset of fmt string into buffer
    unsigned short blockid;     // Block ID of author
    unsigned short threadid;    // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
// sm_10 has no atomics, so the buffer is statically partitioned per thread;
// each partition begins with one of these headers.
typedef struct __align__(16) {
    unsigned short magic;           // sm_10 specific magic number
    unsigned short unused;          // padding to keep the struct aligned
    unsigned int thread_index;      // thread ID for this buffer
    unsigned int thread_buf_len;    // per-thread buffer length
    unsigned int offset;            // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own, or NULL if the buffer
// is uninitialised or this thread's output is restricted away.
//
__device__ static char *getNextPrintfBufPtr()
{
	// Initialisation check: pointer stays NULL until cudaPrintfInit() runs
	if(!printfBufferPtr)
		return NULL;

	// Thread/block restriction check (rules set by cuPrintfRestrict())
	if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
		return NULL;
	if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
		return NULL;

	// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
	// For sm_10 architectures, we have no atomic add - this means we must split the
	// entire available buffer into per-thread blocks. Inefficient, but what can you do.
	int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
	int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
	                   (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);

	// Find our own block of data and go to it. Make sure the per-thread length
	// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
	// alignment issues! We must round down, of course.
	// NOTE(review): the mask below assumes CUPRINTF_MAX_LEN is a power of two.
	unsigned int thread_buf_len = printfBufferLength / thread_count;
	thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);

	// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
	if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
		return NULL;

	// Now address our section of the buffer. The first item is a header.
	char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
	cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
	if(hdr.magic != CUPRINTF_SM10_MAGIC)
	{
		// If our header is not set up, initialise it
		hdr.magic = CUPRINTF_SM10_MAGIC;
		hdr.thread_index = thread_index;
		hdr.thread_buf_len = thread_buf_len;
		hdr.offset = 0;         // Note we start at 0! We pre-increment below.
		*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr;    // Write back the header

		// For initial setup purposes, we might need to init thread0's header too
		// (so that cudaPrintfDisplay() below will work). This is only run once.
		cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
		tophdr->thread_buf_len = thread_buf_len;
	}

	// Adjust the offset by the right amount, and wrap it if need be
	unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
	if(offset >= hdr.thread_buf_len)
		offset = CUPRINTF_MAX_LEN;

	// Write back the new offset for next time and return a pointer to it
	((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
	return myPrintfBuffer + offset;
#else
	// Much easier with an atomic operation!
	// NOTE(review): this 32-bit atomicAdd updates only the low word of a
	// 64-bit pointer on 64-bit builds - it only works while the running
	// offset never carries into the upper 32 bits. Verify on 64-bit targets.
	size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
	offset %= printfBufferLength;
	return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Stamps the per-record header (magic number, offset of the format string
// within the record, and the author's block/thread IDs) onto a record.
// The header is generated dynamically - not at init time - so no
// pre-initialisation of records is needed. A NULL ptr (uninitialised
// buffer or restricted output) is silently ignored.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
	if(!ptr)
		return;

	cuPrintfHeader hdr;
	hdr.magic = CUPRINTF_SM11_MAGIC;
	hdr.fmtoffset = (unsigned short)(fmtptr - ptr);
	hdr.blockid = blockIdx.x + gridDim.x*blockIdx.y;
	hdr.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
	*(cuPrintfHeader *)(void *)ptr = hdr;
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
	// Initialisation and overflow check
	if(!dest || !src || (dest >= end))
		return NULL;

	// Prepare to write the length specifier. We're guaranteed to have
	// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
	// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
	int *lenptr = (int *)(void *)dest;
	int len = 0;
	dest += CUPRINTF_ALIGN_SIZE;	// Data follows the length word

	// Now copy the string, stopping at '\0', the n-char limit, or "end"
	while(n--)
	{
		if(dest >= end)		// Overflow check
			break;

		len++;
		*dest++ = *src;
		if(*src++ == '\0')
			break;
	}

	// Now write out the padding bytes, and we have our length.
	// ((long)dest & ...) tests only the low address bits for alignment.
	while((dest < end) && (((long)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
	{
		len++;
		*dest++ = 0;
	}
	*lenptr = len;		// len counts string bytes (incl. '\0') plus padding

	return (dest < end) ? dest : NULL;        // Overflow means return NULL
}
//
// copyArg
//
// Copies one argument into the output record as a length word followed by
// the argument's bytes. This (const char *) overload treats its argument
// as a string and stores it inline (length-prefixed, zero-padded); the
// templated overload below stores the raw value of the variable instead.
// Returns a pointer just past the entry, or NULL on overflow or when the
// buffer was never initialised.
//
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
	// NULL propagation keeps all uninitialised-buffer handling in one place
	if(!ptr || !arg)
		return NULL;

	// cuPrintfStrncpy does the length-prefixed, padded copy for us; we
	// only need to terminate the entry afterwards.
	char *next = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end);
	if(next != NULL)
		*next = 0;
	return next;
}
// Templated overload of copyArg: stores sizeof(T) (so the host display can
// distinguish float from double) followed by the raw bytes of the argument.
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
	// Initialisation and overflow check. Alignment rules mean that
	// we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need
	// to check that one offset.
	if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end))
		return NULL;

	// Write the length and argument
	*(int *)(void *)ptr = sizeof(arg);
	ptr += CUPRINTF_ALIGN_SIZE;
	// NOTE(review): assumes sizeof(T) <= CUPRINTF_ALIGN_SIZE; a larger type
	// would overrun its slot - confirm no such T is ever passed.
	*(T *)(void *)ptr = arg;
	ptr += CUPRINTF_ALIGN_SIZE;
	*ptr = 0;		// Terminate the entry

	return ptr;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.

// Claim a record, compute its end, and step past the header slot; bail
// out (returning 0) when the buffer is uninitialised or output from this
// thread is restricted away.
#define CUPRINTF_PREAMBLE \
	char *start, *end, *bufptr, *fmtstart; \
	if((start = getNextPrintfBufPtr()) == NULL) return 0; \
	end = start + CUPRINTF_MAX_LEN; \
	bufptr = start + sizeof(cuPrintfHeader);

// Posting an argument is easy; copyArg() NULL-propagates on overflow
#define CUPRINTF_ARG(argname) \
	bufptr = copyArg(bufptr, argname, end);

// After args are done, record start-of-fmt and write the fmt and header.
// The header goes in last so the host never sees a half-written record.
#define CUPRINTF_POSTAMBLE \
	fmtstart = bufptr; \
	end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
	writePrintfHeader(start, end ? fmtstart : NULL); \
	return end ? (int)(end - start) : 0;
// The zero-argument base case, plus templated overloads for one to ten
// arguments. Each variant posts its arguments, then the format string,
// then (via the postamble) the header that publishes the record.
__device__ int cuPrintf(const char *fmt)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);
	CUPRINTF_ARG(arg5);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);
	CUPRINTF_ARG(arg5);
	CUPRINTF_ARG(arg6);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);
	CUPRINTF_ARG(arg5);
	CUPRINTF_ARG(arg6);
	CUPRINTF_ARG(arg7);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);
	CUPRINTF_ARG(arg5);
	CUPRINTF_ARG(arg6);
	CUPRINTF_ARG(arg7);
	CUPRINTF_ARG(arg8);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);
	CUPRINTF_ARG(arg5);
	CUPRINTF_ARG(arg6);
	CUPRINTF_ARG(arg7);
	CUPRINTF_ARG(arg8);
	CUPRINTF_ARG(arg9);

	CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
	CUPRINTF_PREAMBLE;

	CUPRINTF_ARG(arg1);
	CUPRINTF_ARG(arg2);
	CUPRINTF_ARG(arg3);
	CUPRINTF_ARG(arg4);
	CUPRINTF_ARG(arg5);
	CUPRINTF_ARG(arg6);
	CUPRINTF_ARG(arg7);
	CUPRINTF_ARG(arg8);
	CUPRINTF_ARG(arg9);
	CUPRINTF_ARG(arg10);

	CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Restricts printf output to a single thread ID and/or block ID, or lifts
// a restriction when CUPRINTF_UNRESTRICTED is passed. The rule is stored
// in "restrictRules" (initialised host-side at init time), which means a
// restriction persists across kernel invocations until explicitly changed
// - not the cleanest scheme, but consistent with the output-pointer
// continuity between launches.
//
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
	// Only accept IDs that are valid for the current launch configuration
	// (or the special "unrestricted" marker); anything else is ignored.
	int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
	int blocks_per_grid = gridDim.x * gridDim.y;

	bool thread_ok = (threadid == CUPRINTF_UNRESTRICTED) ||
	                 ((threadid >= 0) && (threadid < threads_per_block));
	if(thread_ok)
		restrictRules.threadid = threadid;

	bool block_ok = (blockid == CUPRINTF_UNRESTRICTED) ||
	                ((blockid >= 0) && (blockid < blocks_per_grid));
	if(block_ok)
		restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp;			// Destination stream used by the display functions
static char *printfbuf_start=NULL;	// Host's record of the circular-buffer read position
static char *printfbuf_device=NULL;	// Device allocation backing the printf buffer
static int printfbuf_len=0;		// Length of the device buffer, in bytes
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the format string and a pointer to where the argument data
// is held. Buffer layout (all offsets CUPRINTF_ALIGN_SIZE-aligned, as
// written by copyArg()/cuPrintfStrncpy() on the device):
//   fmt  -> [length][format string + padding]
//   data -> [length][arg][length][arg]...   one entry per device argument
//
// Note "%%" must NOT consume an argument entry: the device side never
// wrote one for it, so consuming a slot here would shift every later
// argument out of position. Similarly, a "%s" argument occupies its full
// padded length (not a single aligned slot), so we advance by its length.
//
// Returns 0 on failure (corrupt buffer), 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
	// Format string is prefixed by a length that we don't need
	fmt += CUPRINTF_ALIGN_SIZE;

	// Walk the string, printing literal text directly and handing each
	// "%..." conversion to host printf individually.
	char *p = strchr(fmt, '%');
	while(p != NULL)
	{
		// Print up to the % character
		*p = '\0';
		fputs(fmt, printf_fp);
		*p = '%';			// Put back the %

		// Find the conversion character terminating this specifier
		char *format = p++;		// Points to the '%'
		p += strcspn(p, "%cdiouxXeEfgGaAnps");
		if(*p == '\0')			// No conversion char - print the rest verbatim
		{
			fmt = format;
			break;
		}

		// Temporarily cut the string just after the specifier so the
		// specifier alone can be passed to printf.
		char specifier = *p++;
		char c = *p;			// Store for later restoration
		*p = '\0';

		if(specifier == '%')
		{
			// "%%" takes no argument - print a literal '%' and leave
			// the argument buffer untouched.
			fputs("%", printf_fp);
		}
		else
		{
			// Every other conversion consumed exactly one argument on
			// the device side: a length word, then the argument data.
			int arglen = *(int *)data;
			if(arglen > CUPRINTF_MAX_LEN)
			{
				fputs("Corrupt printf buffer data - aborting\n", printf_fp);
				return 0;
			}
			data += CUPRINTF_ALIGN_SIZE;	// Skip the length word

			switch(specifier)
			{
				// These all take integer arguments
				case 'c':
				case 'd':
				case 'i':
				case 'o':
				case 'u':
				case 'x':
				case 'X':
				case 'p':
					fprintf(printf_fp, format, *((int *)data));
					data += CUPRINTF_ALIGN_SIZE;
					break;

				// These all take floating-point arguments; the recorded
				// length distinguishes float from double.
				case 'e':
				case 'E':
				case 'f':
				case 'g':
				case 'G':
				case 'a':
				case 'A':
					if(arglen == 4)		// Float vs. Double thing
						fprintf(printf_fp, format, *((float *)data));
					else
						fprintf(printf_fp, format, *((double *)data));
					data += CUPRINTF_ALIGN_SIZE;
					break;

				// Strings are stored inline; advance by their full
				// (padded) recorded length, not one aligned slot.
				case 's':
					fprintf(printf_fp, format, (char *)data);
					data += arglen;
					break;

				// Everything else (e.g. "%n") is just printed out as-is
				default:
					fprintf(printf_fp, "%s", format);
					data += CUPRINTF_ALIGN_SIZE;
					break;
			}
		}

		*p = c;				// Restore what we removed
		fmt = p;			// Adjust fmt string to be past the specifier
		p = strchr(fmt, '%');		// and get the next specifier
	}

	// Print out the last of the string
	fputs(fmt, printf_fp);
	return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
// Arguments:
//	headings         - if true, prefix each record with "[block, thread]: "
//	clear            - if true, zero each record on the device after printing
//	bufstart/bufend  - bounds of the circular buffer, used for wrapping
//	bufptr           - first record to print
//	endptr           - one past the last record to print
//
// Returns the number of records printed.
//
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
	// Grab, piece-by-piece, each output element until we catch
	// up with the circular buffer end pointer
	int printf_count=0;
	char printfbuf_local[CUPRINTF_MAX_LEN+1];
	printfbuf_local[CUPRINTF_MAX_LEN] = '\0';	// Guard terminator for runaway strings

	while(bufptr != endptr)
	{
		// Wrap ourselves at the end-of-buffer
		if(bufptr == bufend)
			bufptr = bufstart;

		// Adjust our start pointer to within the circular buffer and copy a block.
		// NOTE(review): one hipMemcpy per record is slow - fine for a debug
		// aid, but keep this off performance-sensitive paths.
		hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost);

		// If the magic number isn't valid, then this write hasn't gone through
		// yet and we'll wait until it does (or we're past the end for non-async printfs).
		cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
		if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
		{
			//fprintf(printf_fp, "Bad magic number in printf header\n");
			break;
		}

		// Extract all the info and get this printf done
		if(headings)
			fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
		if(hdr->fmtoffset == 0)
			fprintf(printf_fp, "printf buffer overflow\n");
		else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
			break;
		printf_count++;

		// Clear if asked
		if(clear)
			hipMemset(bufptr, 0, CUPRINTF_MAX_LEN);

		// Now advance our start location, because we're done, and keep copying
		bufptr += CUPRINTF_MAX_LEN;
	}

	return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it (via cudaPrintfEnd()).
//
// Returns hipErrorInitializationError if the device allocation fails,
// hipSuccess otherwise.
//
extern "C" hipError_t cudaPrintfInit(size_t bufferLen)
{
	// Round bufferLen up to a non-zero multiple of CUPRINTF_MAX_LEN,
	// because getNextPrintfBufPtr() always claims blocks of exactly that size.
	if(bufferLen < CUPRINTF_MAX_LEN)
		bufferLen = CUPRINTF_MAX_LEN;
	if((bufferLen % CUPRINTF_MAX_LEN) > 0)
		bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN));
	printfbuf_len = (int)bufferLen;

	// Allocate a print buffer on the device and zero it. Reset the pointer
	// first so a failed allocation leaves clean state behind.
	printfbuf_device = NULL;
	if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess)
		return hipErrorInitializationError;
	hipMemset(printfbuf_device, 0, printfbuf_len);
	printfbuf_start = printfbuf_device;		// Where we start reading from

	// No restrictions to begin with. (The local is not called "restrict"
	// because that is a reserved keyword in C99 and some C++ dialects.)
	cuPrintfRestriction rules;
	rules.threadid = rules.blockid = CUPRINTF_UNRESTRICTED;
	hipMemcpyToSymbol(HIP_SYMBOL(restrictRules), &rules, sizeof(rules));

	// Initialise the buffer and the respective lengths/pointers.
	// HIP_SYMBOL() is HIP's portable way of naming a __device__/__constant__
	// symbol in a runtime-API call.
	hipMemcpyToSymbol(HIP_SYMBOL(globalPrintfBuffer), &printfbuf_device, sizeof(char *));
	hipMemcpyToSymbol(HIP_SYMBOL(printfBufferPtr), &printfbuf_device, sizeof(char *));
	hipMemcpyToSymbol(HIP_SYMBOL(printfBufferLength), &printfbuf_len, sizeof(printfbuf_len));

	return hipSuccess;
}
//
// cudaPrintfEnd
//
// Releases the device-side buffer allocated by cudaPrintfInit().
// Safe to call when initialisation never happened, or when teardown has
// already run - it simply does nothing in those cases.
//
extern "C" void cudaPrintfEnd()
{
	if((printfbuf_start == NULL) || (printfbuf_device == NULL))
		return;

	hipFree(printfbuf_device);
	printfbuf_device = NULL;
	printfbuf_start = NULL;
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
//	outputFP     - File descriptor to output to (NULL => stdout)
//	showThreadID - If true, prints [block,thread] before each line
//
// Returns hipErrorMissingConfiguration when cudaPrintfInit() has not been
// run, hipSuccess otherwise.
//
extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
	printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);

	// For now, we force "synchronous" mode which means we're not concurrent
	// with kernel execution. This also means we don't need clearOnPrint.
	// If you're patching it for async operation, here's where you want it.
	bool sync_printfs = true;
	bool clearOnPrint = false;

	// Initialisation check
	if(!printfbuf_start || !printfbuf_device || !printf_fp)
		return hipErrorMissingConfiguration;

	// To determine which architecture we're using, we read the
	// first short from the buffer - it'll be the magic number
	// relating to the version.
	unsigned short magic;

	hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost);

	// For SM_10 architecture, we've split our buffer into one-per-thread.
	// That means we must do each thread block separately. It'll require
	// extra reading. We also, for now, don't support async printfs because
	// that requires tracking one start pointer per thread.
	if(magic == CUPRINTF_SM10_MAGIC)
	{
		sync_printfs = true;
		clearOnPrint = false;
		int blocklen = 0;
		char *blockptr = printfbuf_device;
		while(blockptr < (printfbuf_device + printfbuf_len))
		{
			cuPrintfHeaderSM10 hdr;

			hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost);

			// We get our block-size-step from the very first header
			if(hdr.thread_buf_len != 0)
				blocklen = hdr.thread_buf_len;

			// No magic number means no printfs from this thread
			if(hdr.magic != CUPRINTF_SM10_MAGIC)
			{
				if(blocklen == 0)
				{
					fprintf(printf_fp, "No printf headers found at all!\n");
					break;			// No valid headers!
				}
				blockptr += blocklen;
				continue;
			}

			// "offset" is non-zero then we can print the block contents
			if(hdr.offset > 0)
			{
				// For synchronous printfs, we must print from endptr->bufend, then from start->end
				if(sync_printfs)
					doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
				doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
			}

			// Move on to the next block and loop again
			blockptr += hdr.thread_buf_len;
		}
	}
	// For SM_11 and up, everything is a single buffer and it's simple
	else if(magic == CUPRINTF_SM11_MAGIC)
	{
		// Grab the current "end of circular buffer" pointer.
		// NOTE(review): the bare symbol here matches the bare-symbol usage in
		// cudaPrintfInit(); the HIP-documented form is HIP_SYMBOL(printfBufferPtr)
		// - confirm against the HIP version in use.
		char *printfbuf_end = NULL;

		hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));

		// Adjust our starting and ending pointers to within the block.
		// The device-side pointer never wraps, so modulo maps it back into
		// the buffer; printfbuf_start is already in range (modulo is a no-op).
		char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
		char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;

		// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
		// buffer wrap carefully because we could miss those past "end".
		if(sync_printfs)
			doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
		doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
		printfbuf_start = printfbuf_end;
	}
	else
		;//printf("Bad magic number in cuPrintf buffer header\n");

	// If we were synchronous, then we must ensure that the memory is cleared on exit
	// otherwise another kernel launch with a different grid size could conflict.
	if(sync_printfs)
		hipMemset(printfbuf_device, 0, printfbuf_len);

	return hipSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
| 778754190bcc9ddd541d0355a7ed5e654bb0a39d.cu | /*
Copyright 2009 NVIDIA Corporation. All rights reserved.
NOTICE TO LICENSEE:
This source code and/or documentation ("Licensed Deliverables") are subject
to NVIDIA intellectual property rights under U.S. and international Copyright
laws.
These Licensed Deliverables contained herein is PROPRIETARY and CONFIDENTIAL
to NVIDIA and is being provided under the terms and conditions of a form of
NVIDIA software license agreement by and between NVIDIA and Licensee ("License
Agreement") or electronically accepted by Licensee. Notwithstanding any terms
or conditions to the contrary in the License Agreement, reproduction or
disclosure of the Licensed Deliverables to any third party without the express
written consent of NVIDIA is prohibited.
NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT,
NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THESE LICENSED
DELIVERABLES FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED
WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE
LICENSED DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. NOTWITHSTANDING ANY
TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT, IN NO EVENT SHALL
NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THESE LICENSED DELIVERABLES.
U.S. Government End Users. These Licensed Deliverables are a "commercial item"
as that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
"commercial computer software" and "commercial computer software documentation"
as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) and is provided to the
U.S. Government only as a commercial end item. Consistent with 48 C.F.R.12.212
and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all U.S. Government
End Users acquire the Licensed Deliverables with only those rights set forth
herein.
Any use of the Licensed Deliverables in individual and commercial software must
include, in the user documentation and internal comments to the code, the above
Disclaimer and U.S. Government End Users Notice.
*/
/*
* cuPrintf.cu
*
* This is a printf command callable from within a kernel. It is set
* up so that output is sent to a memory buffer, which is emptied from
* the host side - but only after a cudaThreadSynchronize() on the host.
*
* Currently, there is a limitation of around 200 characters of output
* and no more than 10 arguments to a single cuPrintf() call. Issue
* multiple calls if longer format strings are required.
*
* It requires minimal setup, and is *NOT* optimised for performance.
* For example, writes are not coalesced - this is because there is an
* assumption that people will not want to printf from every single one
* of thousands of threads, but only from individual threads at a time.
*
* Using this is simple - it requires one host-side call to initialise
* everything, and then kernels can call cuPrintf at will. Sample code
* is the easiest way to demonstrate:
*
#include "cuPrintf.cu"
__global__ void testKernel(int val)
{
cuPrintf("Value is: %d\n", val);
}
int main()
{
cudaPrintfInit();
testKernel<<< 2, 3 >>>(10);
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
return 0;
}
*
* See the header file, "cuPrintf.cuh" for more info, especially
* arguments to cudaPrintfInit() and cudaPrintfDisplay();
*/
#ifndef CUPRINTF_CU
#define CUPRINTF_CU
#include "cuPrintf.cuh"
#if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture
#include <sm_20_atomic_functions.h>
#endif
// This is the smallest amount of memory, per-thread, which is allowed.
// It is also the largest amount of space a single printf() can take up
const static int CUPRINTF_MAX_LEN = 256;
// This structure is used internally to track block/thread output restrictions.
typedef struct __align__(8) {
int threadid; // CUPRINTF_UNRESTRICTED for unrestricted
int blockid; // CUPRINTF_UNRESTRICTED for unrestricted
} cuPrintfRestriction;
// The main storage is in a global print buffer, which has a known
// start/end/length. These are atomically updated so it works as a
// circular buffer.
// Since the only control primitive that can be used is atomicAdd(),
// we cannot wrap the pointer as such. The actual address must be
// calculated from printfBufferPtr by mod-ing with printfBufferLength.
// For sm_10 architecture, we must subdivide the buffer per-thread
// since we do not even have an atomic primitive.
__constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host)
__constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host)
__device__ static cuPrintfRestriction restrictRules; // Output restrictions
__device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset
// This is the header preceeding all printf entries.
// NOTE: It *must* be size-aligned to the maximum entity size (size_t)
typedef struct __align__(8) {
unsigned short magic; // Magic number says we're valid
unsigned short fmtoffset; // Offset of fmt string into buffer
unsigned short blockid; // Block ID of author
unsigned short threadid; // Thread ID of author
} cuPrintfHeader;
// Special header for sm_10 architecture
#define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character
typedef struct __align__(16) {
unsigned short magic; // sm_10 specific magic number
unsigned short unused;
unsigned int thread_index; // thread ID for this buffer
unsigned int thread_buf_len; // per-thread buffer length
unsigned int offset; // most recent printf's offset
} cuPrintfHeaderSM10;
// Because we can't write an element which is not aligned to its bit-size,
// we have to align all sizes and variables on maximum-size boundaries.
// That means sizeof(double) in this case, but we'll use (long long) for
// better arch<1.3 support
#define CUPRINTF_ALIGN_SIZE sizeof(long long)
// All our headers are prefixed with a magic number so we know they're ready
#define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character
//
// getNextPrintfBufPtr
//
// Grabs a block of space in the general circular buffer, using an
// atomic function to ensure that it's ours. We handle wrapping
// around the circular buffer and return a pointer to a place which
// can be written to.
//
// Important notes:
// 1. We always grab CUPRINTF_MAX_LEN bytes
// 2. Because of 1, we never worry about wrapping around the end
// 3. Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN
//
// This returns a pointer to the place where we own.
//
__device__ static char *getNextPrintfBufPtr()
{
// Initialisation check
if(!printfBufferPtr)
return NULL;
// Thread/block restriction check
if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y)))
return NULL;
if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z)))
return NULL;
// Conditional section, dependent on architecture
#if __CUDA_ARCH__ == 100
// For sm_10 architectures, we have no atomic add - this means we must split the
// entire available buffer into per-thread blocks. Inefficient, but what can you do.
int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z);
int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z +
(blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z);
// Find our own block of data and go to it. Make sure the per-thread length
// is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and
// alignment issues! We must round down, of course.
unsigned int thread_buf_len = printfBufferLength / thread_count;
thread_buf_len &= ~(CUPRINTF_MAX_LEN-1);
// We *must* have a thread buffer length able to fit at least two printfs (one header, one real)
if(thread_buf_len < (CUPRINTF_MAX_LEN * 2))
return NULL;
// Now address our section of the buffer. The first item is a header.
char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index);
cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer;
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
// If our header is not set up, initialise it
hdr.magic = CUPRINTF_SM10_MAGIC;
hdr.thread_index = thread_index;
hdr.thread_buf_len = thread_buf_len;
hdr.offset = 0; // Note we start at 0! We pre-increment below.
*(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header
// For initial setup purposes, we might need to init thread0's header too
// (so that cudaPrintfDisplay() below will work). This is only run once.
cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer;
tophdr->thread_buf_len = thread_buf_len;
}
// Adjust the offset by the right amount, and wrap it if need be
unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN;
if(offset >= hdr.thread_buf_len)
offset = CUPRINTF_MAX_LEN;
// Write back the new offset for next time and return a pointer to it
((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset;
return myPrintfBuffer + offset;
#else
// Much easier with an atomic operation!
size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer;
offset %= printfBufferLength;
return globalPrintfBuffer + offset;
#endif
}
//
// writePrintfHeader
//
// Inserts the header for containing our UID, fmt position and
// block/thread number. We generate it dynamically to avoid
// issues arising from requiring pre-initialisation.
//
__device__ static void writePrintfHeader(char *ptr, char *fmtptr)
{
if(ptr)
{
cuPrintfHeader header;
header.magic = CUPRINTF_SM11_MAGIC;
header.fmtoffset = (unsigned short)(fmtptr - ptr);
header.blockid = blockIdx.x + gridDim.x*blockIdx.y;
header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
*(cuPrintfHeader *)(void *)ptr = header;
}
}
//
// cuPrintfStrncpy
//
// This special strncpy outputs an aligned length value, followed by the
// string. It then zero-pads the rest of the string until a 64-aligned
// boundary. The length *includes* the padding. A pointer to the byte
// just after the \0 is returned.
//
// This function could overflow CUPRINTF_MAX_LEN characters in our buffer.
// To avoid it, we must count as we output and truncate where necessary.
//
// Copies "src" into the record at "dest" as: one CUPRINTF_ALIGN_SIZE slot
// holding the byte count, then the string (truncated to n chars or at
// "end"), zero-padded up to the next CUPRINTF_ALIGN_SIZE boundary. The
// stored length INCLUDES the padding. Returns a pointer just past the
// padding, or NULL on bad input or overflow.
__device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end)
{
// Initialisation and overflow check
if(!dest || !src || (dest >= end))
return NULL;
// Prepare to write the length specifier. We're guaranteed to have
// at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in
// chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE.
int *lenptr = (int *)(void *)dest;
int len = 0;
dest += CUPRINTF_ALIGN_SIZE;
// Now copy the string, counting each byte (including the '\0')
while(n--)
{
if(dest >= end) // Overflow check
break;
len++;
*dest++ = *src;
if(*src++ == '\0')
break;
}
// Now write out the padding bytes, and we have our length.
// The (long) cast for the alignment test assumes CUPRINTF_ALIGN_SIZE is a
// power of two.
while((dest < end) && (((long)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0))
{
len++;
*dest++ = 0;
}
*lenptr = len;
return (dest < end) ? dest : NULL; // Overflow means return NULL
}
//
// copyArg
//
// This copies a length specifier and then the argument out to the
// data buffer. Templates let the compiler figure all this out at
// compile-time, making life much simpler from the programming
// point of view. I'm assuimg all (const char *) is a string, and
// everything else is the variable it points at. I'd love to see
// a better way of doing it, but aside from parsing the format
// string I can't think of one.
//
// The length of the data type is inserted at the beginning (so that
// the display can distinguish between float and double), and the
// pointer to the end of the entry is returned.
//
// Copies a string argument into the record via cuPrintfStrncpy() (which
// writes the length prefix and pads to alignment), then drops a
// terminating zero at the new write position. Returns the advanced
// pointer, or NULL on bad input / overflow.
__device__ static char *copyArg(char *ptr, const char *arg, char *end)
{
    if(ptr == NULL || arg == NULL)
        return NULL;
    char *next = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end);
    if(next != NULL)
        *next = 0;
    return next;
}
// Copies a scalar argument into the record: a length word (sizeof(T)) in
// the first alignment slot, the value itself in the second slot, then a
// zero marker at the following byte. The alignment guarantees mean that
// checking a single slot ahead of "ptr" against "end" is sufficient.
// Returns the advanced pointer, or NULL on bad input / overflow.
template <typename T>
__device__ static char *copyArg(char *ptr, T &arg, char *end)
{
    if(!ptr || ((ptr + CUPRINTF_ALIGN_SIZE) >= end))
        return NULL;
    // Length prefix in the first slot
    int *lenSlot = (int *)(void *)ptr;
    *lenSlot = sizeof(arg);
    // Value in the second slot
    T *valueSlot = (T *)(void *)(ptr + CUPRINTF_ALIGN_SIZE);
    *valueSlot = arg;
    // Advance past both slots and terminate
    char *next = ptr + 2 * CUPRINTF_ALIGN_SIZE;
    *next = 0;
    return next;
}
//
// cuPrintf
//
// Templated printf functions to handle multiple arguments.
// Note we return the total amount of data copied, not the number
// of characters output. But then again, who ever looks at the
// return from printf() anyway?
//
// The format is to grab a block of circular buffer space, the
// start of which will hold a header and a pointer to the format
// string. We then write in all the arguments, and finally the
// format string itself. This is to make it easy to prevent
// overflow of our buffer (we support up to 10 arguments, each of
// which can be 12 bytes in length - that means that only the
// format string (or a %s) can actually overflow; so the overflow
// check need only be in the strcpy function.
//
// The header is written at the very last because that's what
// makes it look like we're done.
//
// Errors, which are basically lack-of-initialisation, are ignored
// in the called functions because NULL pointers are passed around
//
// All printf variants basically do the same thing, setting up the
// buffer, writing all arguments, then finalising the header. For
// clarity, we'll pack the code into some big macros.
// CUPRINTF_PREAMBLE: claim one CUPRINTF_MAX_LEN record from the circular
// buffer (returning 0 from the enclosing cuPrintf on failure) and point
// "bufptr" just past the space reserved for the record header.
#define CUPRINTF_PREAMBLE \
char *start, *end, *bufptr, *fmtstart; \
if((start = getNextPrintfBufPtr()) == NULL) return 0; \
end = start + CUPRINTF_MAX_LEN; \
bufptr = start + sizeof(cuPrintfHeader);
// Posting an argument is easy: copyArg() advances bufptr (or NULLs it on
// overflow, which the subsequent calls tolerate).
#define CUPRINTF_ARG(argname) \
bufptr = copyArg(bufptr, argname, end);
// After args are done, record start-of-fmt, write the fmt string, then
// write the header LAST - a valid header is what marks the record complete.
#define CUPRINTF_POSTAMBLE \
fmtstart = bufptr; \
end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \
writePrintfHeader(start, end ? fmtstart : NULL); \
return end ? (int)(end - start) : 0;
// cuPrintf overloads for zero to ten arguments. Each one expands to the
// same sequence: claim a buffer record (PREAMBLE), copy each argument in
// order (ARG), then write the format string and finally the header
// (POSTAMBLE). Returns the number of buffer bytes used, or 0 on failure.
__device__ int cuPrintf(const char *fmt)
{
CUPRINTF_PREAMBLE;
CUPRINTF_POSTAMBLE;
}
template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_POSTAMBLE;
}
template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10)
{
CUPRINTF_PREAMBLE;
CUPRINTF_ARG(arg1);
CUPRINTF_ARG(arg2);
CUPRINTF_ARG(arg3);
CUPRINTF_ARG(arg4);
CUPRINTF_ARG(arg5);
CUPRINTF_ARG(arg6);
CUPRINTF_ARG(arg7);
CUPRINTF_ARG(arg8);
CUPRINTF_ARG(arg9);
CUPRINTF_ARG(arg10);
CUPRINTF_POSTAMBLE;
}
#undef CUPRINTF_PREAMBLE
#undef CUPRINTF_ARG
#undef CUPRINTF_POSTAMBLE
//
// cuPrintfRestrict
//
// Called to restrict output to a given thread/block.
// We store the info in "restrictRules", which is set up at
// init time by the host. It's not the cleanest way to do this
// because it means restrictions will last between
// invocations, but given the output-pointer continuity,
// I feel this is reasonable.
//
// Restricts device-side printf output to a single thread and/or block.
// Out-of-range values are ignored; CUPRINTF_UNRESTRICTED removes the
// corresponding restriction. Restrictions persist across kernel
// invocations (restrictRules lives in device global memory).
__device__ void cuPrintfRestrict(int threadid, int blockid)
{
    const int threadsInBlock = blockDim.x * blockDim.y * blockDim.z;
    const bool threadOk = (threadid >= 0 && threadid < threadsInBlock) ||
                          (threadid == CUPRINTF_UNRESTRICTED);
    if(threadOk)
        restrictRules.threadid = threadid;
    const int blocksInGrid = gridDim.x * gridDim.y;
    const bool blockOk = (blockid >= 0 && blockid < blocksInGrid) ||
                         (blockid == CUPRINTF_UNRESTRICTED);
    if(blockOk)
        restrictRules.blockid = blockid;
}
///////////////////////////////////////////////////////////////////////////////
// HOST SIDE
#include <stdio.h>
static FILE *printf_fp; // Destination stream used while a display call runs
static char *printfbuf_start=NULL; // Host-side read cursor into the circular buffer
static char *printfbuf_device=NULL; // Device-side print buffer (allocated by cudaPrintfInit)
static int printfbuf_len=0; // Buffer length in bytes (rounded to a multiple of CUPRINTF_MAX_LEN)
//
// outputPrintfData
//
// Our own internal function, which takes a pointer to a data buffer
// and passes it through libc's printf for output.
//
// We receive the formate string and a pointer to where the data is
// held. We then run through and print it out.
//
// Returns 0 on failure, 1 on success
//
//
// outputPrintfData
//
// Takes the format string and the packed argument data of one printf
// record and replays it through the host's printf, one conversion
// specifier at a time.
//
// Fixes over the original SDK version:
//  * "%%" no longer consumes an argument slot (the device writes no
//    argument for a literal percent, so consuming one desynchronised
//    every following argument).
//  * After printing an argument we advance by its stored length rounded
//    up to CUPRINTF_ALIGN_SIZE instead of a fixed CUPRINTF_ALIGN_SIZE.
//    Scalar slots still advance by one alignment unit, but %s strings
//    longer than one unit no longer corrupt later arguments.
//  * A negative stored length is rejected as corruption.
//
// Returns 0 on failure, 1 on success
//
static int outputPrintfData(char *fmt, char *data)
{
    // Format string is prefixed by a length that we don't need
    fmt += CUPRINTF_ALIGN_SIZE;

    // Walk the format string, stopping at every '%' and handing each
    // conversion to the host printf for formatting.
    char *p = strchr(fmt, '%');
    while(p != NULL)
    {
        // Print the literal text up to the % character
        *p = '\0';
        fputs(fmt, printf_fp);
        *p = '%';               // Put back the %

        // Now handle the format specifier
        char *format = p++;     // Points to the '%'
        p += strcspn(p, "%cdiouxXeEfgGaAnps");
        if(*p == '\0')          // If no format specifier, print the whole thing
        {
            fmt = format;
            break;
        }

        char specifier = *p++;
        char c = *p;            // Store for later
        *p = '\0';

        // "%%" is a literal percent - the device wrote no argument for it,
        // so we must not consume an argument slot for it.
        bool consumesArg = (specifier != '%');
        int arglen = 0;
        if(consumesArg)
        {
            // Each argument is prefixed by its length
            arglen = *(int *)data;
            if((arglen < 0) || (arglen > CUPRINTF_MAX_LEN))
            {
                fputs("Corrupt printf buffer data - aborting\n", printf_fp);
                return 0;
            }
            data += CUPRINTF_ALIGN_SIZE;
        }

        switch(specifier)
        {
            // These all take integer arguments.
            // NOTE(review): %p reads only an int here, so 64-bit pointers
            // are truncated - a pre-existing limitation of this format.
            case 'c':
            case 'd':
            case 'i':
            case 'o':
            case 'u':
            case 'x':
            case 'X':
            case 'p':
                fprintf(printf_fp, format, *((int *)data));
                break;

            // These all take double arguments
            case 'e':
            case 'E':
            case 'f':
            case 'g':
            case 'G':
            case 'a':
            case 'A':
                if(arglen == 4)     // Float vs. Double thing
                    fprintf(printf_fp, format, *((float *)data));
                else
                    fprintf(printf_fp, format, *((double *)data));
                break;

            // Strings are handled in a special way
            case 's':
                fprintf(printf_fp, format, (char *)data);
                break;

            // % is special - it consumed no argument above
            case '%':
                fprintf(printf_fp, "%%");
                break;

            // Everything else is just printed out as-is
            default:
                fprintf(printf_fp, "%s", format);
                break;
        }

        if(consumesArg)
        {
            // Advance past the argument: its stored length rounded up to the
            // alignment unit, but at least one unit (scalar slots occupy a
            // full unit even though their arglen may be smaller).
            int slot = ((arglen + CUPRINTF_ALIGN_SIZE - 1) / CUPRINTF_ALIGN_SIZE) * CUPRINTF_ALIGN_SIZE;
            if(slot < CUPRINTF_ALIGN_SIZE)
                slot = CUPRINTF_ALIGN_SIZE;
            data += slot;
        }

        *p = c;                 // Restore what we removed
        fmt = p;                // Adjust fmt string to be past the specifier
        p = strchr(fmt, '%');   // and get the next specifier
    }

    // Print out the last of the string
    fputs(fmt, printf_fp);
    return 1;
}
//
// doPrintfDisplay
//
// This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the
// print function above to display them. We've got this separate from
// cudaPrintfDisplay() below so we can handle the SM_10 architecture
// partitioning.
//
// Parameters:
//  headings        - non-zero: prefix each record with "[block, thread]: "
//  clear           - non-zero: zero each device record after printing it
//  bufstart/bufend - bounds of the circular buffer region on the device
//  bufptr          - first record to print
//  endptr          - one-past-last record (loop termination target)
// Returns the number of records printed. Stops early at the first record
// whose header is not yet valid (its write has not completed).
static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr)
{
// Grab, piece-by-piece, each output element until we catch
// up with the circular buffer end pointer
int printf_count=0;
char printfbuf_local[CUPRINTF_MAX_LEN+1];
printfbuf_local[CUPRINTF_MAX_LEN] = '\0';
while(bufptr != endptr)
{
// Wrap ourselves at the end-of-buffer
if(bufptr == bufend)
bufptr = bufstart;
// Adjust our start pointer to within the circular buffer and copy a block.
cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost);
// If the magic number isn't valid, then this write hasn't gone through
// yet and we'll wait until it does (or we're past the end for non-async printfs).
cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local;
if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN))
{
//fprintf(printf_fp, "Bad magic number in printf header\n");
break;
}
// Extract all the info and get this printf done
if(headings)
fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid);
// fmtoffset == 0 is the device's marker for a record that overflowed
if(hdr->fmtoffset == 0)
fprintf(printf_fp, "printf buffer overflow\n");
else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader)))
break;
printf_count++;
// Clear if asked
if(clear)
cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN);
// Now advance our start location, because we're done, and keep copying
bufptr += CUPRINTF_MAX_LEN;
}
return printf_count;
}
//
// cudaPrintfInit
//
// Takes a buffer length to allocate, creates the memory on the device and
// returns a pointer to it for when a kernel is called. It's up to the caller
// to free it.
//
//
// cudaPrintfInit
//
// Rounds the requested buffer size up to a non-zero multiple of
// CUPRINTF_MAX_LEN, allocates and zeroes the device-side print buffer,
// clears any thread/block output restrictions, and publishes the buffer
// pointer and length to the device-side globals. Returns
// cudaErrorInitializationError if the device allocation fails.
//
extern "C" cudaError_t cudaPrintfInit(size_t bufferLen)
{
    // Round the length up to a whole number of CUPRINTF_MAX_LEN records
    size_t roundedLen = (bufferLen < CUPRINTF_MAX_LEN) ? CUPRINTF_MAX_LEN : bufferLen;
    size_t remainder = roundedLen % CUPRINTF_MAX_LEN;
    if(remainder != 0)
        roundedLen += CUPRINTF_MAX_LEN - remainder;
    printfbuf_len = (int)roundedLen;

    // Allocate the device-side print buffer and zero it
    if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess)
        return cudaErrorInitializationError;
    cudaMemset(printfbuf_device, 0, printfbuf_len);
    printfbuf_start = printfbuf_device;     // Where we start reading from

    // Start out with no thread/block restrictions
    // (local renamed from "restrict", which collides with the C99 keyword)
    cuPrintfRestriction initialRules;
    initialRules.threadid = CUPRINTF_UNRESTRICTED;
    initialRules.blockid = CUPRINTF_UNRESTRICTED;
    cudaMemcpyToSymbol(restrictRules, &initialRules, sizeof(initialRules));

    // Publish the buffer address and length to the device-side globals
    cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *));
    cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *));
    cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len));
    return cudaSuccess;
}
//
// cudaPrintfEnd
//
// Frees up the memory which we allocated
//
//
// cudaPrintfEnd
//
// Releases the device-side buffer allocated by cudaPrintfInit() and
// resets the host-side bookkeeping pointers. A no-op when nothing is
// initialised, so it is safe to call more than once.
//
extern "C" void cudaPrintfEnd()
{
    if(printfbuf_start && printfbuf_device)
    {
        cudaFree(printfbuf_device);
        printfbuf_device = NULL;
        printfbuf_start = NULL;
    }
}
//
// cudaPrintfDisplay
//
// Each call to this function dumps the entire current contents
// of the printf buffer to the pre-specified FILE pointer. The
// circular "start" pointer is advanced so that subsequent calls
// dumps only new stuff.
//
// In the case of async memory access (via streams), call this
// repeatedly to keep trying to empty the buffer. If it's a sync
// access, then the whole buffer should empty in one go.
//
// Arguments:
// outputFP - File descriptor to output to (NULL => stdout)
// showThreadID - If true, prints [block,thread] before each line
//
extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID)
{
printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP);
// For now, we force "synchronous" mode which means we're not concurrent
// with kernel execution. This also means we don't need clearOnPrint.
// If you're patching it for async operation, here's where you want it.
bool sync_printfs = true;
bool clearOnPrint = false;
// Initialisation check
if(!printfbuf_start || !printfbuf_device || !printf_fp)
return cudaErrorMissingConfiguration;
// To determine which architecture we're using, we read the
// first short from the buffer - it'll be the magic number
// relating to the version.
unsigned short magic;
cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost);
// For SM_10 architecture, we've split our buffer into one-per-thread.
// That means we must do each thread block separately. It'll require
// extra reading. We also, for now, don't support async printfs because
// that requires tracking one start pointer per thread.
if(magic == CUPRINTF_SM10_MAGIC)
{
sync_printfs = true;
clearOnPrint = false;
int blocklen = 0;
char *blockptr = printfbuf_device;
// Walk the buffer one per-thread sub-buffer at a time
while(blockptr < (printfbuf_device + printfbuf_len))
{
cuPrintfHeaderSM10 hdr;
cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost);
// We get our block-size-step from the very first header
if(hdr.thread_buf_len != 0)
blocklen = hdr.thread_buf_len;
// No magic number means no printfs from this thread
if(hdr.magic != CUPRINTF_SM10_MAGIC)
{
if(blocklen == 0)
{
fprintf(printf_fp, "No printf headers found at all!\n");
break; // No valid headers!
}
blockptr += blocklen;
continue;
}
// "offset" is non-zero then we can print the block contents
if(hdr.offset > 0)
{
// For synchronous printfs, we must print from endptr->bufend, then from start->end
// (the sub-buffer is circular; its first record slot holds the header)
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len);
doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN);
}
// Move on to the next block and loop again
blockptr += hdr.thread_buf_len;
}
}
// For SM_11 and up, everything is a single buffer and it's simple
else if(magic == CUPRINTF_SM11_MAGIC)
{
// Grab the current "end of circular buffer" pointer.
char *printfbuf_end = NULL;
cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *));
// Adjust our starting and ending pointers to within the block
char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device;
char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device;
// For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular
// buffer wrap carefully because we could miss those past "end".
if(sync_printfs)
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len);
doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr);
// Remember where to resume for the next (async) display call
printfbuf_start = printfbuf_end;
}
else
;//printf("Bad magic number in cuPrintf buffer header\n");
// If we were synchronous, then we must ensure that the memory is cleared on exit
// otherwise another kernel launch with a different grid size could conflict.
if(sync_printfs)
cudaMemset(printfbuf_device, 0, printfbuf_len);
return cudaSuccess;
}
// Cleanup
#undef CUPRINTF_MAX_LEN
#undef CUPRINTF_ALIGN_SIZE
#undef CUPRINTF_SM10_MAGIC
#undef CUPRINTF_SM11_MAGIC
#endif
|
538df20003b0dd94de6def6f9a9a9c5ece1defa8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "rocblas.h"
#include "local_lib.h"
#define M 4
#define K 4
#define IDX2C(i,j,ld) (((i)*(ld))+(j))
// Demo driver: fill a 4x4 matrix with 1..16 on the host, print it, push
// it to the GPU, run activationFunction() on it in place, copy it back
// and print the result.
//
// Returns EXIT_SUCCESS, or EXIT_FAILURE if an allocation or the hipBLAS
// context creation fails. (The original sample ignored every status code
// and leaked the host buffer, the device buffer and the hipBLAS handle.)
int main(int argc, char ** argv){
    hipError_t cudaStat; // hipMalloc status
    hipblasStatus_t stat; // hipBLAS functions status
    hipblasHandle_t handle; // hipBLAS context
    float *a; // MxK matrix a on the host
    a = (float*) malloc (M*K* sizeof(float)); // host memory for a
    if(a == NULL){
        fprintf(stderr, "host malloc failed\n");
        return EXIT_FAILURE;
    }
    int i,j,ind = 1;
    // Fill the matrix row by row with 1,2,3,...
    for(i=0;i<M;i++){
        for(j=0;j<K;j++){
            a[IDX2C(i,j,K)]=(float)ind++;
        }
    }
    printf ("a:\n");
    for (i=0;i<M;i ++){
        for (j=0;j<K;j ++){
            printf (" %.5f",a[ IDX2C(i,j,K)]);
        }
        printf ("\n");
    }
    float * d_a; // d_a - a on the device
    cudaStat = hipMalloc (( void **)& d_a ,M*K* sizeof (float)); // device memory for a
    if(cudaStat != hipSuccess){
        fprintf(stderr, "device allocation failed\n");
        free(a);
        return EXIT_FAILURE;
    }
    stat = hipblasCreate (&handle); // initialize hipBLAS context
    if(stat != HIPBLAS_STATUS_SUCCESS){
        fprintf(stderr, "hipblasCreate failed\n");
        hipFree(d_a);
        free(a);
        return EXIT_FAILURE;
    }
    stat = hipblasSetMatrix(M, K, sizeof(float), a, M, d_a, M); // copy a -> d_a
    activationFunction(d_a, M, K); // apply the activation in place on the device
    stat = hipblasGetMatrix (M,K, sizeof (float) ,d_a ,M,a,M); // copy d_a -> a
    printf ("a:\n");
    for (i=0;i<M;i ++){
        for (j=0;j<K;j ++){
            printf (" %.5f",a[ IDX2C(i,j,K)]);
        }
        printf ("\n");
    }
    // Release everything we acquired (the original sample leaked all of it)
    hipblasDestroy(handle);
    hipFree(d_a);
    free(a);
    return EXIT_SUCCESS;
}
| 538df20003b0dd94de6def6f9a9a9c5ece1defa8.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include "cublas_v2.h"
#include "local_lib.h"
#define M 4
#define K 4
#define IDX2C(i,j,ld) (((i)*(ld))+(j))
// Demo driver: fill a 4x4 matrix with 1..16 on the host, print it, push
// it to the GPU, run activationFunction() on it in place, copy it back
// and print the result.
//
// Returns EXIT_SUCCESS, or EXIT_FAILURE if an allocation or the CUBLAS
// context creation fails. (The original sample ignored every status code
// and leaked the host buffer, the device buffer and the CUBLAS handle.)
int main(int argc, char ** argv){
    cudaError_t cudaStat; // cudaMalloc status
    cublasStatus_t stat; // CUBLAS functions status
    cublasHandle_t handle; // CUBLAS context
    float *a; // MxK matrix a on the host
    a = (float*) malloc (M*K* sizeof(float)); // host memory for a
    if(a == NULL){
        fprintf(stderr, "host malloc failed\n");
        return EXIT_FAILURE;
    }
    int i,j,ind = 1;
    // Fill the matrix row by row with 1,2,3,...
    for(i=0;i<M;i++){
        for(j=0;j<K;j++){
            a[IDX2C(i,j,K)]=(float)ind++;
        }
    }
    printf ("a:\n");
    for (i=0;i<M;i ++){
        for (j=0;j<K;j ++){
            printf (" %.5f",a[ IDX2C(i,j,K)]);
        }
        printf ("\n");
    }
    float * d_a; // d_a - a on the device
    cudaStat = cudaMalloc (( void **)& d_a ,M*K* sizeof (float)); // device memory for a
    if(cudaStat != cudaSuccess){
        fprintf(stderr, "device allocation failed\n");
        free(a);
        return EXIT_FAILURE;
    }
    stat = cublasCreate (&handle); // initialize CUBLAS context
    if(stat != CUBLAS_STATUS_SUCCESS){
        fprintf(stderr, "cublasCreate failed\n");
        cudaFree(d_a);
        free(a);
        return EXIT_FAILURE;
    }
    stat = cublasSetMatrix(M, K, sizeof(float), a, M, d_a, M); // copy a -> d_a
    activationFunction(d_a, M, K); // apply the activation in place on the device
    stat = cublasGetMatrix (M,K, sizeof (float) ,d_a ,M,a,M); // copy d_a -> a
    printf ("a:\n");
    for (i=0;i<M;i ++){
        for (j=0;j<K;j ++){
            printf (" %.5f",a[ IDX2C(i,j,K)]);
        }
        printf ("\n");
    }
    // Release everything we acquired (the original sample leaked all of it)
    cublasDestroy(handle);
    cudaFree(d_a);
    free(a);
    return EXIT_SUCCESS;
}
|
57c63250eac7e12017d4032fccec095c642fd755.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <type_traits>
#include "tensor_sum_kernel.cuh"
#include "kernel_tools.h"
#include "cudaBasics.h"
#include "cuda_settings.h"
#include "auto_graph.h"
__device__ void __syncthreads();
namespace dnnbasic
{
// Fixed-capacity GPU array alias sized to the tensor's maximum rank
using gpuArray = smallGPUArray<uint32_t, tensor<uint32_t>::MAX_DIMENSION_COUNT>;
static const uint32_t THREADS_PER_BLOCK = 1024; // Block size used by every launch in this file
static const uint32_t THREADS_PER_WARP = 32; // Warp width
static const uint32_t WARPS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_WARP; // Per-warp partial-sum slots needed in shared memory
// Tree reduction across a warp via __shfl_down_sync: after
// log2(THREADS_PER_WARP) halving steps, lane 0 holds the sum of all
// lanes' input values (other lanes hold partial sums). All 32 lanes
// must be active - the full 0xffffffff participation mask is used.
template<typename T>
__device__ T getWarpSum(const T threadValue)
{
    T total = threadValue;
    for (uint32_t offset = THREADS_PER_WARP / 2; offset != 0; offset >>= 1)
        total += __shfl_down_sync(0xffffffff, total, offset);
    return total;
}
// Reduces one dimension of a tensor by summation.
//
// Launch layout: grid.x covers the reduced dimension (one thread per
// element along it, THREADS_PER_BLOCK threads per block); the grid.y
// blocks each take a strided share of the "sumsToMake" independent
// output sums. Dynamic shared memory must hold WARPS_PER_BLOCK elements
// of T (one partial sum per warp). The output buffer must be zeroed
// before launch - each block atomically adds its partial into output[i].
template<typename T>
__global__ void sumKernel(
    const cudabasic::span<T> input,
    cudabasic::span<T> output,
    const uint32_t sumElementStride,
    const uint32_t sumDimSize,
    const uint32_t sumsToMake,
    const uint32_t blocksMade)
{
    extern __shared__ __align__(sizeof(T)) int8_t sharedArray[];
    T* sharedMemT = reinterpret_cast<T*>(sharedArray);
    // This thread's position along the summed dimension
    const uint32_t sumElemIdx = blockIdx.x * blockDim.x + threadIdx.x;
    for (uint32_t i = blockIdx.y; i < sumsToMake; i += blocksMade)
    {
        // Out-of-range threads load zero instead of exiting, because every
        // lane of a warp must participate in the shuffle reduction below.
        const T value = sumElemIdx >= sumDimSize ? 0 : input[sumElemIdx * sumElementStride + (i / sumElementStride) * sumElementStride * sumDimSize + (i % sumElementStride)];
        // Per-warp partial sum (valid in lane 0)
        const T warpSum = getWarpSum(value);
        // Lane 0 of each warp stores its partial sum in shared memory.
        // BUGFIX: index by warp number, i.e. threadIdx.x / THREADS_PER_WARP.
        // The original divided by WARPS_PER_BLOCK, which only produced the
        // right slot because both constants happen to equal 32 when
        // THREADS_PER_BLOCK is 1024.
        if (threadIdx.x % THREADS_PER_WARP == 0)
        {
            sharedMemT[threadIdx.x / THREADS_PER_WARP] = warpSum;
        }
        __syncthreads();
        // The first warp reduces the per-warp partials into a block sum
        T blockSum = 0;
        if (threadIdx.x < WARPS_PER_BLOCK)
        {
            blockSum = getWarpSum(sharedMemT[threadIdx.x]);
        }
        // This barrier also protects sharedMemT from the next iteration's writes
        __syncthreads();
        // Thread 0 publishes the block's contribution
        if (threadIdx.x == 0)
        {
            if constexpr (std::is_integral<T>::value && std::is_signed<T>::value)
            {
                // atomicAdd lacks signed overloads for some widths; two's
                // complement addition is bit-identical on the unsigned type.
                using unsigned_T = typename std::make_unsigned<T>::type;
                atomicAdd(reinterpret_cast<unsigned_T*>(&output[i]), (unsigned_T)blockSum);
            }
            else
            {
                atomicAdd(&output[i], blockSum);
            }
        }
    }
}
// Host launcher: sums tensor "input" along dimension "sumDimIdx" into
// "output" (which must already have the reduced shape). Types smaller
// than 4 bytes are rejected because the kernel's atomicAdd has no
// sub-word overloads. Supports both direct launch and capture into a
// CUDA/HIP graph via autoGraph.
template<typename T>
void tensorSum(const tensor<T>& input, tensor<T>& output, const uint32_t sumDimIdx)
{
if constexpr (sizeof(T) < 4)
{
throw std::runtime_error("Sum is currently not supported for that tensor type.");
}
else
{
// Stride between consecutive elements along the summed dimension
uint32_t sumElementStride = 1;
for (size_t i = sumDimIdx + 1; i < input.getDimensions().size(); i++)
{
sumElementStride *= input.getDimensions()[i].dim;
}
const uint32_t sumDim = input.getDimensions()[sumDimIdx].dim;
const uint32_t dimsToSum = output.elementCount();
// Cap grid.y at 40 blocks; NOTE(review): presumably tuned to the SM
// count of the target GPU - confirm before changing.
const uint32_t blocksMade = ::min(dimsToSum, 40u);
const dim3 blockDim(THREADS_PER_BLOCK);
const dim3 gridDim(integerCeilDivision(sumDim, blockDim.x), blocksMade);
if (autoGraph::isRecordingGraph())
{
// Graph capture: record a memset node (zero the accumulator) then the kernel node
const std::vector<void*> inputPtrs = { reinterpret_cast<void*>(input.getGPUArray().begin()) };
const void* outputPtr = reinterpret_cast<void*>(output.getGPUArray().begin());
autoGraph::addMemsetNode(output.getGPUArray(), 0);
autoGraph::addKernelNode(inputPtrs, outputPtr, sumKernel<T>, blockDim, gridDim, (uint32_t)sizeof(T) * WARPS_PER_BLOCK, input.getGPUArrayConst(), output.getGPUArray(), sumElementStride, sumDim, dimsToSum, blocksMade);
}
else
{
// Direct launch: zero the accumulator, then run the kernel on the default stream
hipMemsetAsync(output.getGPUArray().begin(), 0, output.elementCount() * sizeof(T), cuda::getDefaultStream());
cudabasic::executeKernel(sumKernel<T>, blockDim, gridDim, sizeof(T) * WARPS_PER_BLOCK, cuda::getDefaultStream(), input.getGPUArrayConst(), output.getGPUArray(), sumElementStride, sumDim, dimsToSum, blocksMade);
}
}
}
// Explicit instantiations for every supported element type (sub-4-byte
// types compile but throw at runtime via the sizeof guard in tensorSum).
template void tensorSum(const tensor<bool>& input, tensor<bool>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint8_t>& input, tensor<uint8_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint16_t>& input, tensor<uint16_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint32_t>& input, tensor<uint32_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint64_t>& input, tensor<uint64_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int8_t>& input, tensor<int8_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int16_t>& input, tensor<int16_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int32_t>& input, tensor<int32_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int64_t>& input, tensor<int64_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<float>& input, tensor<float>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<double>& input, tensor<double>& output, const uint32_t sumDimIdx);
} | 57c63250eac7e12017d4032fccec095c642fd755.cu | #include <cuda_runtime.h>
#include <type_traits>
#include "tensor_sum_kernel.cuh"
#include "kernel_tools.h"
#include "cudaBasics.h"
#include "cuda_settings.h"
#include "auto_graph.h"
__device__ void __syncthreads();
namespace dnnbasic
{
// Fixed-capacity GPU array alias sized to the tensor's maximum rank
using gpuArray = smallGPUArray<uint32_t, tensor<uint32_t>::MAX_DIMENSION_COUNT>;
static const uint32_t THREADS_PER_BLOCK = 1024; // Block size used by every launch in this file
static const uint32_t THREADS_PER_WARP = 32; // Warp width
static const uint32_t WARPS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_WARP; // Per-warp partial-sum slots needed in shared memory
// Tree reduction across a warp via __shfl_down_sync: after
// log2(THREADS_PER_WARP) halving steps, lane 0 holds the sum of all
// lanes' input values (other lanes hold partial sums). All 32 lanes
// must be active - the full 0xffffffff participation mask is used.
template<typename T>
__device__ T getWarpSum(const T threadValue)
{
    T total = threadValue;
    for (uint32_t offset = THREADS_PER_WARP / 2; offset != 0; offset >>= 1)
        total += __shfl_down_sync(0xffffffff, total, offset);
    return total;
}
// Reduces one dimension of a tensor by summation.
//
// Launch layout: grid.x covers the reduced dimension (one thread per
// element along it, THREADS_PER_BLOCK threads per block); the grid.y
// blocks each take a strided share of the "sumsToMake" independent
// output sums. Dynamic shared memory must hold WARPS_PER_BLOCK elements
// of T (one partial sum per warp). The output buffer must be zeroed
// before launch - each block atomically adds its partial into output[i].
template<typename T>
__global__ void sumKernel(
    const cudabasic::span<T> input,
    cudabasic::span<T> output,
    const uint32_t sumElementStride,
    const uint32_t sumDimSize,
    const uint32_t sumsToMake,
    const uint32_t blocksMade)
{
    extern __shared__ __align__(sizeof(T)) int8_t sharedArray[];
    T* sharedMemT = reinterpret_cast<T*>(sharedArray);
    // This thread's position along the summed dimension
    const uint32_t sumElemIdx = blockIdx.x * blockDim.x + threadIdx.x;
    for (uint32_t i = blockIdx.y; i < sumsToMake; i += blocksMade)
    {
        // Out-of-range threads load zero instead of exiting, because every
        // lane of a warp must participate in the shuffle reduction below.
        const T value = sumElemIdx >= sumDimSize ? 0 : input[sumElemIdx * sumElementStride + (i / sumElementStride) * sumElementStride * sumDimSize + (i % sumElementStride)];
        // Per-warp partial sum (valid in lane 0)
        const T warpSum = getWarpSum(value);
        // Lane 0 of each warp stores its partial sum in shared memory.
        // BUGFIX: index by warp number, i.e. threadIdx.x / THREADS_PER_WARP.
        // The original divided by WARPS_PER_BLOCK, which only produced the
        // right slot because both constants happen to equal 32 when
        // THREADS_PER_BLOCK is 1024.
        if (threadIdx.x % THREADS_PER_WARP == 0)
        {
            sharedMemT[threadIdx.x / THREADS_PER_WARP] = warpSum;
        }
        __syncthreads();
        // The first warp reduces the per-warp partials into a block sum
        T blockSum = 0;
        if (threadIdx.x < WARPS_PER_BLOCK)
        {
            blockSum = getWarpSum(sharedMemT[threadIdx.x]);
        }
        // This barrier also protects sharedMemT from the next iteration's writes
        __syncthreads();
        // Thread 0 publishes the block's contribution
        if (threadIdx.x == 0)
        {
            if constexpr (std::is_integral<T>::value && std::is_signed<T>::value)
            {
                // atomicAdd lacks signed overloads for some widths; two's
                // complement addition is bit-identical on the unsigned type.
                using unsigned_T = typename std::make_unsigned<T>::type;
                atomicAdd(reinterpret_cast<unsigned_T*>(&output[i]), (unsigned_T)blockSum);
            }
            else
            {
                atomicAdd(&output[i], blockSum);
            }
        }
    }
}
// Host launcher: sums tensor "input" along dimension "sumDimIdx" into
// "output" (which must already have the reduced shape). Types smaller
// than 4 bytes are rejected because the kernel's atomicAdd has no
// sub-word overloads. Supports both direct launch and capture into a
// CUDA graph via autoGraph.
template<typename T>
void tensorSum(const tensor<T>& input, tensor<T>& output, const uint32_t sumDimIdx)
{
if constexpr (sizeof(T) < 4)
{
throw std::runtime_error("Sum is currently not supported for that tensor type.");
}
else
{
// Stride between consecutive elements along the summed dimension
uint32_t sumElementStride = 1;
for (size_t i = sumDimIdx + 1; i < input.getDimensions().size(); i++)
{
sumElementStride *= input.getDimensions()[i].dim;
}
const uint32_t sumDim = input.getDimensions()[sumDimIdx].dim;
const uint32_t dimsToSum = output.elementCount();
// Cap grid.y at 40 blocks; NOTE(review): presumably tuned to the SM
// count of the target GPU - confirm before changing.
const uint32_t blocksMade = std::min(dimsToSum, 40u);
const dim3 blockDim(THREADS_PER_BLOCK);
const dim3 gridDim(integerCeilDivision(sumDim, blockDim.x), blocksMade);
if (autoGraph::isRecordingGraph())
{
// Graph capture: record a memset node (zero the accumulator) then the kernel node
const std::vector<void*> inputPtrs = { reinterpret_cast<void*>(input.getGPUArray().begin()) };
const void* outputPtr = reinterpret_cast<void*>(output.getGPUArray().begin());
autoGraph::addMemsetNode(output.getGPUArray(), 0);
autoGraph::addKernelNode(inputPtrs, outputPtr, sumKernel<T>, blockDim, gridDim, (uint32_t)sizeof(T) * WARPS_PER_BLOCK, input.getGPUArrayConst(), output.getGPUArray(), sumElementStride, sumDim, dimsToSum, blocksMade);
}
else
{
// Direct launch: zero the accumulator, then run the kernel on the default stream
cudaMemsetAsync(output.getGPUArray().begin(), 0, output.elementCount() * sizeof(T), cuda::getDefaultStream());
cudabasic::executeKernel(sumKernel<T>, blockDim, gridDim, sizeof(T) * WARPS_PER_BLOCK, cuda::getDefaultStream(), input.getGPUArrayConst(), output.getGPUArray(), sumElementStride, sumDim, dimsToSum, blocksMade);
}
}
}
// Explicit instantiations for every supported element type (sub-4-byte
// types compile but throw at runtime via the sizeof guard in tensorSum).
template void tensorSum(const tensor<bool>& input, tensor<bool>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint8_t>& input, tensor<uint8_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint16_t>& input, tensor<uint16_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint32_t>& input, tensor<uint32_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<uint64_t>& input, tensor<uint64_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int8_t>& input, tensor<int8_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int16_t>& input, tensor<int16_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int32_t>& input, tensor<int32_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<int64_t>& input, tensor<int64_t>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<float>& input, tensor<float>& output, const uint32_t sumDimIdx);
template void tensorSum(const tensor<double>& input, tensor<double>& output, const uint32_t sumDimIdx);
} |
548cb44b7b6370ea8e019ac6b1f59630cc994597.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <kernel_helpers.h>
#include <cstdint>
using namespace at;
// Scatters one row of "src" per flock into a 3-D destination tensor:
//   destination[flock_indices[f], buffer_ptr_indices[f], d] = src[f, d]
// One thread per (flock, data element); "required_threads" guards the
// partially-filled final block.
template <typename scalar_t>
__global__ void buffer_store_kernel(
TensorIndexer<scalar_t, 3> destination,
const TensorIndexer<int64_t, 1> flock_indices,
const TensorIndexer<int64_t, 1> buffer_ptr_indices,
const TensorIndexer<scalar_t, 2> src,
int data_size,
int required_threads){
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < required_threads){
// Decompose the flat thread id into (flock row, element within row)
int data_idx = id % data_size;
int flock_idx = id / data_size;
destination.at(flock_indices[flock_idx], buffer_ptr_indices[flock_idx], data_idx) = src.at(flock_idx, data_idx);
}
}
// Host launcher for buffer_store_kernel: one thread per element of the
// flock_size x data_size source, dispatched for floating-point element
// types only. NOTE(review): CHECK_INPUT presumably asserts the tensors
// are contiguous device tensors - confirm against kernel_helpers.h.
void buffer_store(at::Tensor destination,
at::Tensor flock_indices,
at::Tensor buffer_ptr_indices,
at::Tensor src,
int data_size,
int flock_size){
CHECK_INPUT(destination);
CHECK_INPUT(flock_indices);
CHECK_INPUT(buffer_ptr_indices);
CHECK_INPUT(src);
const auto required_threads = flock_size * data_size;
const int blocks = GET_BLOCK_COUNT(required_threads);
// Launch on the stream of destination's device (this call also selects the device)
const hipStream_t stream = set_device_get_cuda_stream(destination.get_device());
AT_DISPATCH_FLOATING_TYPES(destination.type(), "buffer_store_kernel", ([&] {
hipLaunchKernelGGL(( buffer_store_kernel<scalar_t>), dim3(blocks), dim3(max_threads_per_block), 0, stream,
TensorIndexer<scalar_t, 3> (destination),
TensorIndexer<int64_t, 1> (flock_indices),
TensorIndexer<int64_t, 1> (buffer_ptr_indices),
TensorIndexer<scalar_t, 2> (src),
data_size,
required_threads);
}));
}
#include <cuda.h>
#include <cuda_runtime.h>
#include <kernel_helpers.h>
#include <cstdint>
using namespace at;
template <typename scalar_t>
__global__ void buffer_store_kernel(
TensorIndexer<scalar_t, 3> destination,
const TensorIndexer<int64_t, 1> flock_indices,
const TensorIndexer<int64_t, 1> buffer_ptr_indices,
const TensorIndexer<scalar_t, 2> src,
int data_size,
int required_threads){
const int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < required_threads){
int data_idx = id % data_size;
int flock_idx = id / data_size;
destination.at(flock_indices[flock_idx], buffer_ptr_indices[flock_idx], data_idx) = src.at(flock_idx, data_idx);
}
}
void buffer_store(at::Tensor destination,
at::Tensor flock_indices,
at::Tensor buffer_ptr_indices,
at::Tensor src,
int data_size,
int flock_size){
CHECK_INPUT(destination);
CHECK_INPUT(flock_indices);
CHECK_INPUT(buffer_ptr_indices);
CHECK_INPUT(src);
const auto required_threads = flock_size * data_size;
const int blocks = GET_BLOCK_COUNT(required_threads);
const cudaStream_t stream = set_device_get_cuda_stream(destination.get_device());
AT_DISPATCH_FLOATING_TYPES(destination.type(), "buffer_store_kernel", ([&] {
buffer_store_kernel<scalar_t><<<blocks, max_threads_per_block, 0, stream>>>(
TensorIndexer<scalar_t, 3> (destination),
TensorIndexer<int64_t, 1> (flock_indices),
TensorIndexer<int64_t, 1> (buffer_ptr_indices),
TensorIndexer<scalar_t, 2> (src),
data_size,
required_threads);
}));
} |
c89c96ec80bdf0da139b711efbab538d0118904f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "clock_kernel.cu"
// This example shows how to use the clock function to measure the performance of
// a kernel accurately.
//
// Blocks are executed in parallel and out of order. Since there's no synchronization
// mechanism between blocks, we measure the clock once for each block. The clock
// samples are written to device memory.
#define NUM_BLOCKS 64
#define NUM_THREADS 256
// It's interesting to change the number of blocks and the number of threads to
// understand how to keep the hardware busy.
//
// Here are some numbers I get on my G80:
// blocks - clocks
// 1 - 3096
// 8 - 3232
// 16 - 3364
// 32 - 4615
// 64 - 9981
//
// With less than 16 blocks some of the multiprocessors of the device are idle. With
// more than 16 you are using all the multiprocessors, but there's only one block per
// multiprocessor and that doesn't allow you to hide the latency of the memory. With
// more than 32 the speed scales linearly.
int main(int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device")) {
int devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
} else {
hipSetDevice( cutGetMaxGflopsDeviceId() );
}
float * dinput = NULL;
float * doutput = NULL;
clock_t * dtimer = NULL;
clock_t timer[NUM_BLOCKS * 2];
float input[NUM_THREADS * 2];
for (int i = 0; i < NUM_THREADS * 2; i++)
{
input[i] = (float)i;
}
cutilSafeCall(hipMalloc((void**)&dinput, sizeof(float) * NUM_THREADS * 2));
cutilSafeCall(hipMalloc((void**)&doutput, sizeof(float) * NUM_BLOCKS));
cutilSafeCall(hipMalloc((void**)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));
cutilSafeCall(hipMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( timedReduction), dim3(NUM_BLOCKS), dim3(NUM_THREADS), sizeof(float) * 2 * NUM_THREADS, 0, dinput, doutput, dtimer);
//cutilSafeCall(hipMemcpy(output, doutput, sizeof(float) * NUM_BLOCKS, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, hipMemcpyDeviceToHost));
cutilSafeCall(hipFree(dinput));
cutilSafeCall(hipFree(doutput));
cutilSafeCall(hipFree(dtimer));
// This test always passes.
printf( "PASSED\n");
// Compute the difference between the last block end and the first block start.
clock_t minStart = timer[0];
clock_t maxEnd = timer[NUM_BLOCKS];
for (int i = 1; i < NUM_BLOCKS; i++)
{
minStart = timer[i] < minStart ? timer[i] : minStart;
maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd;
}
printf("time = %d\n", maxEnd - minStart);
hipDeviceReset();
cutilExit(argc, argv);
}
| c89c96ec80bdf0da139b711efbab538d0118904f.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <cutil_inline.h>
#include "clock_kernel.cu"
// This example shows how to use the clock function to measure the performance of
// a kernel accurately.
//
// Blocks are executed in parallel and out of order. Since there's no synchronization
// mechanism between blocks, we measure the clock once for each block. The clock
// samples are written to device memory.
#define NUM_BLOCKS 64
#define NUM_THREADS 256
// It's interesting to change the number of blocks and the number of threads to
// understand how to keep the hardware busy.
//
// Here are some numbers I get on my G80:
// blocks - clocks
// 1 - 3096
// 8 - 3232
// 16 - 3364
// 32 - 4615
// 64 - 9981
//
// With less than 16 blocks some of the multiprocessors of the device are idle. With
// more than 16 you are using all the multiprocessors, but there's only one block per
// multiprocessor and that doesn't allow you to hide the latency of the memory. With
// more than 32 the speed scales linearly.
int main(int argc, char** argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if ( cutCheckCmdLineFlag(argc, (const char **)argv, "device")) {
int devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("exiting...\n");
cutilExit(argc, argv);
exit(0);
}
} else {
cudaSetDevice( cutGetMaxGflopsDeviceId() );
}
float * dinput = NULL;
float * doutput = NULL;
clock_t * dtimer = NULL;
clock_t timer[NUM_BLOCKS * 2];
float input[NUM_THREADS * 2];
for (int i = 0; i < NUM_THREADS * 2; i++)
{
input[i] = (float)i;
}
cutilSafeCall(cudaMalloc((void**)&dinput, sizeof(float) * NUM_THREADS * 2));
cutilSafeCall(cudaMalloc((void**)&doutput, sizeof(float) * NUM_BLOCKS));
cutilSafeCall(cudaMalloc((void**)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));
cutilSafeCall(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, cudaMemcpyHostToDevice));
timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS>>>(dinput, doutput, dtimer);
//cutilSafeCall(cudaMemcpy(output, doutput, sizeof(float) * NUM_BLOCKS, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaFree(dinput));
cutilSafeCall(cudaFree(doutput));
cutilSafeCall(cudaFree(dtimer));
// This test always passes.
printf( "PASSED\n");
// Compute the difference between the last block end and the first block start.
clock_t minStart = timer[0];
clock_t maxEnd = timer[NUM_BLOCKS];
for (int i = 1; i < NUM_BLOCKS; i++)
{
minStart = timer[i] < minStart ? timer[i] : minStart;
maxEnd = timer[NUM_BLOCKS+i] > maxEnd ? timer[NUM_BLOCKS+i] : maxEnd;
}
printf("time = %d\n", maxEnd - minStart);
cudaThreadExit();
cutilExit(argc, argv);
}
|
b3a23d4a7153953c2b2c6541f6d51652e1e9e0c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "../include/data_structures.cuh"
#include "../core/include/debugging.cuh"
#include <hip/hip_complex.h>
#pragma region GPUArray
template<typename T>
fdsp::GPUArray<T>::GPUArray(const T* ptr, const std::vector<size_t>& dimSizes):
m_dimSizes(dimSizes), ndims(dimSizes.size())
{
m_size = 1;
for(auto& size: m_dimSizes)
m_size *= size;
m_dArray.resize(m_size);
CHECK(hipMemcpy(GetPointerToArray(), ptr, sizeof(T)*m_size, hipMemcpyHostToDevice));
}
template<typename T>
fdsp::GPUArray<T>::GPUArray(const std::vector<size_t> &dimSizes) :
m_dimSizes(dimSizes), ndims(dimSizes.size())
{
m_size = 1;
for(auto& size: m_dimSizes)
m_size *= size;
m_dArray.resize(m_size);
CHECK(hipMemset(GetPointerToArray(), 0, sizeof(T)*m_size));
}
template<typename T>
fdsp::GPUArray<T>::GPUArray(const GPUArray<T>& array)
{
m_dimSizes = array.m_dimSizes;
ndims = array.ndims;
m_size = array.m_size;
m_dArray.resize(m_size);
CHECK(hipMemcpy(GetPointerToArray(), thrust::raw_pointer_cast(&array.m_dArray[0]), sizeof(T)*m_size, hipMemcpyDeviceToDevice));
}
template<typename T>
void fdsp::GPUArray<T>::Get(T *h_ptr) const
{
thrust::copy(m_dArray.begin(), m_dArray.end(), h_ptr);
}
template<typename T>
T fdsp::GPUArray<T>::GetElement(size_t index) const
{
return m_dArray[index];
}
template<typename T>
std::vector<size_t> fdsp::GPUArray<T>::GetDimensionSizes() const
{
return m_dimSizes;
}
template<typename T>
const T* fdsp::GPUArray<T>::GetPointerToArrayConst()const
{
return thrust::raw_pointer_cast(&(m_dArray[0]));
}
template<typename T>
T* fdsp::GPUArray<T>::GetPointerToArray()
{
return thrust::raw_pointer_cast(&(m_dArray[0]));
}
template<typename T>
size_t fdsp::GPUArray<T>::GetSize() const
{
return m_size;
}
template<typename T>
thrust::device_vector<T> fdsp::GPUArray<T>::GetDeviceVector() const
{
return m_dArray;
}
template class fdsp::GPUArray<unsigned char>;
template class fdsp::GPUArray<int>;
template class fdsp::GPUArray<float>;
template class fdsp::GPUArray<double>;
template class fdsp::GPUArray<hipComplex>;
template class fdsp::GPUArray<hipDoubleComplex>;
#pragma endregion
| b3a23d4a7153953c2b2c6541f6d51652e1e9e0c7.cu | #include "../include/data_structures.cuh"
#include "../core/include/debugging.cuh"
#include <cuComplex.h>
#pragma region GPUArray
template<typename T>
fdsp::GPUArray<T>::GPUArray(const T* ptr, const std::vector<size_t>& dimSizes):
m_dimSizes(dimSizes), ndims(dimSizes.size())
{
m_size = 1;
for(auto& size: m_dimSizes)
m_size *= size;
m_dArray.resize(m_size);
CHECK(cudaMemcpy(GetPointerToArray(), ptr, sizeof(T)*m_size, cudaMemcpyHostToDevice));
}
template<typename T>
fdsp::GPUArray<T>::GPUArray(const std::vector<size_t> &dimSizes) :
m_dimSizes(dimSizes), ndims(dimSizes.size())
{
m_size = 1;
for(auto& size: m_dimSizes)
m_size *= size;
m_dArray.resize(m_size);
CHECK(cudaMemset(GetPointerToArray(), 0, sizeof(T)*m_size));
}
template<typename T>
fdsp::GPUArray<T>::GPUArray(const GPUArray<T>& array)
{
m_dimSizes = array.m_dimSizes;
ndims = array.ndims;
m_size = array.m_size;
m_dArray.resize(m_size);
CHECK(cudaMemcpy(GetPointerToArray(), thrust::raw_pointer_cast(&array.m_dArray[0]), sizeof(T)*m_size, cudaMemcpyDeviceToDevice));
}
template<typename T>
void fdsp::GPUArray<T>::Get(T *h_ptr) const
{
thrust::copy(m_dArray.begin(), m_dArray.end(), h_ptr);
}
template<typename T>
T fdsp::GPUArray<T>::GetElement(size_t index) const
{
return m_dArray[index];
}
template<typename T>
std::vector<size_t> fdsp::GPUArray<T>::GetDimensionSizes() const
{
return m_dimSizes;
}
template<typename T>
const T* fdsp::GPUArray<T>::GetPointerToArrayConst()const
{
return thrust::raw_pointer_cast(&(m_dArray[0]));
}
template<typename T>
T* fdsp::GPUArray<T>::GetPointerToArray()
{
return thrust::raw_pointer_cast(&(m_dArray[0]));
}
template<typename T>
size_t fdsp::GPUArray<T>::GetSize() const
{
return m_size;
}
template<typename T>
thrust::device_vector<T> fdsp::GPUArray<T>::GetDeviceVector() const
{
return m_dArray;
}
template class fdsp::GPUArray<unsigned char>;
template class fdsp::GPUArray<int>;
template class fdsp::GPUArray<float>;
template class fdsp::GPUArray<double>;
template class fdsp::GPUArray<cuComplex>;
template class fdsp::GPUArray<cuDoubleComplex>;
#pragma endregion
|
6b76d32d2cd445e5d578bef7a4749036957ede58.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cu_addition.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
const double *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
double *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cu_addition), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cu_addition), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cu_addition), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6b76d32d2cd445e5d578bef7a4749036957ede58.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cu_addition.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
const double *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
double *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
const int n = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cu_addition<<<gridBlock,threadBlock>>>(A,B,C,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cu_addition<<<gridBlock,threadBlock>>>(A,B,C,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cu_addition<<<gridBlock,threadBlock>>>(A,B,C,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
54e15299aa2a4f0a0c765783bb31bf25759e62f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
AT_ERROR("solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb) {
AT_ERROR("triangular_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("triangular_solve only takes float or double Tensors");
}
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n) {
AT_ERROR("geqrf only takes float or double Tensors");
}
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2) {
AT_ERROR("geqrf only takes float or double Tensors");
}
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info) {
AT_ERROR("orgqr only takes float or double Tensors");
}
template<class scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
AT_ERROR("symeig only takes float or double Tensors");
}
template<class scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
AT_ERROR("svd only takes float or double Tensors")
}
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
// ---------------------------------------------------------------------------
// Type-dispatched wrappers around MAGMA routines. Each specialization simply
// forwards to the double- ("d") or single- ("s") precision MAGMA entry point
// with an otherwise identical argument list.
// ---------------------------------------------------------------------------

// Batched triangular solve (float): solves op(A) X = B from the left with A
// triangular; alpha is fixed to 1.
template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}

// Optimal block size for ?geqrf, queried from MAGMA (double precision).
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}

// Optimal block size for ?geqrf, queried from MAGMA (single precision).
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}

// QR factorization (double). `is_v2` selects ?geqrf2_gpu, which produces a
// correct R but whose output cannot be fed to ?orgqr_gpu; the default
// ?geqrf_gpu is the converse (see the comment in apply_qr below).
template<>
void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  if (!is_v2) {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
}

// QR factorization (float); see magmaGeqrf<double> for the is_v2 semantics.
template<>
void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  if (!is_v2) {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
}

// Generates the explicit Q matrix from ?geqrf_gpu output (double).
template<>
void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
}

// Generates the explicit Q matrix from ?geqrf_gpu output (float).
template<>
void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
}

// Symmetric eigendecomposition via divide-and-conquer (?syevd_gpu, double).
template<>
void magmaSymeig<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork,
    magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
}

// Symmetric eigendecomposition via divide-and-conquer (?syevd_gpu, float).
template<>
void magmaSymeig<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork,
    magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
}

// Singular value decomposition via divide-and-conquer (?gesdd, double).
// Note: ?gesdd takes host-resident matrices (hybrid CPU-GPU driver).
template<>
void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
    magma_int_t* iwork, magma_int_t* info) {
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
}

// Singular value decomposition via divide-and-conquer (?gesdd, float).
template<>
void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
    magma_int_t* iwork, magma_int_t* info) {
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B via LU factorization (MAGMA ?gesv). On entry `b` holds B and
// `A` holds the coefficient matrices; on exit `b` holds the solution X and
// `A` is overwritten with its LU factors. Both tensors must be column-major
// (see cloneBatchedColumnMajor). One MAGMA status code per matrix is written
// into `infos`.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  if (b.dim() == 2) {
    // Single-matrix path: pivots live in a CPU int tensor (the hybrid
    // driver accepts host pivots).
    auto ipiv = at::empty({n}, at::kInt);
    magma_int_t info = 0;
    magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data<magma_int_t>(),
                        b_data, n, &info);
    infos[0] = info;
  } else {
    // Batched path: build pinned-memory arrays of per-matrix pointers for
    // the batched MAGMA interface.
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);

    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    magma_int_t* info_array;
    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    MAGMAQueue magma_queue(b.get_device());

    // MAGMA's batched routines are limited to 65535 matrices per call, so
    // the batch is processed in chunks of at most 65535.
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / 65535)
    // and these cover floor(batch_size / 65535) * 65535 matrix solves
    int64_t mini_batches = batch_size / 65535, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &info_array[mini_idx];

      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
          info_array_cur, 65535, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
    // which concisely is equal to batch_size % 65535
    if (batch_size % 65535 != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
          &info_array[mini_idx], batch_size % 65535, magma_queue);
    }

    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// CUDA entry point for at::solve: clones both inputs into column-major
// scratch buffers (MAGMA mutates its arguments in place), dispatches on the
// floating dtype, and validates the returned MAGMA status codes before
// handing back (solution, LU factors).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto lu_copy = cloneBatchedColumnMajor(A);
  std::vector<int64_t> infos(batchCount(self), 0);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(rhs_copy, lu_copy, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos[0], "solve_cuda");
  }
  return std::make_tuple(rhs_copy, lu_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched matrix inverse: LU-factorizes `self` in place (?getrf_batched),
// then computes the inverse into `self_inv` (?getri_batched). Both tensors
// must be column-major. One MAGMA status code per matrix goes into `infos`.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  magma_int_t* info_array;
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);

  // Set up the created arrays
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(self.get_device());
  // NOTE(review): unlike the getri calls below, this LU call is NOT chunked
  // into <=65535 mini-batches — verify whether magmaLuBatched shares the
  // same batch-size limit.
  magmaLuBatched<scalar_t>(
    n, n, self_array, n, ipiv_array, info_array,
    batch_size, magma_queue);

  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / 65535)
  // and these cover floor(batch_size / 65535) * 65535 matrix solves
  int64_t mini_batches = batch_size / 65535, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur = &info_array[mini_idx];

    magmaGetriBatched<scalar_t>(
      n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
      n, info_array_cur, 65535, magma_queue);
  }

  // Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
  // which concisely is equal to batch_size % 65535
  if (batch_size % 65535 != 0) {
    magmaGetriBatched<scalar_t>(
      n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
      n, &info_array[mini_idx], batch_size % 65535, magma_queue);
  }

  for (int64_t i = 0; i < batch_size; i++) {
    infos[i] = info_array[i];
  }
#endif
}
// Single-matrix inverse: LU-factorize in place (?getrf), then invert via
// ?getri with a device workspace of the MAGMA-recommended size. `self` must
// be column-major; on exit it holds the inverse. A non-zero LU `info` aborts
// early without running getri.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  // Workspace size per MAGMA's recommendation: n * optimal block size.
  magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
  magma_int_t info_tmp = 0;

  Tensor ipiv = at::empty({n}, at::kInt);
  Tensor dwork = at::empty({lwork}, self.options());
  magmaLu<scalar_t>(n, n, self_data, n, ipiv.data<magma_int_t>(), &info_tmp);
  if (info_tmp != 0) {
    info = info_tmp;
    return;
  }
  magmaGetri<scalar_t>(
    n, self_data, n, ipiv.data<magma_int_t>(), dwork.data<scalar_t>(), lwork, &info_tmp);
  info = info_tmp;
#endif
}
// CUDA entry point for at::inverse: picks the single-matrix or batched MAGMA
// path based on dimensionality and checks the returned status codes.
Tensor _inverse_helper_cuda(const Tensor& self) {
  auto result = cloneBatchedColumnMajor(self);
  if (self.dim() == 2) {
    // Single matrix: invert in place in the column-major clone.
    int64_t info = 0;
    AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(result, info);
    });
    singleCheckErrors(info, "inverse_cuda");
    return result;
  }
  // Batched: the LU factors are written into a second scratch clone while
  // `result` receives the inverses.
  std::vector<int64_t> infos(batchCount(self), 0);
  auto lu_scratch = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
    apply_batched_inverse<scalar_t>(lu_scratch, result, infos);
  });
  batchCheckErrors(infos, "inverse_cuda");
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B given the Cholesky factor of A (MAGMA ?potrs). `A` holds
// the factor (upper or lower per `upper`), `b` holds B on entry and X on
// exit; both must be column-major. `info` receives the last non-zero MAGMA
// status (only invalid-argument codes are possible for potrs).
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  int info_tmp = 0;
  if (b.dim() == 2) {
    magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
                                b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    // Batched path: pointer arrays in pinned memory, chunked to MAGMA's
    // 65535-matrices-per-call limit.
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / 65535)
    // and these cover floor(batch_size / 65535) * 65535 matrix solves
    int64_t mini_batches = batch_size / 65535, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
          info_tmp, 65535, magma_queue);

      // Stop on the first failing mini-batch; the error is reported below.
      if (info_tmp != 0) {
        break;
      }
    }

    // Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
    // which concisely is equal to batch_size % 65535
    if (batch_size % 65535 != 0 && info_tmp == 0) {
      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
          info_tmp, batch_size % 65535, magma_queue);
    }

    info = info_tmp;
  }
#endif
}
// CUDA entry point for at::cholesky_solve: clones both inputs into
// column-major buffers, runs the MAGMA solver, and raises on an
// invalid-argument status code.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  int64_t info = 0;
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_copy, factor_copy, upper, info);
  });
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return rhs_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Cholesky factorization in place (MAGMA ?potrf / ?potrf_batched). `self`
// must be column-major; on exit the requested triangle holds the factor.
// One status code per matrix is written into `infos` (positive = matrix not
// positive definite at that leading minor).
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  if (self.dim() == 2) {
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
    infos[0] = info;
  } else {
    // Batched path: one device pointer per matrix, factored in one call.
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    magma_int_t* info_array;
    scalar_t** self_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }

    MAGMAQueue magma_queue(self.get_device());
    magmaCholeskyBatched<scalar_t>(
      uplo, n, self_array, n, info_array,
      batch_size, magma_queue);

    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// CUDA entry point for at::cholesky. The factorization itself is always run
// on the lower triangle: for the upper-triangular result the input is
// transposed first and the factor transposed back at the end.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  Tensor working = upper
      ? cloneBatchedColumnMajor(self.transpose(-1, -2))
      : cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
    apply_cholesky<scalar_t>(working, false, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }
  return upper ? working.transpose(-1, -2) : working;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Partially-pivoted LU factorization in place (MAGMA ?getrf variants).
// `self` must be square and column-major; `pivots` and `infos` receive the
// pivot indices and per-matrix status codes. `get_pivots=false` selects the
// pivot-free variant.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-1), "n");

  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
    // the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
    // The data is later copied back to the appropriate output tensor.
    Tensor info_tmp = at::zeros({}, at::kInt);
    if (get_pivots) {
      Tensor piv_tmp = at::empty({n}, at::kInt);
      magmaLu<scalar_t>(
        n, n, self_data, n, piv_tmp.data<magma_int_t>(), info_tmp.data<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(n, n, self_data, n, info_tmp.data<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    // Batched path: device pointer arrays, one factorization call.
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
        n, n, self_array, n, pivots_array,
        infos.data<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
        n, n, self_array, n, infos.data<magma_int_t>(),
        batch_size, magma_queue);
    }
  }
#endif
}
// CUDA entry point for torch.lu: returns (LU factors, pivots, infos).
// Pivots are pre-filled with the identity permutation (1..n) so the no-pivot
// path still yields valid pivot output. Errors are raised only when
// `check_errors` is set; otherwise infos are returned for the caller.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  squareCheckInputs(self);
  // Shape bookkeeping: pivots have one fewer dim than self (drop one matrix
  // dim), infos two fewer (one entry per matrix).
  auto req_size = self.sizes().vec();
  req_size.pop_back();
  Tensor pivots_tensor = at::arange(1, self.size(-1) + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));

  Tensor self_working_copy;
  if (self.numel() == 0) {
    // Empty input: nothing to factor; return empty factors with the
    // identity pivots and zero infos built above.
    self_working_copy = at::empty_like(self);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu");
    } else {
      batchCheckErrors(infos_tensor, "lu");
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Copies the upper (upper=true) or lower (upper=false) triangular part of
// `self` into `result`, zeroing everything else, for a (possibly batched)
// set of matrices with arbitrary strides. One thread per element, launched
// over N = total element count. The last two dims are (row, col); any
// leading dims are batch dims. `k` is the diagonal offset as in
// torch.triu/torch.tril.
template <typename scalar_t, typename IndexType, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void triu_tril_kernel(
    cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
    const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
    const int64_t k, const int64_t N) {
  // Promote to 64-bit BEFORE multiplying: blockIdx.x * blockDim.x is an
  // unsigned 32-bit product and would wrap for tensors with >= 2^32
  // elements — the IndexType=int64_t instantiation exists for exactly
  // that case.
  int64_t linear_idx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x;
  if (linear_idx >= N) {
    return;
  }

  auto dims = self_info.dims;

  IndexType self_offset = 0, result_offset = 0;
  // Compute column index and corresponding offset
  IndexType col = linear_idx % self_info.sizes[dims - 1];
  linear_idx /= self_info.sizes[dims - 1];
  self_offset += self_info.strides[dims - 1] * col;
  result_offset += result_info.strides[dims - 1] * col;

  // Compute row index and corresponding offset
  IndexType row = linear_idx % self_info.sizes[dims - 2];
  linear_idx /= self_info.sizes[dims - 2];
  self_offset += self_info.strides[dims - 2] * row;
  result_offset += result_info.strides[dims - 2] * row;

  // Compute remaining (batch-dimension) offsets; loop body is skipped
  // entirely for plain 2-D inputs (dims == 2).
  IndexType running_index;
  #pragma unroll
  for (IndexType i = dims - 3; i >= 0; --i) {
    running_index = linear_idx % self_info.sizes[i];
    linear_idx /= self_info.sizes[i];
    self_offset += running_index * self_info.strides[i];
    result_offset += running_index * result_info.strides[i];
  }

  // Element is kept iff it lies on the requested side of diagonal k.
  bool mask = upper ? (col - row >= k) : (col - row <= k);
  result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
// Host-side launcher shared by triu/tril: picks 32- or 64-bit indexing based
// on tensor size and launches triu_tril_kernel with a 1-D grid covering all
// elements. `name` is used only for dispatch error messages.
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
  int64_t N = self.numel();
  dim3 dim_block = cuda::getApplyBlock();
  // Ceil-divide so the grid covers every element; the kernel bounds-checks.
  dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
  AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), name, [&]{
    // 32-bit indexing is cheaper; fall back to 64-bit when either tensor
    // cannot be addressed with 32-bit offsets.
    if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
      auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
      hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int32_t, upper>)
        , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
          result_info, self_info, k, N);
    } else {
      auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
      auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
      hipLaunchKernelGGL(( triu_tril_kernel<scalar_t, int64_t, upper>)
        , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
          result_info, self_info, k, N);
    }
  });
  // Surface asynchronous launch errors.
  AT_CUDA_CHECK(hipGetLastError());
  return result;
}
// In-place tril: reuses the out-variant with self as both input and output.
Tensor& tril_cuda_(Tensor &self, int64_t k) {
  return tril_cuda_out(self, self, k);
}

// tril with explicit output: resizes `result` to match `self` if needed and
// launches the templated kernel (no-op for empty tensors).
Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  if (self.numel() == 0) {
    return result;
  }
  return triu_tril_cuda_template<false>(result, self, k, "tril");
}

// In-place triu: reuses the out-variant with self as both input and output.
Tensor& triu_cuda_(Tensor &self, int64_t k) {
  return triu_cuda_out(self, self, k);
}

// triu with explicit output: resizes `result` to match `self` if needed and
// launches the templated kernel (no-op for empty tensors).
Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  if (result.sizes() != self.sizes()) {
    result.resize_as_(self);
  }
  if (self.numel() == 0) {
    return result;
  }
  return triu_tril_cuda_template<true>(result, self, k, "triu");
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves op(A) X = B with A triangular (MAGMA ?trsm). `b` holds B on entry
// and X on exit; `A` is read-only. Both must be column-major. trsm reports
// no numerical failure status, so nothing is written back to the caller
// beyond `b` itself.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
// Fixed copy-paste error: this message previously said "cholesky_solve".
AT_ERROR("triangular_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;

  auto A_data = A.data<scalar_t>();
  auto b_data = b.data<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  if (b.dim() == 2) {
    magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
  } else {
    // Batched path: pointer arrays in pinned memory, chunked to MAGMA's
    // 65535-matrices-per-call limit.
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / 65535)
    // and these cover floor(batch_size / 65535) * 65535 matrix solves
    int64_t mini_batches = batch_size / 65535, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, A_array_cur,
          n, b_array_cur, n, 65535, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
    // which concisely is equal to batch_size % 65535
    if (batch_size % 65535 != 0) {
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, &A_array[mini_idx],
          n, &b_array[mini_idx], n, batch_size % 65535, magma_queue);
    }
  }
#endif
}
// CUDA entry point for at::triangular_solve: clones both inputs into
// column-major buffers and runs the MAGMA trsm-based solver. Returns
// (solution, clone of A) — trsm has no status code to check.
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto tri_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(rhs_copy, tri_copy, upper, transpose, unitriangular);
  });
  return std::make_tuple(rhs_copy, tri_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Two-phase QR factorization. R is computed in `R` via ?geqrf2_gpu (which
// yields a correct R), then Q is computed in `Q` by re-running ?geqrf_gpu
// followed by ?orgqr_gpu (whose inputs must come from ?geqrf_gpu). Both
// tensors must be column-major; `infos` receives one status per matrix and
// the function aborts on the first failure.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto q_data = Q.data<scalar_t>();
  auto r_data = R.data<scalar_t>();
  auto q_matrix_stride = matrixStride(Q);
  auto r_matrix_stride = matrixStride(R);

  magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
  magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
  magma_int_t k = m < n ? m : n;  // number of elementary reflectors
  magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
  int64_t batch_size = batchCount(R);

  // magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
  // The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
  Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
  Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
  scalar_t* tau_data = tau.data<scalar_t>();
  scalar_t* work_data = work.data<scalar_t>();

  // This phase computes R (the raw version)
  // This uses MAGMA's ?geqrf2_gpu function
  magma_int_t info = 0;
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }

  // This phase computes Q (the raw version)
  // We require to perform ?geqrf_gpu again due to this bug in MAGMA:
  // - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
  // - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
  // Refer to the below link for more details:
  // http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
    magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
    infos[i] = info;
    if (info != 0) {
      return;
    }
    magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}
// CUDA entry point for at::qr: sets up the (possibly reduced, per `some`)
// output geometry, runs the MAGMA-backed apply_qr, and narrows the results
// to their final shapes.
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
  std::vector<int64_t> infos(batchCount(self), 0);

  // Setup input geometry and inputs for apply_qr
  std::vector<int64_t> q_sizes, q_strides;
  int64_t n_columns_q;
  std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
  Tensor q_working_copy, r_working_copy;

  // If there are no elements, then we simply return a pair of tensors of required dimensions
  if (self.numel() == 0) {
    // Fix the number of columns of q_working_copy appropriately
    q_sizes[self.dim() - 1] = n_columns_q;
    q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
    q_working_copy = q_working_copy.expand_as(q_working_copy);

    // We repurpose the same q_sizes for r_working_copy
    // Fix the number of rows and columns of q_working_copy appropriately
    q_sizes[self.dim() - 1] = self.size(-1);
    q_sizes[self.dim() - 2] = n_columns_q;
    r_working_copy = at::empty(q_sizes, self.options());

    return std::make_tuple(q_working_copy, r_working_copy);
  }

  // Q starts as a (possibly wider) copy of self; apply_qr overwrites it with
  // the explicit Q. R starts as a column-major copy of self.
  q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
  q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
  r_working_copy = cloneBatchedColumnMajor(self);

  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{
    apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
  });
  if (self.dim() > 2) {
    batchCheckErrors(infos, "qr_cuda");
  } else {
    singleCheckErrors(infos[0], "qr_cuda");
  }

  // Trim Q to its final column count and keep only the upper triangle of R.
  return std::make_tuple(q_working_copy.narrow_copy(-1, 0, n_columns_q),
                         r_working_copy.narrow_copy(-2, 0, n_columns_q).triu_());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Symmetric eigendecomposition (MAGMA ?syevd_gpu). `self` is overwritten
// with the eigenvectors when `eigenvectors` is set; `eigvals` (a CPU tensor,
// per the hybrid driver's requirement) receives the eigenvalues. One status
// code per matrix goes into `infos`; aborts on the first failure.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  auto eigvals_data = eigvals.data<scalar_t>();
  auto self_matrix_stride = matrixStride(self);
  auto eigvals_stride = eigvals.size(-1);
  int64_t batch_size = batchCount(self);
  magma_int_t n = magma_int_cast(self.size(-1), "n");

  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;

  // Host-side scratch matrix required by the ?syevd_gpu interface.
  scalar_t* wA;
  ALLOCATE_ARRAY(wA, scalar_t, n * n);

  magma_int_t info;
  // Run once, first to get the optimum work sizes.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;  // lwork/liwork == -1 requests a workspace query
  scalar_t wkopt;
  magma_int_t liwork = -1;
  magma_int_t iwkopt;
  magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info);

  scalar_t* work;
  magma_int_t* iwork;
  lwork = magma_int_cast(wkopt, "work_size");
  liwork = magma_int_cast(iwkopt, "iwork_size");
  ALLOCATE_ARRAY(work, scalar_t, lwork);
  ALLOCATE_ARRAY(iwork, magma_int_t, liwork);

  for (int64_t i = 0; i < batch_size; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
    scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
    magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
                          wA, n, work, lwork, iwork, liwork, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}
// CUDA entry point for at::symeig: allocates CPU-side eigenvalue storage
// (the hybrid MAGMA driver requires it), runs apply_symeig, and moves the
// eigenvalues back to the input's device.
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);

  auto self_sizes = self.sizes().vec();
  self_sizes.pop_back();

  // magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
  // The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
  // The data is later moved to the appropriate device.
  // In the case where self.numel() == 0, we just return an empty tensor of
  // dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
  auto eigvals_working_copy = self.numel() == 0
                              ? at::empty(self_sizes, self.options())
                              : at::empty(self_sizes, self.options().device(at::kCPU));

  if (self.numel() == 0) {
    return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self));
  }

  auto self_working_copy = cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{
    apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
  });

  if (!eigenvectors) {
    // Eigenvectors were not requested: zero the (meaningless) matrix output.
    self_working_copy.zero_();
  }
  if (self.dim() > 2) {
    batchCheckErrors(infos, "symeig_cuda");
  } else {
    singleCheckErrors(infos[0], "symeig_cuda");
  }
  return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Singular value decomposition (MAGMA ?gesdd, hybrid CPU-GPU driver: all
// tensors are host-resident). `jobchar` 'A'/'S'/'N' maps onto MAGMA's
// all/some/no-vectors modes. One status per matrix goes into `infos`;
// aborts on the first failure.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
                      char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data<scalar_t>();
  auto U_data = U.data<scalar_t>();
  auto S_data = S.data<scalar_t>();
  auto VT_data = VT.data<scalar_t>();
  auto self_stride = matrixStride(self);
  auto U_stride = matrixStride(U);
  auto S_stride = S.size(-1);
  auto VT_stride = matrixStride(VT);
  auto batchsize = batchCount(self);

  magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  auto k = ::min(m, n);  // number of singular values
  magma_int_t info = 0;
  // Run once, first to get the optimum work size.
  // Since we deal with batches of matrices with the same dimensions, doing this outside
  // the loop saves (batch_size - 1) workspace queries which would provide the same result
  // and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
  magma_int_t lwork = -1;  // lwork == -1 requests a workspace query
  scalar_t wkopt;
  magma_int_t* iwork;
  // gesdd requires an integer workspace of 8 * min(m, n).
  ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
  magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
  lwork = magma_int_cast(wkopt, "work_size");
  scalar_t* work;
  ALLOCATE_ARRAY(work, scalar_t, lwork);

  for (int64_t i = 0; i < batchsize; i++) {
    scalar_t* self_working_ptr = &self_data[i * self_stride];
    scalar_t* S_working_ptr = &S_data[i * S_stride];
    scalar_t* U_working_ptr = &U_data[i * U_stride];
    scalar_t* VT_working_ptr = &VT_data[i * VT_stride];

    // Compute S, U (optionally), VT (optionally)
    magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
                       S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
    infos[i] = info;
    if (info != 0) {
      return;
    }
  }
#endif
}
// CUDA entry point for at::svd. Because MAGMA's gesdd is a hybrid driver
// operating on host memory, the input is staged into a pinned, column-major
// CPU buffer; U/S/VT are produced on the CPU (via _create_U_S_VT) and moved
// back to the input's device at the end.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = ::min(m, n);

  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';

  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);

  if (self.numel() > 0) {
    // The input matrix, U, S and VT have to reside in pinned memory.
    // Additionally, the input and U have to be in column major format.
    // _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
    // For the input matrix, this requirements are being taken care of below.
    // Specify strides
    auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
    self_col_major_strides[self.dim() - 2] = 1;
    self_col_major_strides[self.dim() - 1] = m;
    // Create strided tensor in pinned memory
    auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                               at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
    self_working_copy.copy_(self);

    AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
      apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
    });

    if (self.dim() > 2) {
      batchCheckErrors(infos, "svd_cuda");
    } else {
      singleCheckErrors(infos[0], "svd_cuda");
    }

    // Move the CPU results to the input's device, keeping their strides.
    U_working_copy = same_stride_to(U_working_copy, self.options());
    S_working_copy = same_stride_to(S_working_copy, self.options());
    VT_working_copy = same_stride_to(VT_working_copy, self.options());

    if (compute_uv) {
      if (some) {
        // Reduced SVD: keep only the first k columns of V^T's storage.
        VT_working_copy = VT_working_copy.narrow(-1, 0, k);
      }
    } else {
      // Vectors were not requested: zero the (meaningless) U and VT.
      VT_working_copy.zero_();
      U_working_copy.zero_();
    }
  } else {
    // Empty input: return correctly-shaped zeroed outputs on the device.
    U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
    S_working_copy = same_stride_to(S_working_copy, self.options());
    VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
  }
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// ----------------------------------------------------------------------------
// Generic fallbacks for the MAGMA wrappers. These templates are only selected
// for dtypes without a float/double specialization (declared further below),
// in which case they raise an error at runtime.
// ----------------------------------------------------------------------------
// Solve A * X = B via LU factorization (gesv).
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
AT_ERROR("solve only takes float or double Tensors");
}
// Partially-pivoted LU factorization (getrf), with and without pivoting.
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info) {
AT_ERROR("lu only takes float or double Tensors");
}
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("lu only takes float or double Tensors");
}
// Matrix inverse from an LU factorization (getri) and its workspace query.
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info) {
AT_ERROR("getri only takes float or double Tensors");
}
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("getri only takes float or double Tensors");
}
// Solve with a precomputed Cholesky factor (potrs).
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky_solve only takes float or double Tensors");
}
// Cholesky factorization (potrf).
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info) {
AT_ERROR("cholesky only takes float or double Tensors");
}
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
AT_ERROR("cholesky only takes float or double Tensors");
}
// Triangular solve (trsm).
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb) {
AT_ERROR("triangular_solve only takes float or double Tensors");
}
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
AT_ERROR("triangular_solve only takes float or double Tensors");
}
// QR factorization (geqrf) and its blocksize query.
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n) {
AT_ERROR("geqrf only takes float or double Tensors");
}
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2) {
AT_ERROR("geqrf only takes float or double Tensors");
}
// Generate the orthogonal matrix Q from a geqrf factorization (orgqr).
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info) {
AT_ERROR("orgqr only takes float or double Tensors");
}
// Symmetric eigendecomposition (syevd).
template<class scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
AT_ERROR("symeig only takes float or double Tensors");
}
// Singular value decomposition (gesdd). Generic fallback: raises for dtypes
// without a float/double specialization below.
template<class scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, scalar_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
// Fix: the statement was missing its terminating semicolon, unlike every
// sibling stub; instantiating this template would not compile.
AT_ERROR("svd only takes float or double Tensors");
}
template<>
void magmaSolve<double>(
magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaSolve<float>(
magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
}
template<>
void magmaSolveBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaSolveBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
}
template<>
void magmaLu<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
}
template<>
void magmaLu<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info) {
magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
}
template<>
void magmaLuBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuNoPiv<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
magma_int_t* info) {
magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
}
template<>
void magmaLuNoPiv<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
magma_int_t* info) {
magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
}
template<>
void magmaLuNoPivBatched<double>(
magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaLuNoPivBatched<float>(
magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
return magma_get_sgetri_nb(n);
}
template<>
void magmaGetri<double>(
magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
}
template<>
void magmaGetri<float>(
magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
magma_int_t lwork, magma_int_t* info) {
magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
}
template<>
void magmaGetriBatched<double>(
magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaGetriBatched<float>(
magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolve<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
double* dB, magma_int_t lddb, magma_int_t* info) {
magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolve<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
float* dB, magma_int_t lddb, magma_int_t* info) {
magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
}
template<>
void magmaCholeskySolveBatched<double>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskySolveBatched<float>(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholesky<double>(
magma_uplo_t uplo, magma_int_t n, double* dA,
magma_int_t ldda, magma_int_t* info) {
magma_dpotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholesky<float>(
magma_uplo_t uplo, magma_int_t n, float* dA,
magma_int_t ldda, magma_int_t* info) {
magma_spotrf_gpu(uplo, n, dA, ldda, info);
}
template<>
void magmaCholeskyBatched<double>(
magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaCholeskyBatched<float>(
magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
}
template<>
void magmaTriangularSolve<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}
template<>
void magmaTriangularSolve<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
}
template<>
void magmaTriangularSolveBatched<double>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
void magmaTriangularSolveBatched<float>(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue) {
magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
return magma_get_sgeqrf_nb(m, n);
}
template<>
void magmaGeqrf<double>(
magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t* info, bool is_v2) {
if (!is_v2) {
magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
}
template<>
void magmaGeqrf<float>(
magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t* info, bool is_v2) {
if (!is_v2) {
magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
} else {
magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
}
}
template<>
void magmaOrgqr<double>(
magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
}
template<>
void magmaOrgqr<float>(
magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
}
template<>
void magmaSymeig<double>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
}
template<>
void magmaSymeig<float>(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
}
template<>
void magmaSvd<double>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
magma_int_t lda, double* s, double* U, magma_int_t ldu,
double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
}
template<>
void magmaSvd<float>(
magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
magma_int_t lda, float* s, float* U, magma_int_t ldu,
float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
magma_int_t* iwork, magma_int_t* info) {
magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
}
#endif
#define ALLOCATE_ARRAY(name, type, size) \
auto storage_##name = pin_memory<type>(size); \
name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, n, ipiv.data<magma_int_t>(),
b_data, n, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / 65535)
// and these cover floor(batch_size / 65535) * 65535 matrix solves
int64_t mini_batches = batch_size / 65535, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, n, ipiv_array_cur, b_array_cur, n,
info_array_cur, 65535, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
// which concisely is equal to batch_size % 65535
if (batch_size % 65535 != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], n, &ipiv_array[mini_idx], &b_array[mini_idx], n,
&info_array[mini_idx], batch_size % 65535, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
std::vector<int64_t> infos(batchCount(self), 0);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "solve_cuda", [&]{
apply_solve<scalar_t>(self_working_copy, A_working_copy, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "solve_cuda");
} else {
singleCheckErrors(infos[0], "solve_cuda");
}
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / 65535)
// and these cover floor(batch_size / 65535) * 65535 matrix solves
int64_t mini_batches = batch_size / 65535, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, 65535, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
// which concisely is equal to batch_size % 65535
if (batch_size % 65535 != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % 65535, magma_queue);
}
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data<magma_int_t>(), dwork.data<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
Tensor _inverse_helper_cuda(const Tensor& self) {
auto self_inv_working_copy = cloneBatchedColumnMajor(self);
if (self.dim() > 2) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_batched_inverse<scalar_t>(
self_working_copy, self_inv_working_copy, infos);
});
batchCheckErrors(infos, "inverse_cuda");
} else {
int64_t info = 0;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "inverse_cuda", [&]{
apply_single_inverse<scalar_t>(self_inv_working_copy, info);
});
singleCheckErrors(info, "inverse_cuda");
}
return self_inv_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / 65535)
// and these cover floor(batch_size / 65535) * 65535 matrix solves
int64_t mini_batches = batch_size / 65535, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, 65535, magma_queue);
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
// which concisely is equal to batch_size % 65535
if (batch_size % 65535 != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % 65535, magma_queue);
}
info = info_tmp;
}
#endif
}
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
int64_t info = 0;
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
apply_cholesky_solve<scalar_t>(self_working_copy, A_working_copy, upper, info);
});
TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
return self_working_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
if (self.dim() == 2) {
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, n, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array, n, info_array,
batch_size, magma_queue);
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
Tensor self_working_copy;
if (upper) {
self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2));
} else {
self_working_copy = cloneBatchedColumnMajor(self);
}
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
apply_cholesky<scalar_t>(self_working_copy, false, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "cholesky_cuda");
} else {
singleCheckErrors(infos[0], "cholesky_cuda");
}
if (upper) {
return self_working_copy.transpose(-1, -2);
} else {
return self_working_copy;
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-1), "n");
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({n}, at::kInt);
magmaLu<scalar_t>(
n, n, self_data, n, piv_tmp.data<magma_int_t>(), info_tmp.data<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(n, n, self_data, n, info_tmp.data<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
n, n, self_array, n, pivots_array,
infos.data<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
n, n, self_array, n, infos.data<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
squareCheckInputs(self);
auto req_size = self.sizes().vec();
req_size.pop_back();
Tensor pivots_tensor = at::arange(1, self.size(-1) + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
self_working_copy = at::empty_like(self);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu");
} else {
batchCheckErrors(infos_tensor, "lu");
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triu/tril ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Elementwise triu/tril kernel: copies `self` into `result`, zeroing entries
// outside the requested triangle.  Launched over a 1D grid with one thread
// per element; each thread decodes its linear index into (batch..., row, col)
// using the per-dimension sizes/strides from the TensorInfo structs.
// `upper` selects triu (keep col - row >= k) vs tril (keep col - row <= k).
template <typename scalar_t, typename IndexType, bool upper>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__
void triu_tril_kernel(
cuda::detail::TensorInfo<scalar_t, IndexType> result_info,
const cuda::detail::TensorInfo<scalar_t, IndexType> self_info,
const int64_t k, const int64_t N) {
int64_t linear_idx = blockIdx.x * blockDim.x + threadIdx.x;
// Guard the grid tail: the grid is rounded up to whole blocks.
if (linear_idx >= N) {
return;
}
auto dims = self_info.dims;
IndexType self_offset = 0, result_offset = 0;
// Compute column index and corresponding offset
IndexType col = linear_idx % self_info.sizes[dims - 1];
linear_idx /= self_info.sizes[dims - 1];
self_offset += self_info.strides[dims - 1] * col;
result_offset += result_info.strides[dims - 1] * col;
// Compute row index and corresponding offset
IndexType row = linear_idx % self_info.sizes[dims - 2];
linear_idx /= self_info.sizes[dims - 2];
self_offset += self_info.strides[dims - 2] * row;
result_offset += result_info.strides[dims - 2] * row;
// Compute remaining (batch-dimension) offsets by peeling digits off the
// remaining linear index; for 2-D inputs (dims == 2) this body never runs.
IndexType running_index;
#pragma unroll
for (IndexType i = dims - 3; i >= 0; --i) {
running_index = linear_idx % self_info.sizes[i];
linear_idx /= self_info.sizes[i];
self_offset += running_index * self_info.strides[i];
result_offset += running_index * result_info.strides[i];
}
bool mask = upper ? (col - row >= k) : (col - row <= k);
result_info.data[result_offset] = mask ? self_info.data[self_offset] : scalar_t(0);
}
// Host-side launcher for triu_tril_kernel.  Dispatches on dtype and on
// whether 32-bit index math is safe for both tensors (cheaper on device).
// Assumes result and self have the same sizes and numel() > 0 (the empty
// case is handled by the callers below).
template <bool upper>
Tensor& triu_tril_cuda_template(Tensor& result, const Tensor& self, int64_t k, const char* name) {
int64_t N = self.numel();
dim3 dim_block = cuda::getApplyBlock();
// Ceil-divide so every element gets a thread.
dim3 dim_grid((N + dim_block.x - 1) / dim_block.x);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, self.scalar_type(), name, [&]{
if (cuda::detail::canUse32BitIndexMath(result) && cuda::detail::canUse32BitIndexMath(self)) {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int32_t>(self);
triu_tril_kernel<scalar_t, int32_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
} else {
auto result_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(result);
auto self_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(self);
triu_tril_kernel<scalar_t, int64_t, upper>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
result_info, self_info, k, N);
}
});
// Surface any launch-configuration error from the kernel launches above.
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
// In-place tril: overwrites `self` with its lower-triangular part.
Tensor& tril_cuda_(Tensor &self, int64_t k) {
return tril_cuda_out(self, self, k);
}
// Writes the lower-triangular part of `self` (at or below diagonal `k`)
// into `result`, resizing `result` to match `self` if necessary.
Tensor& tril_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  // Make the output buffer match the input's shape before writing into it.
  if (self.sizes() != result.sizes()) {
    result.resize_as_(self);
  }
  // An empty tensor needs no kernel launch.
  if (self.numel() > 0) {
    triu_tril_cuda_template<false>(result, self, k, "tril");
  }
  return result;
}
// In-place triu: overwrites `self` with its upper-triangular part.
Tensor& triu_cuda_(Tensor &self, int64_t k) {
return triu_cuda_out(self, self, k);
}
// Writes the upper-triangular part of `self` (at or above diagonal `k`)
// into `result`, resizing `result` to match `self` if necessary.
Tensor& triu_cuda_out(Tensor &result, const Tensor& self, int64_t k) {
  // Make the output buffer match the input's shape before writing into it.
  if (self.sizes() != result.sizes()) {
    result.resize_as_(self);
  }
  // An empty tensor needs no kernel launch.
  if (self.numel() > 0) {
    triu_tril_cuda_template<true>(result, self, k, "triu");
  }
  return result;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the triangular system op(A) X = b in-place via MAGMA, where op(A)
// is A or A^T.  `b` holds the right-hand sides on entry and the solution on
// exit; both tensors must already be in (batched) column-major layout.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
// Fixed: this message previously said "cholesky_solve" (copy-paste error).
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
if (b.dim() == 2) {
// Single matrix: use the non-batched driver directly.
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up per-matrix pointer arrays for the batched MAGMA call.
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / 65535)
// and these cover floor(batch_size / 65535) * 65535 matrix solves
int64_t mini_batches = batch_size / 65535, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * 65535; mini_idx += 65535) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, 65535, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / 65535) * 65535
// which concisely is equal to batch_size % 65535
if (batch_size % 65535 != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % 65535, magma_queue);
}
}
#endif
}
// Dispatch wrapper for triangular_solve on CUDA: clones both inputs into
// batched column-major layout (as MAGMA requires), solves in-place, and
// returns (solution, cloned A).
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched QR via MAGMA geqrf/orgqr.  Q and R are batched column-major
// working copies prepared by _qr_helper_cuda; on exit the first n_columns
// columns of each Q matrix hold the orthogonal factor and R holds the raw
// factorization (the caller applies triu_()).  Per-batch info codes are
// written to `infos`, bailing out on the first nonzero one.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data<scalar_t>();
auto r_data = R.data<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data<scalar_t>();
scalar_t* work_data = work.data<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Dispatch wrapper for qr on CUDA.  Prepares a widened, column-major working
// copy of `self` for Q, a column-major clone for R, runs apply_qr, and
// returns the narrowed (Q, R) pair.  `some` selects the reduced vs full
// decomposition via _compute_geometry_for_Q.
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
// NOTE(review): expanding a tensor to its own shape is a no-op; the intent
// here may have been to broadcast the eye matrix to the batched q_sizes --
// confirm against the CPU implementation before relying on batched empty
// inputs.
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
// Seed the left block of Q with the input; apply_qr overwrites it.
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow_copy(-1, 0, n_columns_q),
r_working_copy.narrow_copy(-2, 0, n_columns_q).triu_());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched symmetric eigendecomposition via MAGMA.  On exit `eigvals` holds
// the eigenvalues and, when `eigenvectors` is true, `self` is overwritten
// with the eigenvectors.  `upper` selects which triangle of `self` is read.
// Per-batch info codes go to `infos`, bailing out on the first failure.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto eigvals_data = eigvals.data<scalar_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
// n x n scratch matrix required by the MAGMA driver.
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magmaSymeig<scalar_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(wkopt, "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
scalar_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Dispatch wrapper for symeig on CUDA.  Eigenvalues are computed into a CPU
// tensor (the hybrid MAGMA routine writes them host-side) and moved back to
// the input's device at the end; eigenvectors overwrite a column-major clone
// of `self` (zeroed out when eigenvectors == false).
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
// Eigenvalue tensor shape: batch_dims x n.
self_sizes.pop_back();
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options())
: at::empty(self_sizes, self.options().device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (!eigenvectors) {
// No eigenvectors requested: return a zeroed tensor in their place.
self_working_copy.zero_();
}
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Batched SVD via MAGMA.  `jobchar` selects which singular vectors are
// computed: 'A' (all), 'S' (reduced), or 'N' (none).  All tensors must be in
// the layout prepared by _svd_helper_cuda.  The workspace size is queried
// once (lwork == -1) and reused for every matrix in the batch; per-batch
// info codes go to `infos`, bailing out on the first failure.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data<scalar_t>();
auto U_data = U.data<scalar_t>();
auto S_data = S.data<scalar_t>();
auto VT_data = VT.data<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto k = std::min(m, n);
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * k);
magmaSvd<scalar_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, iwork, &info);
lwork = magma_int_cast(wkopt, "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
scalar_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Dispatch wrapper for svd on CUDA.  Copies the input into pinned, column-
// major host memory (required by the hybrid MAGMA driver), runs apply_svd,
// then moves U/S/VT back to the input's device.  When compute_uv is false,
// U and VT are zeroed; for empty inputs, suitably-shaped zero/empty tensors
// are returned without calling MAGMA.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, this requirements are being taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "svd_cuda", [&]{
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
// Move the results back to the input's device, keeping strides.
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
// Reduced SVD: keep only the first k columns of VT's last dim.
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, self.options());
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
f17d49e8e2f55b06babd37eef0591b9edffb9073.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Parallel Sudoku solver using backtrack algorithm
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
// declare C include here
extern "C" {
#include "methods.h"
}
int solve(int *board, int n, int expand, int tile_size, FILE *fp);
int cpu_initial_search(int n, int *boards, int space_size);
__global__ void backtrack_kernel(int n, int *ans, int *ans_found);
// Reads a Sudoku board from a text file.
// File format: the first integer is N (the board side length), followed by
// N*N cell values (0 = empty cell).  On success, *N receives the side length
// and a calloc'd array of N*N ints is returned; the caller owns and frees it.
// Exits with a message if the file cannot be opened or is malformed.
int* read_board(const char* file_name, int* N) {
    FILE* fp = fopen(file_name, "r");
    if (fp == NULL) {  // previously unchecked: a missing file crashed in fscanf
        fprintf(stderr, "Cannot open input file %s\n", file_name);
        exit(-1);
    }
    if (fscanf(fp, "%d", N) != 1) {
        fprintf(stderr, "Malformed input file %s\n", file_name);
        fclose(fp);
        exit(-1);
    }
    int total = *N * *N, i;
    int* board = (int*) calloc(total, sizeof(int));
    for (i = 0; i < total; ++ i) {
        if (fscanf(fp, "%d", board + i) != 1) {
            fprintf(stderr, "Malformed input file %s\n", file_name);
            fclose(fp);
            exit(-1);
        }
    }
    fclose(fp);  // the stream was previously leaked
    return board;
}
// Entry point: ./sudoku_bt <input> <output> <expand> <block_size>
//   <expand>     - number of boards produced by the CPU breadth-first phase
//                  (one GPU task per board)
//   <block_size> - GPU block size used by the backtracking kernel
int main(int argc, char* argv[]) {
    if (argc < 5) {
        printf("Usage:\n");
        printf("./sudoku_bt <input> <output> <expand> <block_size>\n");
        exit(-1);
    }
    int N;
    int* board = read_board(argv[1], &N);
    // The board is N x N with N = n * n box structure.
    int n = sqrt((double)N);
    printf("Start to solve a %d x %d Sudoku.\n", N, N);
    FILE* fp = fopen(argv[2], "w");
    if (fp == NULL) {  // previously unchecked: a bad path crashed inside solve
        printf("Cannot open output file %s.\n", argv[2]);
        free(board);
        exit(-1);
    }
    int expand = (int) atoi(argv[3]);
    int tile_size = (int) atoi(argv[4]);
    int ans = solve(board, n, expand, tile_size, fp);
    fclose(fp);  // the stream was previously leaked
    if (ans == 1) printf("An answer is found and saved to %s.\n", argv[2]);
    else printf("No answer is found.\n");
    free(board);
    return 0;
}
// Checks for a pending HIP error.  On failure, prints the error string with
// the originating source line, frees the `arg_count` device arrays passed as
// variadic int* arguments (NULL entries are skipped), and terminates.
void error_check(int line_number, int arg_count, ...) {
    hipError_t status = hipGetLastError();
    if (status == hipSuccess) return;
    fprintf(stderr, "[CUDA ERROR] %s at line %d\n", hipGetErrorString(status), line_number);
    va_list args;
    va_start(args, arg_count);
    for (int idx = 0; idx < arg_count; ++idx) {
        int *dev_ptr = va_arg(args, int*);
        if (dev_ptr != 0) hipFree(dev_ptr);
    }
    va_end(args);
    exit(-1);
}
// Solver driver: expands the starting board into `expand` partial boards on
// the CPU, then (if the CPU phase did not already finish) hands one board
// per GPU thread to the backtracking kernel.  Writes the solved board to fp
// and returns 1 on success, 0 (or the CPU phase's result) otherwise.
// `tile_size` is the GPU block size, so `expand` should be a multiple of it.
int solve(int* board, int n, int expand, int tile_size, FILE* fp) {
int N = n * n;
int *boards = (int*) calloc(N * N * expand, sizeof(int));
memcpy(boards, board, N * N * sizeof(int));
//print_board(boards, N, stdout);
int answer_found = cpu_initial_search(n, boards, expand);
/*int i;
for (i = 0; i < expand; ++ i)
print_board(boards + i * N * N, N, stdout);*/
if (answer_found == 1) {
// The CPU expansion already solved the puzzle.
print_board(boards, N, fp);
} else if (answer_found == 0) {
printf("Initial search finished.\n");
// malloc arrays for cuda
int *boards_d = 0, *ans_found_d = 0;
// errck frees both device arrays and exits on any pending device error.
#define errck error_check(__LINE__, 2, boards_d, ans_found_d)
hipMalloc((void**) &boards_d, N * N * expand * sizeof(int)); errck;
hipMemcpy(boards_d, boards, N * N * expand * sizeof(int), hipMemcpyHostToDevice); errck;
hipMalloc((void**) &ans_found_d, sizeof(int)); errck;
// All-0xff bytes form the int -1, the "no answer yet" sentinel.
hipMemset(ans_found_d, -1, sizeof(int)); errck;
dim3 grid_dim(expand / tile_size);
//dim3 block_dim(N, N);
dim3 block_dim(tile_size);
// Dynamic shared memory: per thread, one board plus one stack (2 * N * N
// ints each) -- matches the layout in backtrack_kernel.
hipLaunchKernelGGL(( backtrack_kernel), dim3(grid_dim), dim3(block_dim), 2 * tile_size * N * N * sizeof(int), 0, n, boards_d, ans_found_d); errck;
hipMemcpy(&answer_found, ans_found_d, sizeof(int), hipMemcpyDeviceToHost); errck;
printf("GPU search finished. %d\n", answer_found);
if (answer_found >= 0) {
// answer_found holds the winning task id; fetch its solved board.
hipMemcpy(boards, boards_d + answer_found * N * N, N * N * sizeof(int), hipMemcpyDeviceToHost); errck;
answer_found = 1;
print_board(boards, N, fp);
}
hipFree(boards_d);
hipFree(ans_found_d);
}
free(boards);
return answer_found;
}
// Initial search on CPU with (modified) breadth-first search over a circular
// buffer of board slots.  `boards` must hold expand_cnt chunks of N*N ints;
// slot 0 contains the starting board and a slot whose first cell is -1 is
// treated as empty.  Expansion stops when the buffer is full (leaving one
// partial board per slot for the GPU phase) or when all slots empty out.
// Returns 1 if a complete answer was found during expansion (copied into
// boards[0..N*N)), otherwise 0.
int cpu_initial_search(int n, int *boards, int expand_cnt) {
int N = n * n;
int chunk_size = N * N;
// head/tail walk the circular buffer; head consumes, tail finds free slots.
int head = -1, tail = 1;
int empty_slot = expand_cnt - 1;
int i;
// initialize every slot as empty
for (i = 1; i < expand_cnt; ++ i)
boards[i * N * N] = -1;
// expand search tree
while (empty_slot > 0 && empty_slot < expand_cnt) {
// advance head pointer
head ++;
if (head == expand_cnt) head = 0;
int *board_now = boards + head * chunk_size;
if (board_now[0] == -1) continue;
// find the first empty location
int nowp;
for (nowp = 0; nowp < N * N && board_now[nowp] != 0; ++ nowp);
if (nowp == N * N) {
// answer found
memcpy(boards, board_now, chunk_size * sizeof(int));
return 1;
}
// reserve a value for in-place modification
// which modifies the board at its original place to save one copy operation
// this can also be used as flag of answer found or dead node
int reserve = 0;
for (i = 1; i <= N; ++ i)
if (check_partial_board(board_now, n, nowp, i)) {
// fill in the reserved value
if (reserve == 0)
// Encode (value, position) as value * N*N + position.
reserve = i * N * N + nowp;
else {
// find an empty slot
if (empty_slot == 0) return 0;
int *board_new = boards + tail * chunk_size;
while (board_new[0] != -1) {
tail ++;
if (tail == expand_cnt) tail = 0;
board_new = boards + tail * chunk_size;
}
empty_slot --;
// copy and modify the board
memcpy(board_new, board_now, chunk_size * sizeof(int));
board_new[nowp] = i;
}
}
if (reserve == 0) {
// dead node
board_now[0] = -1; // mark board_now == -1 to indicate this slot is empty
empty_slot ++;
} else {
// in-place modification
board_now[reserve % (N * N)] = reserve / (N * N);
}
}
return 0;
}
// Device-side legality check: returns 1 if placing `num` at linear position
// `p` keeps the partial board valid, i.e. `num` does not already appear in
// p's row, column, or n x n box.  ROW/COL are macros from methods.h --
// presumably row/column of a linear index; confirm there.
__device__ int check_partial_board_d(int* board, int n, int p, int num) {
int j;
int N = n * n;
// Locate the top-left cell of the n x n box containing p.
int box_row = p / (n * N);
int box_col = (p % N) / n;
int box_top_left = box_row * n * N + box_col * n;
int now_row = ROW(p, N);
// check row
for (j = now_row * N; j < (now_row + 1) * N; ++ j)
if (board[j] == num)
return 0;
// check col
for (j = COL(p, N); j < N * N; j += N)
if (board[j] == num)
return 0;
// check box
for (j = 0; j < N; ++ j)
if (board[box_top_left + (j / n) * N + (j % n)] == num)
return 0;
return 1;
}
// GPU backtracking search.  Each thread takes the board ans_all[task_id] and
// runs an iterative depth-first search, keeping its board and an explicit
// stack of filled positions in dynamic shared memory (the launch provides
// 2 * blockDim.x * N * N ints per block: boards first, then stacks).  The
// first thread to complete a board claims *ans_found via atomicCAS (the host
// initializes it to -1) and copies its solution back; other threads poll
// *ans_found at the top of the loop and stop once it is set.
__global__ void backtrack_kernel(int n, int *ans_all, int *ans_found) {
int N = n * n, i;
int task_id = blockIdx.x * blockDim.x + threadIdx.x;
// use shared memory
extern __shared__ int shared_board[];
int offset = threadIdx.x * N * N;
// Stage this thread's board from global into shared memory.
for (i = 0; i < N * N; ++ i)
shared_board[i + offset] = ans_all[task_id * N * N + i];
int *board = shared_board + offset;
// Per-thread stack region begins after all boards in the allocation.
int *stack = shared_board + blockDim.x * N * N + offset;
int last_op = 0; // 0 - push stack, 1 - pop stack
int top = 0, nowp = 0;
while (*ans_found == -1) {
int num_to_try;
if (last_op == 0) {
// push stack
for (;nowp < N * N && board[nowp] != 0; ++ nowp);
// first check if the board is filled
if (nowp == N * N) {
// answer found
int old = atomicCAS(ans_found, -1, task_id);
if (old == -1) {
// copy back to global memory
for (i = 0; i < N * N; ++ i)
ans_all[task_id * N * N + i] = board[i];
}
break;
}
// else initialize the number to try
num_to_try = 1;
} else {
// read stack top and restore
int stack_num = stack[top];
nowp = stack_num % (N * N);
num_to_try = board[nowp] + 1;
}
// find next valid number
for (;num_to_try <= N; ++ num_to_try)
if (check_partial_board_d(board, n, nowp, num_to_try)) {
// push stack
stack[top ++] = nowp;
// move to next location
board[nowp] = num_to_try;
last_op = 0;
break;
}
if (num_to_try > N) {
// pop stack
if (top == 0) break; // stack exhausted: this subtree has no solution
board[nowp] = 0;
top --;
last_op = 1;
}
}
}
| f17d49e8e2f55b06babd37eef0591b9edffb9073.cu | /*
* Parallel Sudoku solver using backtrack algorithm
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
// declare C include here
extern "C" {
#include "methods.h"
}
int solve(int *board, int n, int expand, int tile_size, FILE *fp);
int cpu_initial_search(int n, int *boards, int space_size);
__global__ void backtrack_kernel(int n, int *ans, int *ans_found);
// Reads a Sudoku board from a text file.
// File format: the first integer is N (the board side length), followed by
// N*N cell values (0 = empty cell).  On success, *N receives the side length
// and a calloc'd array of N*N ints is returned; the caller owns and frees it.
// Exits with a message if the file cannot be opened or is malformed.
int* read_board(const char* file_name, int* N) {
    FILE* fp = fopen(file_name, "r");
    if (fp == NULL) {  // previously unchecked: a missing file crashed in fscanf
        fprintf(stderr, "Cannot open input file %s\n", file_name);
        exit(-1);
    }
    if (fscanf(fp, "%d", N) != 1) {
        fprintf(stderr, "Malformed input file %s\n", file_name);
        fclose(fp);
        exit(-1);
    }
    int total = *N * *N, i;
    int* board = (int*) calloc(total, sizeof(int));
    for (i = 0; i < total; ++ i) {
        if (fscanf(fp, "%d", board + i) != 1) {
            fprintf(stderr, "Malformed input file %s\n", file_name);
            fclose(fp);
            exit(-1);
        }
    }
    fclose(fp);  // the stream was previously leaked
    return board;
}
// Entry point: ./sudoku_bt <input> <output> <expand> <block_size>
//   <expand>     - number of boards produced by the CPU breadth-first phase
//                  (one GPU task per board)
//   <block_size> - CUDA block size used by the backtracking kernel
int main(int argc, char* argv[]) {
    if (argc < 5) {
        printf("Usage:\n");
        printf("./sudoku_bt <input> <output> <expand> <block_size>\n");
        exit(-1);
    }
    int N;
    int* board = read_board(argv[1], &N);
    // The board is N x N with N = n * n box structure.
    int n = sqrt((double)N);
    printf("Start to solve a %d x %d Sudoku.\n", N, N);
    FILE* fp = fopen(argv[2], "w");
    if (fp == NULL) {  // previously unchecked: a bad path crashed inside solve
        printf("Cannot open output file %s.\n", argv[2]);
        free(board);
        exit(-1);
    }
    int expand = (int) atoi(argv[3]);
    int tile_size = (int) atoi(argv[4]);
    int ans = solve(board, n, expand, tile_size, fp);
    fclose(fp);  // the stream was previously leaked
    if (ans == 1) printf("An answer is found and saved to %s.\n", argv[2]);
    else printf("No answer is found.\n");
    free(board);
    return 0;
}
// Checks for a pending CUDA error.  On failure, prints the error string with
// the originating source line, frees the `arg_count` device arrays passed as
// variadic int* arguments (NULL entries are skipped), and terminates.
void error_check(int line_number, int arg_count, ...) {
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) return;
    fprintf(stderr, "[CUDA ERROR] %s at line %d\n", cudaGetErrorString(status), line_number);
    va_list args;
    va_start(args, arg_count);
    for (int idx = 0; idx < arg_count; ++idx) {
        int *dev_ptr = va_arg(args, int*);
        if (dev_ptr != 0) cudaFree(dev_ptr);
    }
    va_end(args);
    exit(-1);
}
// Solver driver: expands the starting board into `expand` partial boards on
// the CPU, then (if the CPU phase did not already finish) hands one board
// per GPU thread to the backtracking kernel.  Writes the solved board to fp
// and returns 1 on success, 0 (or the CPU phase's result) otherwise.
// `tile_size` is the CUDA block size, so `expand` should be a multiple of it.
int solve(int* board, int n, int expand, int tile_size, FILE* fp) {
int N = n * n;
int *boards = (int*) calloc(N * N * expand, sizeof(int));
memcpy(boards, board, N * N * sizeof(int));
//print_board(boards, N, stdout);
int answer_found = cpu_initial_search(n, boards, expand);
/*int i;
for (i = 0; i < expand; ++ i)
print_board(boards + i * N * N, N, stdout);*/
if (answer_found == 1) {
// The CPU expansion already solved the puzzle.
print_board(boards, N, fp);
} else if (answer_found == 0) {
printf("Initial search finished.\n");
// malloc arrays for cuda
int *boards_d = 0, *ans_found_d = 0;
// errck frees both device arrays and exits on any pending CUDA error.
#define errck error_check(__LINE__, 2, boards_d, ans_found_d)
cudaMalloc((void**) &boards_d, N * N * expand * sizeof(int)); errck;
cudaMemcpy(boards_d, boards, N * N * expand * sizeof(int), cudaMemcpyHostToDevice); errck;
cudaMalloc((void**) &ans_found_d, sizeof(int)); errck;
// All-0xff bytes form the int -1, the "no answer yet" sentinel.
cudaMemset(ans_found_d, -1, sizeof(int)); errck;
dim3 grid_dim(expand / tile_size);
//dim3 block_dim(N, N);
dim3 block_dim(tile_size);
// Dynamic shared memory: per thread, one board plus one stack (2 * N * N
// ints each) -- matches the layout in backtrack_kernel.
backtrack_kernel<<<grid_dim, block_dim, 2 * tile_size * N * N * sizeof(int)>>>(n, boards_d, ans_found_d); errck;
cudaMemcpy(&answer_found, ans_found_d, sizeof(int), cudaMemcpyDeviceToHost); errck;
printf("GPU search finished. %d\n", answer_found);
if (answer_found >= 0) {
// answer_found holds the winning task id; fetch its solved board.
cudaMemcpy(boards, boards_d + answer_found * N * N, N * N * sizeof(int), cudaMemcpyDeviceToHost); errck;
answer_found = 1;
print_board(boards, N, fp);
}
cudaFree(boards_d);
cudaFree(ans_found_d);
}
free(boards);
return answer_found;
}
// Initial search on CPU with (modified) breadth-first search over a circular
// buffer of board slots.  `boards` must hold expand_cnt chunks of N*N ints;
// slot 0 contains the starting board and a slot whose first cell is -1 is
// treated as empty.  Expansion stops when the buffer is full (leaving one
// partial board per slot for the GPU phase) or when all slots empty out.
// Returns 1 if a complete answer was found during expansion (copied into
// boards[0..N*N)), otherwise 0.
int cpu_initial_search(int n, int *boards, int expand_cnt) {
int N = n * n;
int chunk_size = N * N;
// head/tail walk the circular buffer; head consumes, tail finds free slots.
int head = -1, tail = 1;
int empty_slot = expand_cnt - 1;
int i;
// initialize every slot as empty
for (i = 1; i < expand_cnt; ++ i)
boards[i * N * N] = -1;
// expand search tree
while (empty_slot > 0 && empty_slot < expand_cnt) {
// advance head pointer
head ++;
if (head == expand_cnt) head = 0;
int *board_now = boards + head * chunk_size;
if (board_now[0] == -1) continue;
// find the first empty location
int nowp;
for (nowp = 0; nowp < N * N && board_now[nowp] != 0; ++ nowp);
if (nowp == N * N) {
// answer found
memcpy(boards, board_now, chunk_size * sizeof(int));
return 1;
}
// reserve a value for in-place modification
// which modifies the board at its original place to save one copy operation
// this can also be used as flag of answer found or dead node
int reserve = 0;
for (i = 1; i <= N; ++ i)
if (check_partial_board(board_now, n, nowp, i)) {
// fill in the reserved value
if (reserve == 0)
// Encode (value, position) as value * N*N + position.
reserve = i * N * N + nowp;
else {
// find an empty slot
if (empty_slot == 0) return 0;
int *board_new = boards + tail * chunk_size;
while (board_new[0] != -1) {
tail ++;
if (tail == expand_cnt) tail = 0;
board_new = boards + tail * chunk_size;
}
empty_slot --;
// copy and modify the board
memcpy(board_new, board_now, chunk_size * sizeof(int));
board_new[nowp] = i;
}
}
if (reserve == 0) {
// dead node
board_now[0] = -1; // mark board_now == -1 to indicate this slot is empty
empty_slot ++;
} else {
// in-place modification
board_now[reserve % (N * N)] = reserve / (N * N);
}
}
return 0;
}
// Device-side legality check: returns 1 if placing `num` at linear position
// `p` keeps the partial board valid, i.e. `num` does not already appear in
// p's row, column, or n x n box.  ROW/COL are macros from methods.h --
// presumably row/column of a linear index; confirm there.
__device__ int check_partial_board_d(int* board, int n, int p, int num) {
int j;
int N = n * n;
// Locate the top-left cell of the n x n box containing p.
int box_row = p / (n * N);
int box_col = (p % N) / n;
int box_top_left = box_row * n * N + box_col * n;
int now_row = ROW(p, N);
// check row
for (j = now_row * N; j < (now_row + 1) * N; ++ j)
if (board[j] == num)
return 0;
// check col
for (j = COL(p, N); j < N * N; j += N)
if (board[j] == num)
return 0;
// check box
for (j = 0; j < N; ++ j)
if (board[box_top_left + (j / n) * N + (j % n)] == num)
return 0;
return 1;
}
// GPU backtracking search.  Each thread takes the board ans_all[task_id] and
// runs an iterative depth-first search, keeping its board and an explicit
// stack of filled positions in dynamic shared memory (the launch provides
// 2 * blockDim.x * N * N ints per block: boards first, then stacks).  The
// first thread to complete a board claims *ans_found via atomicCAS (the host
// initializes it to -1) and copies its solution back; other threads poll
// *ans_found at the top of the loop and stop once it is set.
__global__ void backtrack_kernel(int n, int *ans_all, int *ans_found) {
int N = n * n, i;
int task_id = blockIdx.x * blockDim.x + threadIdx.x;
// use shared memory
extern __shared__ int shared_board[];
int offset = threadIdx.x * N * N;
// Stage this thread's board from global into shared memory.
for (i = 0; i < N * N; ++ i)
shared_board[i + offset] = ans_all[task_id * N * N + i];
int *board = shared_board + offset;
// Per-thread stack region begins after all boards in the allocation.
int *stack = shared_board + blockDim.x * N * N + offset;
int last_op = 0; // 0 - push stack, 1 - pop stack
int top = 0, nowp = 0;
while (*ans_found == -1) {
int num_to_try;
if (last_op == 0) {
// push stack
for (;nowp < N * N && board[nowp] != 0; ++ nowp);
// first check if the board is filled
if (nowp == N * N) {
// answer found
int old = atomicCAS(ans_found, -1, task_id);
if (old == -1) {
// copy back to global memory
for (i = 0; i < N * N; ++ i)
ans_all[task_id * N * N + i] = board[i];
}
break;
}
// else initialize the number to try
num_to_try = 1;
} else {
// read stack top and restore
int stack_num = stack[top];
nowp = stack_num % (N * N);
num_to_try = board[nowp] + 1;
}
// find next valid number
for (;num_to_try <= N; ++ num_to_try)
if (check_partial_board_d(board, n, nowp, num_to_try)) {
// push stack
stack[top ++] = nowp;
// move to next location
board[nowp] = num_to_try;
last_op = 0;
break;
}
if (num_to_try > N) {
// pop stack
if (top == 0) break; // stack exhausted: this subtree has no solution
board[nowp] = 0;
top --;
last_op = 1;
}
}
}
|
95955fc9bed8d7bdb26d65c6bb4b08c2306924d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Computes res[i] = a[i] + b[i], clamping any sum greater than k to p,
// using statically-sized shared-memory staging buffers.
// NOTE(review): `size` is not defined in this file (presumably a macro or
// constant from includes.h); the kernel requires blockDim.x <= size.
// NOTE(review): every thread stores the same k/p into s_k/s_p without
// synchronization -- a benign race, left as in the original.
__global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
// Fixed: shared buffers were indexed by the *global* id `tid`, which runs
// past `size` for every block after the first (if size is the block size).
// Shared memory is per-block, so index it by the in-block thread id.
int lid = threadIdx.x;
__shared__ int s_a[size], s_b[size], s_res[size];
__shared__ int s_k, s_p;
s_k = k;
s_p = p;
s_a[lid] = a[tid];
s_b[lid] = b[tid];
s_res[lid] = s_a[lid] + s_b[lid];
if (s_res[lid] > s_k) {
s_res[lid] = s_p;
}
res[tid] = s_res[lid];
} | 95955fc9bed8d7bdb26d65c6bb4b08c2306924d9.cu | #include "includes.h"
// Computes res[i] = a[i] + b[i], clamping any sum greater than k to p,
// using statically-sized shared-memory staging buffers.
// NOTE(review): `size` is not defined in this file (presumably a macro or
// constant from includes.h); the kernel requires blockDim.x <= size.
// NOTE(review): every thread stores the same k/p into s_k/s_p without
// synchronization -- a benign race, left as in the original.
__global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid >= n) {
return;
}
// Fixed: shared buffers were indexed by the *global* id `tid`, which runs
// past `size` for every block after the first (if size is the block size).
// Shared memory is per-block, so index it by the in-block thread id.
int lid = threadIdx.x;
__shared__ int s_a[size], s_b[size], s_res[size];
__shared__ int s_k, s_p;
s_k = k;
s_p = p;
s_a[lid] = a[tid];
s_b[lid] = b[tid];
s_res[lid] = s_a[lid] + s_b[lid];
if (s_res[lid] > s_k) {
s_res[lid] = s_p;
}
res[tid] = s_res[lid];
} |
13f60c95ffc69427d9f9728723657e79149ff3d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/naivespmv.h"
#include <stdint.h>
#include <stdexcept>
// Single-element load helper: centralizes the choice between a plain global
// load and the read-only-cache __ldg variant (left commented out).
template<typename T>
__device__ T myLoad(const T* d)
{
return *d;
//return __ldg(d);
}
// CSR sparse matrix-vector product: outVec = matrix * inVec.
// One thread per output row; `offsets` holds out_size + 1 CSR row offsets,
// `inIndex`/`matrix` hold the column index and value of each nonzero.
template<typename ValueType, typename IndexType, typename OffsetType>
__global__ void spmv(uint32_t num_non_zeroes, uint32_t out_size, uint32_t num_other,
	const ValueType* __restrict matrix, const IndexType* __restrict inIndex, const OffsetType*__restrict offsets,
	const ValueType* __restrict inVec, ValueType* __restrict outVec)
{
	const uint32_t row = threadIdx.x + blockIdx.x * blockDim.x;
	// Guard the grid tail: the grid is rounded up to whole blocks.
	if (row < out_size)
	{
		ValueType acc = 0;
		// Hoist the row-end offset; it is loop-invariant.
		const OffsetType rowEnd = myLoad(offsets + row + 1);
		for (OffsetType nz = myLoad(offsets + row); nz < rowEnd; ++nz)
			acc += myLoad(inVec + myLoad(inIndex + nz)) * myLoad(matrix + nz);
		outVec[row] = acc;
	}
}
//double atomic add hack for devices that do not support it in hardware
// Generic atomic add; forwards to the hardware atomicAdd.  Specialized below
// for double on architectures without native double atomics.
template<typename T>
__device__ inline T tempAtomicAdd(T* address, T val)
{
	return atomicAdd(address, val);
}
#if __CUDA_ARCH__ < 600
//http://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
// Software double atomicAdd via 64-bit compare-and-swap, for pre-SM60
// devices (pattern from the CUDA C programming guide).  The loop retries
// until no other thread changed *address between the read and the CAS.
// Integer (bit-pattern) comparison avoids an infinite loop on NaN, since
// NaN != NaN under floating-point comparison.
template<>
__device__ inline double tempAtomicAdd<double>(double* address, double val)
{
	unsigned long long int* address_as_ull = (unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
		// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
	} while (assumed != old);
	return __longlong_as_double(old);
}
#endif
// Naive transposed CSR SpMV realized as a scatter: thread i takes CSR row i,
// multiplies inVec[i] by each stored value of that row and atomically adds
// the product into outVec[column].
// Because results are *accumulated* with atomicAdd, outVec must be zeroed
// before launch.
// NOTE(review): the iteration bound is num_other, so the launch must pass
// the CSR *row* count there (inVec and offsets are indexed by i) -- verify
// against the host wrapper.
template<typename ValueType, typename IndexType, typename OffsetType>
__global__ void spmvt(uint32_t num_non_zeroes, uint32_t out_size, uint32_t num_other,
	const ValueType* __restrict matrix, const IndexType* __restrict inIndex, const OffsetType*__restrict offsets,
	const ValueType* __restrict inVec, ValueType* __restrict outVec)
{
	uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= num_other)
		return;
	ValueType inV = myLoad(inVec + i);
	for (OffsetType j = myLoad(offsets + i); j < myLoad(offsets + i + 1); ++j)
	{
		IndexType ind = myLoad(inIndex + j);
		ValueType res = inV * myLoad(matrix + j);
		tempAtomicAdd(outVec + ind, res);
	}
}
// Computes res = m * v (or res = m^T * v when transpose is set) with the
// naive one-thread-per-row kernels above.  res is (re)allocated to the
// output length if needed; its previous contents are ignored.
// Throws std::runtime_error when v's length does not match the matrix
// dimension being multiplied.
template<typename T>
void naive_spmv(dDenseVector<T>& res, const dCSR<T>& m, const dDenseVector<T>& v, bool transpose)
{
	if (transpose && v.size != m.rows)
		throw std::runtime_error("SPMV dimensions mismatch");
	if (!transpose && v.size != m.cols)
		throw std::runtime_error("SPMV dimensions mismatch");
	size_t outsize = transpose ? m.cols : m.rows;
	if (res.size < outsize)
		res.alloc(outsize);
	res.size = outsize;
	uint32_t blockSize = 256;
	if (transpose)
	{
		// spmvt scatters partial products with atomicAdd, so the output must
		// start at zero -- otherwise stale data in res would be accumulated.
		hipMemset(res.data, 0, outsize * sizeof(T));
		// One thread per CSR *row*: spmvt's iteration bound is its num_other
		// argument, so pass m.rows there and size the grid over m.rows.
		// (The previous launch iterated over m.cols, which reads past
		// row_offsets/inVec for non-square matrices.)
		hipLaunchKernelGGL(( spmvt<T, unsigned int, unsigned int>) , dim3((m.rows + blockSize - 1) / blockSize), dim3(blockSize) , 0, 0,
			m.nnz, m.cols, m.rows,
			m.data, m.col_ids, m.row_offsets,
			v.data, res.data);
	}
	else
	{
		hipLaunchKernelGGL(( spmv<T, unsigned int, unsigned int>), dim3((m.rows + blockSize - 1) / blockSize), dim3(blockSize) , 0, 0,
			m.nnz, m.rows, m.cols,
			m.data, m.col_ids, m.row_offsets,
			v.data, res.data);
	}
}
template void naive_spmv<float>(dDenseVector<float>& res, const dCSR<float>& m, const dDenseVector<float>& v, bool transpose);
template void naive_spmv<double>(dDenseVector<double>& res, const dCSR<double>& m, const dDenseVector<double>& v, bool transpose); | 13f60c95ffc69427d9f9728723657e79149ff3d6.cu | #include "../include/naivespmv.h"
#include <stdint.h>
#include <stdexcept>
// Global-memory load helper; __ldg (read-only cache) alternative kept
// commented out for benchmarking.
template<typename T>
__device__ T myLoad(const T* d)
{
	return *d;
	//return __ldg(d);
}
// Naive CSR SpMV: outVec = matrix * inVec, one thread per output row.
// Thread i accumulates the dot product of row i (nonzeros
// offsets[i]..offsets[i+1]) with inVec; out_size is the row count.
template<typename ValueType, typename IndexType, typename OffsetType>
__global__ void spmv(uint32_t num_non_zeroes, uint32_t out_size, uint32_t num_other,
	const ValueType* __restrict matrix, const IndexType* __restrict inIndex, const OffsetType*__restrict offsets,
	const ValueType* __restrict inVec, ValueType* __restrict outVec)
{
	uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= out_size)
		return;
	ValueType sum = 0;
	for (OffsetType j = myLoad(offsets + i); j < myLoad(offsets + i + 1); ++j)
	{
		IndexType ind = myLoad(inIndex + j);
		sum += myLoad(inVec + ind) * myLoad(matrix + j);
	}
	outVec[i] = sum;
}
//double atomic add hack for devices that do not support it in hardware
// Generic atomic add; forwards to hardware atomicAdd (double specialization
// below for pre-SM60 devices).
template<typename T>
__device__ inline T tempAtomicAdd(T* address, T val)
{
	return atomicAdd(address, val);
}
#if __CUDA_ARCH__ < 600
//http://docs.nvidia.com/cuda/cuda-c-programming-guide/#atomic-functions
// Software double atomicAdd via 64-bit CAS for devices without native
// double atomics (< SM 6.0); retries until no concurrent writer interfered.
// Bit-pattern comparison avoids a NaN-induced infinite loop.
template<>
__device__ inline double tempAtomicAdd<double>(double* address, double val)
{
	unsigned long long int* address_as_ull = (unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
		// Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN)
	} while (assumed != old);
	return __longlong_as_double(old);
}
#endif
// Transposed CSR SpMV as a scatter: thread i multiplies inVec[i] into row i's
// stored values and atomically adds the products into outVec[column].
// outVec must be zeroed before launch (results are accumulated).
// NOTE(review): the iteration bound is num_other, which must equal the CSR
// row count (inVec and offsets are indexed by i) -- verify the host call.
template<typename ValueType, typename IndexType, typename OffsetType>
__global__ void spmvt(uint32_t num_non_zeroes, uint32_t out_size, uint32_t num_other,
	const ValueType* __restrict matrix, const IndexType* __restrict inIndex, const OffsetType*__restrict offsets,
	const ValueType* __restrict inVec, ValueType* __restrict outVec)
{
	uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= num_other)
		return;
	ValueType inV = myLoad(inVec + i);
	for (OffsetType j = myLoad(offsets + i); j < myLoad(offsets + i + 1); ++j)
	{
		IndexType ind = myLoad(inIndex + j);
		ValueType res = inV * myLoad(matrix + j);
		tempAtomicAdd(outVec + ind, res);
	}
}
// Computes res = m * v (or res = m^T * v when transpose is set) using the
// naive one-thread-per-row kernels above.  res is (re)allocated to the
// output length if needed; previous contents are ignored.
// Throws std::runtime_error on a dimension mismatch.
template<typename T>
void naive_spmv(dDenseVector<T>& res, const dCSR<T>& m, const dDenseVector<T>& v, bool transpose)
{
	if (transpose && v.size != m.rows)
		throw std::runtime_error("SPMV dimensions mismatch");
	if (!transpose && v.size != m.cols)
		throw std::runtime_error("SPMV dimensions mismatch");
	size_t outsize = transpose ? m.cols : m.rows;
	if (res.size < outsize)
		res.alloc(outsize);
	res.size = outsize;
	uint32_t blockSize = 256;
	if (transpose)
	{
		// spmvt accumulates with atomicAdd, so the output must start at zero.
		cudaMemset(res.data, 0, outsize * sizeof(T));
		// One thread per CSR *row*: spmvt's iteration bound is its num_other
		// argument, so pass m.rows there and size the grid over m.rows.
		// (The previous launch iterated over m.cols, which reads past
		// row_offsets/inVec for non-square matrices.)
		spmvt<T, unsigned int, unsigned int> <<<(m.rows + blockSize - 1) / blockSize, blockSize >>> (
			m.nnz, m.cols, m.rows,
			m.data, m.col_ids, m.row_offsets,
			v.data, res.data);
	}
	else
	{
		spmv<T, unsigned int, unsigned int><<<(m.rows + blockSize - 1) / blockSize, blockSize >>> (
			m.nnz, m.rows, m.cols,
			m.data, m.col_ids, m.row_offsets,
			v.data, res.data);
	}
}
template void naive_spmv<float>(dDenseVector<float>& res, const dCSR<float>& m, const dDenseVector<float>& v, bool transpose);
template void naive_spmv<double>(dDenseVector<double>& res, const dCSR<double>& m, const dDenseVector<double>& v, bool transpose); |
43c83695b08d9648f94677b0d9536be9afe417c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/func/pad_image_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Mirror ("reflect") padding: fills an (H+2*pad) x (W+2*pad) output whose
// borders reflect the input across each edge.  One thread per output element
// via the grid-stride CUDA_KERNEL_LOOP; nc is the fused (batch, channel)
// index, count the total output element count.
template <typename Dtype>
static __global__ void pad_kernel(int count, int height,int width, int pad, const Dtype *in, Dtype *out)
{
	CUDA_KERNEL_LOOP(i, count)
	{
		int nc = i / (width+pad*2) / (height+pad*2) ;
		int h = i / (width+pad*2) % (height+pad*2);
		int w = i % (width+pad*2);
		// map padded row h back into [0, height): reflect at both borders
		if (h < pad)
			h = pad - 1 - h;
		else if (h < pad+height)
			h = h - pad;
		else
			h = height - 1 - (h - (pad+height));
		// same reflection for the column index
		if (w < pad)
			w = pad - 1 - w;
		else if (w < pad+width)
			w = w - pad;
		else
			w = width - 1 - (w - (pad+width));
		out[i] = in[(nc*height+h)*width+w];
	}
}
// Forward pass: launches pad_kernel to mirror-pad bottom[0] into top[0]
// (one thread per top element).  The #if 0 section is a dump-to-file
// debugging aid and is compiled out.
template <typename Dtype>
void PadImageLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
	int height = bottom[0]->height();
	int width = bottom[0]->width();
	hipLaunchKernelGGL(( pad_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(top[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
	top[0]->count(),height,width, pad_, bottom[0]->gpu_data(),top[0]->mutable_gpu_data());
	#if 0
	FILE *fid = fopen("debug","wb");
	fwrite(top[0]->cpu_data(),sizeof(Dtype),top[0]->count(),fid);
	fclose(fid);
	LOG(FATAL)<<height<<", "<<width;
	#endif
}
// Backward pass is an empty body: no gradient is propagated to bottom here.
// NOTE(review): confirm callers do not expect bottom diffs from this layer.
template <typename Dtype>
void PadImageLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
}
// Second-order forward pass: intentionally empty (no-op) for this layer.
template <typename Dtype>
void PadImageLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
}
INSTANTIATE_LAYER_GPU_FUNCS(PadImageLayer);
} // namespace caffe
| 43c83695b08d9648f94677b0d9536be9afe417c5.cu |
#include <vector>
#include "caffe/layers/func/pad_image_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Mirror-pad the input into an (H+2*pad) x (W+2*pad) output; borders are
// reflections of the input.  One thread per output element via
// CUDA_KERNEL_LOOP; nc is the fused (batch, channel) index.
template <typename Dtype>
static __global__ void pad_kernel(int count, int height,int width, int pad, const Dtype *in, Dtype *out)
{
	CUDA_KERNEL_LOOP(i, count)
	{
		int nc = i / (width+pad*2) / (height+pad*2) ;
		int h = i / (width+pad*2) % (height+pad*2);
		int w = i % (width+pad*2);
		// reflect the padded row index back into [0, height)
		if (h < pad)
			h = pad - 1 - h;
		else if (h < pad+height)
			h = h - pad;
		else
			h = height - 1 - (h - (pad+height));
		// reflect the padded column index back into [0, width)
		if (w < pad)
			w = pad - 1 - w;
		else if (w < pad+width)
			w = w - pad;
		else
			w = width - 1 - (w - (pad+width));
		out[i] = in[(nc*height+h)*width+w];
	}
}
// Forward pass: mirror-pads bottom[0] into top[0] with pad_kernel (one
// thread per top element).  The #if 0 section is compiled-out debug code.
template <typename Dtype>
void PadImageLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
	int height = bottom[0]->height();
	int width = bottom[0]->width();
	pad_kernel<Dtype><<<CAFFE_GET_BLOCKS(top[0]->count()), CAFFE_CUDA_NUM_THREADS>>>
	(top[0]->count(),height,width, pad_, bottom[0]->gpu_data(),top[0]->mutable_gpu_data());
	#if 0
	FILE *fid = fopen("debug","wb");
	fwrite(top[0]->cpu_data(),sizeof(Dtype),top[0]->count(),fid);
	fclose(fid);
	LOG(FATAL)<<height<<", "<<width;
	#endif
}
// Backward pass is an empty body: no gradient is propagated to bottom here.
template <typename Dtype>
void PadImageLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<Blob<Dtype>*>& bottom)
{
}
// Second-order forward pass: intentionally empty (no-op) for this layer.
template <typename Dtype>
void PadImageLayer<Dtype>::SecForward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
}
INSTANTIATE_LAYER_GPU_FUNCS(PadImageLayer);
} // namespace caffe
|
b64673207703564a71c679c7eed11034689a6fe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_math.h>
#include <helper_functions.h>
#include <helper_cuda.h> // CUDA device initialization helper functions
__constant__ float cGaussian[64]; //gaussian array in device side
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaTex;
uint *dImage = NULL; //original image
uint *dTemp = NULL; //temp array for iterations
size_t pitch;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter, the previous one preserves crisp edges and
the latter one filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
//Euclidean Distance (x, y, d) = exp((|x - y| / d)^2 / 2)
// Gaussian range weight exp(-|b-a|^2 / (2 d^2)) over the x/y/z (RGB)
// channels; the w channel is ignored.  Uses the fast __expf intrinsic.
__device__ float euclideanLen(float4 a, float4 b, float d)
{
    float dx = b.x - a.x;
    float dy = b.y - a.y;
    float dz = b.z - a.z;
    float distSq = dx * dx + dy * dy + dz * dz;
    return __expf(-distSq / (2.f * d * d));
}
// Pack a float4 pixel into a 32-bit 0xAABBGGRR value; each channel is
// clamped to [0,1] (via fabs + __saturatef) and scaled to a byte.
__device__ uint rgbaFloatToInt(float4 rgba)
{
    rgba.x = __saturatef(fabs(rgba.x));   // clamp to [0.0, 1.0]
    rgba.y = __saturatef(fabs(rgba.y));
    rgba.z = __saturatef(fabs(rgba.z));
    rgba.w = __saturatef(fabs(rgba.w));
    return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f);
}
// Unpack a byte-packed 0xAABBGGRR pixel into a float4 with each channel
// scaled into [0,1].
__device__ float4 rgbaIntToFloat(uint c)
{
    const float inv255 = 0.003921568627f;   // 1/255
    float4 out;
    out.x = (c & 0xff) * inv255;
    out.y = ((c >> 8) & 0xff) * inv255;
    out.z = ((c >> 16) & 0xff) * inv255;
    out.w = ((c >> 24) & 0xff) * inv255;
    return out;
}
//column pass using coalesced global memory reads
// Bilateral filter kernel: one thread per output pixel.  The weight of each
// neighbor in the (2r+1)^2 window is the product of the spatial Gaussian
// (cGaussian, separable row*column) and the range term euclideanLen().
// Pixels are fetched through rgbaTex (per the file header, texture fetches
// clamp to the image edge), so no explicit border handling is needed.
__global__ void
d_bilateral_filter(uint *od, int w, int h,
                   float e_d, int r)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x >= w || y >= h)
    {
        return;
    }
    float sum = 0.0f;
    float factor;
    float4 t = {0.f, 0.f, 0.f, 0.f};
    float4 center = tex2D(rgbaTex, x, y);
    for (int i = -r; i <= r; i++)
    {
        for (int j = -r; j <= r; j++)
        {
            float4 curPix = tex2D(rgbaTex, x + j, y + i);
            factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
                     euclideanLen(curPix, center, e_d);    //range factor
            t += factor * curPix;
            sum += factor;
        }
    }
    // normalize by the total weight so the filter preserves brightness
    od[y * w + x] = rgbaFloatToInt(t/sum);
}
// Allocate the pitched device image buffers and upload the host image.
// Note both allocations share the global 'pitch' variable; they have the
// same width so the returned pitch is expected to match.
extern "C"
void initTexture(int width, int height, uint *hImage)
{
    // copy image data to array
    checkCudaErrors(hipMallocPitch(&dImage, &pitch, sizeof(uint)*width, height));
    checkCudaErrors(hipMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height));
    checkCudaErrors(hipMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width,
                                sizeof(uint)*width, height, hipMemcpyHostToDevice));
}
// Release the device image buffers allocated by initTexture.
extern "C"
void freeTextures()
{
    checkCudaErrors(hipFree(dImage));
    checkCudaErrors(hipFree(dTemp));
    // Null the globals so a repeated call (or a later initTexture) cannot
    // double-free stale handles; hipFree(NULL) is a harmless no-op.
    dImage = NULL;
    dTemp = NULL;
}
/*
Because a 2D gaussian mask is symmetry in row and column,
here only generate a 1D mask, and use the product by row
and column index later.
1D gaussian distribution :
g(x, d) -- C * exp(-x^2/d^2), C is a constant amplifier
parameters:
og - output gaussian array in global memory
delta - the 2nd parameter 'd' in the above function
radius - half of the filter size
(total filter size = 2 * radius + 1)
*/
// Build the 1D spatial-Gaussian mask g(x) = exp(-x^2 / (2*delta^2)) for the
// window [-radius, radius] and upload it to the constant array cGaussian.
extern "C"
void updateGaussian(float delta, int radius)
{
    float fGaussian[64];
    // cGaussian (and this staging buffer) hold 64 floats, so the window
    // 2*radius+1 must not exceed 64.  Clamp to avoid overflowing the stack
    // buffer / constant array for out-of-range radii.
    if (radius < 0) radius = 0;
    if (radius > 31) radius = 31;
    for (int i = 0; i < 2*radius + 1; ++i)
    {
        float x = i-radius;
        fGaussian[i] = expf(-(x*x) / (2*delta*delta));
    }
    checkCudaErrors(hipMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1)));
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
// Run 'iterations' passes of the bilateral filter over the image uploaded by
// initTexture, writing the result to dDest.  Returns the average kernel time
// per iteration in seconds (timer accumulates milliseconds).
extern "C"
double bilateralFilterRGBA(uint *dDest,
                           int width, int height,
                           float e_d, int radius, int iterations,
                           StopWatchInterface *timer)
{
    // var for kernel computation timing
    double dKernelTime;
    // Bind the array to the texture
    hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>();
    checkCudaErrors(hipBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch));
    for (int i=0; i<iterations; i++)
    {
        // sync host and start kernel computation timer
        dKernelTime = 0.0;
        checkCudaErrors(hipDeviceSynchronize());
        sdkResetTimer(&timer);
        // 16x16 thread blocks tiling the image (ceil-division grid)
        dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
        dim3 blockSize(16, 16);
        hipLaunchKernelGGL(( d_bilateral_filter), dim3(gridSize), dim3(blockSize), 0, 0,
            dDest, width, height, e_d, radius);
        // sync host and stop computation timer
        checkCudaErrors(hipDeviceSynchronize());
        dKernelTime += sdkGetTimerValue(&timer);
        if (iterations > 1)
        {
            // copy result back from global memory to array and rebind the
            // texture to it so the next pass filters this pass's output
            checkCudaErrors(hipMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width,
                                        sizeof(int)*width, height, hipMemcpyDeviceToDevice));
            checkCudaErrors(hipBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch));
        }
    }
    // ms -> s, averaged over iterations
    return ((dKernelTime/1000.)/(double)iterations);
} | b64673207703564a71c679c7eed11034689a6fe5.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <helper_math.h>
#include <helper_functions.h>
#include <helper_cuda.h> // CUDA device initialization helper functions
__constant__ float cGaussian[64]; //gaussian array in device side
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaTex;
uint *dImage = NULL; //original image
uint *dTemp = NULL; //temp array for iterations
size_t pitch;
/*
Perform a simple bilateral filter.
Bilateral filter is a nonlinear filter that is a mixture of range
filter and domain filter, the previous one preserves crisp edges and
the latter one filters noise. The intensity value at each pixel in
an image is replaced by a weighted average of intensity values from
nearby pixels.
The weight factor is calculated by the product of domain filter
component(using the gaussian distribution as a spatial distance) as
well as range filter component(Euclidean distance between center pixel
and the current neighbor pixel). Because this process is nonlinear,
the sample just uses a simple pixel by pixel step.
Texture fetches automatically clamp to edge of image. 1D gaussian array
is mapped to a 1D texture instead of using shared memory, which may
cause severe bank conflict.
Threads are y-pass(column-pass), because the output is coalesced.
Parameters
od - pointer to output data in global memory
d_f - pointer to the 1D gaussian array
e_d - euclidean delta
w - image width
h - image height
r - filter radius
*/
//Euclidean Distance (x, y, d) = exp((|x - y| / d)^2 / 2)
// Gaussian range weight exp(-|b-a|^2 / (2 d^2)) over the x/y/z channels
// (w ignored); uses the fast __expf intrinsic.
__device__ float euclideanLen(float4 a, float4 b, float d)
{
    float mod = (b.x - a.x) * (b.x - a.x) +
                (b.y - a.y) * (b.y - a.y) +
                (b.z - a.z) * (b.z - a.z);
    return __expf(-mod / (2.f * d * d));
}
// Pack a float4 pixel into 0xAABBGGRR; channels clamped to [0,1] then
// scaled to bytes.
__device__ uint rgbaFloatToInt(float4 rgba)
{
    rgba.x = __saturatef(fabs(rgba.x));   // clamp to [0.0, 1.0]
    rgba.y = __saturatef(fabs(rgba.y));
    rgba.z = __saturatef(fabs(rgba.z));
    rgba.w = __saturatef(fabs(rgba.w));
    return (uint(rgba.w * 255.0f) << 24) | (uint(rgba.z * 255.0f) << 16) | (uint(rgba.y * 255.0f) << 8) | uint(rgba.x * 255.0f);
}
// Unpack a byte-packed 0xAABBGGRR pixel into a float4 scaled into [0,1].
__device__ float4 rgbaIntToFloat(uint c)
{
    float4 rgba;
    rgba.x = (c & 0xff) * 0.003921568627f;       //  /255.0f;
    rgba.y = ((c>>8) & 0xff) * 0.003921568627f;  //  /255.0f;
    rgba.z = ((c>>16) & 0xff) * 0.003921568627f; //  /255.0f;
    rgba.w = ((c>>24) & 0xff) * 0.003921568627f; //  /255.0f;
    return rgba;
}
//column pass using coalesced global memory reads
// Bilateral filter kernel, one thread per output pixel: each neighbor in the
// (2r+1)^2 window is weighted by the separable spatial Gaussian (cGaussian)
// times the range term euclideanLen(); texture fetches clamp at the edges
// (see file header), and the result is normalized by the total weight.
__global__ void
d_bilateral_filter(uint *od, int w, int h,
                   float e_d, int r)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x >= w || y >= h)
    {
        return;
    }
    float sum = 0.0f;
    float factor;
    float4 t = {0.f, 0.f, 0.f, 0.f};
    float4 center = tex2D(rgbaTex, x, y);
    for (int i = -r; i <= r; i++)
    {
        for (int j = -r; j <= r; j++)
        {
            float4 curPix = tex2D(rgbaTex, x + j, y + i);
            factor = cGaussian[i + r] * cGaussian[j + r] * //domain factor
                     euclideanLen(curPix, center, e_d);    //range factor
            t += factor * curPix;
            sum += factor;
        }
    }
    od[y * w + x] = rgbaFloatToInt(t/sum);
}
// Allocate the pitched device image buffers and upload the host image.
// Both allocations share the global 'pitch'; widths match so the pitch does.
extern "C"
void initTexture(int width, int height, uint *hImage)
{
    // copy image data to array
    checkCudaErrors(cudaMallocPitch(&dImage, &pitch, sizeof(uint)*width, height));
    checkCudaErrors(cudaMallocPitch(&dTemp, &pitch, sizeof(uint)*width, height));
    checkCudaErrors(cudaMemcpy2D(dImage, pitch, hImage, sizeof(uint)*width,
                                 sizeof(uint)*width, height, cudaMemcpyHostToDevice));
}
// Release the device image buffers allocated by initTexture.
extern "C"
void freeTextures()
{
    checkCudaErrors(cudaFree(dImage));
    checkCudaErrors(cudaFree(dTemp));
    // Null the globals so a repeated call (or a later initTexture) cannot
    // double-free stale handles; cudaFree(NULL) is a harmless no-op.
    dImage = NULL;
    dTemp = NULL;
}
/*
Because a 2D gaussian mask is symmetry in row and column,
here only generate a 1D mask, and use the product by row
and column index later.
1D gaussian distribution :
g(x, d) -- C * exp(-x^2/d^2), C is a constant amplifier
parameters:
og - output gaussian array in global memory
delta - the 2nd parameter 'd' in the above function
radius - half of the filter size
(total filter size = 2 * radius + 1)
*/
// Build the 1D spatial-Gaussian mask g(x) = exp(-x^2 / (2*delta^2)) for the
// window [-radius, radius] and upload it to the constant array cGaussian.
extern "C"
void updateGaussian(float delta, int radius)
{
    float fGaussian[64];
    // cGaussian (and this staging buffer) hold 64 floats, so the window
    // 2*radius+1 must not exceed 64.  Clamp to avoid overflowing the stack
    // buffer / constant array for out-of-range radii.
    if (radius < 0) radius = 0;
    if (radius > 31) radius = 31;
    for (int i = 0; i < 2*radius + 1; ++i)
    {
        float x = i-radius;
        fGaussian[i] = expf(-(x*x) / (2*delta*delta));
    }
    checkCudaErrors(cudaMemcpyToSymbol(cGaussian, fGaussian, sizeof(float)*(2*radius+1)));
}
/*
Perform 2D bilateral filter on image using CUDA
Parameters:
d_dest - pointer to destination image in device memory
width - image width
height - image height
e_d - euclidean delta
radius - filter radius
iterations - number of iterations
*/
// RGBA version
// Run 'iterations' passes of the bilateral filter over the image uploaded by
// initTexture, writing the result to dDest.  Returns the average kernel time
// per iteration in seconds (timer accumulates milliseconds).
extern "C"
double bilateralFilterRGBA(uint *dDest,
                           int width, int height,
                           float e_d, int radius, int iterations,
                           StopWatchInterface *timer)
{
    // var for kernel computation timing
    double dKernelTime;
    // Bind the array to the texture
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
    checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dImage, desc, width, height, pitch));
    for (int i=0; i<iterations; i++)
    {
        // sync host and start kernel computation timer
        dKernelTime = 0.0;
        checkCudaErrors(cudaDeviceSynchronize());
        sdkResetTimer(&timer);
        // 16x16 thread blocks tiling the image (ceil-division grid)
        dim3 gridSize((width + 16 - 1) / 16, (height + 16 - 1) / 16);
        dim3 blockSize(16, 16);
        d_bilateral_filter<<< gridSize, blockSize>>>(
            dDest, width, height, e_d, radius);
        // sync host and stop computation timer
        checkCudaErrors(cudaDeviceSynchronize());
        dKernelTime += sdkGetTimerValue(&timer);
        if (iterations > 1)
        {
            // copy result back from global memory to array and rebind the
            // texture to it so the next pass filters this pass's output
            checkCudaErrors(cudaMemcpy2D(dTemp, pitch, dDest, sizeof(int)*width,
                                         sizeof(int)*width, height, cudaMemcpyDeviceToDevice));
            checkCudaErrors(cudaBindTexture2D(0, rgbaTex, dTemp, desc, width, height, pitch));
        }
    }
    // ms -> s, averaged over iterations
    return ((dKernelTime/1000.)/(double)iterations);
} |
e9ee9bd9c73b827085747f5878d7a7c1148224bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/* Fill the first N entries of a with the constant num. */
void initWith(float num, float *a, int N)
{
  int idx;
  for (idx = 0; idx < N; idx++)
    a[idx] = num;
}
// Elementwise result[i] = a[i] + b[i].  Uses a grid-stride loop, so the
// kernel is correct for any grid size relative to N.
__global__
void addArraysInto(float *result, float *a, float *b, int N)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  for(int i = index; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}
// Verify every element equals target; prints the first mismatch and exits
// the process with status 1, otherwise prints "Success!".
void checkElementsAre(float target, float *array, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(array[i] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
      exit(1);
    }
  }
  printf("Success!\n");
}
int main()
{
  // Problem size: 2^21 floats per vector.
  const int N = 2<<20;
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  float *c;
  // Managed (unified) memory: accessible from both host init code and the
  // device kernel without explicit copies.
  hipMallocManaged(&a, size);
  hipMallocManaged(&b, size);
  hipMallocManaged(&c, size);
  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);
  size_t threadsPerBlock;
  size_t numberOfBlocks;
  threadsPerBlock = 256;
  numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
  hipError_t addArraysErr;
  hipLaunchKernelGGL(( addArraysInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
  addArraysErr = hipGetLastError();  // catches launch-configuration errors
  if(addArraysErr != hipSuccess) printf("Error running addArrays %s\n", hipGetErrorString(addArraysErr));
  // Kernel runs asynchronously; synchronize before reading c on the host.
  hipDeviceSynchronize();
  checkElementsAre(7, c, N);  // 3 + 4 == 7 everywhere
  hipFree(a);
  hipFree(b);
  hipFree(c);
}
| e9ee9bd9c73b827085747f5878d7a7c1148224bf.cu | #include <stdio.h>
/* Fill the first N entries of a with the constant num. */
void initWith(float num, float *a, int N)
{
  int i = 0;
  while (i < N)
  {
    a[i] = num;
    ++i;
  }
}
// Elementwise result[i] = a[i] + b[i], written as a grid-stride loop so any
// launch configuration covers all N elements.
__global__
void addArraysInto(float *result, float *a, float *b, int N)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  for(int i = index; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}
// Verify every element equals target; on the first mismatch print it and
// exit(1), otherwise print "Success!".
void checkElementsAre(float target, float *array, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(array[i] != target)
    {
      printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target);
      exit(1);
    }
  }
  printf("Success!\n");
}
int main()
{
  // Problem size: 2^21 floats per vector.
  const int N = 2<<20;
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  float *c;
  // Managed (unified) memory: usable from host init code and the kernel
  // without explicit copies.
  cudaMallocManaged(&a, size);
  cudaMallocManaged(&b, size);
  cudaMallocManaged(&c, size);
  initWith(3, a, N);
  initWith(4, b, N);
  initWith(0, c, N);
  size_t threadsPerBlock;
  size_t numberOfBlocks;
  threadsPerBlock = 256;
  numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;  // ceil-div
  cudaError_t addArraysErr;
  addArraysInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
  addArraysErr = cudaGetLastError();  // catches launch-configuration errors
  if(addArraysErr != cudaSuccess) printf("Error running addArrays %s\n", cudaGetErrorString(addArraysErr));
  // Kernel runs asynchronously; synchronize before reading c on the host.
  cudaDeviceSynchronize();
  checkElementsAre(7, c, N);  // 3 + 4 == 7 everywhere
  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
|
5acfec3c9c53e50753ee5f884149324f8d9257bc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Functions mapped over matrices and reductions using function tables. Unfortunately, it doesnt seem to be possible to
* use templates for this. Function pointers have to be stored as device const arrays, but there doesnt seem to be a way
* to use templated static class fields on the device to do this.
*/
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
// Binary float operators; pointers to these fill the 'operators' table
// below, so names and semantics here must stay in sync with that table.
__device__ float op_add(float a, float b) {return a+b;}
__device__ float op_sub(float a, float b) {return a-b;}
__device__ float op_mul(float a, float b) {return a*b;}
__device__ float op_div(float a, float b) {return a/b;}
__device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;}
__device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;}
__device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;}
__device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;}
__device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;}
__device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;}
__device__ float op_max(float a, float b) {return max(a,b);}
__device__ float op_min(float a, float b) {return min(a,b);}
__device__ float op_atan2(float a, float b) {return atan2f(a, b);}
__device__ float op_pow(float a, float b) {return powf(a, b);}
// Binary int operators for the 'ioperators' table (iop_div is truncating
// integer division).
__device__ int iop_add(int a, int b) {return a+b;}
__device__ int iop_sub(int a, int b) {return a-b;}
__device__ int iop_mul(int a, int b) {return a*b;}
__device__ int iop_div(int a, int b) {return a/b;}
__device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;}
__device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;}
__device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;}
__device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;}
__device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;}
__device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;}
__device__ int iop_max(int a, int b) {return max(a,b);}
__device__ int iop_min(int a, int b) {return min(a,b);}
// Binary 64-bit integer operators for the 'loperators' table.
__device__ long long lop_add(long long a, long long b) {return a+b;}
__device__ long long lop_sub(long long a, long long b) {return a-b;}
__device__ long long lop_mul(long long a, long long b) {return a*b;}
__device__ long long lop_div(long long a, long long b) {return a/b;}
__device__ long long lop_gt(long long a, long long b) {return (a > b) ? 1 : 0;}
__device__ long long lop_lt(long long a, long long b) {return (a < b) ? 1 : 0;}
__device__ long long lop_eq(long long a, long long b) {return (a == b) ? 1 : 0;}
__device__ long long lop_ge(long long a, long long b) {return (a >= b) ? 1 : 0;}
__device__ long long lop_le(long long a, long long b) {return (a <= b) ? 1 : 0;}
__device__ long long lop_ne(long long a, long long b) {return (a != b) ? 1 : 0;}
__device__ long long lop_max(long long a, long long b) {return max(a,b);}
// Fixed: previously returned max(a,b) (copy-paste bug), making the long-long
// min reduction/elementwise-min compute a maximum instead.
__device__ long long lop_min(long long a, long long b) {return min(a,b);}
// Binary double operators for the 'doperators' table.
__device__ double dop_add(double a, double b) {return a+b;}
__device__ double dop_sub(double a, double b) {return a-b;}
__device__ double dop_mul(double a, double b) {return a*b;}
__device__ double dop_div(double a, double b) {return a/b;}
__device__ double dop_gt(double a, double b) {return (a > b) ? 1.0 : 0;}
__device__ double dop_lt(double a, double b) {return (a < b) ? 1.0 : 0;}
__device__ double dop_eq(double a, double b) {return (a == b) ? 1.0 : 0;}
__device__ double dop_ge(double a, double b) {return (a >= b) ? 1.0 : 0;}
__device__ double dop_le(double a, double b) {return (a <= b) ? 1.0 : 0;}
__device__ double dop_ne(double a, double b) {return (a != b) ? 1.0 : 0;}
__device__ double dop_max(double a, double b) {return max(a,b);}
__device__ double dop_min(double a, double b) {return min(a,b);}
__device__ double dop_atan2(double a, double b) {return atan2(a, b);}
__device__ double dop_pow(double a, double b) {return pow(a, b);}
// Check reducevec if these ever get changed.
// Float operator dispatch table; entry order defines the opcode values used
// by callers (see the note above about reducevec before reordering).
__device__ const optype operators[] = {
    op_add,
    op_sub,
    op_mul,
    op_div,
    op_gt,
    op_lt,
    op_eq,
    op_ge,
    op_le,
    op_ne,
    op_max,
    op_min,
    op_atan2,
    op_pow};
// Int operator dispatch table; first 12 entries mirror operators[] order.
__device__ const ioptype ioperators[] = {
    iop_add,
    iop_sub,
    iop_mul,
    iop_div,
    iop_gt,
    iop_lt,
    iop_eq,
    iop_ge,
    iop_le,
    iop_ne,
    iop_max,
    iop_min};
// Long-long operator dispatch table; first 12 entries mirror operators[].
__device__ const loptype loperators[] = {
    lop_add,
    lop_sub,
    lop_mul,
    lop_div,
    lop_gt,
    lop_lt,
    lop_eq,
    lop_ge,
    lop_le,
    lop_ne,
    lop_max,
    lop_min};
// Double operator dispatch table; entry order mirrors operators[].
__device__ const doptype doperators[] = {
    dop_add,
    dop_sub,
    dop_mul,
    dop_div,
    dop_gt,
    dop_lt,
    dop_eq,
    dop_ge,
    dop_le,
    dop_ne,
    dop_max,
    dop_min,
    dop_atan2,
    dop_pow};
// Unary float functions, mostly thin wrappers over CUDA math intrinsics;
// pointers to these fill the fctns[] table below (fn_log is defined but the
// table uses fn_ln for natural log).  fn_exppsi is a custom piecewise map:
// 0.5*a^2 for a < 1, a - 0.5 otherwise.
__device__ float fn_abs(float a) {return abs(a);}
__device__ float fn_exp(float a) {return expf(a);}
__device__ float fn_log(float a) {return logf(a);}
__device__ float fn_expm1(float a) {return expm1f(a);}
__device__ float fn_sqrt(float a) {return sqrtf(a);}
__device__ float fn_ln(float a) {return logf(a);}
__device__ float fn_log10(float a) {return log10f(a);}
__device__ float fn_log1p(float a) {return log1pf(a);}
__device__ float fn_cos(float a) {return cosf(a);}
__device__ float fn_sin(float a) {return sinf(a);}
__device__ float fn_tan(float a) {return tanf(a);}
__device__ float fn_cosh(float a) {return coshf(a);}
__device__ float fn_sinh(float a) {return sinhf(a);}
__device__ float fn_tanh(float a) {return tanhf(a);}
__device__ float fn_acos(float a) {return acosf(a);}
__device__ float fn_asin(float a) {return asinf(a);}
__device__ float fn_atan(float a) {return atanf(a);}
__device__ float fn_acosh(float a) {return acoshf(a);}
__device__ float fn_asinh(float a) {return asinhf(a);}
__device__ float fn_atanh(float a) {return atanhf(a);}
__device__ float fn_erf(float a) {return erff(a);}
__device__ float fn_erfinv(float a) {return erfinvf(a);}
__device__ float fn_erfc(float a) {return erfcf(a);}
__device__ float fn_erfcinv(float a) {return erfcinvf(a);}
__device__ float fn_gammaln(float a) {return lgammaf(a);}
__device__ float fn_gamma(float a) {return tgammaf(a);}
__device__ float fn_ceil(float a) {return ceilf(a);}
__device__ float fn_floor(float a) {return floorf(a);}
__device__ float fn_round(float a) {return roundf(a);}
__device__ float fn_trunc(float a) {return truncf(a);}
__device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ float fn_j0(float a) {return j0f(a);}
__device__ float fn_j1(float a) {return j1f(a);}
//__device__ float fn_jn(float a) {return jnf(a);}
__device__ float fn_y0(float a) {return y0f(a);}
__device__ float fn_y1(float a) {return y1f(a);}
//__device__ float fn_yn(float a) {return ynf(a);}
__device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ float fn_normcdf(float a) {return normcdff(a);}
__device__ float fn_normcdfinv(float a) {return normcdfinvf(a);}
// Two-argument float functions used by the fctns2[] table.
__device__ float fn_atan2(float a, float b) {return atan2f(a, b);}
__device__ float fn_pow(float a, float b) {return powf(a, b);}
// Unary-function dispatch table; entry order defines the function opcodes
// used by callers (note fn_ln, not fn_log, is the natural-log entry).
__device__ const fntype fctns[] = {
    fn_abs,
    fn_exp,
    fn_expm1,
    fn_sqrt,
    fn_ln,
    fn_log10,
    fn_log1p,
    fn_cos,
    fn_sin,
    fn_tan,
    fn_cosh,
    fn_sinh,
    fn_tanh,
    fn_acos,
    fn_asin,
    fn_atan,
    fn_acosh,
    fn_asinh,
    fn_atanh,
    fn_erf,
    fn_erfinv,
    fn_erfc,
    fn_erfcinv,
    fn_gammaln,
    fn_gamma,
    fn_ceil,
    fn_floor,
    fn_round,
    fn_trunc,
    fn_sign,
    fn_j0,
    fn_j1,
    fn_y0,
    fn_y1,
    fn_exppsi,
    fn_normcdf,
    fn_normcdfinv};
// Some SLATEC functions
//    fn_psi,
//    fn_psiinv};
// Two-argument function dispatch table (same indexing convention).
__device__ const optype fctns2[] = {
    fn_atan2,
    fn_pow};
// Some SLATEC functions
// fn_psifn};
__device__ double dfn_abs(double a) {return abs(a);}
__device__ double dfn_exp(double a) {return exp(a);}
__device__ double dfn_log(double a) {return log(a);}
__device__ double dfn_expm1(double a) {return expm1(a);}
__device__ double dfn_sqrt(double a) {return sqrt(a);}
__device__ double dfn_ln(double a) {return log(a);}
__device__ double dfn_log10(double a) {return log10(a);}
__device__ double dfn_log1p(double a) {return log1p(a);}
__device__ double dfn_cos(double a) {return cos(a);}
__device__ double dfn_sin(double a) {return sin(a);}
__device__ double dfn_tan(double a) {return tan(a);}
__device__ double dfn_cosh(double a) {return cosh(a);}
__device__ double dfn_sinh(double a) {return sinh(a);}
__device__ double dfn_tanh(double a) {return tanh(a);}
__device__ double dfn_acos(double a) {return acos(a);}
__device__ double dfn_asin(double a) {return asin(a);}
__device__ double dfn_atan(double a) {return atan(a);}
__device__ double dfn_acosh(double a) {return acosh(a);}
__device__ double dfn_asinh(double a) {return asinh(a);}
__device__ double dfn_atanh(double a) {return atanh(a);}
__device__ double dfn_erf(double a) {return erf(a);}
__device__ double dfn_erfinv(double a) {return erfinv(a);}
__device__ double dfn_erfc(double a) {return erfc(a);}
__device__ double dfn_erfcinv(double a) {return erfcinv(a);}
__device__ double dfn_gammaln(double a) {return lgamma(a);}
__device__ double dfn_gamma(double a) {return tgamma(a);}
__device__ double dfn_ceil(double a) {return ceil(a);}
__device__ double dfn_floor(double a) {return floor(a);}
__device__ double dfn_round(double a) {return round(a);}
__device__ double dfn_trunc(double a) {return trunc(a);}
__device__ double dfn_sign(double a) {return (a>0) ? 1.0 : ((a<0) ? -1.0 : 0);}
__device__ double dfn_j0(double a) {return j0(a);}
__device__ double dfn_j1(double a) {return j1(a);}
//__device__ double dfn_jn(double a) {return jnf(a);}
__device__ double dfn_y0(double a) {return y0(a);}
__device__ double dfn_y1(double a) {return y1(a);}
//__device__ double dfn_yn(double a) {return ynf(a);}
__device__ double dfn_exppsi(double a) {return (a<1.0) ? 0.5*a*a : a-0.5;}
__device__ double dfn_atan2(double a, double b) {return atan2(a, b);}
__device__ double dfn_pow(double a, double b) {return pow(a, b);}
__device__ const dfntype dfctns[35] = {
dfn_abs,
dfn_exp,
dfn_expm1,
dfn_sqrt,
dfn_ln,
dfn_log10,
dfn_log1p,
dfn_cos,
dfn_sin,
dfn_tan,
dfn_cosh,
dfn_sinh,
dfn_tanh,
dfn_acos,
dfn_asin,
dfn_atan,
dfn_acosh,
dfn_asinh,
dfn_atanh,
dfn_erf,
dfn_erfinv,
dfn_erfc,
dfn_erfcinv,
dfn_gammaln,
dfn_gamma,
dfn_ceil,
dfn_floor,
dfn_round,
dfn_trunc,
dfn_sign,
dfn_j0,
dfn_j1,
dfn_y0,
dfn_y1,
dfn_exppsi};
__device__ const doptype dfctns2[2] = {
dfn_atan2,
dfn_pow};
__device__ float psi_(float x);
int getDeviceVersion() {
int igpu;
hipGetDevice(&igpu);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, igpu);
return 100 * prop.major + 10 * prop.minor;
}
void setsizes(long long N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
int threads_per_block = 1024;
// int version;
// version = getDeviceVersion();
// if (version == 320) threads_per_block = 512;
while (1L * nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < threads_per_block) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
#define GENGFUN(ATYPE,FNTYPE,FUNCARRAY) \
__global__ void __apply_gfun_##ATYPE(ATYPE *A, ATYPE *B, int N, int opn) { \
FNTYPE fn = FUNCARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \
B[i] = fn(A[i]); \
} \
} \
\
int apply_gfun(ATYPE *A, ATYPE *B, int N, int opn) { \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
hipLaunchKernelGGL(( __apply_gfun_##ATYPE), dim3(griddims),dim3(nthreads), 0, 0, A, B, N, opn); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENGFUN(float,fntype,fctns)
GENGFUN(double,dfntype,dfctns)
#define GENGFUN2(ATYPE,FNTYPE,FUNCARRAY) \
__global__ void __apply_gfun2_##ATYPE(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \
FNTYPE fn = FUNCARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = fn(A[i], B[i]); \
} \
} \
\
int apply_gfun2(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
hipLaunchKernelGGL(( __apply_gfun2_##ATYPE), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENGFUN2(float,optype,fctns2)
GENGFUN2(double,doptype,dfctns2)
#define GENAPPLY(ATYPE,OPTYPE,OPARRAY) \
__global__ void __apply_full(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B[i]); \
} \
} \
\
__global__ void __apply_right_col(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B[i % nrows]); \
} \
} \
\
__global__ void __apply_right_row(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B[i / nrows]); \
} \
} \
\
__global__ void __apply_left_col(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i % nrows],B[i]); \
} \
} \
\
__global__ void __apply_left_row(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i / nrows],B[i]); \
} \
} \
\
__global__ void __apply_right_val(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
ATYPE val = B[0]; \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],val); \
} \
} \
\
__global__ void __apply_left_val(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
ATYPE val = A[0]; \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(val,B[i]); \
} \
} \
\
__global__ void __apply_right_const(ATYPE *A, ATYPE B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B); \
} \
} \
\
__global__ void __apply_left_const(ATYPE A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A,B[i]); \
} \
} \
\
int apply_binop(ATYPE *A, int Anrows, int Ancols, \
ATYPE *B, int Bnrows, int Bncols, ATYPE *C, int opn) { \
int N = max(Anrows, Bnrows)*max(Ancols, Bncols); \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
if (Anrows == Bnrows && Ancols == Bncols) { \
hipLaunchKernelGGL(( __apply_full), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, N, opn); \
} else if (Anrows == Bnrows && Bncols == 1) { \
hipLaunchKernelGGL(( __apply_right_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); \
} else if (Ancols == Bncols && Bnrows == 1) { \
hipLaunchKernelGGL(( __apply_right_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); \
} else if (Anrows == Bnrows && Ancols == 1) { \
hipLaunchKernelGGL(( __apply_left_col), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); \
} else if (Ancols == Bncols && Anrows == 1) { \
hipLaunchKernelGGL(( __apply_left_row), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); \
} else if (Bnrows == 1 && Bncols == 1) { \
hipLaunchKernelGGL(( __apply_right_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); \
} else if (Anrows == 1 && Ancols == 1) { \
hipLaunchKernelGGL(( __apply_left_val), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); \
} \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
} \
\
int apply_binop_left_const(ATYPE A, \
ATYPE *B, int Bnrows, int Bncols, ATYPE *C, int opn) { \
int N = Bnrows* Bncols; \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
hipLaunchKernelGGL(( __apply_left_const), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Bnrows, Bncols, opn); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
} \
\
int apply_binop_right_const(ATYPE *A, int Anrows, int Ancols, \
ATYPE B, ATYPE *C, int opn) { \
int N = Anrows*Ancols; \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
hipLaunchKernelGGL(( __apply_right_const), dim3(griddims),dim3(nthreads), 0, 0, A, B, C, Anrows, Ancols, opn); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENAPPLY(float,optype,operators)
GENAPPLY(int,ioptype,ioperators)
GENAPPLY(long long,loptype,loperators)
GENAPPLY(double,doptype,doperators)
#define GENSPOPERATION(ATYPE,OPTYPE,OPARRAY) \
__global__ void __sdoprow(int nrows, int ncols, int nnz, ATYPE *A, int *Aic, ATYPE *B, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \
int col = Aic[i]; \
ATYPE oldA = A[i]; \
A[i] = op(oldA,B[col]); \
} \
} \
\
__global__ void __sdopcol(int nrows, int ncols, int nnz, ATYPE *A, int *Air, ATYPE *B, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \
int row = Air[i]; \
ATYPE oldA = A[i]; \
A[i] = op(oldA,B[row]); \
} \
} \
\
__global__ void __sdopval(int nnz, ATYPE *A, ATYPE *B, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
ATYPE bval = B[0]; \
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \
ATYPE oldA = A[i]; \
A[i] = op(oldA,bval); \
} \
} \
\
int sdoprow(int nrows, int ncols, int nnz, ATYPE *A, int *Aic, \
ATYPE *B, int len, int opn) { \
int nthreads; \
dim3 griddims; \
setsizes(nnz, &griddims, &nthreads); \
if (len > 1) { \
hipLaunchKernelGGL(( __sdoprow), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Aic, B, opn); \
} else { \
hipLaunchKernelGGL(( __sdopval), dim3(griddims),dim3(nthreads), 0, 0, nnz, A, B, opn); \
} \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
} \
\
int sdopcol(int nrows, int ncols, int nnz, ATYPE *A, int *Air, \
ATYPE *B, int len, int opn) { \
int nthreads; \
dim3 griddims; \
setsizes(nnz, &griddims, &nthreads); \
if (len > 1) { \
hipLaunchKernelGGL(( __sdopcol), dim3(griddims),dim3(nthreads), 0, 0, nrows, ncols, nnz, A, Air, B, opn); \
} else { \
hipLaunchKernelGGL(( __sdopval), dim3(griddims),dim3(nthreads), 0, 0, nnz, A, B, opn); \
} \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENSPOPERATION(float,optype,operators)
GENSPOPERATION(double,doptype,doperators)
#define GENREDUCE1OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int basecol = threadIdx.y + blockDim.y * blockIdx.x; \
ATYPE v; \
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { \
v = initval; \
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows]; \
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) { \
v = op(v, A[i + icol * nrows]); \
} \
for (int i = 1; i < blockDim.x; i *= 2) { \
ATYPE vtmp = __shfl_down(v, i); \
if (threadIdx.x + i < blockDim.x) { \
v = op(v, vtmp); \
} \
} \
if (threadIdx.x == 0) { \
B[icol] = v; \
} \
} \
}
#define GENREDUCE1OPX(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE op = OPARRAY[opn]; \
ATYPE v; \
for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { \
v = initval; \
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows]; \
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) { \
v = op(v, A[irow + icol * nrows]); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
for (int i = 1; i < blockDim.x; i *= 2) { \
if (i + threadIdx.x < blockDim.x) { \
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); \
} \
} \
if (threadIdx.x == 0) { \
B[icol] = parts[0][threadIdx.y]; \
} \
__syncthreads(); \
} \
}
#if __CUDA_ARCH__ > 200
GENREDUCE1OP(float,optype,operators)
GENREDUCE1OP(int,ioptype,ioperators)
#else
GENREDUCE1OPX(float,optype,operators)
GENREDUCE1OPX(int,ioptype,ioperators)
#endif
GENREDUCE1OPX(long long,loptype,loperators)
GENREDUCE1OPX(double,doptype,doperators)
template<typename T>
void reducevec(int n, T *A, T *B, int opn) {
thrust::device_ptr<T> pa(A);
thrust::device_ptr<T> pb(B);
T v;
switch (opn) {
case 0 : // sum
v = thrust::reduce(pa, pa + n);
thrust::fill(pb, pb + 1, v);
break;
case 10 : // max
v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::min(), thrust::maximum<T>());
thrust::fill(pb, pb + 1, v);
break;
case 11: // min
v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::max(), thrust::minimum<T>());
thrust::fill(pb, pb + 1, v);
break;
}
}
#define GENREDUCE1OPY(ATYPE) \
int reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
if (ncols == 1) { \
reducevec<ATYPE>(nrows, A, B, opn); \
} else { \
int blkx = 32; \
int blky = min(32, ncols); \
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
const dim3 blkdims(blkx,blky,1); \
hipLaunchKernelGGL(( __reduce1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, initval, opn); \
} \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENREDUCE1OPY(float)
GENREDUCE1OPY(int)
GENREDUCE1OPY(long long)
GENREDUCE1OPY(double)
#define GENREDUCEBIN1OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
OPTYPE opbf = OPARRAY[opb]; \
OPTYPE oprf = OPARRAY[opr]; \
int basecol = threadIdx.y + blockDim.y * blockIdx.x; \
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { \
ATYPE v = 0; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows])); \
} \
for (int i = 1; i < blockDim.x; i *= 2) { \
v = oprf(v, __shfl_down(v, i)); \
} \
if (threadIdx.x == 0) { \
C[icol] = v; \
} \
} \
}
#define GENREDUCEBIN1OPX(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE opbf = OPARRAY[opb]; \
OPTYPE oprf = OPARRAY[opr]; \
for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { \
ATYPE v = 0; \
for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) { \
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
for (int i = 1; i < blockDim.x; i *= 2) { \
if (i + threadIdx.x < blockDim.x) { \
parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); \
} \
} \
if (threadIdx.x == 0) { \
C[icol] = parts[0][threadIdx.y]; \
} \
__syncthreads(); \
} \
}
#if __CUDA_ARCH__ > 200
GENREDUCEBIN1OP(float,optype,operators)
#else
GENREDUCEBIN1OPX(float,optype,operators)
#endif
GENREDUCEBIN1OPX(double,doptype,doperators)
#define GENREDUCEBIN1OPY(ATYPE) \
int reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
int blkx = min(32, nrows); \
int blky = min(32, ncols); \
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
const dim3 blkdims(blkx,blky,1); \
hipLaunchKernelGGL(( __reducebin1op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENREDUCEBIN1OPY(float)
GENREDUCEBIN1OPY(double)
#define GENREDUCE2OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reduce2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE op = OPARRAY[opn]; \
int baserow = threadIdx.x + blockDim.x * blockIdx.x; \
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { \
ATYPE v = A[irow + threadIdx.y * nrows]; \
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { \
v = op(v, A[irow + icol * nrows]); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
ATYPE newv = initval; \
for (int i = 1; i < blockDim.y; i *= 2) { \
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; \
__syncthreads(); \
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv); \
__syncthreads(); \
} \
if (threadIdx.y == 0) { \
B[irow] = parts[threadIdx.x][0]; \
} \
__syncthreads(); \
} \
} \
\
int reduce2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
if (nrows == 1) { \
reducevec<ATYPE>(ncols, A, B, opn); \
} else { \
int blkx = min(32, nrows); \
int blky = min(32, ncols); \
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
const dim3 blkdims(blkx,blky,1); \
hipLaunchKernelGGL(( __reduce2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, initval, opn); \
} \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENREDUCE2OP(float,optype,operators)
GENREDUCE2OP(int,ioptype,ioperators)
GENREDUCE2OP(long long,loptype,loperators)
GENREDUCE2OP(double,doptype,doperators)
#define GENREDUCEBIN2OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reducebin2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE opbf = OPARRAY[opb]; \
OPTYPE oprf = OPARRAY[opr]; \
int baserow = threadIdx.x + blockDim.x * blockIdx.x; \
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { \
float v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]); \
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { \
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
float newv = 0; \
for (int i = 1; i < blockDim.y; i *= 2) { \
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; \
__syncthreads(); \
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv); \
__syncthreads(); \
} \
if (threadIdx.y == 0) { \
C[irow] = parts[threadIdx.x][0]; \
} \
__syncthreads(); \
} \
} \
\
int reducebin2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
int blkx = min(32, nrows); \
int blky = min(32, ncols); \
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
const dim3 blkdims(blkx,blky,1); \
hipLaunchKernelGGL(( __reducebin2op), dim3(nblks),dim3(blkdims), 0, 0, nrows, ncols, A, B, C, opb, opr); \
hipDeviceSynchronize(); \
hipError_t err = hipGetLastError(); \
return err; \
}
GENREDUCEBIN2OP(float,optype,operators)
GENREDUCEBIN2OP(double,doptype,doperators)
| 5acfec3c9c53e50753ee5f884149324f8d9257bc.cu | /*
* Functions mapped over matrices and reductions using function tables. Unfortunately, it doesnt seem to be possible to
* use templates for this. Function pointers have to be stored as device const arrays, but there doesnt seem to be a way
* to use templated static class fields on the device to do this.
*/
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <MatKernel.hpp>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <thrust/reverse.h>
#include <thrust/reduce.h>
#include <thrust/merge.h>
#include <thrust/fill.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#if __CUDA_ARCH__ > 200
#define MAXXGRID 2147483647
#else
#define MAXXGRID 65535
#endif
__device__ float op_add(float a, float b) {return a+b;}
__device__ float op_sub(float a, float b) {return a-b;}
__device__ float op_mul(float a, float b) {return a*b;}
__device__ float op_div(float a, float b) {return a/b;}
__device__ float op_gt(float a, float b) {return (a > b) ? 1.0f : 0;}
__device__ float op_lt(float a, float b) {return (a < b) ? 1.0f : 0;}
__device__ float op_eq(float a, float b) {return (a == b) ? 1.0f : 0;}
__device__ float op_ge(float a, float b) {return (a >= b) ? 1.0f : 0;}
__device__ float op_le(float a, float b) {return (a <= b) ? 1.0f : 0;}
__device__ float op_ne(float a, float b) {return (a != b) ? 1.0f : 0;}
__device__ float op_max(float a, float b) {return max(a,b);}
__device__ float op_min(float a, float b) {return min(a,b);}
__device__ float op_atan2(float a, float b) {return atan2f(a, b);}
__device__ float op_pow(float a, float b) {return powf(a, b);}
__device__ int iop_add(int a, int b) {return a+b;}
__device__ int iop_sub(int a, int b) {return a-b;}
__device__ int iop_mul(int a, int b) {return a*b;}
__device__ int iop_div(int a, int b) {return a/b;}
__device__ int iop_gt(int a, int b) {return (a > b) ? 1 : 0;}
__device__ int iop_lt(int a, int b) {return (a < b) ? 1 : 0;}
__device__ int iop_eq(int a, int b) {return (a == b) ? 1 : 0;}
__device__ int iop_ge(int a, int b) {return (a >= b) ? 1 : 0;}
__device__ int iop_le(int a, int b) {return (a <= b) ? 1 : 0;}
__device__ int iop_ne(int a, int b) {return (a != b) ? 1 : 0;}
__device__ int iop_max(int a, int b) {return max(a,b);}
__device__ int iop_min(int a, int b) {return min(a,b);}
__device__ long long lop_add(long long a, long long b) {return a+b;}
__device__ long long lop_sub(long long a, long long b) {return a-b;}
__device__ long long lop_mul(long long a, long long b) {return a*b;}
__device__ long long lop_div(long long a, long long b) {return a/b;}
__device__ long long lop_gt(long long a, long long b) {return (a > b) ? 1 : 0;}
__device__ long long lop_lt(long long a, long long b) {return (a < b) ? 1 : 0;}
__device__ long long lop_eq(long long a, long long b) {return (a == b) ? 1 : 0;}
__device__ long long lop_ge(long long a, long long b) {return (a >= b) ? 1 : 0;}
__device__ long long lop_le(long long a, long long b) {return (a <= b) ? 1 : 0;}
__device__ long long lop_ne(long long a, long long b) {return (a != b) ? 1 : 0;}
__device__ long long lop_max(long long a, long long b) {return max(a,b);}
__device__ long long lop_min(long long a, long long b) {return max(a,b);}
__device__ double dop_add(double a, double b) {return a+b;}
__device__ double dop_sub(double a, double b) {return a-b;}
__device__ double dop_mul(double a, double b) {return a*b;}
__device__ double dop_div(double a, double b) {return a/b;}
__device__ double dop_gt(double a, double b) {return (a > b) ? 1.0 : 0;}
__device__ double dop_lt(double a, double b) {return (a < b) ? 1.0 : 0;}
__device__ double dop_eq(double a, double b) {return (a == b) ? 1.0 : 0;}
__device__ double dop_ge(double a, double b) {return (a >= b) ? 1.0 : 0;}
__device__ double dop_le(double a, double b) {return (a <= b) ? 1.0 : 0;}
__device__ double dop_ne(double a, double b) {return (a != b) ? 1.0 : 0;}
__device__ double dop_max(double a, double b) {return max(a,b);}
__device__ double dop_min(double a, double b) {return min(a,b);}
__device__ double dop_atan2(double a, double b) {return atan2(a, b);}
__device__ double dop_pow(double a, double b) {return pow(a, b);}
// Check reducevec if these ever get changed.
__device__ const optype operators[] = {
op_add,
op_sub,
op_mul,
op_div,
op_gt,
op_lt,
op_eq,
op_ge,
op_le,
op_ne,
op_max,
op_min,
op_atan2,
op_pow};
__device__ const ioptype ioperators[] = {
iop_add,
iop_sub,
iop_mul,
iop_div,
iop_gt,
iop_lt,
iop_eq,
iop_ge,
iop_le,
iop_ne,
iop_max,
iop_min};
__device__ const loptype loperators[] = {
lop_add,
lop_sub,
lop_mul,
lop_div,
lop_gt,
lop_lt,
lop_eq,
lop_ge,
lop_le,
lop_ne,
lop_max,
lop_min};
__device__ const doptype doperators[] = {
dop_add,
dop_sub,
dop_mul,
dop_div,
dop_gt,
dop_lt,
dop_eq,
dop_ge,
dop_le,
dop_ne,
dop_max,
dop_min,
dop_atan2,
dop_pow};
__device__ float fn_abs(float a) {return abs(a);}
__device__ float fn_exp(float a) {return expf(a);}
__device__ float fn_log(float a) {return logf(a);}
__device__ float fn_expm1(float a) {return expm1f(a);}
__device__ float fn_sqrt(float a) {return sqrtf(a);}
__device__ float fn_ln(float a) {return logf(a);}
__device__ float fn_log10(float a) {return log10f(a);}
__device__ float fn_log1p(float a) {return log1pf(a);}
__device__ float fn_cos(float a) {return cosf(a);}
__device__ float fn_sin(float a) {return sinf(a);}
__device__ float fn_tan(float a) {return tanf(a);}
__device__ float fn_cosh(float a) {return coshf(a);}
__device__ float fn_sinh(float a) {return sinhf(a);}
__device__ float fn_tanh(float a) {return tanhf(a);}
__device__ float fn_acos(float a) {return acosf(a);}
__device__ float fn_asin(float a) {return asinf(a);}
__device__ float fn_atan(float a) {return atanf(a);}
__device__ float fn_acosh(float a) {return acoshf(a);}
__device__ float fn_asinh(float a) {return asinhf(a);}
__device__ float fn_atanh(float a) {return atanhf(a);}
__device__ float fn_erf(float a) {return erff(a);}
__device__ float fn_erfinv(float a) {return erfinvf(a);}
__device__ float fn_erfc(float a) {return erfcf(a);}
__device__ float fn_erfcinv(float a) {return erfcinvf(a);}
__device__ float fn_gammaln(float a) {return lgammaf(a);}
__device__ float fn_gamma(float a) {return tgammaf(a);}
__device__ float fn_ceil(float a) {return ceilf(a);}
__device__ float fn_floor(float a) {return floorf(a);}
__device__ float fn_round(float a) {return roundf(a);}
__device__ float fn_trunc(float a) {return truncf(a);}
__device__ float fn_sign(float a) {return (a>0) ? 1.0f : ((a<0) ? -1.0f : 0);}
__device__ float fn_j0(float a) {return j0f(a);}
__device__ float fn_j1(float a) {return j1f(a);}
//__device__ float fn_jn(float a) {return jnf(a);}
__device__ float fn_y0(float a) {return y0f(a);}
__device__ float fn_y1(float a) {return y1f(a);}
//__device__ float fn_yn(float a) {return ynf(a);}
__device__ float fn_exppsi(float a) {return (a<1.0f) ? 0.5f*a*a : a-0.5f;}
__device__ float fn_normcdf(float a) {return normcdff(a);}
__device__ float fn_normcdfinv(float a) {return normcdfinvf(a);}
__device__ float fn_atan2(float a, float b) {return atan2f(a, b);}
__device__ float fn_pow(float a, float b) {return powf(a, b);}
__device__ const fntype fctns[] = {
fn_abs,
fn_exp,
fn_expm1,
fn_sqrt,
fn_ln,
fn_log10,
fn_log1p,
fn_cos,
fn_sin,
fn_tan,
fn_cosh,
fn_sinh,
fn_tanh,
fn_acos,
fn_asin,
fn_atan,
fn_acosh,
fn_asinh,
fn_atanh,
fn_erf,
fn_erfinv,
fn_erfc,
fn_erfcinv,
fn_gammaln,
fn_gamma,
fn_ceil,
fn_floor,
fn_round,
fn_trunc,
fn_sign,
fn_j0,
fn_j1,
fn_y0,
fn_y1,
fn_exppsi,
fn_normcdf,
fn_normcdfinv};
// Some SLATEC functions
// fn_psi,
// fn_psiinv};
__device__ const optype fctns2[] = {
fn_atan2,
fn_pow};
// Some SLATEC functions
// fn_psifn};
__device__ double dfn_abs(double a) {return abs(a);}
__device__ double dfn_exp(double a) {return exp(a);}
__device__ double dfn_log(double a) {return log(a);}
__device__ double dfn_expm1(double a) {return expm1(a);}
__device__ double dfn_sqrt(double a) {return sqrt(a);}
__device__ double dfn_ln(double a) {return log(a);}
__device__ double dfn_log10(double a) {return log10(a);}
__device__ double dfn_log1p(double a) {return log1p(a);}
__device__ double dfn_cos(double a) {return cos(a);}
__device__ double dfn_sin(double a) {return sin(a);}
__device__ double dfn_tan(double a) {return tan(a);}
__device__ double dfn_cosh(double a) {return cosh(a);}
__device__ double dfn_sinh(double a) {return sinh(a);}
__device__ double dfn_tanh(double a) {return tanh(a);}
__device__ double dfn_acos(double a) {return acos(a);}
__device__ double dfn_asin(double a) {return asin(a);}
__device__ double dfn_atan(double a) {return atan(a);}
__device__ double dfn_acosh(double a) {return acosh(a);}
__device__ double dfn_asinh(double a) {return asinh(a);}
__device__ double dfn_atanh(double a) {return atanh(a);}
__device__ double dfn_erf(double a) {return erf(a);}
__device__ double dfn_erfinv(double a) {return erfinv(a);}
__device__ double dfn_erfc(double a) {return erfc(a);}
__device__ double dfn_erfcinv(double a) {return erfcinv(a);}
__device__ double dfn_gammaln(double a) {return lgamma(a);}
__device__ double dfn_gamma(double a) {return tgamma(a);}
__device__ double dfn_ceil(double a) {return ceil(a);}
__device__ double dfn_floor(double a) {return floor(a);}
__device__ double dfn_round(double a) {return round(a);}
__device__ double dfn_trunc(double a) {return trunc(a);}
__device__ double dfn_sign(double a) {return (a>0) ? 1.0 : ((a<0) ? -1.0 : 0);}
__device__ double dfn_j0(double a) {return j0(a);}
__device__ double dfn_j1(double a) {return j1(a);}
//__device__ double dfn_jn(double a) {return jnf(a);}
__device__ double dfn_y0(double a) {return y0(a);}
__device__ double dfn_y1(double a) {return y1(a);}
//__device__ double dfn_yn(double a) {return ynf(a);}
__device__ double dfn_exppsi(double a) {return (a<1.0) ? 0.5*a*a : a-0.5;}
__device__ double dfn_atan2(double a, double b) {return atan2(a, b);}
__device__ double dfn_pow(double a, double b) {return pow(a, b);}
__device__ const dfntype dfctns[35] = {
dfn_abs,
dfn_exp,
dfn_expm1,
dfn_sqrt,
dfn_ln,
dfn_log10,
dfn_log1p,
dfn_cos,
dfn_sin,
dfn_tan,
dfn_cosh,
dfn_sinh,
dfn_tanh,
dfn_acos,
dfn_asin,
dfn_atan,
dfn_acosh,
dfn_asinh,
dfn_atanh,
dfn_erf,
dfn_erfinv,
dfn_erfc,
dfn_erfcinv,
dfn_gammaln,
dfn_gamma,
dfn_ceil,
dfn_floor,
dfn_round,
dfn_trunc,
dfn_sign,
dfn_j0,
dfn_j1,
dfn_y0,
dfn_y1,
dfn_exppsi};
__device__ const doptype dfctns2[2] = {
dfn_atan2,
dfn_pow};
__device__ float psi_(float x);
int getDeviceVersion() {
int igpu;
cudaGetDevice(&igpu);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, igpu);
return 100 * prop.major + 10 * prop.minor;
}
void setsizes(long long N, dim3 *gridp, int *nthreadsp) {
int nblocks = 1;
int nthreads = 32;
int threads_per_block = 1024;
// int version;
// version = getDeviceVersion();
// if (version == 320) threads_per_block = 512;
while (1L * nblocks * nthreads < N) {
if (nblocks < 16) {
nblocks = 2*nblocks;
} else if (nthreads < threads_per_block) {
nthreads = 2*nthreads;
} else {
nblocks = 2*nblocks;
}
}
gridp->y = 1 + (nblocks-1)/65536;
gridp->x = 1 + (nblocks-1)/gridp->y;
gridp->z = 1;
*nthreadsp = nthreads;
}
// GENGFUN(ATYPE, FNTYPE, FUNCARRAY): generates a grid-stride element-wise
// kernel applying the opn-th unary function from FUNCARRAY to each element
// of A (result in B), plus the synchronous host launcher apply_gfun(),
// which returns the cudaError_t status as an int.
#define GENGFUN(ATYPE,FNTYPE,FUNCARRAY) \
__global__ void __apply_gfun_##ATYPE(ATYPE *A, ATYPE *B, int N, int opn) { \
  FNTYPE fn = FUNCARRAY[opn]; \
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
  for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \
    B[i] = fn(A[i]); \
  } \
} \
 \
int apply_gfun(ATYPE *A, ATYPE *B, int N, int opn) { \
  int nthreads; \
  dim3 griddims; \
  setsizes(N, &griddims, &nthreads); \
  __apply_gfun_##ATYPE<<<griddims,nthreads>>>(A, B, N, opn); \
  cudaDeviceSynchronize(); \
  cudaError_t err = cudaGetLastError(); \
  return err; \
}
// Instantiate for the float and double function tables.
GENGFUN(float,fntype,fctns)
GENGFUN(double,dfntype,dfctns)
// GENGFUN2(ATYPE, FNTYPE, FUNCARRAY): like GENGFUN but for binary functions:
// C[i] = fn(A[i], B[i]) element-wise, with host launcher apply_gfun2()
// returning the cudaError_t status as an int.
#define GENGFUN2(ATYPE,FNTYPE,FUNCARRAY) \
__global__ void __apply_gfun2_##ATYPE(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \
  FNTYPE fn = FUNCARRAY[opn]; \
  int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
  for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \
    C[i] = fn(A[i], B[i]); \
  } \
} \
 \
int apply_gfun2(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \
  int nthreads; \
  dim3 griddims; \
  setsizes(N, &griddims, &nthreads); \
  __apply_gfun2_##ATYPE<<<griddims,nthreads>>>(A, B, C, N, opn); \
  cudaDeviceSynchronize(); \
  cudaError_t err = cudaGetLastError(); \
  return err; \
}
// Instantiate for the float and double binary-function tables.
GENGFUN2(float,optype,fctns2)
GENGFUN2(double,doptype,dfctns2)
#define GENAPPLY(ATYPE,OPTYPE,OPARRAY) \
__global__ void __apply_full(ATYPE *A, ATYPE *B, ATYPE *C, int N, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B[i]); \
} \
} \
\
__global__ void __apply_right_col(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B[i % nrows]); \
} \
} \
\
__global__ void __apply_right_row(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B[i / nrows]); \
} \
} \
\
__global__ void __apply_left_col(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i % nrows],B[i]); \
} \
} \
\
__global__ void __apply_left_row(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i / nrows],B[i]); \
} \
} \
\
__global__ void __apply_right_val(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
ATYPE val = B[0]; \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],val); \
} \
} \
\
__global__ void __apply_left_val(ATYPE *A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
ATYPE val = A[0]; \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(val,B[i]); \
} \
} \
\
__global__ void __apply_right_const(ATYPE *A, ATYPE B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A[i],B); \
} \
} \
\
__global__ void __apply_left_const(ATYPE A, ATYPE *B, ATYPE *C, int nrows, int ncols, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nrows*ncols; i += blockDim.x * gridDim.x * gridDim.y) { \
C[i] = op(A,B[i]); \
} \
} \
\
int apply_binop(ATYPE *A, int Anrows, int Ancols, \
ATYPE *B, int Bnrows, int Bncols, ATYPE *C, int opn) { \
int N = max(Anrows, Bnrows)*max(Ancols, Bncols); \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
if (Anrows == Bnrows && Ancols == Bncols) { \
__apply_full<<<griddims,nthreads>>>(A, B, C, N, opn); \
} else if (Anrows == Bnrows && Bncols == 1) { \
__apply_right_col<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \
} else if (Ancols == Bncols && Bnrows == 1) { \
__apply_right_row<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \
} else if (Anrows == Bnrows && Ancols == 1) { \
__apply_left_col<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \
} else if (Ancols == Bncols && Anrows == 1) { \
__apply_left_row<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \
} else if (Bnrows == 1 && Bncols == 1) { \
__apply_right_val<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \
} else if (Anrows == 1 && Ancols == 1) { \
__apply_left_val<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \
} \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
} \
\
int apply_binop_left_const(ATYPE A, \
ATYPE *B, int Bnrows, int Bncols, ATYPE *C, int opn) { \
int N = Bnrows* Bncols; \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
__apply_left_const<<<griddims,nthreads>>>(A, B, C, Bnrows, Bncols, opn); \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
} \
\
int apply_binop_right_const(ATYPE *A, int Anrows, int Ancols, \
ATYPE B, ATYPE *C, int opn) { \
int N = Anrows*Ancols; \
int nthreads; \
dim3 griddims; \
setsizes(N, &griddims, &nthreads); \
__apply_right_const<<<griddims,nthreads>>>(A, B, C, Anrows, Ancols, opn); \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
}
GENAPPLY(float,optype,operators)
GENAPPLY(int,ioptype,ioperators)
GENAPPLY(long long,loptype,loperators)
GENAPPLY(double,doptype,doperators)
#define GENSPOPERATION(ATYPE,OPTYPE,OPARRAY) \
__global__ void __sdoprow(int nrows, int ncols, int nnz, ATYPE *A, int *Aic, ATYPE *B, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \
int col = Aic[i]; \
ATYPE oldA = A[i]; \
A[i] = op(oldA,B[col]); \
} \
} \
\
__global__ void __sdopcol(int nrows, int ncols, int nnz, ATYPE *A, int *Air, ATYPE *B, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \
int row = Air[i]; \
ATYPE oldA = A[i]; \
A[i] = op(oldA,B[row]); \
} \
} \
\
__global__ void __sdopval(int nnz, ATYPE *A, ATYPE *B, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); \
ATYPE bval = B[0]; \
for (int i = ip; i < nnz; i += blockDim.x * gridDim.x * gridDim.y) { \
ATYPE oldA = A[i]; \
A[i] = op(oldA,bval); \
} \
} \
\
int sdoprow(int nrows, int ncols, int nnz, ATYPE *A, int *Aic, \
ATYPE *B, int len, int opn) { \
int nthreads; \
dim3 griddims; \
setsizes(nnz, &griddims, &nthreads); \
if (len > 1) { \
__sdoprow<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Aic, B, opn); \
} else { \
__sdopval<<<griddims,nthreads>>>(nnz, A, B, opn); \
} \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
} \
\
int sdopcol(int nrows, int ncols, int nnz, ATYPE *A, int *Air, \
ATYPE *B, int len, int opn) { \
int nthreads; \
dim3 griddims; \
setsizes(nnz, &griddims, &nthreads); \
if (len > 1) { \
__sdopcol<<<griddims,nthreads>>>(nrows, ncols, nnz, A, Air, B, opn); \
} else { \
__sdopval<<<griddims,nthreads>>>(nnz, A, B, opn); \
} \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
}
GENSPOPERATION(float,optype,operators)
GENSPOPERATION(double,doptype,doperators)
#define GENREDUCE1OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
OPTYPE op = OPARRAY[opn]; \
int basecol = threadIdx.y + blockDim.y * blockIdx.x; \
ATYPE v; \
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { \
v = initval; \
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows]; \
for (int i = threadIdx.x + blockDim.x; i < nrows; i += blockDim.x) { \
v = op(v, A[i + icol * nrows]); \
} \
for (int i = 1; i < blockDim.x; i *= 2) { \
ATYPE vtmp = __shfl_down(v, i); \
if (threadIdx.x + i < blockDim.x) { \
v = op(v, vtmp); \
} \
} \
if (threadIdx.x == 0) { \
B[icol] = v; \
} \
} \
}
#define GENREDUCE1OPX(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE op = OPARRAY[opn]; \
ATYPE v; \
for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { \
v = initval; \
if (threadIdx.x < nrows) v = A[threadIdx.x + icol * nrows]; \
for (int irow = threadIdx.x + blockDim.x; irow < nrows; irow += blockDim.x) { \
v = op(v, A[irow + icol * nrows]); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
for (int i = 1; i < blockDim.x; i *= 2) { \
if (i + threadIdx.x < blockDim.x) { \
parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); \
} \
} \
if (threadIdx.x == 0) { \
B[icol] = parts[0][threadIdx.y]; \
} \
__syncthreads(); \
} \
}
#if __CUDA_ARCH__ > 200
GENREDUCE1OP(float,optype,operators)
GENREDUCE1OP(int,ioptype,ioperators)
#else
GENREDUCE1OPX(float,optype,operators)
GENREDUCE1OPX(int,ioptype,ioperators)
#endif
GENREDUCE1OPX(long long,loptype,loperators)
GENREDUCE1OPX(double,doptype,doperators)
// Reduces the n-element device vector A into B[0] using Thrust.
// opn selects the reduction: 0 = sum, 10 = max, 11 = min (the codes match
// the dispatch in reduce1op/reduce2op below).  Unknown opcodes leave B
// untouched.
template<typename T>
void reducevec(int n, T *A, T *B, int opn) {
  thrust::device_ptr<T> pa(A);
  thrust::device_ptr<T> pb(B);
  T v;
  switch (opn) {
    case 0 :  // sum
      v = thrust::reduce(pa, pa + n);
      thrust::fill(pb, pb + 1, v);
      break;
    case 10 : // max
      // lowest(), not min(): for floating-point T, numeric_limits<T>::min()
      // is the smallest POSITIVE normal value, which silently breaks the
      // max-reduce whenever every element is negative.  For integer T,
      // lowest() == min(), so behavior there is unchanged.
      v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::lowest(), thrust::maximum<T>());
      thrust::fill(pb, pb + 1, v);
      break;
    case 11:  // min
      v = thrust::reduce(pa, pa + n, std::numeric_limits<T>::max(), thrust::minimum<T>());
      thrust::fill(pb, pb + 1, v);
      break;
  }
}
// GENREDUCE1OPY(ATYPE): host entry for a column-wise reduction of an
// nrows x ncols column-major matrix A; B receives one reduced value per
// column.  The single-column case degenerates to a full-vector reduce via
// reducevec.  Returns the cudaError_t status as an int.
#define GENREDUCE1OPY(ATYPE) \
int reduce1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
  if (ncols == 1) { \
    reducevec<ATYPE>(nrows, A, B, opn); \
  } else { \
    int blkx = 32; \
    int blky = min(32, ncols); \
    int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
    const dim3 blkdims(blkx,blky,1); \
    __reduce1op<<<nblks,blkdims>>>(nrows, ncols, A, B, initval, opn); \
  } \
  cudaDeviceSynchronize(); \
  cudaError_t err = cudaGetLastError(); \
  return err; \
}
GENREDUCE1OPY(float)
GENREDUCE1OPY(int)
GENREDUCE1OPY(long long)
GENREDUCE1OPY(double)
#define GENREDUCEBIN1OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
OPTYPE opbf = OPARRAY[opb]; \
OPTYPE oprf = OPARRAY[opr]; \
int basecol = threadIdx.y + blockDim.y * blockIdx.x; \
for (int icol = basecol; icol < ncols; icol += blockDim.y * gridDim.x) { \
ATYPE v = 0; \
for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \
v = oprf(v, opbf(A[i + icol * nrows], B[i + icol * nrows])); \
} \
for (int i = 1; i < blockDim.x; i *= 2) { \
v = oprf(v, __shfl_down(v, i)); \
} \
if (threadIdx.x == 0) { \
C[icol] = v; \
} \
} \
}
#define GENREDUCEBIN1OPX(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE opbf = OPARRAY[opb]; \
OPTYPE oprf = OPARRAY[opr]; \
for (int icol = threadIdx.y + blockIdx.x * blockDim.y; icol < ncols; icol += blockDim.y * gridDim.x) { \
ATYPE v = 0; \
for (int irow = threadIdx.x; irow < nrows; irow += blockDim.x) { \
v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
for (int i = 1; i < blockDim.x; i *= 2) { \
if (i + threadIdx.x < blockDim.x) { \
parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], parts[i + threadIdx.x][threadIdx.y]); \
} \
} \
if (threadIdx.x == 0) { \
C[icol] = parts[0][threadIdx.y]; \
} \
__syncthreads(); \
} \
}
#if __CUDA_ARCH__ > 200
GENREDUCEBIN1OP(float,optype,operators)
#else
GENREDUCEBIN1OPX(float,optype,operators)
#endif
GENREDUCEBIN1OPX(double,doptype,doperators)
// GENREDUCEBIN1OPY(ATYPE): host entry for the fused column-wise map-reduce
// C[icol] = reduce_r( opb(A[:,icol], B[:,icol]) ) over column-major inputs.
// Returns the cudaError_t status as an int.
#define GENREDUCEBIN1OPY(ATYPE) \
int reducebin1op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
  int blkx = min(32, nrows); \
  int blky = min(32, ncols); \
  int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
  const dim3 blkdims(blkx,blky,1); \
  __reducebin1op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr); \
  cudaDeviceSynchronize(); \
  cudaError_t err = cudaGetLastError(); \
  return err; \
}
GENREDUCEBIN1OPY(float)
GENREDUCEBIN1OPY(double)
#define GENREDUCE2OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reduce2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
__shared__ ATYPE parts[32][33]; \
OPTYPE op = OPARRAY[opn]; \
int baserow = threadIdx.x + blockDim.x * blockIdx.x; \
for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { \
ATYPE v = A[irow + threadIdx.y * nrows]; \
for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { \
v = op(v, A[irow + icol * nrows]); \
} \
parts[threadIdx.x][threadIdx.y] = v; \
__syncthreads(); \
ATYPE newv = initval; \
for (int i = 1; i < blockDim.y; i *= 2) { \
if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; \
__syncthreads(); \
if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = op(parts[threadIdx.x][threadIdx.y], newv); \
__syncthreads(); \
} \
if (threadIdx.y == 0) { \
B[irow] = parts[threadIdx.x][0]; \
} \
__syncthreads(); \
} \
} \
\
int reduce2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE initval, int opn) { \
if (nrows == 1) { \
reducevec<ATYPE>(ncols, A, B, opn); \
} else { \
int blkx = min(32, nrows); \
int blky = min(32, ncols); \
int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
const dim3 blkdims(blkx,blky,1); \
__reduce2op<<<nblks,blkdims>>>(nrows, ncols, A, B, initval, opn); \
} \
cudaDeviceSynchronize(); \
cudaError_t err = cudaGetLastError(); \
return err; \
}
GENREDUCE2OP(float,optype,operators)
GENREDUCE2OP(int,ioptype,ioperators)
GENREDUCE2OP(long long,loptype,loperators)
GENREDUCE2OP(double,doptype,doperators)
// GENREDUCEBIN2OP(ATYPE, OPTYPE, OPARRAY): row-wise fused map-reduce over
// column-major matrices: C[irow] = reduce_r( opb(A[irow,:], B[irow,:]) ),
// using a 32x32 thread tile with a padded shared-memory tree reduction
// across threadIdx.y.  Host wrapper reducebin2op returns cudaError_t as int.
// Fixed: the per-thread accumulators were hard-coded `float`, which lost
// precision in the double instantiation below -- they are now ATYPE.
#define GENREDUCEBIN2OP(ATYPE,OPTYPE,OPARRAY) \
__global__ void __reducebin2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
  __shared__ ATYPE parts[32][33]; \
  OPTYPE opbf = OPARRAY[opb]; \
  OPTYPE oprf = OPARRAY[opr]; \
  int baserow = threadIdx.x + blockDim.x * blockIdx.x; \
  for (int irow = baserow; irow < nrows; irow += blockDim.x * gridDim.x) { \
    ATYPE v = opbf(A[irow + threadIdx.y * nrows], B[irow + threadIdx.y * nrows]); \
    for (int icol = threadIdx.y + blockDim.y; icol < ncols; icol += blockDim.y) { \
      v = oprf(v, opbf(A[irow + icol * nrows], B[irow + icol * nrows])); \
    } \
    parts[threadIdx.x][threadIdx.y] = v; \
    __syncthreads(); \
    ATYPE newv = 0; \
    for (int i = 1; i < blockDim.y; i *= 2) { \
      if (i + threadIdx.y < blockDim.y) newv = parts[threadIdx.x][i+threadIdx.y]; \
      __syncthreads(); \
      if (i + threadIdx.y < blockDim.y) parts[threadIdx.x][threadIdx.y] = oprf(parts[threadIdx.x][threadIdx.y], newv); \
      __syncthreads(); \
    } \
    if (threadIdx.y == 0) { \
      C[irow] = parts[threadIdx.x][0]; \
    } \
    __syncthreads(); \
  } \
} \
 \
int reducebin2op(int nrows, int ncols, ATYPE *A, ATYPE *B, ATYPE *C, int opb, int opr) { \
  int blkx = min(32, nrows); \
  int blky = min(32, ncols); \
  int nblks = min(65536, max(1, ((int)(((long long)nrows) * ncols / blkx / blky / 16)))); \
  const dim3 blkdims(blkx,blky,1); \
  __reducebin2op<<<nblks,blkdims>>>(nrows, ncols, A, B, C, opb, opr); \
  cudaDeviceSynchronize(); \
  cudaError_t err = cudaGetLastError(); \
  return err; \
}
GENREDUCEBIN2OP(float,optype,operators)
GENREDUCEBIN2OP(double,doptype,doperators)
|
818655483f9ec9d6d8b47283e7219f3720c8dc24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
#include <iostream>
#define numThreads 512
// 2^20 elements.  An integer shift avoids the double-precision pow() call
// (and its float->int conversion) and makes N a true compile-time constant.
const int N = 1 << 20;
// 1-D 3-point box filter using only global memory:
//   d_b[i] = (d_a[i-1] + d_a[i] + d_a[i+1]) / 3
// Missing neighbours at the array ends are treated as zero (the 2-element
// edge sums are still divided by 3).  One thread per element.
// NOTE(review): assumes size >= 2 -- for size == 1 the i == 0 branch reads
// d_a[1] out of bounds.
__global__ void stencil01(
	float *d_a,
	float *d_b,
	int size = N)
{
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	if (i == 0)
	{
		// Left edge: no i-1 neighbour (implicit zero).
		d_b[i] = d_a[i] + d_a[i + 1];
		d_b[i] /= 3.0f;
	}
	else if(i < size - 1)
	{
		d_b[i] = d_a[i - 1] + d_a[i] + d_a[i + 1];
		d_b[i] /= 3.0f;
	}
	else if (i == size - 1)
	{
		// Right edge: no i+1 neighbour (implicit zero).
		d_b[i] = d_a[i - 1] + d_a[i];
		d_b[i] /= 3.0f;
	}
}
// Shared-memory version of stencil01, computed in place: each block stages
// numThreads elements plus two halo cells in `cache`, then writes the
// 3-point average back to d_a.  Fixes to the original:
//  * the left halo (cache[0]) is now loaded by the first thread of EVERY
//    block -- previously only block 0 set it, so all other blocks read
//    uninitialized shared memory;
//  * the right-halo load no longer reads d_a[size] past the end of the
//    array in the last block;
//  * __syncthreads() is reached by all threads of the block (the original
//    returned early for tid >= size before the barrier).
__global__ void stencil02(
	float *d_a,
	int size = N)
{
	__shared__ float cache[numThreads + 2];
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	int cid = threadIdx.x;
	int c = cid + 1; // +1: cache[0] is the left halo slot
	if (tid < size)
	{
		cache[c] = d_a[tid];
		if (cid == 0)
		{
			// Left halo: zero at the array boundary, else the neighbour
			// owned by the previous block.
			cache[0] = (tid == 0) ? 0.0f : d_a[tid - 1];
		}
		if (cid == numThreads - 1 || tid == size - 1)
		{
			// Right halo: zero at the array boundary, else the neighbour
			// owned by the next block.
			cache[c + 1] = (tid == size - 1) ? 0.0f : d_a[tid + 1];
		}
	}
	__syncthreads();
	if (tid < size)
	{
		d_a[tid] = cache[c - 1] + cache[c] + cache[c + 1];
		d_a[tid] /= 3.0f;
	}
}
// Zero-padded 3-point average centred on a[c] (halo cells supply the pad).
__device__ float func(float *a, int c)
{
	float sum = a[c - 1] + a[c];
	sum += a[c + 1];
	return sum / 3.0f;
}
// Same as stencil02 but the final average is factored into the __device__
// helper func().  Fixes to the original (mirroring stencil02): the left
// halo is loaded by the first thread of every block (not just block 0),
// the right-halo load cannot read past d_a[size-1], and __syncthreads()
// is no longer skipped by early-returning threads.
__global__ void stencil03(
	float *d_a,
	int size = N)
{
	__shared__ float cache[numThreads + 2];
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	int cid = threadIdx.x;
	int c = cid + 1; // +1: cache[0] is the left halo slot
	if (tid < size)
	{
		cache[c] = d_a[tid];
		if (cid == 0)
		{
			cache[0] = (tid == 0) ? 0.0f : d_a[tid - 1];
		}
		if (cid == numThreads - 1 || tid == size - 1)
		{
			cache[c + 1] = (tid == size - 1) ? 0.0f : d_a[tid + 1];
		}
	}
	__syncthreads();
	if (tid < size)
	{
		d_a[tid] = func(cache, c);
	}
}
// Driver: runs the three stencil implementations on identical input and
// cross-checks their results element-by-element on the host.
// NOTE(review): no error checking on the HIP calls or kernel launches.
int main()
{
	float *h_a1 = new float[N];
	float *h_a2 = new float[N];
	float *h_a3 = new float[N];
	// Repeating 1,2,3 pattern so the correct output is easy to reason about.
	for (int i = 0; i < N; i++)
	{
		if (i % 3 == 0)
		{
			h_a1[i] = 1.0f;
			h_a2[i] = 1.0f;
			h_a3[i] = 1.0f;
		}
		else if (i % 3 == 1)
		{
			h_a1[i] = 2.0f;
			h_a2[i] = 2.0f;
			h_a3[i] = 2.0f;
		}
		else
		{
			h_a1[i] = 3.0f;
			h_a2[i] = 3.0f;
			h_a3[i] = 3.0f;
		}
	}
	// d_a11 receives stencil01's output; stencil02/03 update d_a2/d_a3 in place.
	float *d_a1, *d_a2, *d_a3, *d_a11;
	hipMalloc((void**)&d_a1, N*sizeof(float));
	hipMalloc((void**)&d_a2, N*sizeof(float));
	hipMalloc((void**)&d_a3, N*sizeof(float));
	hipMalloc((void**)&d_a11, N*sizeof(float));
	// All three device inputs are seeded from h_a1 (h_a2/h_a3 hold the same values).
	hipMemcpy(d_a1, h_a1, N*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_a2, h_a1, N*sizeof(float), hipMemcpyHostToDevice);
	hipMemcpy(d_a3, h_a1, N*sizeof(float), hipMemcpyHostToDevice);
	stencil01 << < (N + numThreads - 1) / numThreads, numThreads >> >(d_a1, d_a11);
	stencil02 << < (N + numThreads - 1) / numThreads, numThreads >> >(d_a2);
	stencil03 << < (N + numThreads - 1) / numThreads, numThreads >> >(d_a3);
	// Blocking D2H copies also synchronize with the kernels above.
	// Host arrays are reused to hold the results.
	hipMemcpy(h_a1, d_a11, N*sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(h_a2, d_a2, N*sizeof(float), hipMemcpyDeviceToHost);
	hipMemcpy(h_a3, d_a3, N*sizeof(float), hipMemcpyDeviceToHost);
	// Print every position where the shared-memory version disagrees with
	// the global-memory reference (exact float equality: same math order).
	for (int i = 0; i < N; i++)
	{
		if (h_a1[i] != h_a2[i])
		{
			std::cout << i << "," << i%numThreads << ": " << h_a1[i] << "," << h_a2[i] << "," << h_a3[i] << std::endl;
		}
	}
	hipFree(d_a1); hipFree(d_a2); hipFree(d_a3); hipFree(d_a11);
	delete[] h_a1; delete[] h_a2; delete[] h_a3;
	return 0;
}
| 818655483f9ec9d6d8b47283e7219f3720c8dc24.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
#include <iostream>
#define numThreads 512
// 2^20 elements.  An integer shift avoids the double-precision pow() call
// (and its float->int conversion) and makes N a true compile-time constant.
const int N = 1 << 20;
// 1-D 3-point box filter using only global memory:
//   d_b[i] = (d_a[i-1] + d_a[i] + d_a[i+1]) / 3
// Missing neighbours at the array ends are treated as zero (the 2-element
// edge sums are still divided by 3).  One thread per element.
// NOTE(review): assumes size >= 2 -- for size == 1 the i == 0 branch reads
// d_a[1] out of bounds.
__global__ void stencil01(
	float *d_a,
	float *d_b,
	int size = N)
{
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	if (i == 0)
	{
		// Left edge: no i-1 neighbour (implicit zero).
		d_b[i] = d_a[i] + d_a[i + 1];
		d_b[i] /= 3.0f;
	}
	else if(i < size - 1)
	{
		d_b[i] = d_a[i - 1] + d_a[i] + d_a[i + 1];
		d_b[i] /= 3.0f;
	}
	else if (i == size - 1)
	{
		// Right edge: no i+1 neighbour (implicit zero).
		d_b[i] = d_a[i - 1] + d_a[i];
		d_b[i] /= 3.0f;
	}
}
// Shared-memory version of stencil01, computed in place: each block stages
// numThreads elements plus two halo cells in `cache`, then writes the
// 3-point average back to d_a.  Fixes to the original:
//  * the left halo (cache[0]) is now loaded by the first thread of EVERY
//    block -- previously only block 0 set it, so all other blocks read
//    uninitialized shared memory;
//  * the right-halo load no longer reads d_a[size] past the end of the
//    array in the last block;
//  * __syncthreads() is reached by all threads of the block (the original
//    returned early for tid >= size before the barrier).
__global__ void stencil02(
	float *d_a,
	int size = N)
{
	__shared__ float cache[numThreads + 2];
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	int cid = threadIdx.x;
	int c = cid + 1; // +1: cache[0] is the left halo slot
	if (tid < size)
	{
		cache[c] = d_a[tid];
		if (cid == 0)
		{
			// Left halo: zero at the array boundary, else the neighbour
			// owned by the previous block.
			cache[0] = (tid == 0) ? 0.0f : d_a[tid - 1];
		}
		if (cid == numThreads - 1 || tid == size - 1)
		{
			// Right halo: zero at the array boundary, else the neighbour
			// owned by the next block.
			cache[c + 1] = (tid == size - 1) ? 0.0f : d_a[tid + 1];
		}
	}
	__syncthreads();
	if (tid < size)
	{
		d_a[tid] = cache[c - 1] + cache[c] + cache[c + 1];
		d_a[tid] /= 3.0f;
	}
}
// Zero-padded 3-point average centred on a[c] (halo cells supply the pad).
__device__ float func(float *a, int c)
{
	float sum = a[c - 1] + a[c];
	sum += a[c + 1];
	return sum / 3.0f;
}
// Same as stencil02 but the final average is factored into the __device__
// helper func().  Fixes to the original (mirroring stencil02): the left
// halo is loaded by the first thread of every block (not just block 0),
// the right-halo load cannot read past d_a[size-1], and __syncthreads()
// is no longer skipped by early-returning threads.
__global__ void stencil03(
	float *d_a,
	int size = N)
{
	__shared__ float cache[numThreads + 2];
	int tid = threadIdx.x + blockDim.x * blockIdx.x;
	int cid = threadIdx.x;
	int c = cid + 1; // +1: cache[0] is the left halo slot
	if (tid < size)
	{
		cache[c] = d_a[tid];
		if (cid == 0)
		{
			cache[0] = (tid == 0) ? 0.0f : d_a[tid - 1];
		}
		if (cid == numThreads - 1 || tid == size - 1)
		{
			cache[c + 1] = (tid == size - 1) ? 0.0f : d_a[tid + 1];
		}
	}
	__syncthreads();
	if (tid < size)
	{
		d_a[tid] = func(cache, c);
	}
}
int main()
{
float *h_a1 = new float[N];
float *h_a2 = new float[N];
float *h_a3 = new float[N];
for (int i = 0; i < N; i++)
{
if (i % 3 == 0)
{
h_a1[i] = 1.0f;
h_a2[i] = 1.0f;
h_a3[i] = 1.0f;
}
else if (i % 3 == 1)
{
h_a1[i] = 2.0f;
h_a2[i] = 2.0f;
h_a3[i] = 2.0f;
}
else
{
h_a1[i] = 3.0f;
h_a2[i] = 3.0f;
h_a3[i] = 3.0f;
}
}
float *d_a1, *d_a2, *d_a3, *d_a11;
cudaMalloc((void**)&d_a1, N*sizeof(float));
cudaMalloc((void**)&d_a2, N*sizeof(float));
cudaMalloc((void**)&d_a3, N*sizeof(float));
cudaMalloc((void**)&d_a11, N*sizeof(float));
cudaMemcpy(d_a1, h_a1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_a2, h_a1, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_a3, h_a1, N*sizeof(float), cudaMemcpyHostToDevice);
stencil01 << < (N + numThreads - 1) / numThreads, numThreads >> >(d_a1, d_a11);
stencil02 << < (N + numThreads - 1) / numThreads, numThreads >> >(d_a2);
stencil03 << < (N + numThreads - 1) / numThreads, numThreads >> >(d_a3);
cudaMemcpy(h_a1, d_a11, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_a2, d_a2, N*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_a3, d_a3, N*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
if (h_a1[i] != h_a2[i])
{
std::cout << i << "," << i%numThreads << ": " << h_a1[i] << "," << h_a2[i] << "," << h_a3[i] << std::endl;
}
}
cudaFree(d_a1); cudaFree(d_a2); cudaFree(d_a3); cudaFree(d_a11);
delete[] h_a1; delete[] h_a2; delete[] h_a3;
return 0;
}
|
60bb6c84ae9179423b764c9c314ac7e9a06939a6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// Abort with file/line context if a HIP runtime call fails.
// NOTE(review): the body is a bare "{ ... }" block (not do { } while(0)),
// so avoid using CHECK(...) as the sole statement of an if/else without
// braces -- a following semicolon would end the if early.
#define CHECK(call) { \
const hipError_t error = call; \
if (error != hipSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
} \
#define LEN 1<<22
typedef struct innerArray {
float x[LEN];
float y[LEN];
} innerArray;
void initialInnerArray(innerArray *ip, int size);
void incrementInnerArrayOnHost(innerArray *input, innerArray *output, const int n);
void checkResult(innerArray *hostRef, innerArray *gpuRef, const int n);
__global__ void incrementInnerArray(innerArray *input, innerArray * output, const int n);
int main(int argc, char **argv) {
int nElem = LEN;
size_t nBytes = sizeof(innerArray);
clock_t start, end;
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// allocate host memory
innerArray *h_in = (innerArray *)malloc(nBytes);
innerArray *h_out = (innerArray *)malloc(nBytes);
innerArray *h_out_gpu = (innerArray *)malloc(nBytes);
initialInnerArray(h_in, nElem);
// compute on CPU
incrementInnerArrayOnHost(h_in, h_out, nElem);
// allocate device memory
innerArray *d_in, *d_out;
CHECK(hipMalloc((innerArray**)&d_in, nBytes));
CHECK(hipMalloc((innerArray**)&d_out, nBytes));
// copy data from host to device
CHECK(hipMemcpy(d_in, h_in, nBytes, hipMemcpyHostToDevice));
// execute kernel
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
dim3 block(blocksize);
dim3 grid((nElem + block.x - 1) / block.x, 1);
start = clock();
hipLaunchKernelGGL(( incrementInnerArray), dim3(grid), dim3(block), 0, 0, d_in, d_out, nElem);
CHECK(hipDeviceSynchronize()); // synchronize kernel only for debugging!
end = clock();
double time = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("<<< %3d, %3d >>> elapsed %f ms\n", grid.x, block.x, time * 1000.0);
// copy data back to CPU
CHECK(hipMemcpy(h_out_gpu, d_out, nBytes, hipMemcpyDeviceToHost));
checkResult(h_out, h_out_gpu, nElem);
CHECK(hipGetLastError());
// free memories
CHECK(hipFree(d_in));
CHECK(hipFree(d_out));
free(h_in);
free(h_out);
free(h_out_gpu);
// reset device
CHECK(hipDeviceReset());
return EXIT_SUCCESS;
}
/**********CUDA kernels**********/
/**********CUDA kernels**********/
// One thread per element: adds 10 to each x and 20 to each y of the
// struct-of-arrays input, writing into output.  Threads past n do nothing.
__global__ void incrementInnerArray(innerArray *input, innerArray * output, const int n) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n) return;
	output->x[idx] = input->x[idx] + 10.f;
	output->y[idx] = input->y[idx] + 20.f;
}
/**********host functions**********/
// Fills both member arrays with pseudo-random values in [0, 2.55].
// rand() is drawn for x then y of each element, in index order.
void initialInnerArray(innerArray *ip, int size) {
	for (int k = 0; k < size; ++k) {
		ip->x[k] = (float)( rand() & 0xFF ) / 100.0f;
		ip->y[k] = (float)( rand() & 0xFF ) / 100.0f;
	}
}
// CPU reference implementation: the same +10 / +20 update the kernel
// performs, for validating the GPU result.
void incrementInnerArrayOnHost(innerArray *input, innerArray *output, const int n)
{
	for (int k = 0; k < n; ++k) {
		output->x[k] = input->x[k] + 10.f;
		output->y[k] = input->y[k] + 20.f;
	}
}
// Compares host and GPU results element-by-element; prints the first
// mismatch (and a summary line) if any component differs by more than
// epsilon.  Fixed: the absolute difference is now computed in double --
// the original called abs() on a float expression, which (with only
// <stdio.h> reliably in scope) can resolve to the integer overload and
// truncate sub-1.0 differences to zero, masking real mismatches.
void checkResult(innerArray *hostRef, innerArray *gpuRef, const int n) {
	double epsilon = 1.0E-8;
	bool match = 1;
	for (int i = 0; i < n; i++) {
		double dx = hostRef->x[i] - gpuRef->x[i];
		if (dx < 0) dx = -dx;
		if (dx > epsilon) {
			match = 0;
			printf("different on x %dth element: host %f gpu %f\n",
					i, hostRef->x[i], gpuRef->x[i]);
			break;
		}
		double dy = hostRef->y[i] - gpuRef->y[i];
		if (dy < 0) dy = -dy;
		if (dy > epsilon) {
			match = 0;
			printf("different on y %dth element: host %f gpu %f\n",
					i, hostRef->y[i], gpuRef->y[i]);
			break;
		}
	}
	if (!match) printf("Arrays do not match.\n\n");
} | 60bb6c84ae9179423b764c9c314ac7e9a06939a6.cu | #include <stdio.h>
#include <cuda_runtime.h>
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
#define LEN 1<<22
typedef struct innerArray {
float x[LEN];
float y[LEN];
} innerArray;
void initialInnerArray(innerArray *ip, int size);
void incrementInnerArrayOnHost(innerArray *input, innerArray *output, const int n);
void checkResult(innerArray *hostRef, innerArray *gpuRef, const int n);
__global__ void incrementInnerArray(innerArray *input, innerArray * output, const int n);
int main(int argc, char **argv) {
int nElem = LEN;
size_t nBytes = sizeof(innerArray);
clock_t start, end;
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate host memory
innerArray *h_in = (innerArray *)malloc(nBytes);
innerArray *h_out = (innerArray *)malloc(nBytes);
innerArray *h_out_gpu = (innerArray *)malloc(nBytes);
initialInnerArray(h_in, nElem);
// compute on CPU
incrementInnerArrayOnHost(h_in, h_out, nElem);
// allocate device memory
innerArray *d_in, *d_out;
CHECK(cudaMalloc((innerArray**)&d_in, nBytes));
CHECK(cudaMalloc((innerArray**)&d_out, nBytes));
// copy data from host to device
CHECK(cudaMemcpy(d_in, h_in, nBytes, cudaMemcpyHostToDevice));
// execute kernel
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
dim3 block(blocksize);
dim3 grid((nElem + block.x - 1) / block.x, 1);
start = clock();
incrementInnerArray<<<grid, block>>>(d_in, d_out, nElem);
CHECK(cudaDeviceSynchronize()); // synchronize kernel only for debugging!
end = clock();
double time = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("<<< %3d, %3d >>> elapsed %f ms\n", grid.x, block.x, time * 1000.0);
// copy data back to CPU
CHECK(cudaMemcpy(h_out_gpu, d_out, nBytes, cudaMemcpyDeviceToHost));
checkResult(h_out, h_out_gpu, nElem);
CHECK(cudaGetLastError());
// free memories
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out));
free(h_in);
free(h_out);
free(h_out_gpu);
// reset device
CHECK(cudaDeviceReset());
return EXIT_SUCCESS;
}
/**********CUDA kernels**********/
/**********CUDA kernels**********/
// One thread per element: adds 10 to each x and 20 to each y of the
// struct-of-arrays input, writing into output.  Threads past n do nothing.
__global__ void incrementInnerArray(innerArray *input, innerArray * output, const int n) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n) return;
	output->x[idx] = input->x[idx] + 10.f;
	output->y[idx] = input->y[idx] + 20.f;
}
/**********host functions**********/
void initialInnerArray(innerArray *ip, int size) {
for (int i = 0; i < size; i++) {
ip->x[i] = (float)( rand() & 0xFF ) / 100.0f;
ip->y[i] = (float)( rand() & 0xFF ) / 100.0f;
}
return;
}
void incrementInnerArrayOnHost(innerArray *input, innerArray *output, const int n)
{
for (int i = 0; i < n; i++) {
output->x[i] = input->x[i] + 10.f;
output->y[i] = input->y[i] + 20.f;
}
return;
}
// Compares host and GPU results element-by-element; prints the first
// mismatch (and a summary line) if any component differs by more than
// epsilon.  Fixed: the absolute difference is now computed in double --
// the original called abs() on a float expression, which (with only
// <stdio.h> reliably in scope) can resolve to the integer overload and
// truncate sub-1.0 differences to zero, masking real mismatches.
void checkResult(innerArray *hostRef, innerArray *gpuRef, const int n) {
	double epsilon = 1.0E-8;
	bool match = 1;
	for (int i = 0; i < n; i++) {
		double dx = hostRef->x[i] - gpuRef->x[i];
		if (dx < 0) dx = -dx;
		if (dx > epsilon) {
			match = 0;
			printf("different on x %dth element: host %f gpu %f\n",
					i, hostRef->x[i], gpuRef->x[i]);
			break;
		}
		double dy = hostRef->y[i] - gpuRef->y[i];
		if (dy < 0) dy = -dy;
		if (dy > epsilon) {
			match = 0;
			printf("different on y %dth element: host %f gpu %f\n",
					i, hostRef->y[i], gpuRef->y[i]);
			break;
		}
	}
	if (!match) printf("Arrays do not match.\n\n");
} |
ed71ff90925f61a5fc2f0c7aab687900ecc3200b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise product W[ik] * delta[k] (row-major output), as used in
// backprop weight-gradient computation.  Thread (x, y) writes one entry.
// `width` is the row stride of w_ik and output; d_k has `width` entries.
// NOTE(review): no bounds checks -- assumes the grid*block exactly covers
// the matrix; confirm launch configuration at the call site.
__global__ void delta_product ( const float * w_ik, const float * d_k, float * output, unsigned int width )
{
    // X is layer[i] nodes (size_i)
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // Y is layer[k] nodes (size_k) == d_k == w_per_n
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // W[ik] * delta[k] - Row-Major Matrix (round-toward-zero float multiply)
    output[width*x+y] = __fmul_rz( d_k[y], w_ik[width*x+y]);
} | ed71ff90925f61a5fc2f0c7aab687900ecc3200b.cu | #include "includes.h"
// Element-wise product W[ik] * delta[k] (row-major output), as used in
// backprop weight-gradient computation.  Thread (x, y) writes one entry.
// `width` is the row stride of w_ik and output; d_k has `width` entries.
// NOTE(review): no bounds checks -- assumes the grid*block exactly covers
// the matrix; confirm launch configuration at the call site.
__global__ void delta_product ( const float * w_ik, const float * d_k, float * output, unsigned int width )
{
    // X is layer[i] nodes (size_i)
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    // Y is layer[k] nodes (size_k) == d_k == w_per_n
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // W[ik] * delta[k] - Row-Major Matrix (round-toward-zero float multiply)
    output[width*x+y] = __fmul_rz( d_k[y], w_ik[width*x+y]);
} |
b32f6c06c3b8cc8f49f8f33cfe4e90a150240a10.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include </opt/eecs/Matlab/R2015b/toolbox/rtw/rtwdemos/crl_demo/cblas.h>
// includes, project
#include "rocblas.h"
//=============================================================================
extern "C" void saxpy_(const int *, const float *, const float *, const int *,
const float *, const int *);
extern "C" float snrm2_(const int *, const float *, const int *);
extern "C" float isamax_(const int *, const float *, const int *);
extern "C" void sgeqrf_(int*,int*,float*,int*,float*,float*,int*,int*);
extern "C" int strmm_(char*, char *, char*, char *, int *, int *, float *,
float *, int *, float *, int *);
extern "C" int sgemm_(char *, char *, int *, int *, int *, float *, float *,
int *, float *, int *, float *, float *, int *);
void chol_qr_it(int m, int n, float *A, int lda, float *R);
void chol_qr_it_GPU(int m, int n, float *d_A, int lda, float *d_G, float *R,
float *h_work, int lwork);
//=============================================================================
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
/*
 * Test driver: builds a random N x M matrix and orthogonalizes it via
 * three paths — LAPACK sgeqrf on the CPU (reference), chol_qr_it on
 * the CPU, and chol_qr_it_GPU on the device — reporting timings plus
 * the orthogonality error ||I - Q'Q|| and the backward error ||A - QR||.
 * Options: -N <rows> -M <cols>.
 */
int
main( int argc, char** argv)
{
  /* Matrix size */
  int N, M;                  // NxM matrix; the M columns get orthogonalized
  float *d_A, *d_G;          // device copies of A and the M x M Gram matrix
  float *h_work, *h_tau;     // pinned workspace and tau array on the host
  float *h_A, *h_Q1, *h_Q2;  // three host copies of the same random matrix
  float *h_R, *h_G;
  int info[1], lwork, i;

  N = 131072;
  M = 128;
  if (argc != 1)
    for(i = 1; i<argc; i++){
      if (strcmp("-N", argv[i])==0)
        N = atoi(argv[++i]);
      else if (strcmp("-M", argv[i])==0)
        M = atoi(argv[++i]);
    }

  printf("\nUsage: \n");
  printf(" chol_qr_it -N %d -M %d\n\n", N, M);

  lwork = 2*N*M;
  int n2 = N * M;

  /* Initialize CUBLAS */
  hipblasInit();

  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  float milliseconds = 0;

  /* Allocate host memory for the matrices */
  h_A  = (float*)malloc(n2 * sizeof( h_A[0]));
  h_Q1 = (float*)malloc(n2 * sizeof(h_Q1[0]));
  h_Q2 = (float*)malloc(n2 * sizeof(h_Q2[0]));
  h_G  = (float*)malloc(M*M * sizeof(h_G[0]));
  h_R  = (float*)malloc(M*M * sizeof(h_R[0]));
  /* Pinned host workspace.  NOTE: the original code called hipHostMalloc
     twice on the same pointer, leaking the first allocation; allocate
     exactly once. */
  hipHostMalloc( (void**)&h_work, lwork * sizeof(h_work[0]));
  h_tau = (float*)malloc(N * sizeof(h_tau[0]));

  /* Take a random matrix h_A = h_Q1 = h_Q2 */
  for (i = 0; i < n2; i++) {
    h_A[i] = h_Q1[i] = h_Q2[i] = rand() / (float)RAND_MAX;
  }

  /* Allocate device memory for the matrices */
  hipblasAlloc(n2,  sizeof(d_A[0]), (void**)&d_A);
  hipblasAlloc(M*M, sizeof(d_G[0]), (void**)&d_G);

  /* =====================================================================
        Performs QR on CPU using LAPACK (reference factorization)
     ===================================================================== */
  sgeqrf_(&N, &M, h_A, &N, h_tau, h_work, &lwork, info);
  if (info[0] < 0)
    printf("Argument %d of sgeqrf had an illegal value.\n", -info[0]);

  /* Initialize the device matrix with the host matrix */
  hipblasSetVector(n2, sizeof(h_Q2[0]), h_Q2, 1, d_A, 1);

  /* =====================================================================
        Performs orthogonalization on CPU using chol_qr_it
     ===================================================================== */
  hipEventRecord(start);
  chol_qr_it(N, M, h_Q2, N, h_R);
  hipEventRecord(stop);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&milliseconds, start, stop);
  printf("\n\nCPU Processing time: %f (ms) \n", milliseconds);
  printf("Speed: %f GFlops \n", 4.*N*M*M/(3.*1000000*milliseconds));

  float one = 1.f, zero = 0.f;
  const int MM=M*M,nn=n2;
  const int myint=1;
  const float minusone = -1.0f;

  /* Orthogonality check: G = Q'Q, subtract I, measure the residual.
     NOTE(review): isamax_ conventionally returns the 1-based index of
     the largest |element|, yet it is printed with %e — confirm intent. */
  sgemm_("t", "n", &M, &M, &N, &one, h_Q2, &N, h_Q2, &N, &zero, h_G, &M);
  fprintf(stderr, "\nIteration just cpu \n");
  for(i=0; i<M*M; i+=(M+1)) h_G[i] -= one;
  printf(" ||I - Q'Q||_F = %e, ||I-Q'Q||_max = %e \n", snrm2_(&MM, h_G, &myint), isamax_(&MM, h_G, &myint));
  fprintf(stderr, "\nIteration just cpu \n");
  /* Backward error: reconstruct Q*R in place and subtract the original A */
  strmm_("r", "u", "n", "n", &N, &M, &one, h_R, &M, h_Q2, &N);
  saxpy_(&n2, &minusone, h_Q1, &myint, h_Q2, &myint);
  printf(" ||A - Q R||_F = %e \n", snrm2_(&nn, h_Q2, &myint));

  /* =====================================================================
        Performs orthogonalization on CPU-GPU using chol_qr_it
     ===================================================================== */
  hipEventRecord(start);
  chol_qr_it_GPU(N, M, d_A, N, d_G, h_R, h_work, lwork);
  hipEventRecord(stop);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&milliseconds, start, stop);
  printf("\n\nGPU Processing time: %f (ms) \n", milliseconds);
  printf("Speed: %f GFlops \n", 4.*N*M*M/(3.*1000000*milliseconds));

  /* Read the result back and repeat both accuracy checks */
  hipblasGetVector(n2, sizeof(h_Q2[0]), d_A, 1, h_Q2, 1);
  sgemm_("t", "n", &M, &M, &N, &one, h_Q2, &N, h_Q2, &N, &zero, h_G, &M);
  for(i=0; i<M*M; i+=(M+1)) h_G[i] -= one;
  printf(" ||I - Q'Q||_F = %e, ||I-Q'Q||_max = %e \n",
         snrm2_(&MM, h_G, &myint), isamax_(&MM, h_G, &myint));
  strmm_("r", "u", "n", "n", &N, &M, &one, h_R, &M, h_Q2, &N);
  saxpy_(&n2, &minusone, h_Q1, &myint, h_Q2, &myint);
  printf(" ||A - Q R||_F = %e \n",
         snrm2_(&nn, h_Q2, &myint));

  /* Memory clean up */
  free(h_A);
  free(h_Q1);
  free(h_Q2);
  free(h_R);
  free(h_G);
  /* h_work came from hipHostMalloc, so it must be released with
     hipHostFree (the original hipblasFree was the wrong deallocator). */
  hipHostFree(h_work);
  free(h_tau);
  hipblasFree(d_G);
  hipblasFree(d_A);
  hipEventDestroy(start);
  hipEventDestroy(stop);

  /* Shutdown */
  hipblasShutdown();
  return 0;
}
| b32f6c06c3b8cc8f49f8f33cfe4e90a150240a10.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include </opt/eecs/Matlab/R2015b/toolbox/rtw/rtwdemos/crl_demo/cblas.h>
// includes, project
#include "cublas.h"
//=============================================================================
extern "C" void saxpy_(const int *, const float *, const float *, const int *,
const float *, const int *);
extern "C" float snrm2_(const int *, const float *, const int *);
extern "C" float isamax_(const int *, const float *, const int *);
extern "C" void sgeqrf_(int*,int*,float*,int*,float*,float*,int*,int*);
extern "C" int strmm_(char*, char *, char*, char *, int *, int *, float *,
float *, int *, float *, int *);
extern "C" int sgemm_(char *, char *, int *, int *, int *, float *, float *,
int *, float *, int *, float *, float *, int *);
void chol_qr_it(int m, int n, float *A, int lda, float *R);
void chol_qr_it_GPU(int m, int n, float *d_A, int lda, float *d_G, float *R,
float *h_work, int lwork);
//=============================================================================
///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
/*
 * Test driver: builds a random N x M matrix and orthogonalizes it via
 * three paths — LAPACK sgeqrf on the CPU (reference), chol_qr_it on
 * the CPU, and chol_qr_it_GPU on the device — reporting timings plus
 * the orthogonality error ||I - Q'Q|| and the backward error ||A - QR||.
 * Options: -N <rows> -M <cols>.
 */
int
main( int argc, char** argv)
{
  /* Matrix size */
  int N, M;                  // NxM matrix; the M columns get orthogonalized
  float *d_A, *d_G;          // device copies of A and the M x M Gram matrix
  float *h_work, *h_tau;     // pinned workspace and tau array on the host
  float *h_A, *h_Q1, *h_Q2;  // three host copies of the same random matrix
  float *h_R, *h_G;
  int info[1], lwork, i;

  N = 131072;
  M = 128;
  if (argc != 1)
    for(i = 1; i<argc; i++){
      if (strcmp("-N", argv[i])==0)
        N = atoi(argv[++i]);
      else if (strcmp("-M", argv[i])==0)
        M = atoi(argv[++i]);
    }

  printf("\nUsage: \n");
  printf(" chol_qr_it -N %d -M %d\n\n", N, M);

  lwork = 2*N*M;
  int n2 = N * M;

  /* Initialize CUBLAS */
  cublasInit();

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float milliseconds = 0;

  /* Allocate host memory for the matrices */
  h_A  = (float*)malloc(n2 * sizeof( h_A[0]));
  h_Q1 = (float*)malloc(n2 * sizeof(h_Q1[0]));
  h_Q2 = (float*)malloc(n2 * sizeof(h_Q2[0]));
  h_G  = (float*)malloc(M*M * sizeof(h_G[0]));
  h_R  = (float*)malloc(M*M * sizeof(h_R[0]));
  /* Pinned host workspace.  NOTE: the original code called cudaMallocHost
     twice on the same pointer, leaking the first allocation; allocate
     exactly once. */
  cudaMallocHost( (void**)&h_work, lwork * sizeof(h_work[0]));
  h_tau = (float*)malloc(N * sizeof(h_tau[0]));

  /* Take a random matrix h_A = h_Q1 = h_Q2 */
  for (i = 0; i < n2; i++) {
    h_A[i] = h_Q1[i] = h_Q2[i] = rand() / (float)RAND_MAX;
  }

  /* Allocate device memory for the matrices */
  cublasAlloc(n2,  sizeof(d_A[0]), (void**)&d_A);
  cublasAlloc(M*M, sizeof(d_G[0]), (void**)&d_G);

  /* =====================================================================
        Performs QR on CPU using LAPACK (reference factorization)
     ===================================================================== */
  sgeqrf_(&N, &M, h_A, &N, h_tau, h_work, &lwork, info);
  if (info[0] < 0)
    printf("Argument %d of sgeqrf had an illegal value.\n", -info[0]);

  /* Initialize the device matrix with the host matrix */
  cublasSetVector(n2, sizeof(h_Q2[0]), h_Q2, 1, d_A, 1);

  /* =====================================================================
        Performs orthogonalization on CPU using chol_qr_it
     ===================================================================== */
  cudaEventRecord(start);
  chol_qr_it(N, M, h_Q2, N, h_R);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&milliseconds, start, stop);
  printf("\n\nCPU Processing time: %f (ms) \n", milliseconds);
  printf("Speed: %f GFlops \n", 4.*N*M*M/(3.*1000000*milliseconds));

  float one = 1.f, zero = 0.f;
  const int MM=M*M,nn=n2;
  const int myint=1;
  const float minusone = -1.0f;

  /* Orthogonality check: G = Q'Q, subtract I, measure the residual.
     NOTE(review): isamax_ conventionally returns the 1-based index of
     the largest |element|, yet it is printed with %e — confirm intent. */
  sgemm_("t", "n", &M, &M, &N, &one, h_Q2, &N, h_Q2, &N, &zero, h_G, &M);
  fprintf(stderr, "\nIteration just cpu \n");
  for(i=0; i<M*M; i+=(M+1)) h_G[i] -= one;
  printf(" ||I - Q'Q||_F = %e, ||I-Q'Q||_max = %e \n", snrm2_(&MM, h_G, &myint), isamax_(&MM, h_G, &myint));
  fprintf(stderr, "\nIteration just cpu \n");
  /* Backward error: reconstruct Q*R in place and subtract the original A */
  strmm_("r", "u", "n", "n", &N, &M, &one, h_R, &M, h_Q2, &N);
  saxpy_(&n2, &minusone, h_Q1, &myint, h_Q2, &myint);
  printf(" ||A - Q R||_F = %e \n", snrm2_(&nn, h_Q2, &myint));

  /* =====================================================================
        Performs orthogonalization on CPU-GPU using chol_qr_it
     ===================================================================== */
  cudaEventRecord(start);
  chol_qr_it_GPU(N, M, d_A, N, d_G, h_R, h_work, lwork);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&milliseconds, start, stop);
  printf("\n\nGPU Processing time: %f (ms) \n", milliseconds);
  printf("Speed: %f GFlops \n", 4.*N*M*M/(3.*1000000*milliseconds));

  /* Read the result back and repeat both accuracy checks */
  cublasGetVector(n2, sizeof(h_Q2[0]), d_A, 1, h_Q2, 1);
  sgemm_("t", "n", &M, &M, &N, &one, h_Q2, &N, h_Q2, &N, &zero, h_G, &M);
  for(i=0; i<M*M; i+=(M+1)) h_G[i] -= one;
  printf(" ||I - Q'Q||_F = %e, ||I-Q'Q||_max = %e \n",
         snrm2_(&MM, h_G, &myint), isamax_(&MM, h_G, &myint));
  strmm_("r", "u", "n", "n", &N, &M, &one, h_R, &M, h_Q2, &N);
  saxpy_(&n2, &minusone, h_Q1, &myint, h_Q2, &myint);
  printf(" ||A - Q R||_F = %e \n",
         snrm2_(&nn, h_Q2, &myint));

  /* Memory clean up */
  free(h_A);
  free(h_Q1);
  free(h_Q2);
  free(h_R);
  free(h_G);
  /* h_work came from cudaMallocHost, so it must be released with
     cudaFreeHost (the original cublasFree was the wrong deallocator). */
  cudaFreeHost(h_work);
  free(h_tau);
  cublasFree(d_G);
  cublasFree(d_A);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);

  /* Shutdown */
  cublasShutdown();
  return 0;
}
52563d05c3104cff6eba6df08825434b82d1fb72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2015 Johns Hopkins University (author: Daniel Povey)
// 2019 Yiwen Shao
// 2020 Yiming Wang
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include <cfloat>
#include "chain-kernels-ansi.h"
#include <stdio.h>
static __constant__ float cuMinLogDiffFloat = -15.942385f; // log(1.19209290e-7f)
// Numerically stable log(exp(x) + exp(y)):
// returns max + log1p(exp(min - max)).  When the (non-positive)
// exponent drops below cuMinLogDiffFloat the smaller term underflows
// and the larger argument is returned unchanged.
template <typename Real>
__device__ inline Real log_add(Real x, Real y) {
  // Put the larger operand in `hi`; `diff` is always <= 0, which keeps
  // expf() from overflowing.
  Real hi = x, diff;
  if (x < y) {
    hi = y;
    diff = x - y;
  } else {
    diff = y - x;
  }
  if (diff >= cuMinLogDiffFloat)
    return hi + log1pf(expf(diff));
  return hi;  // smaller term is negligible at float precision
}
// Atomically performs *address = log_add(*address, val) on a double.
// Implemented as a compare-and-swap retry loop over the 64-bit pattern;
// returns the value held at *address just before the successful update
// (the usual CUDA atomic return convention).
__device__ double atomicLogAdd(double* address, double val) {
  unsigned long long int* target = (unsigned long long int*) address;
  unsigned long long int expected = *target;
  for (;;) {
    unsigned long long int updated =
        __double_as_longlong(log_add(val, __longlong_as_double(expected)));
    // Integer CAS sidesteps the NaN != NaN trap of floating-point compares.
    unsigned long long int observed = atomicCAS(target, expected, updated);
    if (observed == expected)
      return __longlong_as_double(observed);
    expected = observed;  // lost the race; retry with the fresh value
  }
}
// Atomically performs *address = log_add(*address, val) on a float.
// Same CAS retry scheme as the double overload, over the 32-bit pattern;
// returns the value held at *address just before the successful update.
__device__ float atomicLogAdd(float* address, float val) {
  int* target = (int*)address;
  int expected = *target;
  for (;;) {
    int updated = __float_as_int(log_add(val, __int_as_float(expected)));
    // Integer CAS sidesteps the NaN != NaN trap of floating-point compares.
    int observed = atomicCAS(target, expected, updated);
    if (observed == expected)
      return __int_as_float(observed);
    expected = observed;  // lost the race; retry with the fresh value
  }
}
// Type-dispatching convenience wrapper: forwards to the float or double
// atomicLogAdd overload; the atomic's return value is discarded.
template <typename Real>
__device__ inline void atomic_log_add(Real* address, Real value) {
  atomicLogAdd(address, value);
}
// Similar to those in chain-kernels.cu, but computed in log-domain.
// Basically the operator "*" is replaced with "+", and "+" is replaced with "log_add".
//
// Forward (alpha) recursion for a single time step t: reads the alphas
// of frame t-1 and writes the alphas of frame t.
// Launch layout: blockIdx.y = HMM state h; (blockIdx.x, threadIdx.x)
// together index the sequence s.  alpha is laid out as
// [num_sequences][num_frames+1][num_hmm_states+1]; the extra state slot
// (index H) of each frame holds that frame's normalizer (see the
// arbitrary_scale note at the bottom).
__global__
static void _cuda_chain_hmm_log_domain_forward(const int *backward_transition_indices,
                                               const int *backward_transitions,
                                               const float *backward_transition_probs,
                                               const float *probs,
                                               float *alpha,
                                               int t,
                                               int num_sequences,
                                               int num_frames,
                                               int num_hmm_states,
                                               int num_pdfs,
                                               int num_transitions) {
  // s is the index of the sequence within the minibatch,
  // from 0 .. num-egs-in-this-minibatch - 1.
  // h is the hmm-state index.
  int s = threadIdx.x + blockIdx.x * blockDim.x,
      h = blockIdx.y;
  if (s >= num_sequences)
    return;
  // T, H, D, K are used as strides
  int T = num_frames,
      H = num_hmm_states,
      D = num_pdfs,
      K = num_transitions;
  // log-domain accumulator: -INFINITY is log(0).
  float this_tot_alpha = -INFINITY;
  // [trans_i, trans_end): range of incoming transitions for (s, h).
  int trans_i = backward_transition_indices[s * H * 2 + h * 2],
      trans_end = backward_transition_indices[s * H * 2 + h * 2 + 1];
  // Note: regarding this loop unrolling, I tried the automatic unrolling using
  // #pragma unroll 2 (after modifying the loop to have an integer index), but I
  // did not see any performance improvement, it was slightly slower.  So the
  // compiler must be doing something different than what I'm doing here.
  const int loop_unroll = 2;  // don't change this without changing the code
                              // below.
  for (; trans_i + loop_unroll <= trans_end; trans_i += loop_unroll) {
    // Each transition record is a triple: (prev-state, next-state, pdf-id).
    float transition_prob0 = backward_transition_probs[s * K + trans_i];
    int pdf_id0 = backward_transitions[s * K * 3 + trans_i * 3 + 2],
        prev_hmm_state0 = backward_transitions[s * K * 3 + trans_i * 3];
    float transition_prob1 = backward_transition_probs[s * K + trans_i + 1];
    int pdf_id1 = backward_transitions[s * K * 3 + (trans_i + 1) * 3 + 2],
        prev_hmm_state1 = backward_transitions[s * K * 3 + (trans_i + 1) * 3];
    float pseudo_loglike0 = probs[s * T * D + (t-1) * D + pdf_id0],
        this_prev_alpha0 = alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + prev_hmm_state0],
        pseudo_loglike1 = probs[s * T * D + (t-1) * D + pdf_id1],
        this_prev_alpha1 = alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + prev_hmm_state1];
    // log-domain equivalent of alpha += prev_alpha * trans_prob * like.
    this_tot_alpha = log_add(this_tot_alpha,
                             log_add(this_prev_alpha0 + transition_prob0 + pseudo_loglike0,
                                     this_prev_alpha1 + transition_prob1 + pseudo_loglike1));
  }
  if (trans_i != trans_end) {
    // mop up the odd transition.
    float transition_prob0 = backward_transition_probs[s * K + trans_i];
    int pdf_id0 = backward_transitions[s * K * 3 + trans_i * 3 + 2],
        prev_hmm_state0 = backward_transitions[s * K * 3 + trans_i * 3];
    float pseudo_loglike0 = probs[s * T * D + (t-1) * D + pdf_id0],
        this_prev_alpha0 = alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + prev_hmm_state0];
    this_tot_alpha = log_add(this_tot_alpha, this_prev_alpha0 + transition_prob0 + pseudo_loglike0);
  }
  // Let arbitrary_scale be the inverse of the sum of all alpha values on-- the
  // previous frame this sum of all the alpha values is stored in the place that
  // we'd store the previous alpha for state-index equal to num_hmm_states
  // (i.e. one past the end).  We multiply this into all the
  // transition-probabilities from the previous frame to this frame, in both the
  // forward and backward passes, in order to keep the alphas in a good numeric
  // range.  This won't affect the posteriors, as it's just a constant factor
  // for each frame, but when computing the total likelihood we'll need to
  // compensate for it later on.
  // (In log-domain, "inverse" and "multiply" become negation and addition.)
  float arbitrary_scale = -alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + H];
  alpha[s * (T+1) * (H+1) + t * (H+1) + h] = this_tot_alpha + arbitrary_scale;
}
// Backward (beta) recursion for a single time step t, log-domain.
// Also accumulates the occupation probabilities into log_prob_deriv via
// atomic_log_add (atomics are needed because several transitions in the
// same frame may share a pdf-id).
// beta is double-buffered: only frames (t % 2) and ((t+1) % 2) are kept.
// Launch layout matches the forward kernel: blockIdx.y = HMM state h,
// (blockIdx.x, threadIdx.x) index the sequence s.
__global__
static void _cuda_chain_hmm_log_domain_backward(const int *forward_transition_indices,
                                                const int *forward_transitions,
                                                const float *forward_transition_probs,
                                                const float *probs,
                                                const float *alpha,
                                                float *beta,
                                                float *log_prob_deriv,
                                                int t,
                                                int num_sequences,
                                                int num_frames,
                                                int num_hmm_states,
                                                int num_pdfs,
                                                int num_transitions) {
  // s is the index of the sequence within the minibatch,
  // from 0 .. num-egs-in-this-minibatch - 1.
  // h is the hmm-state index.
  int s = threadIdx.x + blockIdx.x * blockDim.x,
      h = blockIdx.y;
  if (s >= num_sequences)
    return;
  // T, H, D, K are used as strides
  int T = num_frames,
      H = num_hmm_states,
      D = num_pdfs,
      K = num_transitions;
  // See where arbitrary_scale is defined in the forward computation above
  float this_alpha_prob = alpha[s * (T+1) * (H+1) + t * (H+1) + h],
      arbitrary_scale = -alpha[s * (T+1) * (H+1) + t * (H+1) + H];
  float tot_variable_factor = -INFINITY;
  // occupation_factor is the (scaled) alpha part of the posterior.
  float occupation_factor = this_alpha_prob + arbitrary_scale;
  // [trans_i, trans_end): range of outgoing transitions for (s, h).
  int trans_i = forward_transition_indices[s * H * 2 + h * 2],
      trans_end = forward_transition_indices[s * H * 2 + h * 2 + 1];
  const int loop_unroll = 2;  // don't change this without changing the code
                              // below.
  for (; trans_i + loop_unroll <= trans_end; trans_i += loop_unroll) {
    float transition_prob0 = forward_transition_probs[s * K + trans_i];
    int pdf_id0 = forward_transitions[s * K * 3 + trans_i * 3 + 2],
        next_hmm_state0 = forward_transitions[s * K * 3 + trans_i * 3 + 1];
    float transition_prob1 = forward_transition_probs[s * K + trans_i + 1];
    int pdf_id1 = forward_transitions[s * K * 3 + (trans_i + 1) * 3 + 2],
        next_hmm_state1 = forward_transitions[s * K * 3 + (trans_i + 1) * 3 + 1];
    // variable_factor = trans_prob * next beta * pseudo-likelihood
    // (sums of logs); beta of frame t+1 lives in buffer ((t+1) % 2).
    float variable_factor0 = transition_prob0 +
        beta[s * 2 * H + ((t+1) % 2) * H + next_hmm_state0] +
        probs[s * T * D + t * D + pdf_id0];
    float variable_factor1 = transition_prob1 +
        beta[s * 2 * H + ((t+1) % 2) * H + next_hmm_state1] +
        probs[s * T * D + t * D + pdf_id1];
    tot_variable_factor = log_add(log_add(tot_variable_factor, variable_factor0),
                                  variable_factor1);
    // Occupation probability = alpha part * beta part; accumulate the
    // log-domain derivative for this pdf-id atomically.
    float occupation_prob0 = variable_factor0 + occupation_factor;
    atomic_log_add(log_prob_deriv + s * T * D + t * D + pdf_id0,
                   occupation_prob0);
    float occupation_prob1 = variable_factor1 + occupation_factor;
    atomic_log_add(log_prob_deriv + s * T * D + t * D + pdf_id1,
                   occupation_prob1);
  }
  if (trans_i != trans_end) {
    // mop up the odd transition.
    float transition_prob0 = forward_transition_probs[s * K + trans_i];
    int pdf_id0 = forward_transitions[s * K * 3 + trans_i * 3 + 2],
        next_hmm_state0 = forward_transitions[s * K * 3 + trans_i * 3 + 1];
    float variable_factor0 = transition_prob0 +
        beta[s * 2 * H + ((t+1) % 2) * H + next_hmm_state0] +
        probs[s * T * D + t * D + pdf_id0];
    tot_variable_factor = log_add(tot_variable_factor, variable_factor0);
    float occupation_prob0 = variable_factor0 + occupation_factor;
    atomic_log_add(log_prob_deriv + s * T * D + t * D + pdf_id0,
                   occupation_prob0);
  }
  beta[s * 2 * H + (t%2) * H + h] = tot_variable_factor + arbitrary_scale;
}
// Host-side wrapper: launches the log-domain forward kernel for frame t
// on the default stream with no dynamic shared memory.  Gr/Bl must
// match the layout the kernel expects (blockIdx.y = HMM state,
// x dimension covering num_sequences).
void cuda_chain_hmm_log_domain_forward(dim3 Gr, dim3 Bl,
                                       const int *backward_transition_indices,
                                       const int *backward_transitions,
                                       const float *backward_transition_probs,
                                       const float *probs,
                                       float *alpha,
                                       int t,
                                       int num_sequences,
                                       int num_frames,
                                       int num_hmm_states,
                                       int num_pdfs,
                                       int num_transitions) {
  hipLaunchKernelGGL(( _cuda_chain_hmm_log_domain_forward), dim3(Gr),dim3(Bl), 0, 0, backward_transition_indices,
                                              backward_transitions,
                                              backward_transition_probs,
                                              probs,
                                              alpha,
                                              t,
                                              num_sequences,
                                              num_frames,
                                              num_hmm_states,
                                              num_pdfs,
                                              num_transitions);
}
// Host-side wrapper: launches the log-domain backward kernel for frame
// t on the default stream with no dynamic shared memory.  Gr/Bl must
// match the layout the kernel expects (blockIdx.y = HMM state,
// x dimension covering num_sequences).
void cuda_chain_hmm_log_domain_backward(dim3 Gr, dim3 Bl,
                                        const int *forward_transition_indices,
                                        const int *forward_transitions,
                                        const float *forward_transition_probs,
                                        const float *probs,
                                        const float *alpha,
                                        float *beta,
                                        float *log_prob_deriv,
                                        int t,
                                        int num_sequences,
                                        int num_frames,
                                        int num_hmm_states,
                                        int num_pdfs,
                                        int num_transitions) {
  hipLaunchKernelGGL(( _cuda_chain_hmm_log_domain_backward), dim3(Gr),dim3(Bl), 0, 0, forward_transition_indices,
                                               forward_transitions,
                                               forward_transition_probs,
                                               probs,
                                               alpha,
                                               beta,
                                               log_prob_deriv,
                                               t,
                                               num_sequences,
                                               num_frames,
                                               num_hmm_states,
                                               num_pdfs,
                                               num_transitions);
}
| 52563d05c3104cff6eba6df08825434b82d1fb72.cu | // Copyright 2015 Johns Hopkins University (author: Daniel Povey)
// 2019 Yiwen Shao
// 2020 Yiming Wang
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
#include <cfloat>
#include "chain-kernels-ansi.h"
#include <stdio.h>
static __constant__ float cuMinLogDiffFloat = -15.942385f; // log(1.19209290e-7f)
// Numerically stable log(exp(x) + exp(y)):
// returns max + log1p(exp(min - max)).  When the (non-positive)
// exponent drops below cuMinLogDiffFloat the smaller term underflows
// and the larger argument is returned unchanged.
template <typename Real>
__device__ inline Real log_add(Real x, Real y) {
  // Put the larger operand in `hi`; `diff` is always <= 0, which keeps
  // expf() from overflowing.
  Real hi = x, diff;
  if (x < y) {
    hi = y;
    diff = x - y;
  } else {
    diff = y - x;
  }
  if (diff >= cuMinLogDiffFloat)
    return hi + log1pf(expf(diff));
  return hi;  // smaller term is negligible at float precision
}
// Atomically performs *address = log_add(*address, val) on a double.
// Implemented as a compare-and-swap retry loop over the 64-bit pattern;
// returns the value held at *address just before the successful update
// (the usual CUDA atomic return convention).
__device__ double atomicLogAdd(double* address, double val) {
  unsigned long long int* target = (unsigned long long int*) address;
  unsigned long long int expected = *target;
  for (;;) {
    unsigned long long int updated =
        __double_as_longlong(log_add(val, __longlong_as_double(expected)));
    // Integer CAS sidesteps the NaN != NaN trap of floating-point compares.
    unsigned long long int observed = atomicCAS(target, expected, updated);
    if (observed == expected)
      return __longlong_as_double(observed);
    expected = observed;  // lost the race; retry with the fresh value
  }
}
// Atomically performs *address = log_add(*address, val) on a float.
// Same CAS retry scheme as the double overload, over the 32-bit pattern;
// returns the value held at *address just before the successful update.
__device__ float atomicLogAdd(float* address, float val) {
  int* target = (int*)address;
  int expected = *target;
  for (;;) {
    int updated = __float_as_int(log_add(val, __int_as_float(expected)));
    // Integer CAS sidesteps the NaN != NaN trap of floating-point compares.
    int observed = atomicCAS(target, expected, updated);
    if (observed == expected)
      return __int_as_float(observed);
    expected = observed;  // lost the race; retry with the fresh value
  }
}
// Type-dispatching convenience wrapper: forwards to the float or double
// atomicLogAdd overload; the atomic's return value is discarded.
template <typename Real>
__device__ inline void atomic_log_add(Real* address, Real value) {
  atomicLogAdd(address, value);
}
// Similar to those in chain-kernels.cu, but computed in log-domain.
// Basically the operator "*" is replaced with "+", and "+" is replaced with "log_add".
//
// Forward (alpha) recursion for a single time step t: reads the alphas
// of frame t-1 and writes the alphas of frame t.
// Launch layout: blockIdx.y = HMM state h; (blockIdx.x, threadIdx.x)
// together index the sequence s.  alpha is laid out as
// [num_sequences][num_frames+1][num_hmm_states+1]; the extra state slot
// (index H) of each frame holds that frame's normalizer (see the
// arbitrary_scale note at the bottom).
__global__
static void _cuda_chain_hmm_log_domain_forward(const int *backward_transition_indices,
                                               const int *backward_transitions,
                                               const float *backward_transition_probs,
                                               const float *probs,
                                               float *alpha,
                                               int t,
                                               int num_sequences,
                                               int num_frames,
                                               int num_hmm_states,
                                               int num_pdfs,
                                               int num_transitions) {
  // s is the index of the sequence within the minibatch,
  // from 0 .. num-egs-in-this-minibatch - 1.
  // h is the hmm-state index.
  int s = threadIdx.x + blockIdx.x * blockDim.x,
      h = blockIdx.y;
  if (s >= num_sequences)
    return;
  // T, H, D, K are used as strides
  int T = num_frames,
      H = num_hmm_states,
      D = num_pdfs,
      K = num_transitions;
  // log-domain accumulator: -INFINITY is log(0).
  float this_tot_alpha = -INFINITY;
  // [trans_i, trans_end): range of incoming transitions for (s, h).
  int trans_i = backward_transition_indices[s * H * 2 + h * 2],
      trans_end = backward_transition_indices[s * H * 2 + h * 2 + 1];
  // Note: regarding this loop unrolling, I tried the automatic unrolling using
  // #pragma unroll 2 (after modifying the loop to have an integer index), but I
  // did not see any performance improvement, it was slightly slower.  So the
  // compiler must be doing something different than what I'm doing here.
  const int loop_unroll = 2;  // don't change this without changing the code
                              // below.
  for (; trans_i + loop_unroll <= trans_end; trans_i += loop_unroll) {
    // Each transition record is a triple: (prev-state, next-state, pdf-id).
    float transition_prob0 = backward_transition_probs[s * K + trans_i];
    int pdf_id0 = backward_transitions[s * K * 3 + trans_i * 3 + 2],
        prev_hmm_state0 = backward_transitions[s * K * 3 + trans_i * 3];
    float transition_prob1 = backward_transition_probs[s * K + trans_i + 1];
    int pdf_id1 = backward_transitions[s * K * 3 + (trans_i + 1) * 3 + 2],
        prev_hmm_state1 = backward_transitions[s * K * 3 + (trans_i + 1) * 3];
    float pseudo_loglike0 = probs[s * T * D + (t-1) * D + pdf_id0],
        this_prev_alpha0 = alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + prev_hmm_state0],
        pseudo_loglike1 = probs[s * T * D + (t-1) * D + pdf_id1],
        this_prev_alpha1 = alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + prev_hmm_state1];
    // log-domain equivalent of alpha += prev_alpha * trans_prob * like.
    this_tot_alpha = log_add(this_tot_alpha,
                             log_add(this_prev_alpha0 + transition_prob0 + pseudo_loglike0,
                                     this_prev_alpha1 + transition_prob1 + pseudo_loglike1));
  }
  if (trans_i != trans_end) {
    // mop up the odd transition.
    float transition_prob0 = backward_transition_probs[s * K + trans_i];
    int pdf_id0 = backward_transitions[s * K * 3 + trans_i * 3 + 2],
        prev_hmm_state0 = backward_transitions[s * K * 3 + trans_i * 3];
    float pseudo_loglike0 = probs[s * T * D + (t-1) * D + pdf_id0],
        this_prev_alpha0 = alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + prev_hmm_state0];
    this_tot_alpha = log_add(this_tot_alpha, this_prev_alpha0 + transition_prob0 + pseudo_loglike0);
  }
  // Let arbitrary_scale be the inverse of the sum of all alpha values on-- the
  // previous frame this sum of all the alpha values is stored in the place that
  // we'd store the previous alpha for state-index equal to num_hmm_states
  // (i.e. one past the end).  We multiply this into all the
  // transition-probabilities from the previous frame to this frame, in both the
  // forward and backward passes, in order to keep the alphas in a good numeric
  // range.  This won't affect the posteriors, as it's just a constant factor
  // for each frame, but when computing the total likelihood we'll need to
  // compensate for it later on.
  // (In log-domain, "inverse" and "multiply" become negation and addition.)
  float arbitrary_scale = -alpha[s * (T+1) * (H+1) + (t-1) * (H+1) + H];
  alpha[s * (T+1) * (H+1) + t * (H+1) + h] = this_tot_alpha + arbitrary_scale;
}
// Backward (beta) recursion for a single time step t, log-domain.
// Also accumulates the occupation probabilities into log_prob_deriv via
// atomic_log_add (atomics are needed because several transitions in the
// same frame may share a pdf-id).
// beta is double-buffered: only frames (t % 2) and ((t+1) % 2) are kept.
// Launch layout matches the forward kernel: blockIdx.y = HMM state h,
// (blockIdx.x, threadIdx.x) index the sequence s.
__global__
static void _cuda_chain_hmm_log_domain_backward(const int *forward_transition_indices,
                                                const int *forward_transitions,
                                                const float *forward_transition_probs,
                                                const float *probs,
                                                const float *alpha,
                                                float *beta,
                                                float *log_prob_deriv,
                                                int t,
                                                int num_sequences,
                                                int num_frames,
                                                int num_hmm_states,
                                                int num_pdfs,
                                                int num_transitions) {
  // s is the index of the sequence within the minibatch,
  // from 0 .. num-egs-in-this-minibatch - 1.
  // h is the hmm-state index.
  int s = threadIdx.x + blockIdx.x * blockDim.x,
      h = blockIdx.y;
  if (s >= num_sequences)
    return;
  // T, H, D, K are used as strides
  int T = num_frames,
      H = num_hmm_states,
      D = num_pdfs,
      K = num_transitions;
  // See where arbitrary_scale is defined in the forward computation above
  float this_alpha_prob = alpha[s * (T+1) * (H+1) + t * (H+1) + h],
      arbitrary_scale = -alpha[s * (T+1) * (H+1) + t * (H+1) + H];
  float tot_variable_factor = -INFINITY;
  // occupation_factor is the (scaled) alpha part of the posterior.
  float occupation_factor = this_alpha_prob + arbitrary_scale;
  // [trans_i, trans_end): range of outgoing transitions for (s, h).
  int trans_i = forward_transition_indices[s * H * 2 + h * 2],
      trans_end = forward_transition_indices[s * H * 2 + h * 2 + 1];
  const int loop_unroll = 2;  // don't change this without changing the code
                              // below.
  for (; trans_i + loop_unroll <= trans_end; trans_i += loop_unroll) {
    float transition_prob0 = forward_transition_probs[s * K + trans_i];
    int pdf_id0 = forward_transitions[s * K * 3 + trans_i * 3 + 2],
        next_hmm_state0 = forward_transitions[s * K * 3 + trans_i * 3 + 1];
    float transition_prob1 = forward_transition_probs[s * K + trans_i + 1];
    int pdf_id1 = forward_transitions[s * K * 3 + (trans_i + 1) * 3 + 2],
        next_hmm_state1 = forward_transitions[s * K * 3 + (trans_i + 1) * 3 + 1];
    // variable_factor = trans_prob * next beta * pseudo-likelihood
    // (sums of logs); beta of frame t+1 lives in buffer ((t+1) % 2).
    float variable_factor0 = transition_prob0 +
        beta[s * 2 * H + ((t+1) % 2) * H + next_hmm_state0] +
        probs[s * T * D + t * D + pdf_id0];
    float variable_factor1 = transition_prob1 +
        beta[s * 2 * H + ((t+1) % 2) * H + next_hmm_state1] +
        probs[s * T * D + t * D + pdf_id1];
    tot_variable_factor = log_add(log_add(tot_variable_factor, variable_factor0),
                                  variable_factor1);
    // Occupation probability = alpha part * beta part; accumulate the
    // log-domain derivative for this pdf-id atomically.
    float occupation_prob0 = variable_factor0 + occupation_factor;
    atomic_log_add(log_prob_deriv + s * T * D + t * D + pdf_id0,
                   occupation_prob0);
    float occupation_prob1 = variable_factor1 + occupation_factor;
    atomic_log_add(log_prob_deriv + s * T * D + t * D + pdf_id1,
                   occupation_prob1);
  }
  if (trans_i != trans_end) {
    // mop up the odd transition.
    float transition_prob0 = forward_transition_probs[s * K + trans_i];
    int pdf_id0 = forward_transitions[s * K * 3 + trans_i * 3 + 2],
        next_hmm_state0 = forward_transitions[s * K * 3 + trans_i * 3 + 1];
    float variable_factor0 = transition_prob0 +
        beta[s * 2 * H + ((t+1) % 2) * H + next_hmm_state0] +
        probs[s * T * D + t * D + pdf_id0];
    tot_variable_factor = log_add(tot_variable_factor, variable_factor0);
    float occupation_prob0 = variable_factor0 + occupation_factor;
    atomic_log_add(log_prob_deriv + s * T * D + t * D + pdf_id0,
                   occupation_prob0);
  }
  beta[s * 2 * H + (t%2) * H + h] = tot_variable_factor + arbitrary_scale;
}
// Host-side wrapper: launches the log-domain forward kernel for frame t
// on the default stream with no dynamic shared memory.  Gr/Bl must
// match the layout the kernel expects (blockIdx.y = HMM state,
// x dimension covering num_sequences).
void cuda_chain_hmm_log_domain_forward(dim3 Gr, dim3 Bl,
                                       const int *backward_transition_indices,
                                       const int *backward_transitions,
                                       const float *backward_transition_probs,
                                       const float *probs,
                                       float *alpha,
                                       int t,
                                       int num_sequences,
                                       int num_frames,
                                       int num_hmm_states,
                                       int num_pdfs,
                                       int num_transitions) {
  _cuda_chain_hmm_log_domain_forward<<<Gr,Bl>>>(backward_transition_indices,
                                                backward_transitions,
                                                backward_transition_probs,
                                                probs,
                                                alpha,
                                                t,
                                                num_sequences,
                                                num_frames,
                                                num_hmm_states,
                                                num_pdfs,
                                                num_transitions);
}
// Host-side wrapper: launches the log-domain backward kernel for frame
// t on the default stream with no dynamic shared memory.  Gr/Bl must
// match the layout the kernel expects (blockIdx.y = HMM state,
// x dimension covering num_sequences).
void cuda_chain_hmm_log_domain_backward(dim3 Gr, dim3 Bl,
                                        const int *forward_transition_indices,
                                        const int *forward_transitions,
                                        const float *forward_transition_probs,
                                        const float *probs,
                                        const float *alpha,
                                        float *beta,
                                        float *log_prob_deriv,
                                        int t,
                                        int num_sequences,
                                        int num_frames,
                                        int num_hmm_states,
                                        int num_pdfs,
                                        int num_transitions) {
  _cuda_chain_hmm_log_domain_backward<<<Gr,Bl>>>(forward_transition_indices,
                                                 forward_transitions,
                                                 forward_transition_probs,
                                                 probs,
                                                 alpha,
                                                 beta,
                                                 log_prob_deriv,
                                                 t,
                                                 num_sequences,
                                                 num_frames,
                                                 num_hmm_states,
                                                 num_pdfs,
                                                 num_transitions);
}
|
41b9ddfc32dad2b1d85910c87f54f40fecd338ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/selectors/size2_selector.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <basic_types.h>
#include <texture.h>
#include <matrix_analysis.h>
#include <async_event.h>
#include <determinism_checker.h>
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/binary_search.h> //lower_bound
#include <thrust/unique.h> //unique
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <logger.h>
#define EXPERIMENTAL_ITERATIVE_MATCHING
namespace amgx
{
namespace aggregation
{
namespace size2_selector
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
#ifndef DELETE
// Kernel to compute the weight of the edges with block_dia_csr format
// Computes a symmetric edge weight for every non-halo nonzero of a
// block-DIA-CSR matrix.  For an edge (i,j) that has a matching reverse
// entry (j,i):
//   w(i,j) = 0.5 * (|A_ij| + |A_ji|) / max(|diag_i|, |diag_j|)
// Only the first scalar of each bsize x bsize block is inspected.
// Entries without a reverse edge are left unmodified in edge_weights.
template <typename IndexType, typename ValueType>
__global__
void computeEdgeWeightsBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices,
                                   const ValueType *dia_values, const ValueType *nonzero_values, const IndexType num_block_rows, float *edge_weights, int bsize)
{
    const int bsize_sq = bsize * bsize;  // stride between consecutive blocks
    // Grid-stride loop over block rows.
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_block_rows;
            row += gridDim.x * blockDim.x)
    {
        const int row_end = row_offsets[row + 1];
        for (int j = row_offsets[row]; j < row_end; j++)
        {
            const int col = column_indices[j];
            if (col >= num_block_rows) { continue; } // halo column: skip
            // Search the neighbour's row for the reciprocal edge (col -> row).
            for (int k = row_offsets[col]; k < row_offsets[col + 1]; k++)
            {
                if (column_indices[k] == row)
                {
                    edge_weights[j] = (float) 0.5 * (fabs(nonzero_values[j * bsize_sq]) + fabs(nonzero_values[k * bsize_sq]))
                                      / max( fabs(dia_values[row * bsize_sq]), fabs(dia_values[col * bsize_sq]));
                    break;
                }
            }
        }
    }
}
#endif
// findStrongestNeighbour kernel for csr_matrix format
// computes weight on the fly
// For each unaggregated row, records its strongest unaggregated neighbour in
// strongest_neighbour[row] (a handshaking proposal).  If every neighbour is
// already aggregated, the row immediately joins the aggregate of its
// strongest aggregated neighbour.  Edge weights are computed on the fly from
// the symmetric pair of entries:
//   w(i,j) = 0.5 * (|A_ij| + |A_ji|) / max(|diag_i|, |diag_j|)
// Edges without a reciprocal (j,i) entry get weight 0.
template <typename IndexType, typename ValueType>
__global__
void findStrongestNeighbourCsr(const IndexType *row_offsets, const IndexType *column_indices,
                               const ValueType *values, const ValueType *diag, const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    while (tid < num_rows)
    {
        ValueType max_weight_unaggregated = 0.;
        ValueType max_weight_aggregated = 0.;
        int strongest_unaggregated = -1;
        int strongest_aggregated = -1;
        if (aggregates[tid] == -1) // Unaggregated row
        {
            for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
            {
                int jcol = column_indices[j];
                if (tid == jcol || jcol >= num_rows) { continue; } // skip diagonal and halo
                // BUGFIX: initialise per edge.  Previously 'weight' was
                // declared outside the loop and only assigned when a
                // reciprocal entry was found, so a one-sided edge silently
                // reused the stale (or uninitialised) weight of the
                // previously examined edge.
                ValueType weight = 0.;
                for (int k = row_offsets[jcol]; k < row_offsets[jcol + 1]; k++)
                {
                    if (column_indices[k] == tid)
                    {
                        weight = 0.5 * (fabs(values[j]) + fabs(values[k])) / max( fabs(diag[tid]), fabs(diag[jcol]));
                        break;
                    }
                }
                // Track strongest aggregated and unaggregated neighbours;
                // ties are broken in favour of the larger column index.
                if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
                {
                    max_weight_unaggregated = weight;
                    strongest_unaggregated = jcol;
                }
                else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
                {
                    max_weight_aggregated = weight;
                    strongest_aggregated = jcol;
                }
            }
            if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
            {
                aggregates[tid] = aggregates[strongest_aggregated];
            }
            else if (strongest_unaggregated != -1)
            {
                strongest_neighbour[tid] = strongest_unaggregated;
            }
            else
            {
                strongest_neighbour[tid] = tid; // isolated row proposes itself
            }
        }
        tid += gridDim.x * blockDim.x;
    }
}
// findStrongestNeighbour kernel for block_dia_csr_matrix format
// computes weight on the fly
// Block-DIA-CSR variant of findStrongestNeighbourCsr: for each unaggregated
// block row, proposes the strongest unaggregated neighbour, or joins the
// strongest aggregated neighbour's aggregate when no unaggregated neighbour
// exists.  Weights use only the first scalar of each bsize x bsize block:
//   w(i,j) = 0.5 * (|A_ij| + |A_ji|) / max(|diag_i|, |diag_j|)
// Edges without a reciprocal (j,i) entry get weight 0.
template <typename IndexType, typename ValueType>
__global__
void findStrongestNeighbourBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices,
                                       const ValueType *dia_values, const ValueType *nonzero_values, const IndexType num_block_rows, IndexType *aggregates, int *strongest_neighbour, int bsize)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int bsize_sq = bsize * bsize;
    while (tid < num_block_rows)
    {
        ValueType max_weight_unaggregated = 0.;
        ValueType max_weight_aggregated = 0.;
        int strongest_unaggregated = -1;
        int strongest_aggregated = -1;
        if (aggregates[tid] == -1) // Unaggregated row
        {
            for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
            {
                int jcol = column_indices[j];
                if (jcol >= num_block_rows) { continue; } // skip halo columns
                // BUGFIX: initialise per edge.  Previously 'weight' was only
                // assigned when a reciprocal entry was found, so a one-sided
                // edge reused the stale (or uninitialised) weight of the
                // previously examined edge.
                ValueType weight = 0.;
                for (int k = row_offsets[jcol]; k < row_offsets[jcol + 1]; k++)
                {
                    if (column_indices[k] == tid)
                    {
                        weight = 0.5 * (fabs(nonzero_values[j * bsize_sq]) + fabs(nonzero_values[k * bsize_sq]))
                                 / max( fabs(dia_values[tid * bsize_sq]), fabs(dia_values[jcol * bsize_sq]));
                        break;
                    }
                }
                // Track strongest aggregated and unaggregated neighbours;
                // ties are broken in favour of the larger column index.
                if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
                {
                    max_weight_unaggregated = weight;
                    strongest_unaggregated = jcol;
                }
                else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
                {
                    max_weight_aggregated = weight;
                    strongest_aggregated = jcol;
                }
            }
            if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
            // Put in same aggregate as strongest neighbour
            {
                aggregates[tid] = aggregates[strongest_aggregated];
            }
            else if (strongest_unaggregated != -1)
            {
                strongest_neighbour[tid] = strongest_unaggregated;
            }
            else
            {
                strongest_neighbour[tid] = tid; // isolated row proposes itself
            }
        }
        tid += gridDim.x * blockDim.x;
    }
}
// Deterministic pseudo-random tie-break weight for an unordered vertex pair:
// random_weight2(i, j) == random_weight2(j, i), so both endpoints of an edge
// agree on the value without communication.
__device__
float random_weight2(int i, int j)
{
// NOTE(review): the int*int products below can overflow before widening to
// unsigned long -- presumably intentional hash-style mixing, but the
// resulting ratio is then not guaranteed to lie in [0,1]; confirm callers
// rely only on determinism/symmetry, not on the range.
#define RAND_MULTIPLIER 1145637293
unsigned long i_min = (min(i, j) * RAND_MULTIPLIER);
unsigned long i_max = (max(i, j) * RAND_MULTIPLIER);
return ((float)i_min / i_max);
}
// findStrongestNeighbour kernel for block_dia_csr_matrix format
// Reads the weight from edge_weights array
// Handshaking kernel (V2): each still-unaggregated row proposes to its
// strongest eligible neighbour, reading precomputed weights from edge_weights.
// phase == 1: candidates are all unaggregated non-halo neighbours; the winner
//             is written to strongest_neighbour_1phase.
// phase == 2: only neighbours that proposed to this row in phase 1
//             (strongest_neighbour_1phase[jcol] == tid) are considered, using
//             a deterministic pseudo-random weight; the final proposal is
//             written to strongest_neighbour.
// Rows whose neighbours are all aggregated either join the strongest
// aggregated neighbour (merge_singletons) or become singleton aggregates.
// NOTE(review): 'bsize' is accepted but unused inside this kernel.
template <typename IndexType>
__global__
void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices,
float *edge_weights, IndexType num_block_rows, IndexType *aggregates,
IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour,
const size_t bsize, int phase, bool merge_singletons)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
float weight;
int jcol;
while (tid < num_block_rows)
{
int strongest_unaggregated = -1;
int strongest_aggregated = -1;
float max_weight_unaggregated = 0.;
float max_weight_aggregated = 0.;
if (aggregates[tid] == -1) // Unaggregated row
{
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
jcol = column_indices[j];
// Phase 1 uses the real edge weight; phase 2 uses a reproducible
// pseudo-random weight for symmetric tie-breaking.
if (phase == 1) { weight = edge_weights[j]; }
else { weight = random_weight2(tid, jcol); }
if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo
if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) { continue; } // if 2nd phase only accept those who gave a hand on the 1st phase
// Identify strongest aggregated and unaggregated neighbours
// (ties broken in favour of the larger column index).
if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
{
max_weight_unaggregated = weight;
strongest_unaggregated = jcol;
}
else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
{
max_weight_aggregated = weight;
strongest_aggregated = jcol;
}
}
if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
{
if ( merge_singletons )
// Put in same aggregate as strongest neighbour
{
aggregates[tid] = aggregates[strongest_aggregated];
}
else
{
// Keep the row as its own singleton aggregate.
aggregates[tid] = tid;
}
}
else if (strongest_unaggregated != -1)
{
if (phase == 2)
{
// Compare the phase-2 pick against the phase-1 pick using the same
// pseudo-random weighting, and keep the stronger of the two.
float rand_w1 = random_weight2(tid, strongest_neighbour_1phase[tid]);
strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? strongest_unaggregated : strongest_neighbour_1phase[tid];
}
else { strongest_neighbour_1phase[tid] = strongest_unaggregated; }
}
else
{
// No eligible neighbour this phase: carry forward the earlier pick
// (phase 2) or propose to self (phase 1).
if (phase == 2) { strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; }
else { strongest_neighbour_1phase[tid] = tid; }
}
}
tid += gridDim.x * blockDim.x;
}
}
// Kernel that checks if perfect matchs exist
// Aggregates mutually-chosen pairs: if row i proposed to j and j proposed to
// i, both rows are placed in the aggregate identified by min(i, j).  Rows
// without a reciprocated proposal stay unaggregated (-1).
template <typename IndexType>
__global__
void matchEdges(const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    while (tid < num_rows)
    {
        if (aggregates[tid] == -1) // Unaggregated row
        {
            int potential_match = strongest_neighbour[tid];
            // BUGFIX: check potential_match before indexing with it.  The
            // original read strongest_neighbour[potential_match] first, an
            // out-of-bounds access at index -1 whenever no proposal was made.
            if (potential_match != -1 &&
                strongest_neighbour[potential_match] == tid) // we have a match
            {
                aggregates[tid] = ( potential_match > tid ) ? tid : potential_match;
            }
        }
        tid += gridDim.x * blockDim.x;
    }
}
// Counts rows whose aggregates[] entry is still -1 (unaggregated) and
// atomically accumulates the grid-wide total into *num_unaggregated.
// Must be launched with blockDim.x == block_size, a power of two.
template <typename IndexType, int block_size>
__global__
void countAggregates(const IndexType num_rows, IndexType *aggregates, int *num_unaggregated)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    // Per-thread partial count over a grid-stride range.
    int c = 0;
    for (int i = tid; i < num_rows; i += gridDim.x * blockDim.x)
    {
        c += ( aggregates[i] == -1 );
    }
    __shared__ int smem[block_size];
    smem[threadIdx.x] = c;
    __syncthreads();
    // Full tree reduction with block-wide barriers.  The original used a
    // 'volatile' warp-synchronous tail (smem[tid+16] ... smem[tid+1]) with no
    // __syncwarp(), which is unsafe under independent thread scheduling on
    // Volta+ GPUs and reads out of bounds when block_size <= 32.
    for (int off = blockDim.x / 2; off > 0; off >>= 1)
    {
        if ( threadIdx.x < off )
        {
            smem[threadIdx.x] += smem[threadIdx.x + off];
        }
        __syncthreads();
    }
    // One atomic per block keeps contention on *num_unaggregated low.
    if ( threadIdx.x == 0 )
    {
        atomicAdd(num_unaggregated, smem[0]);
    }
}
// Kernel that merges unaggregated vertices its strongest aggregated neighbour
// For csr_matrix_format
// Merges each still-unaggregated row into the aggregate of its strongest
// aggregated neighbour, with weight |A_ij| / max(|diag_i|, |diag_j|) computed
// on the fly; a row with no aggregated neighbour becomes a singleton.  In
// deterministic mode the decision is staged into aggregates_candidate instead
// of being applied immediately.
template <typename IndexType, typename ValueType>
__global__
void mergeWithExistingAggregatesCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *values,
                                    const ValueType *diag, const int num_rows, IndexType *aggregates, int deterministic, IndexType *aggregates_candidate)
{
    // Grid-stride loop over rows.
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_rows;
            row += gridDim.x * blockDim.x)
    {
        if (aggregates[row] != -1) { continue; } // already aggregated
        ValueType best_weight = 0.;
        int best = -1;
        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const int col = column_indices[j];
            if (col == row || col >= num_rows) { continue; } // diagonal / halo
            const ValueType w = fabs(values[j]) / max( fabs(diag[row]), fabs(diag[col]));
            // Strongest aggregated neighbour; ties broken by larger index.
            if (aggregates[col] != -1 && (w > best_weight || (w == best_weight && col > best)))
            {
                best_weight = w;
                best = col;
            }
        }
        // Join the best neighbour's aggregate, or self (singleton) if none.
        const IndexType target = (best != -1) ? aggregates[best] : row;
        if (deterministic)
        {
            aggregates_candidate[row] = target; // staged; applied later
        }
        else
        {
            aggregates[row] = target;
        }
    }
}
// Applies staged aggregation decisions: every row still marked -1 adopts the
// candidate aggregate recorded by a previous (deterministic-mode) pass,
// provided a candidate exists.
template <typename IndexType>
__global__
void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregates_candidate)
{
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_rows;
            row += gridDim.x * blockDim.x)
    {
        const bool unassigned = (aggregates[row] == -1);
        if (unassigned && aggregates_candidate[row] != -1)
        {
            aggregates[row] = aggregates_candidate[row];
        }
    }
}
// Kernel that merges unaggregated vertices its strongest aggregated neighbour
// Edge weights are computed on the fly
// For block_dia_csr_matrix_format
// Block-DIA-CSR variant: merges each still-unaggregated block row into the
// aggregate of its strongest aggregated neighbour, with on-the-fly weight
// |A_ij| / max(|diag_i|, |diag_j|) taken from the first scalar of each block;
// rows with no aggregated neighbour become singletons.  Deterministic mode
// stages the decision into aggregates_candidate.
template <typename IndexType, typename ValueType>
__global__
void mergeWithExistingAggregatesBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *dia_values, const ValueType *nonzero_values,
        const int num_block_rows, IndexType *aggregates, int bsize, int deterministic, IndexType *aggregates_candidate)
{
    const int bsize_sq = bsize * bsize;
    // Grid-stride loop over block rows.
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_block_rows;
            row += gridDim.x * blockDim.x)
    {
        if (aggregates[row] != -1) { continue; } // already aggregated
        ValueType best_weight = 0.;
        int best = -1;
        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const int col = column_indices[j];
            if (col >= num_block_rows) { continue; } // halo column
            const ValueType w = fabs(nonzero_values[j * bsize_sq])
                                / max( fabs(dia_values[row * bsize_sq]), fabs(dia_values[col * bsize_sq]));
            // Strongest aggregated neighbour; ties broken by larger index.
            if (aggregates[col] != -1 && (w > best_weight || (w == best_weight && col > best)))
            {
                best_weight = w;
                best = col;
            }
        }
        // Join the best neighbour's aggregate, or self (singleton) if none.
        const IndexType target = (best != -1) ? aggregates[best] : row;
        if (deterministic)
        {
            aggregates_candidate[row] = target; // staged; applied later
        }
        else
        {
            aggregates[row] = target;
        }
    }
}
// Kernel that merges unaggregated vertices its strongest aggregated neighbour
// Weights are read from edge_weights array
// For block_dia_csr_matrix_format
// V2 merge kernel: same policy as mergeWithExistingAggregatesBlockDiaCsr but
// reads precomputed weights from edge_weights instead of recomputing them.
// NOTE: 'bsize' is part of the signature but not used here.
template <typename IndexType>
__global__
void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights,
        const int num_block_rows, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate)
{
    // Grid-stride loop over block rows.
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_block_rows;
            row += gridDim.x * blockDim.x)
    {
        if (aggregates[row] != -1) { continue; } // already aggregated
        float best_weight = 0.;
        int best = -1;
        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const float w = edge_weights[j];   // precomputed weight of edge j
            const int col = column_indices[j];
            if (col == row || col >= num_block_rows) { continue; } // diagonal / halo
            // Strongest aggregated neighbour; ties broken by larger index.
            if (aggregates[col] != -1 && (w > best_weight || (w == best_weight && col > best)))
            {
                best_weight = w;
                best = col;
            }
        }
        // Join the best neighbour's aggregate, or self (singleton) if none.
        const IndexType target = (best != -1) ? aggregates[best] : row;
        if (deterministic)
        {
            aggregates_candidate[row] = target; // staged; applied later
        }
        else
        {
            aggregates[row] = target;
        }
    }
}
// Kernel to extract diagonal for csr_matrix format
// Extracts the diagonal of a CSR matrix: diagonal[i] = A(i,i).
// Rows lacking an explicit diagonal entry leave diagonal[i] untouched.
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices,
                       const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
    int tIdx = threadIdx.x + blockDim.x * blockIdx.x;
    while (tIdx < numRows)
    {
        const int offset = offsets[tIdx];
        const int numj = offsets[tIdx + 1] - offset;
        for (int j = offset; j < offset + numj; j++)
        {
            int jcol = column_indices[j];
            if (tIdx == jcol)
            {
                diagonal[tIdx] = values[j];
                // CSR rows contain each column at most once, so stop scanning
                // the rest of the row once the diagonal is found.
                break;
            }
        }
        tIdx += gridDim.x * blockDim.x;
    }
}
// Kernel to extract diagonal for csr_matrix format
// Gathers the diagonal using precomputed diagonal-entry positions:
// diagonal[i] = values[dia_idx[i]].
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < numRows;
            row += gridDim.x * blockDim.x)
    {
        diagonal[row] = values[dia_idx[row]];
    }
}
// -----------------
// Methods
// ----------------
// Constructor
// Reads the selector's tuning knobs from the AMG configuration object.
template<class T_Config>
Size2SelectorBase<T_Config>::Size2SelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
// Deterministic mode stages merges into a candidate array for reproducible
// results (read from the "default" scope, unlike the other parameters).
deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default");
// Iteration cap for the handshaking/matching loop.
max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope);
// Matching stops once the unassigned fraction drops below this tolerance.
numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope);
// Use 2-phase handshaking when "handshaking_phases" == 2.
two_phase = cfg.AMG_Config::getParameter<int>("handshaking_phases", cfg_scope) == 2;
// Which component of a block contributes to the edge weight.
m_aggregation_edge_weight_component = cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope);
// If false, leftover unmatched vertices become singleton aggregates.
merge_singletons = cfg.AMG_Config::getParameter<int>("merge_singletons", cfg_scope) == 1;
// Selects the edge-weight formula used by computeEdgeWeightsBlockDiaCsr_V2.
weight_formula = cfg.AMG_Config::getParameter<int>("weight_formula", cfg_scope);
}
// setAggregates for csr_matrix_h format
// Host (CPU) specialisation: size-2 aggregation is implemented only for the
// device backend, so this stub always raises a fatal "not supported" error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_1x1(const Matrix_h &A,
typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates)
{
FatalError("Size2 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// setAggregates for block_dia_csr_matrix_h format
// Host (CPU) specialisation: like setAggregates_1x1, the square-block path is
// device-only; this stub always raises a fatal "not supported" error.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(const Matrix_h &A,
typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates)
{
FatalError("Size2 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
#ifndef DELETE
// setAggregates for csr_matrix_d format
// Device implementation for scalar (1x1) CSR matrices: computes a size-2
// aggregation via iterative pairwise matching, then merges any leftovers
// into existing aggregates.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_1x1(
const Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
// Multi-GPU matrices also carry halo rows, so size through the manager.
if (!A.is_matrix_singleGPU())
{
aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
}
else
{
aggregates.resize(A.get_num_rows());
}
// -1 marks "not yet aggregated".
thrust::fill(aggregates.begin(), aggregates.end(), -1);
cudaCheckError();
//typedef typename csr_matrix_d::index_type IndexType;
//typedef typename csr_matrix_d::value_type ValueType;
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const ValueType *A_values_ptr = A.values.raw();
const IndexType *A_dia_ptr = A.diag.raw();
IndexType num_rows = (int)A.get_num_rows();
typename Matrix_d::IVector strongest_neighbour(num_rows, -1);
typename Matrix_d::MVector diag(num_rows, 0);
Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_rows, 0);
IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
ValueType *diag_ptr = diag.raw();
IndexType *aggregates_ptr = aggregates.raw();
const int threads_per_block = 256;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_rows - 1) / threads_per_block + 1 );
// Gather the diagonal once up front; the matching kernels read it repeatedly.
hipLaunchKernelGGL(( getDiagonalKernelNoDiaProp) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_dia_ptr, A_values_ptr, num_rows, diag_ptr);
cudaCheckError();
int numUnassigned = num_rows;
int numUnassigned_previous = numUnassigned;
int icount = 0;
// Iterative handshaking: propose, match, re-count until a stop rule fires.
do
{
// For each row, find the strongest neighbour who hasn't been assigned
hipLaunchKernelGGL(( findStrongestNeighbourCsr) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_values_ptr, diag_ptr, num_rows, aggregates_ptr, strongest_neighbour_ptr);
cudaCheckError();
// Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
hipLaunchKernelGGL(( matchEdges) , dim3(num_blocks), dim3(threads_per_block), 0, 0, num_rows, aggregates_ptr, strongest_neighbour_ptr);
cudaCheckError();
numUnassigned_previous = numUnassigned;
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_rows, -1);
cudaCheckError();
icount++;
}
// Stop when: everything is matched, the iteration cap is reached, few enough
// rows remain, or the previous pass made no progress.
while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned));
// Merge remaining vertices with current aggregates
if (!this->deterministic)
{
while (numUnassigned != 0 )
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesCsr) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_values_ptr,
diag_ptr, num_rows, aggregates_ptr, this->deterministic, (IndexType *) NULL);
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_rows, -1);
cudaCheckError();
};
}
else
{
// Deterministic path: stage decisions in aggregates_candidate, then apply
// them in a separate pass so the result is independent of scheduling order.
typename Matrix_d::IVector aggregates_candidate(num_rows, -1);
while (numUnassigned != 0 )
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesCsr) , dim3(num_blocks), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_values_ptr,
diag_ptr, num_rows, aggregates_ptr, this->deterministic, aggregates_candidate.raw());
// Sync here
hipLaunchKernelGGL(( joinExistingAggregates) , dim3(num_blocks), dim3(threads_per_block), 0, 0, num_rows, aggregates_ptr, aggregates_candidate.raw());
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_rows, -1);
cudaCheckError();
};
aggregates_candidate.resize(0);
}
// Compact aggregate ids to 0..num_aggregates-1 and report the count.
this->renumberAndCountAggregates(aggregates, aggregates_global, num_rows, num_aggregates);
}
#endif
// setAggregates for block_dia_csr_matrix_d format
// Device implementation for square-block matrices: edge weights are
// precomputed once, then iterative (1- or 2-phase) handshaking matches rows
// pairwise; leftovers are merged into neighbours or left as singletons,
// depending on merge_singletons.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(
const Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
IndexType num_block_rows = (int) A.get_num_rows();
IndexType num_nonzero_blocks = (int) A.get_num_nz();
// both ways are supported
IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
// The edge-weight kernel also needs COO-style row indices.
typename Matrix_d::IVector row_indices(total_nz);
cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
IndexType total_rows = (A.is_matrix_singleGPU()) ? A.get_num_rows() : A.manager->num_rows_all();
aggregates.resize(total_rows);
// -1 marks "not yet aggregated".
thrust::fill(aggregates.begin(), aggregates.end(), -1);
cudaCheckError();
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_row_indices_ptr = row_indices.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_idx_ptr = A.diag.raw();
const ValueType *A_nonzero_values_ptr = A.values.raw();
typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
typename Matrix_d::IVector strongest_neighbour_1phase(num_block_rows, -1);
Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_block_rows, 0);
IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw();
IndexType *aggregates_ptr = aggregates.raw();
const int threads_per_block = 256;
// Row-parallel launches use num_blocks; the edge-weight kernel is
// nonzero-parallel and uses num_blocks_V2.
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1 );
const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
int numUnassigned = num_block_rows;
int numUnassigned_previous = numUnassigned;
Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > edge_weights(num_nonzero_blocks, -1);
float *edge_weights_ptr = edge_weights.raw();
float *rand_edge_weights_ptr = NULL;//(this->two_phase ? rand_edge_weights.raw() : NULL);
// Compute the edge weights
hipFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, float>, hipFuncCachePreferL1);
hipLaunchKernelGGL(( computeEdgeWeightsBlockDiaCsr_V2) , dim3(num_blocks_V2), dim3(threads_per_block), 0, thrust::global_thread_handle::get_stream(), A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula);
cudaCheckError();
hipStream_t str = thrust::global_thread_handle::get_stream();
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
// Asynchronous progress counting: the D2H copy of the unaggregated count is
// overlapped with the next matching iteration and throttled via an event, so
// the loop condition lags one iteration behind (see 's' below).
AsyncEvent *throttle_event = new AsyncEvent;
throttle_event->create();
// TODO: pinned host memory
typename Matrix_h::IVector h_unagg_vec(1);
typename Matrix_d::IVector d_unagg_vec(1);
int *unaggregated = h_unagg_vec.raw();
int *d_unaggregated = d_unagg_vec.raw();
#endif
int icount, s = 1;
{
icount = 0;
float *weights_ptr = edge_weights_ptr;
// Handshaking loop: propose (1 or 2 phases), match, then refresh the count
// of unaggregated rows.
do
{
if ( !this->two_phase )
{
// 1-phase handshaking
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons);
cudaCheckError();
}
else
{
// 2-phase handshaking
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons);
cudaCheckError();
// 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase
hipLaunchKernelGGL(( findStrongestNeighbourBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 2, this->merge_singletons);
cudaCheckError();
}
// Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
hipLaunchKernelGGL(( matchEdges) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, strongest_neighbour_ptr);
cudaCheckError();
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
// Even iterations kick off an async count; odd iterations wait for it and
// update the host-side numbers -- one extra iteration of latency in return
// for not serialising on every count.
s = (icount & 1);
if ( s == 0 )
{
// count unaggregated vertices
hipMemsetAsync(d_unaggregated, 0, sizeof(int), str);
hipLaunchKernelGGL(( countAggregates<IndexType, threads_per_block>) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, d_unaggregated);
cudaCheckError();
hipMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), hipMemcpyDeviceToHost, str);
throttle_event->record(str);
}
else
{
throttle_event->sync();
numUnassigned_previous = numUnassigned;
numUnassigned = *unaggregated;
}
#else
// Synchronous fallback: count on every iteration.
hipStreamSynchronize(str);
numUnassigned_previous = numUnassigned;
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
#endif
icount++;
}
// Only test the stop rules on iterations where the count was refreshed
// (s != 0); stop when matched, capped, below tolerance, or stalled.
while ( (s == 0) || !(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous));
}
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
delete throttle_event;
#endif
if ( this->merge_singletons )
{
// Merge remaining vertices with current aggregates
if (!this->deterministic)
{
while (numUnassigned != 0)
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, (IndexType *) NULL);
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
}
}
else
{
// Deterministic path: stage merges in aggregates_candidate and apply
// them in a separate pass for scheduling-independent results.
typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);
while (numUnassigned != 0)
{
hipLaunchKernelGGL(( mergeWithExistingAggregatesBlockDiaCsr_V2) , dim3(num_blocks), dim3(threads_per_block), 0, str, A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, aggregates_candidate.raw());
cudaCheckError();
hipLaunchKernelGGL(( joinExistingAggregates) , dim3(num_blocks), dim3(threads_per_block), 0, str, num_block_rows, aggregates_ptr, aggregates_candidate.raw());
cudaCheckError();
numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
cudaCheckError();
}
aggregates_candidate.resize(0);
}
}
else
{
//make singletons
hipLaunchKernelGGL(( aggregateSingletons) , dim3(num_blocks), dim3(threads_per_block), 0, str, aggregates_ptr, num_block_rows );
cudaCheckError();
}
// Compact aggregate ids to 0..num_aggregates-1 and report the count.
this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
}
// Public entry point: size-2 selection is only defined for square blocks
// (block_dimx == block_dimy); any other shape is a fatal configuration error.
template<class T_Config>
void Size2SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
        IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
    const bool square_blocks = (A.get_block_dimx() == A.get_block_dimy());
    if (!square_blocks)
    {
        FatalError("Unsupported block size for Size2", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }
    else
    {
        setAggregates_common_sqblocks( A, aggregates, aggregates_global, num_aggregates );
    }
}
// -------------------------
// Explicit instantiations
// -------------------------
// Instantiate the selector templates for every build configuration listed by
// AMGX_FORALL_BUILDS so the definitions in this translation unit are
// available at link time.
#define AMGX_CASE_LINE(CASE) template class Size2SelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Size2Selector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
| 41b9ddfc32dad2b1d85910c87f54f40fecd338ed.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <aggregation/selectors/size2_selector.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
#include <basic_types.h>
#include <texture.h>
#include <matrix_analysis.h>
#include <async_event.h>
#include <determinism_checker.h>
#include <thrust/count.h> //count
#include <thrust/sort.h> //sort
#include <thrust/binary_search.h> //lower_bound
#include <thrust/unique.h> //unique
#include <cusp/detail/format_utils.h> //offsets_to_indices
#include <logger.h>
#define EXPERIMENTAL_ITERATIVE_MATCHING
namespace amgx
{
namespace aggregation
{
namespace size2_selector
{
// include common routines for all selectors
#include <aggregation/selectors/common_selector.h>
// ------------------------
// Kernels
// ------------------------
#ifndef DELETE
// Edge-weight computation for a matrix in block-dia-csr format.
// For every entry (row, col) that has a transposed partner (col, row), the
// weight is the mean of the two leading block values, normalised by the
// larger of the two leading diagonal values.  Entries without a transposed
// partner keep their previous edge_weights value.
// One grid-stride iteration handles one block row.
template <typename IndexType, typename ValueType>
__global__
void computeEdgeWeightsBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices,
        const ValueType *dia_values, const ValueType *nonzero_values, const IndexType num_block_rows, float *edge_weights, int bsize)
{
    const int stride = gridDim.x * blockDim.x;
    const int blockArea = bsize * bsize;

    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_block_rows; row += stride)
    {
        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const int col = column_indices[j];

            if (col >= num_block_rows) { continue; } // skip halo columns

            // Search for the transposed entry (col, row).
            for (int k = row_offsets[col]; k < row_offsets[col + 1]; k++)
            {
                if (column_indices[k] == row)
                {
                    edge_weights[j] = (float) 0.5 * (fabs(nonzero_values[j * blockArea]) + fabs(nonzero_values[k * blockArea]))
                                      / max( fabs(dia_values[row * blockArea]), fabs(dia_values[col * blockArea]));
                    break;
                }
            }
        }
    }
}
#endif
// findStrongestNeighbour kernel for csr_matrix format; edge weights are
// computed on the fly from the symmetric pair of entries.
// Each unaggregated row records its strongest unaggregated neighbour in
// strongest_neighbour (ties go to the larger column index); a row whose
// neighbours are all aggregated joins the strongest aggregated one
// immediately; a row with no usable neighbour points at itself.
// BUGFIX: "weight" was read uninitialized (or stale from the previous edge)
// whenever the transposed entry (jcol, tid) does not exist; such
// structurally one-sided edges are now skipped explicitly.  For structurally
// symmetric matrices the behaviour is unchanged.
template <typename IndexType, typename ValueType>
__global__
void findStrongestNeighbourCsr(const IndexType *row_offsets, const IndexType *column_indices,
        const ValueType *values, const ValueType *diag, const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int jcol;

    while (tid < num_rows)
    {
        ValueType max_weight_unaggregated = 0.;
        ValueType max_weight_aggregated = 0.;
        int strongest_unaggregated = -1;
        int strongest_aggregated = -1;

        if (aggregates[tid] == -1) // Unaggregated row
        {
            for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
            {
                jcol = column_indices[j];

                if (tid == jcol || jcol >= num_rows) { continue; } // skip diagonal and halo

                // Compute the edge weight from the entry and its transpose.
                bool found = false;
                ValueType weight = 0.;

                for (int k = row_offsets[jcol]; k < row_offsets[jcol + 1]; k++)
                {
                    if (column_indices[k] == tid)
                    {
                        weight = 0.5 * (fabs(values[j]) + fabs(values[k])) / max( fabs(diag[tid]), fabs(diag[jcol]));
                        found = true;
                        break;
                    }
                }

                if (!found) { continue; } // no symmetric entry: weight undefined

                // Identify strongest aggregated and unaggregated neighbours
                if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
                {
                    max_weight_unaggregated = weight;
                    strongest_unaggregated = jcol;
                }
                else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
                {
                    max_weight_aggregated = weight;
                    strongest_aggregated = jcol;
                }
            }

            if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
            {
                aggregates[tid] = aggregates[strongest_aggregated];
            }
            else if (strongest_unaggregated != -1)
            {
                strongest_neighbour[tid] = strongest_unaggregated;
            }
            else
            {
                strongest_neighbour[tid] = tid; // no usable neighbour: self-handshake
            }
        }

        tid += gridDim.x * blockDim.x;
    }
}
// findStrongestNeighbour kernel for block_dia_csr_matrix format; edge
// weights are computed on the fly from the symmetric pair of blocks
// (leading block values, normalised by the larger leading diagonal value).
// BUGFIX: "weight" was read uninitialized (or stale from the previous edge)
// whenever the transposed entry (jcol, tid) does not exist; such
// structurally one-sided edges are now skipped explicitly.  For structurally
// symmetric matrices the behaviour is unchanged.
template <typename IndexType, typename ValueType>
__global__
void findStrongestNeighbourBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices,
        const ValueType *dia_values, const ValueType *nonzero_values, const IndexType num_block_rows, IndexType *aggregates, int *strongest_neighbour, int bsize)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int jcol;
    int bsize_sq = bsize * bsize;

    while (tid < num_block_rows)
    {
        ValueType max_weight_unaggregated = 0.;
        ValueType max_weight_aggregated = 0.;
        int strongest_unaggregated = -1;
        int strongest_aggregated = -1;

        if (aggregates[tid] == -1) // Unaggregated row
        {
            for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
            {
                jcol = column_indices[j];

                if (jcol >= num_block_rows) { continue; } // skip halo columns

                // Compute the edge weight from the entry and its transpose.
                bool found = false;
                ValueType weight = 0.;

                for (int k = row_offsets[jcol]; k < row_offsets[jcol + 1]; k++)
                {
                    if (column_indices[k] == tid)
                    {
                        weight = 0.5 * (fabs(nonzero_values[j * bsize_sq]) + fabs(nonzero_values[k * bsize_sq]))
                                 / max( fabs(dia_values[tid * bsize_sq]), fabs(dia_values[jcol * bsize_sq]));
                        found = true;
                        break;
                    }
                }

                if (!found) { continue; } // no symmetric entry: weight undefined

                // Identify strongest aggregated and unaggregated neighbours
                if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
                {
                    max_weight_unaggregated = weight;
                    strongest_unaggregated = jcol;
                }
                else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
                {
                    max_weight_aggregated = weight;
                    strongest_aggregated = jcol;
                }
            }

            if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
            {
                // Put in same aggregate as strongest neighbour
                aggregates[tid] = aggregates[strongest_aggregated];
            }
            else if (strongest_unaggregated != -1)
            {
                strongest_neighbour[tid] = strongest_unaggregated;
            }
            else
            {
                strongest_neighbour[tid] = tid; // no usable neighbour: self-handshake
            }
        }

        tid += gridDim.x * blockDim.x;
    }
}
// Deterministic pseudo-random weight for the vertex pair (i, j); symmetric
// in its arguments because min/max normalise the order.  The int
// multiplication may wrap before the result widens to unsigned long.
__device__
float random_weight2(int i, int j)
{
#define RAND_MULTIPLIER 1145637293
    unsigned long hashLo = (min(i, j) * RAND_MULTIPLIER);
    unsigned long hashHi = (max(i, j) * RAND_MULTIPLIER);
    return ((float)hashLo / hashHi);
}
// findStrongestNeighbour kernel for block_dia_csr_matrix format.
// Reads precomputed weights from the edge_weights array (phase 1) or ranks
// candidates by a deterministic pseudo-random weight (phase 2).
//
// Two-phase handshaking:
//   phase 1: each unaggregated row records, in strongest_neighbour_1phase,
//            its strongest unaggregated neighbour (ties resolved towards the
//            larger column index);
//   phase 2: a row only considers neighbours that chose it in phase 1 and
//            writes the final pick to strongest_neighbour, comparing the
//            best such candidate against its own phase-1 pick via
//            random_weight2.
// For a 1-phase run the caller passes the same array for both
// strongest_neighbour_1phase and strongest_neighbour with phase == 1.
// Rows whose neighbours are all aggregated either join the strongest
// aggregated neighbour (merge_singletons) or become singletons; a row with
// no eligible neighbour points at itself, which makes matchEdges turn it
// into a singleton aggregate.
template <typename IndexType>
__global__
void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices,
        float *edge_weights, IndexType num_block_rows, IndexType *aggregates,
        IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour,
        const size_t bsize, int phase, bool merge_singletons)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    float weight;
    int jcol;

    // Grid-stride loop: one iteration handles one block row.
    while (tid < num_block_rows)
    {
        int strongest_unaggregated = -1;
        int strongest_aggregated = -1;
        float max_weight_unaggregated = 0.;
        float max_weight_aggregated = 0.;

        if (aggregates[tid] == -1) // Unaggregated row
        {
            for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
            {
                jcol = column_indices[j];

                // Phase 1 uses the stored edge weight; phase 2 re-ranks the
                // phase-1 candidates with a symmetric pseudo-random weight.
                if (phase == 1) { weight = edge_weights[j]; }
                else { weight = random_weight2(tid, jcol); }

                if (tid == jcol || jcol >= num_block_rows) { continue; } // skip diagonal and halo

                if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) { continue; } // if 2nd phase only accept those who gave a hand on the 1st phase

                // Identify strongest aggregated and unaggregated neighbours
                if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight == max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated
                {
                    max_weight_unaggregated = weight;
                    strongest_unaggregated = jcol;
                }
                else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated))) // aggregated
                {
                    max_weight_aggregated = weight;
                    strongest_aggregated = jcol;
                }
            }

            if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated
            {
                if ( merge_singletons )
                    // Put in same aggregate as strongest neighbour
                {
                    aggregates[tid] = aggregates[strongest_aggregated];
                }
                else
                {
                    aggregates[tid] = tid;
                }
            }
            else if (strongest_unaggregated != -1)
            {
                if (phase == 2)
                {
                    // Compare the best phase-2 candidate against the row's own
                    // phase-1 pick using the same pseudo-random weighting.
                    float rand_w1 = random_weight2(tid, strongest_neighbour_1phase[tid]);
                    strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? strongest_unaggregated : strongest_neighbour_1phase[tid];
                }
                else { strongest_neighbour_1phase[tid] = strongest_unaggregated; }
            }
            else
            {
                // No eligible neighbour: keep/record a self-pointing choice.
                if (phase == 2) { strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; }
                else { strongest_neighbour_1phase[tid] = tid; }
            }
        }

        tid += gridDim.x * blockDim.x;
    }
}
// Aggregate pairs of vertices that selected each other as strongest
// neighbour (a successful "handshake").  The new aggregate is labelled with
// the smaller of the two vertex indices; a self-handshake (a row pointing at
// itself) produces a singleton aggregate.
// BUGFIX: the original indexed strongest_neighbour[potential_match] before
// checking potential_match != -1, performing an out-of-bounds read at
// index -1; the check now happens first.
template <typename IndexType>
__global__
void matchEdges(const IndexType num_rows, IndexType *aggregates, int *strongest_neighbour)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;

    while (tid < num_rows)
    {
        if (aggregates[tid] == -1) // Unaggregated row
        {
            int potential_match = strongest_neighbour[tid];

            if (potential_match != -1 && strongest_neighbour[potential_match] == tid) // we have a match
            {
                aggregates[tid] = ( potential_match > tid ) ? tid : potential_match;
            }
        }

        tid += gridDim.x * blockDim.x;
    }
}
// Count the vertices that are still unassigned (aggregates[i] == -1) and
// accumulate the grand total into *num_unaggregated, one atomicAdd per
// block.  block_size must equal blockDim.x and be a power of two >= 64
// (the shared-memory tree assumes this).
template <typename IndexType, int block_size>
__global__
void countAggregates(const IndexType num_rows, IndexType *aggregates, int *num_unaggregated)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int c = 0;
    int i = tid;

    // Grid-stride accumulation of a per-thread count.
    while ( i < num_rows )
    {
        c += ( aggregates[i] == -1 );
        i += gridDim.x * blockDim.x;
    }

    __shared__ volatile int smem[block_size];
    smem[threadIdx.x] = c;
    __syncthreads();

    // Shared-memory tree reduction down to one warp.
    for ( int off = blockDim.x / 2; off >= 32; off = off / 2 )
    {
        if ( threadIdx.x < off )
        {
            smem[threadIdx.x] += smem[threadIdx.x + off];
        }

        __syncthreads();
    }

    // Final warp-level reduction.
    // BUGFIX: the original relied on implicit warp-synchronous execution,
    // which is not guaranteed since Volta's independent thread scheduling;
    // explicit __syncwarp() barriers make each lane-to-lane dependency safe.
    if ( threadIdx.x < 32 )
    {
        smem[threadIdx.x] += smem[threadIdx.x + 16];
        __syncwarp();
        smem[threadIdx.x] += smem[threadIdx.x + 8];
        __syncwarp();
        smem[threadIdx.x] += smem[threadIdx.x + 4];
        __syncwarp();
        smem[threadIdx.x] += smem[threadIdx.x + 2];
        __syncwarp();
        smem[threadIdx.x] += smem[threadIdx.x + 1];
    }

    if ( threadIdx.x == 0 )
    {
        atomicAdd(num_unaggregated, smem[0]);
    }
}
// Sweep the still-unaggregated rows of a CSR matrix into existing
// aggregates: each one joins the aggregate of its strongest aggregated
// neighbour (edge weights computed on the fly), or becomes a singleton when
// it has none.  With deterministic != 0 the decision is staged in
// aggregates_candidate for a later joinExistingAggregates pass instead of
// being applied in place.
template <typename IndexType, typename ValueType>
__global__
void mergeWithExistingAggregatesCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *values,
        const ValueType *diag, const int num_rows, IndexType *aggregates, int deterministic, IndexType *aggregates_candidate)
{
    const int stride = gridDim.x * blockDim.x;

    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_rows; row += stride)
    {
        if (aggregates[row] != -1) { continue; } // already aggregated

        ValueType bestWeight = 0.;
        int bestNeighbour = -1;

        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const int col = column_indices[j];

            if (row == col || col >= num_rows) { continue; } // skip diagonal and halo

            // Edge weight computed on the fly.
            const ValueType w = fabs(values[j]) / max( fabs(diag[row]), fabs(diag[col]));

            // Strongest aggregated neighbour; ties favour the larger column index.
            if (aggregates[col] != -1 && (w > bestWeight || (w == bestWeight && col > bestNeighbour)))
            {
                bestWeight = w;
                bestNeighbour = col;
            }
        }

        // Join the winner's aggregate, or fall back to a singleton label.
        const IndexType target = (bestNeighbour != -1) ? aggregates[bestNeighbour] : (IndexType) row;

        if (deterministic)
        {
            aggregates_candidate[row] = target;
        }
        else
        {
            aggregates[row] = target;
        }
    }
}
// Apply staged aggregation decisions: every row that is still unaggregated
// adopts the label recorded for it in aggregates_candidate (the second pass
// of the deterministic merge path).
template <typename IndexType>
__global__
void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregates_candidate)
{
    const IndexType stride = gridDim.x * blockDim.x;

    for (IndexType row = threadIdx.x + blockDim.x * blockIdx.x; row < num_rows; row += stride)
    {
        if (aggregates[row] == -1 && aggregates_candidate[row] != -1)
        {
            aggregates[row] = aggregates_candidate[row];
        }
    }
}
// Sweep the still-unaggregated block rows into existing aggregates, with
// edge weights computed on the fly from the leading block values of the
// block-dia-csr storage.  Each row joins its strongest aggregated neighbour
// or becomes a singleton; with deterministic != 0 the decision is staged in
// aggregates_candidate instead of being applied in place.
template <typename IndexType, typename ValueType>
__global__
void mergeWithExistingAggregatesBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *dia_values, const ValueType *nonzero_values,
        const int num_block_rows, IndexType *aggregates, int bsize, int deterministic, IndexType *aggregates_candidate)
{
    const int stride = gridDim.x * blockDim.x;
    const int blockArea = bsize * bsize;

    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_block_rows; row += stride)
    {
        if (aggregates[row] != -1) { continue; } // already aggregated

        ValueType bestWeight = 0.;
        int bestNeighbour = -1;

        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const int col = column_indices[j];

            if (col >= num_block_rows) { continue; } // skip halo columns

            // Edge weight from the leading block entries.
            const ValueType w = fabs(nonzero_values[j * blockArea]) / max( fabs(dia_values[row * blockArea]), fabs(dia_values[col * blockArea]));

            // Strongest aggregated neighbour; ties favour the larger column index.
            if (aggregates[col] != -1 && (w > bestWeight || (w == bestWeight && col > bestNeighbour)))
            {
                bestWeight = w;
                bestNeighbour = col;
            }
        }

        // Join the winner's aggregate, or fall back to a singleton label.
        const IndexType target = (bestNeighbour != -1) ? aggregates[bestNeighbour] : (IndexType) row;

        if (deterministic)
        {
            aggregates_candidate[row] = target;
        }
        else
        {
            aggregates[row] = target;
        }
    }
}
// Sweep the still-unaggregated block rows into existing aggregates using the
// precomputed edge_weights array: each row joins its strongest aggregated
// neighbour, or becomes a singleton when it has none.  With
// deterministic != 0 the decision is staged in aggregates_candidate and
// applied later by joinExistingAggregates; otherwise it is applied in place.
template <typename IndexType>
__global__
void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights,
        const int num_block_rows, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate)
{
    const int stride = gridDim.x * blockDim.x;

    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < num_block_rows; row += stride)
    {
        if (aggregates[row] != -1) { continue; } // already aggregated

        float bestWeight = 0.;
        int bestNeighbour = -1;

        for (int j = row_offsets[row]; j < row_offsets[row + 1]; j++)
        {
            const float w = edge_weights[j];
            const int col = column_indices[j];

            if (col == row || col >= num_block_rows) { continue; } // skip diagonal and halo

            // Strongest aggregated neighbour; ties favour the larger column index.
            if (aggregates[col] != -1 && (w > bestWeight || (w == bestWeight && col > bestNeighbour)))
            {
                bestWeight = w;
                bestNeighbour = col;
            }
        }

        // Join the winner's aggregate, or fall back to a singleton label.
        const IndexType target = (bestNeighbour != -1) ? aggregates[bestNeighbour] : (IndexType) row;

        if (deterministic)
        {
            aggregates_candidate[row] = target;
        }
        else
        {
            aggregates[row] = target;
        }
    }
}
// Extract the diagonal entries of a CSR matrix into "diagonal" by scanning
// each row for the column that matches the row index.  The whole row is
// scanned (no early exit), matching the original semantics when duplicate
// diagonal entries exist.
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices,
        const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
    const int stride = gridDim.x * blockDim.x;

    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < numRows; row += stride)
    {
        const int rowBegin = offsets[row];
        const int rowEnd = offsets[row + 1];

        for (int j = rowBegin; j < rowEnd; j++)
        {
            if (column_indices[j] == row)
            {
                diagonal[row] = values[j];
            }
        }
    }
}
// Extract the diagonal when explicit diagonal indices are available:
// dia_idx[row] is the position of row's diagonal entry inside "values".
template <typename IndexType, typename ValueType>
__global__
void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal)
{
    const int stride = gridDim.x * blockDim.x;

    for (int row = threadIdx.x + blockDim.x * blockIdx.x; row < numRows; row += stride)
    {
        diagonal[row] = values[dia_idx[row]];
    }
}
// -----------------
// Methods
// ----------------
// Constructor
// Constructor: read the selector's tuning parameters from the AMG
// configuration object.
template<class T_Config>
Size2SelectorBase<T_Config>::Size2SelectorBase(AMG_Config &cfg, const std::string &cfg_scope)
{
    // "determinism_flag" is read from the "default" scope rather than
    // cfg_scope; all other parameters come from this selector's own scope.
    deterministic = cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default");
    // Cap on the number of handshaking sweeps.
    max_iterations = cfg.AMG_Config::getParameter<IndexType>("max_matching_iterations", cfg_scope);
    // Convergence tolerance on the fraction of still-unassigned vertices.
    numUnassigned_tol = cfg.AMG_Config::getParameter<double>("max_unassigned_percentage", cfg_scope);
    // Two-phase handshaking is enabled when "handshaking_phases" == 2.
    two_phase = cfg.AMG_Config::getParameter<int>("handshaking_phases", cfg_scope) == 2;
    m_aggregation_edge_weight_component = cfg.AMG_Config::getParameter<int>("aggregation_edge_weight_component", cfg_scope);
    // When false, leftover vertices become singletons instead of being merged.
    merge_singletons = cfg.AMG_Config::getParameter<int>("merge_singletons", cfg_scope) == 1;
    weight_formula = cfg.AMG_Config::getParameter<int>("weight_formula", cfg_scope);
}
// setAggregates for csr_matrix_h format.
// Host-backend stub: size-2 aggregation is implemented only on the device,
// so any host-side call is reported as an unsupported target.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_1x1(const Matrix_h &A,
        typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates)
{
    FatalError("Size2 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// setAggregates for block_dia_csr_matrix_h format.
// Host-backend stub: size-2 aggregation is implemented only on the device,
// so any host-side call is reported as an unsupported target.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(const Matrix_h &A,
        typename Matrix_h::IVector &aggregates, typename Matrix_h::IVector &aggregates_global, int &num_aggregates)
{
    FatalError("Size2 selector: setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
#ifndef DELETE
// setAggregates for csr_matrix_d format (device, scalar CSR path).
// Pairwise (size-2) aggregation: extract the diagonal once, then repeatedly
// match mutually-strongest neighbours (handshaking) until converged, and
// finally sweep leftover vertices into existing aggregates.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_1x1(
    const Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
    // Size the aggregates vector to cover halo rows on multi-GPU runs.
    if (!A.is_matrix_singleGPU())
    {
        aggregates.resize(A.manager->halo_offset(A.manager->num_neighbors()));
    }
    else
    {
        aggregates.resize(A.get_num_rows());
    }

    thrust::fill(aggregates.begin(), aggregates.end(), -1); // -1 == unaggregated
    cudaCheckError();
    const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
    const IndexType *A_column_indices_ptr = A.col_indices.raw();
    const ValueType *A_values_ptr = A.values.raw();
    const IndexType *A_dia_ptr = A.diag.raw();
    IndexType num_rows = (int)A.get_num_rows();
    typename Matrix_d::IVector strongest_neighbour(num_rows, -1);
    typename Matrix_d::MVector diag(num_rows, 0);
    // NOTE(review): 'aggregated' appears unused in this routine — confirm
    // before removing (kept here to avoid changing allocation behaviour).
    Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_rows, 0);
    IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
    ValueType *diag_ptr = diag.raw();
    IndexType *aggregates_ptr = aggregates.raw();
    const int threads_per_block = 256;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_rows - 1) / threads_per_block + 1 );
    // Extract the diagonal once; edge weights are computed on the fly later.
    getDiagonalKernelNoDiaProp <<< num_blocks, threads_per_block>>>(A_dia_ptr, A_values_ptr, num_rows, diag_ptr);
    cudaCheckError();
    int numUnassigned = num_rows;
    int numUnassigned_previous = numUnassigned;
    int icount = 0;

    // Handshaking sweeps: stop on full assignment, iteration cap, tolerance,
    // or when a sweep makes no progress.
    do
    {
        // For each row, find the strongest neighbour who hasn't been assigned
        findStrongestNeighbourCsr <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_values_ptr, diag_ptr, num_rows, aggregates_ptr, strongest_neighbour_ptr);
        cudaCheckError();
        // Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
        matchEdges <<< num_blocks, threads_per_block>>>(num_rows, aggregates_ptr, strongest_neighbour_ptr);
        cudaCheckError();
        numUnassigned_previous = numUnassigned;
        numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_rows, -1);
        cudaCheckError();
        icount++;
    }
    while (!(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_rows < this->numUnassigned_tol || numUnassigned_previous == numUnassigned));

    // Merge remaining vertices with current aggregates
    if (!this->deterministic)
    {
        while (numUnassigned != 0 )
        {
            mergeWithExistingAggregatesCsr <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_values_ptr,
                    diag_ptr, num_rows, aggregates_ptr, this->deterministic, (IndexType *) NULL);
            cudaCheckError();
            numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_rows, -1);
            cudaCheckError();
        }
    }
    else
    {
        // Deterministic path: stage the decisions in aggregates_candidate,
        // then apply them in a separate pass so the result is order-independent.
        typename Matrix_d::IVector aggregates_candidate(num_rows, -1);

        while (numUnassigned != 0 )
        {
            mergeWithExistingAggregatesCsr <<< num_blocks, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_values_ptr,
                    diag_ptr, num_rows, aggregates_ptr, this->deterministic, aggregates_candidate.raw());
            // BUGFIX: this launch was unchecked (only a "// Sync here" comment);
            // every other launch in this routine is followed by cudaCheckError().
            cudaCheckError();
            joinExistingAggregates <<< num_blocks, threads_per_block>>>(num_rows, aggregates_ptr, aggregates_candidate.raw());
            cudaCheckError();
            numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_rows, -1);
            cudaCheckError();
        }

        aggregates_candidate.resize(0);
    }

    // Compact the aggregate labels and report how many were formed.
    this->renumberAndCountAggregates(aggregates, aggregates_global, num_rows, num_aggregates);
}
#endif
// setAggregates for block_dia_csr_matrix_d format.
// Device-side size-2 aggregation for square-block matrices:
//   1) compute all edge weights once,
//   2) iterate handshaking (1- or 2-phase) + matchEdges until converged,
//   3) fold leftover vertices into neighbouring aggregates or make them
//      singletons,
//   4) renumber the aggregate labels.
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void Size2Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(
    const Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates)
{
    IndexType num_block_rows = (int) A.get_num_rows();
    IndexType num_nonzero_blocks = (int) A.get_num_nz();
    // both ways are supported
    IndexType total_nz = (A.is_matrix_singleGPU()) ? num_nonzero_blocks : A.manager->num_nz_all();
    // COO-style row indices, needed by the one-thread-per-nonzero weight kernel.
    typename Matrix_d::IVector row_indices(total_nz);
    cusp::detail::offsets_to_indices(A.row_offsets, row_indices);
    IndexType total_rows = (A.is_matrix_singleGPU()) ? A.get_num_rows() : A.manager->num_rows_all();
    aggregates.resize(total_rows);
    thrust::fill(aggregates.begin(), aggregates.end(), -1); // -1 == unaggregated
    cudaCheckError();
    const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
    const IndexType *A_row_indices_ptr = row_indices.raw();
    const IndexType *A_column_indices_ptr = A.col_indices.raw();
    const IndexType *A_dia_idx_ptr = A.diag.raw();
    const ValueType *A_nonzero_values_ptr = A.values.raw();
    typename Matrix_d::IVector strongest_neighbour(num_block_rows, -1);
    typename Matrix_d::IVector strongest_neighbour_1phase(num_block_rows, -1);
    // NOTE(review): 'aggregated' appears unused below — confirm before removing.
    Vector<TemplateConfig<AMGX_device, AMGX_vecUInt, t_matPrec, t_indPrec> > aggregated(num_block_rows, 0);
    IndexType *strongest_neighbour_ptr = strongest_neighbour.raw();
    IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw();
    IndexType *aggregates_ptr = aggregates.raw();
    const int threads_per_block = 256;
    const int num_blocks = min( AMGX_GRID_MAX_SIZE, (num_block_rows - 1) / threads_per_block + 1 );
    // Launch grid for the weight kernel: one thread per nonzero block.
    const int num_blocks_V2 = min( AMGX_GRID_MAX_SIZE, (num_nonzero_blocks - 1) / threads_per_block + 1);
    int numUnassigned = num_block_rows;
    int numUnassigned_previous = numUnassigned;
    Vector<TemplateConfig<AMGX_device, AMGX_vecFloat, t_matPrec, t_indPrec> > edge_weights(num_nonzero_blocks, -1);
    float *edge_weights_ptr = edge_weights.raw();
    float *rand_edge_weights_ptr = NULL;//(this->two_phase ? rand_edge_weights.raw() : NULL);
    // Compute the edge weights
    cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType, ValueType, float>, cudaFuncCachePreferL1);
    computeEdgeWeightsBlockDiaCsr_V2 <<< num_blocks_V2, threads_per_block, 0, thrust::global_thread_handle::get_stream()>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, num_nonzero_blocks, edge_weights_ptr, rand_edge_weights_ptr, num_block_rows, A.get_block_dimy(), this->m_aggregation_edge_weight_component, this->weight_formula);
    cudaCheckError();
    cudaStream_t str = thrust::global_thread_handle::get_stream();
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
    // Throttled convergence check: the unaggregated count is produced on the
    // GPU and copied back asynchronously, overlapping with the next sweep.
    AsyncEvent *throttle_event = new AsyncEvent;
    throttle_event->create();
    // TODO: pinned host memory
    typename Matrix_h::IVector h_unagg_vec(1);
    typename Matrix_d::IVector d_unagg_vec(1);
    int *unaggregated = h_unagg_vec.raw();
    int *d_unaggregated = d_unagg_vec.raw();
#endif
    int icount, s = 1;
    {
        icount = 0;
        float *weights_ptr = edge_weights_ptr;

        do
        {
            if ( !this->two_phase )
            {
                // 1-phase handshaking
                findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons);
                cudaCheckError();
            }
            else
            {
                // 2-phase handshaking
                findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 1, this->merge_singletons);
                cudaCheckError();
                // 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase
                findStrongestNeighbourBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, num_block_rows, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, A.get_block_dimy(), 2, this->merge_singletons);
                cudaCheckError();
            }

            // Look for perfect matches. Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour
            matchEdges <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, strongest_neighbour_ptr);
            cudaCheckError();
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
            // Alternate sweeps: even iterations (s == 0) kick off the async
            // count, odd iterations wait for it, so the convergence test lags
            // one sweep behind; the (s == 0) term in the loop condition forces
            // the extra pass.
            s = (icount & 1);

            if ( s == 0 )
            {
                // count unaggregated vertices
                cudaMemsetAsync(d_unaggregated, 0, sizeof(int), str);
                countAggregates<IndexType, threads_per_block> <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, d_unaggregated);
                cudaCheckError();
                cudaMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), cudaMemcpyDeviceToHost, str);
                throttle_event->record(str);
            }
            else
            {
                throttle_event->sync();
                numUnassigned_previous = numUnassigned;
                numUnassigned = *unaggregated;
            }
#else
            // Synchronous fallback: count on the host stream every sweep.
            cudaStreamSynchronize(str);
            numUnassigned_previous = numUnassigned;
            numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
            cudaCheckError();
#endif
            icount++;
        }
        while ( (s == 0) || !(numUnassigned == 0 || icount > this->max_iterations || 1.0 * numUnassigned / num_block_rows < this->numUnassigned_tol || numUnassigned == numUnassigned_previous));
    }
#ifdef EXPERIMENTAL_ITERATIVE_MATCHING
    delete throttle_event;
#endif

    if ( this->merge_singletons )
    {
        // Merge remaining vertices with current aggregates
        if (!this->deterministic)
        {
            while (numUnassigned != 0)
            {
                mergeWithExistingAggregatesBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, (IndexType *) NULL);
                cudaCheckError();
                numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
                cudaCheckError();
            }
        }
        else
        {
            // Deterministic path: stage decisions in aggregates_candidate and
            // apply them in a separate pass so the result is order-independent.
            typename Matrix_d::IVector aggregates_candidate(num_block_rows, -1);

            while (numUnassigned != 0)
            {
                mergeWithExistingAggregatesBlockDiaCsr_V2 <<< num_blocks, threads_per_block, 0, str>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, num_block_rows, aggregates_ptr, A.get_block_dimy(), this->deterministic, aggregates_candidate.raw());
                cudaCheckError();
                joinExistingAggregates <<< num_blocks, threads_per_block, 0, str>>>(num_block_rows, aggregates_ptr, aggregates_candidate.raw());
                cudaCheckError();
                numUnassigned = (int)thrust::count(aggregates.begin(), aggregates.begin() + num_block_rows, -1);
                cudaCheckError();
            }

            aggregates_candidate.resize(0);
        }
    }
    else
    {
        //make singletons
        aggregateSingletons <<< num_blocks, threads_per_block, 0, str>>>( aggregates_ptr, num_block_rows );
        cudaCheckError();
    }

    // Compact the aggregate labels and report how many were formed.
    this->renumberAndCountAggregates(aggregates, aggregates_global, num_block_rows, num_aggregates);
}
// Public entry point (CUDA build): Size-2 aggregation is only implemented
// for square blocks, so rectangular block dimensions raise a fatal error.
template<class T_Config>
void Size2SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A,
        IVector &aggregates, IVector &aggregates_global, int &num_aggregates)
{
    if (A.get_block_dimx() == A.get_block_dimy())
    {
        setAggregates_common_sqblocks( A, aggregates, aggregates_global, num_aggregates );
        return;
    }

    FatalError("Unsupported block size for Size2", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
// -------------------------
// Explicit instantiations
// -------------------------
// Instantiate the selector templates for every build configuration listed by
// AMGX_FORALL_BUILDS so the definitions in this translation unit are
// available at link time.
#define AMGX_CASE_LINE(CASE) template class Size2SelectorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class Size2Selector<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
}
|
9de25ae6148e937e42632df8d1a853ca51b2717d.hip | // !!! This is a file automatically generated by hipify!!!
/** @file vl_imreadjpeg2.cu
** @brief Load images asynchronously
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include "bits/imread.hpp"
#include "bits/impl/imread_helpers.hpp"
#include <assert.h>
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include "bits/datamex.hpp"
#include "bits/mexutils.h"
#ifdef _MSC_VER
#undef max
#undef min
#endif
// Diagnostic verbosity: messages written through LOG(level) are emitted only
// while verbosity >= level (see the LOG macro below).
static int verbosity = 0 ;

/* option codes */
enum {
  opt_num_threads = 0,
  opt_prefetch,
  opt_resize,
  opt_pack,
  opt_gpu,
  opt_verbose,
  opt_subtract_average,
  opt_crop_size,
  opt_crop_location,
  opt_crop_anisotropy,
  opt_flip,
  opt_contrast,
  opt_saturation,
  opt_brightness
} ;

/* options table: {MATLAB option name, second field, option code}, terminated
   by a null row.
   NOTE(review): the second field appears to indicate whether the option
   takes a value (1) or is a flag (0) — confirm against the VLMXOption
   definition in mexutils.h. */
VLMXOption  options [] = {
  {"NumThreads",       1, opt_num_threads      },
  {"Prefetch",         0, opt_prefetch         },
  {"Verbose",          0, opt_verbose          },
  {"Resize",           1, opt_resize           },
  {"Pack",             0, opt_pack             },
  {"GPU",              0, opt_gpu              },
  {"SubtractAverage",  1, opt_subtract_average },
  {"CropAnisotropy",   1, opt_crop_anisotropy  },
  {"CropSize",         1, opt_crop_size        },
  {"CropLocation",     1, opt_crop_location    },
  {"Flip",             0, opt_flip             },
  {"Brightness",       1, opt_brightness       },
  {"Contrast",         1, opt_contrast         },
  {"Saturation",       1, opt_saturation       },
  {0,                  0, 0                    }
} ;

/* indices of the MEX input arguments */
enum {
  IN_FILENAMES = 0, IN_END
} ;

/* indices of the MEX output arguments */
enum {
  OUT_IMAGES = 0, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
// Logger accumulates a message in a string stream and prints it (with
// a trailing newline) when the temporary object is destroyed, i.e. at
// the end of the full expression built by the LOG/LOGERROR macros.
class Logger
{
public:
Logger() ;
~Logger() ;
// Stream on which callers append the message via operator<<.
std::ostringstream & getStream() ;
protected:
std::ostringstream stringStream ;
private:
// Disable copying: declared but intentionally left undefined.
Logger(const Logger&) ;
Logger& operator= (const Logger&) ;
} ;
}
// Nothing to do: the message is accumulated through getStream().
vl::Logger::Logger()
{ }
// Flush the accumulated message to standard output, newline-terminated.
vl::Logger::~Logger()
{
printf("%s\n", stringStream.str().c_str()) ;
//fflush(stdout) ;
}
// Return the underlying stream so callers can append with operator<<.
std::ostringstream &
vl::Logger::getStream()
{
return stringStream ;
}
// Unconditionally log an error message. The buffered text is printed
// (with a newline) by the Logger destructor at the end of the full
// expression. Fix: the macro previously emitted the "[info] " prefix,
// which mislabeled error messages as informational.
#define LOGERROR \
vl::Logger().getStream() \
<<"[error] "<<__func__<<"::"
// Log an informational message only when the global verbosity level is
// at least `level`; otherwise the expression is a no-op.
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
/* ---------------------------------------------------------------- */
/* Batch */
/* ---------------------------------------------------------------- */
// Batch coordinates a pool of reader threads that load, crop, resize
// and color-jitter a list of images. Items move through the states
// prefetch (read header only) -> fetch (decode + transform) -> ready;
// worker threads pull items with borrowNextItem() and hand them back
// with returnItem().
class Batch
{
public:
// One image of the batch, together with its decoding state and the
// geometric/color transformation sampled for it.
struct Item
{
enum State {
prefetch,
fetch,
ready
} state ;
Batch const & batch ;
std::string name ;
vl::ImageShape shape ;
mxArray * array ;
vl::ErrorCode error ;
char errorMessage [512] ;
bool borrowed ;
vl::MexTensor cpuArray ;
vl::MexTensor gpuArray ;
int index ;
// Geometry of the output image and of the crop taken from the input.
size_t outputWidth ;
size_t outputHeight ;
size_t outputNumChannels ;
size_t cropWidth ;
size_t cropHeight ;
size_t cropOffsetX ;
size_t cropOffsetY ;
bool flip ;
// Sampled color-jitter parameters applied during fetch.
float brightnessShift [3] ;
float contrastShift ;
float saturationShift ;
Item(Batch const & batch) ;
mxArray * relinquishArray() ;
} ;
enum ResizeMethod {
noResize,
resizeShortestSide,
fixedSize
} ;
enum PackingMethod {
individualArrays,
singleArray
};
enum CropLocation {
cropCenter,
cropRandom
} ;
Batch(vl::MexContext & context) ;
~Batch() ;
vl::ErrorCode init() ;
void finalize() ;
vl::ErrorCode registerItem(std::string const & name) ;
size_t getNumberOfItems() const ;
Item * getItem(int index) ;
void clear() ;
void sync() const ;
vl::ErrorCode prefetch() ;
mxArray * relinquishArray() ;
void setGpuMode(bool gpu) ;
void setPackingMethod(PackingMethod method) ;
void setResizeMethod(ResizeMethod method, int height, int width) ;
void setAverage(double average []) ;
void setAverageImage(float const * image) ;
void setColorDeviation(double brightness [], double contrast, double saturation) ;
void setFlipMode(bool x) ;
void setCropAnisotropy(double minAnisotropy, double maxAnisotropy) ;
void setCropSize(double minSize, double maxSize) ;
void setCropLocation(CropLocation location) ;
PackingMethod getPackingMethod() const ;
Item * borrowNextItem() ;
void returnItem(Item * item) ;
private:
vl::MexContext & context ;
// Protects all mutable batch state below; both condition variables
// are used together with this mutex.
tthread::mutex mutable mutex ;
tthread::condition_variable mutable waitNextItemToBorrow ;
tthread::condition_variable mutable waitCompletion ;
bool quit ;
typedef std::vector<Item*> items_t ;
items_t items ;
int nextItem ;
int numReturnedItems ;
enum PackingMethod packingMethod ;
enum ResizeMethod resizeMethod ;
int resizeHeight ;
int resizeWidth ;
bool gpuMode ;
double average [3] ;
float * averageImage ;
double contrastDeviation ;
double saturationDeviation ;
// 3x3 brightness covariance factor, stored column major (see the
// [i + 3*j] indexing in prefetch()).
double brightnessDeviation [9] ;
double minCropAnisotropy ;
double maxCropAnisotropy ;
double minCropSize ;
double maxCropSize ;
CropLocation cropLocation ;
bool flipMode ;
vl::MexTensor cpuPack ;
vl::MexTensor gpuPack ;
friend class ReaderTask ;
int gpuDevice ;
#if ENABLE_GPU
bool cudaStreamInitialized ;
hipStream_t cudaStream ;
// Pinned host staging buffer used for the async host-to-device copy
// of the packed output.
float * cpuPinnedPack ;
size_t cpuPinnedPackSize ;
#endif
} ;
// Construct an item bound to its owning batch, in the 'ready' state
// with no error and a cleared error-message buffer.
Batch::Item::Item(Batch const & batch)
: batch(batch),
  cpuArray(batch.context),
  gpuArray(batch.context),
  borrowed(false),
  error(vl::VLE_Success),
  state(ready),
  flip(false)
{
  // Fix: the original call was memset(errorMessage, sizeof(errorMessage), 0),
  // which swaps the value and size arguments and therefore clears zero
  // bytes, leaving the buffer uninitialized.
  memset(errorMessage, 0, sizeof(errorMessage)) ;
}
// Transfer ownership of this item's decoded image to the caller.
// The tensor matching the batch's current mode (GPU or CPU) is released.
mxArray * Batch::Item::relinquishArray()
{
  vl::MexTensor & owner = batch.gpuMode ? gpuArray : cpuArray ;
  return owner.relinquish() ;
}
// Transfer ownership of the packed output array (single-array packing
// mode) to the caller, picking the tensor that matches the current mode.
mxArray * Batch::relinquishArray()
{
  vl::MexTensor & owner = gpuMode ? gpuPack : cpuPack ;
  return owner.relinquish() ;
}
// Construct an idle batch bound to the given MEX context. The batch is
// unusable until init() is called (quit starts true).
// Fix: cudaStreamInitialized and gpuDevice were previously left
// uninitialized; init() reads cudaStreamInitialized before any code
// path assigns it, so a garbage 'true' could cause hipStreamDestroy()
// on a bogus stream handle.
Batch::Batch(vl::MexContext & context)
: context(context),
  cpuPack(context),
  gpuPack(context),
  quit(true),
  resizeMethod(noResize),
  packingMethod(individualArrays),
  gpuMode(false),
  numReturnedItems(0),
  averageImage(NULL),
  gpuDevice(-1)
#if ENABLE_GPU
, cudaStreamInitialized(false),
  cpuPinnedPack(NULL),
  cpuPinnedPackSize(0)
#endif
{ }
// Release all resources and unblock any waiting reader threads.
Batch::~Batch()
{
finalize() ;
}
// Number of images registered in the current batch.
size_t Batch::getNumberOfItems() const
{
return items.size() ;
}
// Access the index-th item. The index is not bounds checked; callers
// must keep it below getNumberOfItems().
Batch::Item * Batch::getItem(int index)
{
return items[index] ;
}
// Reset the batch to an empty, default-configured state, ready for a
// new cycle. Any previous batch (and its CUDA stream) is torn down
// first.
// NOTE(review): relies on cudaStreamInitialized having been set to
// false before the first call -- verify the constructor initializes it.
vl::ErrorCode Batch::init()
{
finalize() ;
LOG(2)<<"beginning batch" ;
quit = false ;
nextItem = 0 ;
numReturnedItems = 0 ;
// Restore defaults
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
contrastDeviation = 0. ;
saturationDeviation = 0. ;
memset(average, 0, sizeof(average)) ;
averageImage = NULL ;
cropLocation = cropCenter ;
minCropSize = 1. ;
maxCropSize = 1. ;
minCropAnisotropy = 1. ;
maxCropAnisotropy = 1. ;
flipMode = false ;
packingMethod = individualArrays ;
resizeMethod = noResize ;
gpuMode = false ;
gpuDevice = -1 ;
#if ENABLE_GPU
// Destroy the stream from the previous cycle (if any); setGpuMode()
// will create a fresh one on demand.
if (cudaStreamInitialized) {
hipStreamDestroy(cudaStream) ;
cudaStreamInitialized = false ;
}
#endif
return vl::VLE_Success ;
}
// Tear the batch down: wait for workers to return all items, release
// the pinned staging buffer and tell blocked reader threads to quit.
void Batch::finalize()
{
LOG(2)<<"finalizing batch" ;
// Clear current batch
clear() ;
// Release memory
#if ENABLE_GPU
if (cpuPinnedPack) {
hipHostFree(cpuPinnedPack) ;
cpuPinnedPack = 0 ;
cpuPinnedPackSize = 0 ;
}
#endif
// Signal waiting threads that we are quitting
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
quit = true ;
waitNextItemToBorrow.notify_all() ;
}
}
// Called by reader threads: block until an unprocessed item is
// available and hand it out, or return NULL when the batch is shutting
// down (quit flag). The item is marked borrowed until returnItem().
Batch::Item * Batch::borrowNextItem()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (true) {
if (quit) { return NULL ; }
if (nextItem < items.size()) {
Item * item = items[nextItem++] ;
item->borrowed = true ;
return item ;
}
// Sleep until registerItem()/prefetch() publishes more work or
// finalize() raises the quit flag.
waitNextItemToBorrow.wait(mutex) ;
}
}
// Called by reader threads when they finish processing an item. The
// thread that returns the *last* fetched item of a packed GPU batch
// also enqueues the single async host-to-device copy of the whole pack.
void Batch::returnItem(Batch::Item * item)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
numReturnedItems ++ ;
if (item->state == Item::fetch &&
numReturnedItems == items.size() &&
packingMethod == singleArray &&
gpuMode) {
#if ENABLE_GPU
LOG(2) << "push to GPU the pack" ;
hipError_t cerror ;
// Async copy on the batch stream; completion is awaited in sync().
cerror = hipMemcpyAsync (gpuPack.getMemory(),
cpuPinnedPack,
gpuPack.getNumElements() * sizeof(float),
hipMemcpyHostToDevice,
cudaStream) ;
if (cerror != hipSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"cudaMemcpyAsnyc : '%s'", hipGetErrorString(cerror)) ;
}
#endif
}
item->borrowed = false ;
item->state = Batch::Item::ready ;
// Wake sync()/clear(), which wait on completion of all items.
waitCompletion.notify_all() ;
}
// Set (or clear, when image is NULL) the average image subtracted from
// every output. The image must be resizeHeight x resizeWidth x 3, which
// is only meaningful with the fixedSize resize method; a private copy
// is taken.
void Batch::setAverageImage(float const * image)
{
  if (image == NULL) {
    if (averageImage) {
      free(averageImage) ;
      averageImage = NULL ;
    }
    return ;
  }
  assert (resizeMethod == fixedSize) ;
  // Fix: free any previously-set average image before allocating the
  // new copy; the original code leaked it when called twice with a
  // non-NULL image.
  if (averageImage) {
    free(averageImage) ;
  }
  size_t const bytes = sizeof(float) * resizeHeight * resizeWidth * 3 ;
  averageImage = (float*)malloc(bytes) ;
  memcpy(averageImage, image, bytes) ;
}
// Drain the current batch: stop handing out items, wait for workers to
// return every borrowed item, then delete all items and reset counters.
void Batch::clear()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Stop threads from getting more tasks. After this any call to borrowItem() by a worker will
// stop in a waiting state. Thus, we simply wait for all of them to return their items.
nextItem = (int)items.size() ;
// Wait for all thread to return their items
for (int i = 0 ; i < items.size() ; ++i) {
while (items[i]->borrowed) {
waitCompletion.wait(mutex) ;
}
}
for (int i = 0 ; i < items.size() ; ++i) {
delete items[i] ;
}
items.clear() ;
// Clear average image
setAverageImage(NULL) ;
// At the end of the current (empty) list
nextItem = 0 ;
numReturnedItems = 0 ;
}
// Block until every item of the batch has been processed and, in GPU
// mode, until the async copies queued on the batch stream complete.
void Batch::sync() const
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Wait for threads to complete work for all items.
// Note that it is not enough to check that threads are all in a
// "done" state as this does not mean that all work has been done yet.
// Instead, we look at the number of items returned.
while (numReturnedItems < items.size()) {
waitCompletion.wait(mutex) ;
}
if (gpuMode) {
#if ENABLE_GPU
hipError_t cerror ;
cerror = hipStreamSynchronize(cudaStream) ;
if (cerror != hipSuccess) {
LOGERROR << "CUDA error while synchronizing a stream: '" << hipGetErrorString(cerror) << '\'' ;
}
#endif
}
}
// Append a new item (in the prefetch state) for the named image and
// wake one reader thread so it can start reading the image header.
vl::ErrorCode Batch::registerItem(std::string const & name)
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  Item * newItem = new Item(*this) ;
  newItem->index = (int)items.size() ;
  newItem->name = name ;
  newItem->state = Item::prefetch ;
  items.push_back(newItem) ;
  waitNextItemToBorrow.notify_one() ;
  return vl::VLE_Success ;
}
// Enable or disable GPU output. On first enablement, remember the
// current CUDA device and lazily create the non-blocking stream used
// for the async host-to-device copies.
void Batch::setGpuMode(bool gpu)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
#if ENABLE_GPU
if (gpu) {
hipGetDevice(&gpuDevice) ;
if (!cudaStreamInitialized) {
hipError_t cerror ;
cerror = hipStreamCreateWithFlags(&cudaStream, hipStreamNonBlocking) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA error while creating a stream '"
<< hipGetErrorString(cerror) << '\"' ;
} else {
cudaStreamInitialized = true ;
}
}
}
#endif
gpuMode = gpu ;
}
// Record how output images are sized: not at all, by scaling the
// shortest side to `height`, or to a fixed `height` x `width`.
void Batch::setResizeMethod(Batch::ResizeMethod method, int height, int width)
{
  this->resizeMethod = method ;
  this->resizeHeight = height ;
  this->resizeWidth = width ;
}
// Choose between one array per image and a single packed 4D array.
void Batch::setPackingMethod(Batch::PackingMethod method)
{
  assert(method == individualArrays || method == singleArray) ;
  this->packingMethod = method ;
}
// Current packing method (individual arrays or a single packed array).
Batch::PackingMethod Batch::getPackingMethod() const
{
return packingMethod ;
}
// Store the per-channel average color (3 values) subtracted from the
// output images.
void Batch::setAverage(double average [])
{
  for (int k = 0 ; k < 3 ; ++k) {
    this->average[k] = average[k] ;
  }
}
// Store the color-jitter parameters: a 3x3 brightness covariance
// factor (9 values) and scalar contrast/saturation deviations.
void Batch::setColorDeviation(double brightness [], double contrast, double saturation)
{
  for (int k = 0 ; k < 9 ; ++k) {
    brightnessDeviation[k] = brightness[k] ;
  }
  contrastDeviation = contrast ;
  saturationDeviation = saturation ;
}
// Enable random horizontal flipping of the output images.
void Batch::setFlipMode(bool x)
{
flipMode = x ;
}
// Set the admissible range of crop aspect-ratio distortion. The
// special pair (0,0) means "stretch to match the input shape" (see
// prefetch()).
// NOTE(review): only minAnisotropy is checked against the [0,1] range
// here, although the MEX option parser allows values above 1 -- confirm
// the intended domain.
void Batch::setCropAnisotropy(double minAnisotropy, double maxAnisotropy)
{
assert(minAnisotropy <= maxAnisotropy) ;
assert(0.0 <= minAnisotropy && minAnisotropy <= 1.0) ;
minCropAnisotropy = minAnisotropy ;
maxCropAnisotropy = maxAnisotropy ;
}
// Set the admissible range of the relative crop size, both in [0,1].
void Batch::setCropSize(double minSize, double maxSize)
{
assert(minSize <= maxSize) ;
assert(0.0 <= minSize && minSize <= 1.0) ;
assert(0.0 <= maxSize && maxSize <= 1.0) ;
minCropSize = minSize ;
maxCropSize = maxSize ;
}
// Choose whether crops are centered or placed at a random location.
void Batch::setCropLocation(CropLocation location)
{
assert(location == cropCenter || location == cropRandom) ;
cropLocation = location ;
}
//void Batch::getItemTransformation(Item * item)
//{
//
//}
// Second phase of a batch cycle. After the reader threads have read
// every image header (prefetch state), this: (1) allocates the packed
// output (singleArray mode), (2) samples a geometric transformation
// (resize, crop, flip) and color jitter for each item, (3) allocates
// the per-item outputs (individualArrays mode), and (4) moves each item
// to the fetch state, waking the reader threads to do the decoding.
vl::ErrorCode Batch::prefetch()
{
  // Wait for reader threads to initialize the shape of the images
  // and then perform the required allocations.
  sync() ;
  // In packing mode, preallocate all memory here.
  if (packingMethod == singleArray) {
    assert(resizeMethod == fixedSize) ;
    vl::TensorShape shape(resizeHeight, resizeWidth, 3, getNumberOfItems()) ;
    if (gpuMode) {
#if ENABLE_GPU
      gpuPack.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
      gpuPack.makePersistent() ;
      // Grow-only pinned staging buffer for the async copy to the GPU.
      size_t memSize = shape.getNumElements() * sizeof(float) ;
      if (cpuPinnedPackSize < memSize) {
        if (cpuPinnedPack) {
          hipHostFree(cpuPinnedPack) ;
        }
        hipHostMalloc(&cpuPinnedPack, memSize) ;
        cpuPinnedPackSize = memSize ;
      }
#endif
    } else {
      cpuPack.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
      cpuPack.makePersistent() ;
    }
  }
  // Get ready to reprocess all items.
  nextItem = 0 ;
  numReturnedItems = 0 ;
  for (int i = 0 ; i < getNumberOfItems() ; ++ i) {
    Batch::Item * item = getItem(i) ;
    if (item->error == vl::VLE_Success) {
      if (verbosity >= 2) {
        mexPrintf("%20s: %d x %d x %d\n", item->name.c_str(), item->shape.width, item->shape.height, item->shape.depth) ;
      }
    } else {
      mexPrintf("%20s: error '%s'\n", item->name.c_str(), item->errorMessage) ;
    }
    // Determine the shape of (height and width) of the output image. This is either
    // the same as the input image, or with a fixed size for the shortest side,
    // or a fixed size for both sides.
    int outputHeight ;
    int outputWidth ;
    double cropHeight ;
    double cropWidth ;
    int dx ;
    int dy ;
    switch (resizeMethod) {
      case noResize:
        outputHeight = (int)item->shape.height ;
        outputWidth = (int)item->shape.width ;
        break ;
      case resizeShortestSide: {
        // In this mode resizeHeight == resizeWidth (both are set from
        // the single RESIZE scalar), hence resizeHeight is used for
        // both scale factors.
        double scale1 = (double)resizeHeight / item->shape.width ;
        double scale2 = (double)resizeHeight / item->shape.height ;
        double scale = ::max(scale1, scale2) ;
        outputHeight = ::max(1.0, round(scale * item->shape.height)) ;
        outputWidth = ::max(1.0, round(scale * item->shape.width)) ;
        break ;
      }
      case fixedSize:
        outputHeight = resizeHeight ;
        outputWidth = resizeWidth ;
        break ;
    }
    // Determine the aspect ratio of the crop in the input image.
    {
      double anisotropyRatio = 1.0 ;
      if (minCropAnisotropy == 0 && maxCropAnisotropy == 0) {
        // Stretch crop to have the same shape as the input.
        double inputAspect = (double)item->shape.width / item->shape.height ;
        double outputAspect = (double)outputWidth / outputHeight ;
        anisotropyRatio = inputAspect / outputAspect ;
      } else {
        // Sample uniformly in [minCropAnisotropy, maxCropAnisotropy].
        double z = (double)rand() / RAND_MAX ;
        anisotropyRatio = z * (maxCropAnisotropy - minCropAnisotropy) + minCropAnisotropy ;
      }
      cropWidth = outputWidth * sqrt(anisotropyRatio) ;
      cropHeight = outputHeight / sqrt(anisotropyRatio) ;
    }
    // Determine the crop size.
    {
      // Largest scale at which the crop still fits in the input image,
      // shrunk by a random relative size in [minCropSize, maxCropSize].
      double scale = ::min(item->shape.width / cropWidth,
                           item->shape.height / cropHeight) ;
      double z = (double)rand() / RAND_MAX ;
      double size = z * (maxCropSize - minCropSize) + minCropSize ;
      cropWidth *= scale * size ;
      cropHeight *= scale * size ;
    }
    cropWidth = ::min(round(cropWidth), (double)item->shape.width) ;
    cropHeight = ::min(round(cropHeight), (double)item->shape.height) ;
    // Determine the crop location.
    {
      dx = item->shape.width - cropWidth ;
      dy = item->shape.height - cropHeight ;
      switch (cropLocation) {
        case cropCenter:
          dx /= 2 ;
          dy /= 2 ;
          break ;
        case cropRandom:
          dx = rand() % (dx + 1) ;
          dy = rand() % (dy + 1) ;
          break ;
        default:
          LOGERROR << "cropLocation not set" ;
      }
    }
    // Save.
    item->outputWidth = outputWidth ;
    item->outputHeight = outputHeight ;
    // Packed output is always 3 channels; otherwise keep the input depth.
    item->outputNumChannels = (packingMethod == individualArrays) ? item->shape.depth : 3 ;
    item->cropWidth = cropWidth ;
    item->cropHeight = cropHeight ;
    item->cropOffsetX = dx ;
    item->cropOffsetY = dy ;
    item->flip = flipMode && (rand() > RAND_MAX/2) ;
    // Color processing.
    item->saturationShift = 1. + saturationDeviation * (2.*(double)rand()/RAND_MAX - 1) ;
    item->contrastShift = 1. + contrastDeviation * (2.*(double)rand()/RAND_MAX - 1.) ;
    {
      // Brightness jitter: shift = B * w with w ~ N(0, I), so that the
      // shift is distributed as N(0, B * B').
      int numChannels = item->outputNumChannels ;
      double w [3] ;
      for (int i = 0 ; i < numChannels ; ++i) { w[i] = vl::randn() ; }
      for (int i = 0 ; i < numChannels ; ++i) {
        item->brightnessShift[i] = 0. ;
        for (int j = 0 ; j < numChannels ; ++j) {
          // Fix: the original code multiplied by w[i] (constant in this
          // inner loop), which degenerates the matrix-vector product
          // B*w into w[i] * sum_j B(i,j). Use w[j] as intended
          // (brightnessDeviation is stored column major).
          item->brightnessShift[i] += brightnessDeviation[i + 3*j] * w[j] ;
        }
      }
    }
    LOG(2)
    << "input (" << item->shape.width << " x " << item->shape.height << " x " << item->shape.depth << ") "
    << "output (" << item->outputWidth << " x " << item->outputHeight << " x " << item->outputNumChannels << ") "
    << "crop (" << item->cropWidth << " x " << item->cropHeight << ") "
    << "offset (" << item->cropOffsetX << ", " << item->cropOffsetY << ")" ;
    if (packingMethod == individualArrays) {
      vl::TensorShape shape(outputHeight, outputWidth, item->outputNumChannels, 1) ;
      item->cpuArray.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
      item->cpuArray.makePersistent() ;
      if (gpuMode) {
        item->gpuArray.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
        item->gpuArray.makePersistent() ;
      }
    }
    // Ready to fetch
    {
      tthread::lock_guard<tthread::mutex> lock(mutex) ;
      item->state = Item::fetch ;
      waitNextItemToBorrow.notify_one() ;
    }
  }
  return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* ReaderTask */
/* ---------------------------------------------------------------- */
// ReaderTask owns one worker thread that repeatedly borrows items from
// the shared Batch, reads/decodes them with its private ImageReader,
// and returns them. Two grow-only scratch buffers hold the decoded
// image and the vertically-resized intermediate.
class ReaderTask
{
public:
ReaderTask() ;
~ReaderTask() { finalize() ; }
vl::ErrorCode init(Batch * batch, int index) ;
void finalize() ;
private:
int index ;
Batch * batch ;
tthread::thread * thread ;
vl::ImageReader * reader ;
// Thread trampoline: forwards to entryPoint() of the passed instance.
static void threadEntryPoint(void * thing) ;
void entryPoint() ;
// Return a scratch buffer of at least `size` bytes (grow-only).
void * getBuffer(int index, size_t size) ;
// CUDA device this thread has last selected; compared against the
// batch's device to switch lazily.
int gpuDevice ;
private:
// Disable copying: declared but intentionally left undefined.
ReaderTask(ReaderTask const &) ;
ReaderTask & operator= (ReaderTask const &) ;
struct Buffer {
void * memory ;
size_t size ;
} buffers [2] ;
} ;
void ReaderTask::threadEntryPoint(void * thing)
{
((ReaderTask*)thing)->entryPoint() ;
}
// Construct an idle reader task; init() binds it to a batch and starts
// the worker thread.
// Fix: index and gpuDevice were previously left uninitialized; the
// worker's entry point compares gpuDevice against the batch's device
// before any code path assigns it.
ReaderTask::ReaderTask()
: index(-1), batch(NULL), thread(NULL), reader(NULL), gpuDevice(-1)
{
  memset(buffers, 0, sizeof(buffers)) ;
}
// Return a scratch buffer with capacity for at least `size` bytes,
// growing (never shrinking) the slot as needed. Returns NULL if the
// allocation fails.
void * ReaderTask::getBuffer(int index, size_t size)
{
  if (buffers[index].size < size) {
    // free(NULL) is a no-op, so no separate check is needed.
    free(buffers[index].memory) ;
    buffers[index].memory = malloc(size) ;
    // Fix: record the new capacity only when the allocation succeeded;
    // the original code recorded `size` unconditionally, so after a
    // failed growth the slot claimed capacity while holding NULL and
    // would never retry the allocation.
    buffers[index].size = (buffers[index].memory != NULL) ? size : 0 ;
  }
  return buffers[index].memory ;
}
void ReaderTask::entryPoint()
{
LOG(2) << "reader " << index << " task staring" ;
while (true) {
#if ENABLE_GPU
if (batch->gpuMode && batch->gpuDevice != gpuDevice) {
LOG(2) << "reader " << index << " setting GPU device" ;
hipSetDevice(batch->gpuDevice) ;
hipGetDevice(&gpuDevice) ;
}
#endif
Batch::Item * item = batch->borrowNextItem() ;
LOG(3) << "borrowed " << item ;
if (item == NULL) { break ; }
if (item->error != vl::VLE_Success) {
batch->returnItem(item) ;
continue ;
}
switch (item->state) {
case Batch::Item::prefetch: {
item->error = reader->readShape(item->shape, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
}
break ;
}
case Batch::Item::fetch: {
// Get the CPU buffer that will hold the pixels.
float * outputPixels;
if (batch->getPackingMethod() == Batch::individualArrays) {
outputPixels = (float*)item->cpuArray.getMemory() ;
} else {
if (batch->gpuMode) {
#if ENABLE_GPU
outputPixels = batch->cpuPinnedPack ;
#else
snprintf(item->errorMessage, sizeof(item->errorMessage), "GPU support not compiled.") ;
break;
#endif
} else {
outputPixels = (float*)batch->cpuPack.getMemory() ;
}
outputPixels += item->outputHeight*item->outputWidth*3*item->index ;
}
// Read full image.
float * inputPixels = (float*)getBuffer(0,
item->shape.height *
item->shape.width *
item->shape.depth * sizeof(float)) ;
item->error = reader->readPixels(inputPixels, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
break ;
}
// Crop.
float * temp = (float*)getBuffer(1,
item->outputHeight *
item->shape.width *
item->shape.depth * sizeof(float)) ;
vl::impl::imageResizeVertical(temp, inputPixels,
item->outputHeight,
item->shape.height,
item->shape.width,
item->shape.depth,
item->cropHeight,
item->cropOffsetY) ;
vl::impl::imageResizeVertical(outputPixels, temp,
item->outputWidth,
item->shape.width,
item->outputHeight,
item->shape.depth,
item->cropWidth,
item->cropOffsetX,
item->flip) ;
// Postprocess colors.
{
size_t inputNumChannels = item->shape.depth ;
size_t K = item->outputNumChannels ;
size_t n = item->outputHeight*item->outputWidth ;
if (batch->averageImage) {
// If there is an average image, then subtract it now.
// Grayscale images are expanded here to color if needed.
// Withouth an average image,
// they are expanded later.
for (int k = inputNumChannels ; k < K ; ++k) {
::memcpy(outputPixels + n*k, outputPixels, sizeof(float) * n) ;
}
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
(batch->context,
n * item->outputNumChannels,
-1.0f,
batch->averageImage, 1,
outputPixels, 1) ;
inputNumChannels = K ;
}
float dv [3] ;
float * channels [3] ;
for (int k = 0 ; k < K ; ++k) {
channels[k] = outputPixels + n * k ;
}
for (int k = 0 ; k < inputNumChannels ; ++k) {
dv[k] = (1. - 2. * item->contrastShift) *
(batch->average[k] + item->brightnessShift[k]);
if (item->contrastShift != 1.) {
float mu = 0.f ;
float const * pixel = channels[k] ;
float const * end = channels[k] + n ;
while (pixel != end) { mu += *pixel++ ; }
mu /= n ;
dv[k] -= (1.0 - item->contrastShift) * mu ;
}
}
{
float const * end = channels[0] + n ;
float v [3] ;
if (K == 3 && inputNumChannels == 3) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1. - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] + dv[0] ; mu += v[0] ;
v[1] = *channels[1] + dv[1] ; mu += v[1] ;
v[2] = *channels[2] + dv[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu ;
*channels[1]++ = a * v[1] + b * mu ;
*channels[2]++ = a * v[2] + b * mu ;
}
} else if (K == 3 && inputNumChannels == 1) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1. - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] + dv[0] ; mu += v[0] ;
v[1] = *channels[0] + dv[1] ; mu += v[1] ;
v[2] = *channels[0] + dv[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu ;
*channels[1]++ = a * v[1] + b * mu ;
*channels[2]++ = a * v[2] + b * mu ;
}
} else {
float const a = item->contrastShift ;
while (channels[0] != end) {
*channels[0]++ = a * (*channels[0] + dv[0]) ;
}
}
}
}
// Copy to GPU.
if (batch->getPackingMethod() == Batch::individualArrays && batch->gpuMode) {
#if ENABLE_GPU
hipError_t cerror ;
cerror = hipMemcpyAsync (item->gpuArray.getMemory(),
outputPixels,
item->gpuArray.getNumElements() * sizeof(float),
hipMemcpyHostToDevice,
batch->cudaStream) ;
if (cerror != hipSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"CUDA error while copying memory from host to device: '%s'", hipGetErrorString(cerror)) ;
break ;
}
#endif
}
break ;
}
case Batch::Item::ready:
break ;
}
batch->returnItem(item) ;
}
LOG(2) << "reader " << index << " task quitting" ;
}
// Stop and join the worker thread (borrowNextItem() returns NULL once
// the batch raised its quit flag), then release scratch buffers and
// the image reader.
void ReaderTask::finalize()
{
LOG(2)<<"finalizing reader " << index ;
if (thread) {
if (thread->joinable()) {
thread->join() ;
}
delete thread ;
thread = NULL ;
}
for (int i = 0 ; i < sizeof(buffers)/sizeof(Buffer) ; ++i) {
if (buffers[i].memory) {
free(buffers[i].memory) ;
buffers[i].memory = NULL ;
buffers[i].size = 0 ;
}
}
if (reader) {
delete reader ;
reader = NULL ;
}
index = -1 ;
batch = NULL ;
}
// Bind this task to a batch and start its worker thread. Any previous
// thread/reader is torn down first.
vl::ErrorCode ReaderTask::init(Batch * batch, int index)
{
  finalize() ;
  this->batch = batch ;
  this->index = index ;
  // Fix: construct the ImageReader *before* starting the thread. The
  // original created the thread first, racing against the worker's
  // entry point, which dereferences `reader` as soon as it borrows an
  // item and could therefore hit a NULL pointer.
  reader = new vl::ImageReader() ;
  thread = new tthread::thread(threadEntryPoint, this) ;
  return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
// Persistent state shared across MEX invocations (enables prefetching
// between calls). Cleaned up by atExit() when the MEX file unloads.
vl::MexContext context ;
Batch batch(context) ;
bool batchIsInitialized = false ;
typedef std::vector<ReaderTask*> readers_t ;
readers_t readers ;
void atExit()
{
if (batchIsInitialized) {
batch.finalize() ;
batchIsInitialized = false ;
}
for (int r = 0 ; r < readers.size() ; ++r) {
readers[r]->finalize() ;
delete readers[r] ;
}
readers.clear() ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
// MEX entry point. Parses the options, (re)creates the reader-thread
// pool, and either starts prefetching the given file list or, if the
// list matches the prefetched one, waits for completion and returns the
// decoded images as a cell array.
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
bool prefetch = false ;
bool gpuMode = false ;
int requestedNumThreads = readers.size() ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
Batch::PackingMethod packingMethod = Batch::individualArrays ;
Batch::ResizeMethod resizeMethod = Batch::noResize ;
int resizeWidth = -1 ;
int resizeHeight = -1 ;
vl::ErrorCode error ;
double average [3] = {0.} ;
vl::MexTensor averageImage(context) ;
double brightnessDeviation [9] = {0.} ;
double saturationDeviation = 0. ;
double contrastDeviation = 0. ;
bool flipMode = false ;
Batch::CropLocation cropLocation = Batch::cropCenter ;
double minCropSize = 1.0, maxCropSize = 1.0 ;
double minCropAnisotropy = 1.0, maxCropAnisotropy = 1.0 ;
verbosity = 0 ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 1) {
vlmxError(VLMXE_IllegalArgument, "There is less than one argument.") ;
}
// Parse all name/value options following the file-name list.
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_prefetch :
prefetch = true ;
break ;
case opt_pack :
packingMethod = Batch::singleArray ;
break ;
case opt_gpu :
// NOTE(review): this tests whether ENABLE_GPU is *defined*, while
// the rest of the file tests its *value* with #if ENABLE_GPU --
// inconsistent if ENABLE_GPU is ever defined as 0; confirm the
// build always defines it as non-zero or not at all.
#ifndef ENABLE_GPU
vlmxError(VLMXE_IllegalArgument, "Not compiled with GPU support.") ;
#endif
gpuMode = true ;
break ;
case opt_num_threads :
requestedNumThreads = (int)mxGetScalar(optarg) ;
break ;
case opt_resize :
// One value: shortest-side resize; two values: fixed size.
if (!vlmxIsPlainVector(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "RESIZE is not a plain vector.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1 :
resizeMethod = Batch::resizeShortestSide ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[0] ;
break ;
case 2 :
resizeMethod = Batch::fixedSize ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[1] ;
break;
default:
vlmxError(VLMXE_IllegalArgument, "RESIZE does not have one or two dimensions.") ;
break ;
}
if (resizeHeight < 1 || resizeWidth < 1) {
vlmxError(VLMXE_IllegalArgument, "An element of RESIZE is smaller than one.") ;
}
break ;
case opt_brightness: {
// Accepts a scalar (isotropic), a 3-vector (diagonal) or a full
// 3x3 matrix; stored as the 9-element brightnessDeviation.
if (!vlmxIsPlainMatrix(optarg, -1, -1)) {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS is not a plain matrix.") ;
}
size_t n = mxGetNumberOfElements(optarg) ;
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
if (n == 1) {
double x = mxGetPr(optarg)[0] ;
brightnessDeviation[0] = x;
brightnessDeviation[3] = x;
brightnessDeviation[8] = x;
} else if (n == 3) {
double const* x = mxGetPr(optarg) ;
brightnessDeviation[0] = x[0];
brightnessDeviation[3] = x[1];
brightnessDeviation[8] = x[2];
} else if (n == 9) {
memcpy(brightnessDeviation, mxGetPr(optarg), sizeof(brightnessDeviation)) ;
} else {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS does not have 1, 3, or 9 elements.") ;
}
break ;
}
case opt_saturation: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not in the [0,1] range..") ;
}
saturationDeviation = x ;
break ;
}
case opt_contrast: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not in the [0,1] range..") ;
}
contrastDeviation = x ;
break ;
}
case opt_crop_anisotropy: {
// Scalar or [min max]; the index expression picks element 0 for a
// scalar and element 1 otherwise.
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY is not a plain scalar or vector with two components.") ;
}
minCropAnisotropy = mxGetPr(optarg)[0] ;
maxCropAnisotropy = mxGetPr(optarg)[::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropAnisotropy < 0.0 || minCropAnisotropy > maxCropAnisotropy) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY values are not in the legal range.") ;
}
break ;
}
case opt_crop_size: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE is not a plain scalar or vector with two components.") ;
}
minCropSize = mxGetPr(optarg)[0] ;
maxCropSize = mxGetPr(optarg)[::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropSize < 0.0 || minCropSize > maxCropSize || maxCropSize > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE values are not in the legal range.") ;
}
break ;
}
case opt_crop_location: {
if (!vlmxIsString(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION is not a string") ;
}
if (vlmxCompareToStringI(optarg, "random") == 0) {
cropLocation = Batch::cropRandom ;
} else if (vlmxCompareToStringI(optarg, "center") == 0) {
cropLocation = Batch::cropCenter ;
} else {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION value unknown.") ;
}
break ;
}
case opt_subtract_average: {
// Either 1 or 3 average values (broadcast via the min() index
// expressions below) or a full average image.
if (vlmxIsVector(optarg,1) || vlmxIsVector(optarg, 3)) {
size_t n = mxGetNumberOfElements(optarg) ;
switch (mxGetClassID(optarg)) {
case mxSINGLE_CLASS: {
float * x = (float*)mxGetData(optarg) ;
average[0] = x[::min((size_t)0,n-1)] ;
average[1] = x[::min((size_t)1,n-1)] ;
average[2] = x[::min((size_t)2,n-1)] ;
break ;
}
case mxDOUBLE_CLASS: {
double * x = mxGetPr(optarg) ;
average[0] = (float)x[::min((size_t)0,n-1)] ;
average[1] = (float)x[::min((size_t)1,n-1)] ;
average[2] = (float)x[::min((size_t)2,n-1)] ;
break ;
}
default:
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not SINGLE or DOUBLE vector.") ;
}
} else {
if (mxGetClassID(optarg) != mxSINGLE_CLASS ||
mxGetNumberOfDimensions(optarg) > 3) {
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not a SINGLE image of a compatible shape.") ;
}
averageImage.init(optarg) ;
}
break ;
}
case opt_flip: {
flipMode = true ;
break ;
}
}
}
// An average image is only meaningful when the output size is fixed
// and must match it exactly.
if (averageImage) {
if (resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "Cannot subtract an average image unless RESIZE is used to set the size of the output.") ;
}
if (averageImage.getNumDimensions() != 3 ||
averageImage.getHeight() != resizeHeight ||
averageImage.getWidth() != resizeWidth ||
averageImage.getDepth() !=3) {
vlmxError(VLMXE_IllegalArgument, "The average image is not a RESIZEHEIGHT x RESIZEWIDTH x 3 array.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
if (!mxIsCell(in[IN_FILENAMES])) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES is not a cell array of strings.") ;
}
// If the requested number of threads changes, finalize everything
requestedNumThreads = ::max(requestedNumThreads, 1) ;
if (readers.size() != requestedNumThreads) {
atExit() ; // Delete threads and current batch
}
// Prepare batch.
if (!batchIsInitialized) {
error = batch.init() ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not initialize a batch structure") ;
}
batchIsInitialized = true ;
}
// Prepare reader tasks.
for (int r = readers.size() ; r < requestedNumThreads ; ++r) {
readers.push_back(new ReaderTask()) ;
vl::ErrorCode error = readers[r]->init(&batch, r) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not create the requested number of threads") ;
}
}
// Extract filenames as strings.
bool sameAsPrefeteched = true ;
std::vector<std::string> filenames ;
for (int i = 0 ; i < (int)mxGetNumberOfElements(in[IN_FILENAMES]) ; ++i) {
mxArray* filenameArray = mxGetCell(in[IN_FILENAMES], i) ;
if (!vlmxIsString(filenameArray,-1)) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES contains an entry that is not a string.") ;
}
char filename [512] ;
mxGetString (filenameArray, filename, sizeof(filename)/sizeof(char)) ;
filenames.push_back(std::string(filename)) ;
sameAsPrefeteched &= (i < batch.getNumberOfItems() && batch.getItem(i)->name == filenames[i]) ;
}
// If the list of names is not the same as the prefetched ones,
// start a new cycle.
if (!sameAsPrefeteched) {
batch.clear() ;
// Check compatibility of options
if (packingMethod == Batch::singleArray && resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "PACK must be used in combination with resizing to a fixed size.") ;
}
if (verbosity >= 2) {
mexPrintf("vl_imreadjpeg: gpu mode: %s\n", gpuMode?"yes":"no") ;
mexPrintf("vl_imreadjpeg: crop anisotropy: [%.1g, %.1g]\n",
minCropAnisotropy, maxCropAnisotropy) ;
mexPrintf("vl_imreadjpeg: crop size: [%.1g, %.1g]\n",
minCropSize, maxCropSize) ;
}
// Configure the batch and register the files; prefetch() samples
// the transformations and wakes the reader threads.
batch.setResizeMethod(resizeMethod, resizeHeight, resizeWidth) ;
batch.setPackingMethod(packingMethod) ;
batch.setGpuMode(gpuMode) ;
batch.setFlipMode(flipMode) ;
batch.setCropLocation(cropLocation) ;
batch.setCropAnisotropy(minCropAnisotropy, maxCropAnisotropy) ;
batch.setCropSize(minCropSize, maxCropSize) ;
batch.setColorDeviation(brightnessDeviation,
contrastDeviation,
saturationDeviation) ;
batch.setAverage(average) ;
if (averageImage) {
batch.setAverageImage((float const*)averageImage.getMemory()) ;
}
for (int i = 0 ; i < filenames.size() ; ++ i) {
batch.registerItem(filenames[i]) ;
}
batch.prefetch() ;
}
// Done if prefetching only.
if (prefetch) { return ; }
// Return result.
batch.sync() ;
switch (batch.getPackingMethod()) {
case Batch::singleArray: {
// One packed 4D array wrapped in a 1x1 cell.
mwSize dims [] = {1,1} ;
out[OUT_IMAGES] = mxCreateCellArray(2, dims) ;
mxSetCell(out[OUT_IMAGES], 0, batch.relinquishArray()) ;
break ;
}
case Batch::individualArrays:
// One cell per input file, mirroring the input cell shape; failed
// reads leave an empty cell and raise a warning.
out[OUT_IMAGES] = mxCreateCellArray(mxGetNumberOfDimensions(in[IN_FILENAMES]),
mxGetDimensions(in[IN_FILENAMES])) ;
for (int i = 0 ; i < batch.getNumberOfItems() ; ++i) {
Batch::Item * item = batch.getItem(i) ;
if (item->error != vl::VLE_Success) {
vlmxWarning(VLMXE_Execution, "could not read image '%s' because '%s'",
item->name.c_str(),
item->errorMessage) ;
} else {
mxSetCell(out[OUT_IMAGES], i, item->relinquishArray()) ;
}
}
break ;
}
// Finalize.
batch.clear() ;
}
/** @file vl_imreadjpeg2.cu
** @brief Load images asynchronously
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include "bits/imread.hpp"
#include "bits/impl/imread_helpers.hpp"
#include <assert.h>
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include "bits/datamex.hpp"
#include "bits/mexutils.h"
#ifdef _MSC_VER
#undef max
#undef min
#endif
// Global verbosity level; reset to 0 at the start of every MEX call and
// incremented once per 'Verbose' option occurrence.
static int verbosity = 0 ;

/* option codes */
// One code per recognized string option; used as the third field of the
// `options` table below and as the switch key in the option-parsing loop.
enum {
  opt_num_threads = 0,
  opt_prefetch,
  opt_resize,
  opt_pack,
  opt_gpu,
  opt_verbose,
  opt_subtract_average,
  opt_crop_size,
  opt_crop_location,
  opt_crop_anisotropy,
  opt_flip,
  opt_contrast,
  opt_saturation,
  opt_brightness
} ;

/* options */
// Table consumed by vlmxNextOption(): {name, number-of-arguments, code}.
// The all-zero entry terminates the table.
VLMXOption options [] = {
{"NumThreads", 1, opt_num_threads },
{"Prefetch", 0, opt_prefetch },
{"Verbose", 0, opt_verbose },
{"Resize", 1, opt_resize },
{"Pack", 0, opt_pack },
{"GPU", 0, opt_gpu },
{"SubtractAverage", 1, opt_subtract_average },
{"CropAnisotropy", 1, opt_crop_anisotropy },
{"CropSize", 1, opt_crop_size },
{"CropLocation", 1, opt_crop_location },
{"Flip", 0, opt_flip },
{"Brightness", 1, opt_brightness },
{"Contrast", 1, opt_contrast },
{"Saturation", 1, opt_saturation },
{0, 0, 0 }
} ;

// Positional input arguments of the MEX function.
enum {
  IN_FILENAMES = 0, IN_END
} ;

// Positional output arguments of the MEX function.
enum {
  OUT_IMAGES = 0, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
  // Scope-bound logger: text streamed into getStream() is accumulated in a
  // string stream and printed (with a newline) by the destructor, i.e. when
  // the temporary created by the LOG/LOGERROR macros dies at the end of the
  // full expression.
  class Logger
  {
  public:
    Logger() ;
    ~Logger() ;
    std::ostringstream & getStream() ;
  protected:
    std::ostringstream stringStream ;
  private:
    // Disable copying: the object owns a buffered message that must be
    // flushed exactly once.
    Logger(const Logger&) ;
    Logger& operator= (const Logger&) ;
  } ;
}
// Nothing to set up: the string stream starts empty.
vl::Logger::Logger()
{ }

// Flush the accumulated message to stdout when the logger dies.
// NOTE(review): printf is not synchronized across the reader threads, so
// interleaved output from concurrent loggers is possible.
vl::Logger::~Logger()
{
  printf("%s\n", stringStream.str().c_str()) ;
  //fflush(stdout) ;
}

// Expose the underlying stream so callers can use operator<< chaining.
std::ostringstream &
vl::Logger::getStream()
{
  return stringStream ;
}
// Unconditional logging macro for errors. Usage: LOGERROR << "message" ;
// NOTE(review): the prefix is "[info]", the same as LOG() — presumably it
// was meant to read "[error]"; confirm before changing the output format.
#define LOGERROR \
vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"

// Verbosity-gated logging macro: the message is built and printed only when
// the global `verbosity` is at least `level` (the dangling-else trick makes
// the macro safe inside unbraced if/else statements).
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
/* ---------------------------------------------------------------- */
/* Batch */
/* ---------------------------------------------------------------- */
// A Batch owns the list of images being read asynchronously by the pool of
// ReaderTask threads. Items move through three states (prefetch: read the
// image shape only; fetch: decode, crop and post-process the pixels;
// ready: done). Producer/consumer coordination uses one mutex plus two
// condition variables.
class Batch
{
public:
  // One image in the batch, together with its fetch parameters (crop
  // geometry, flip, color jitter) and output storage.
  struct Item
  {
    enum State {
      prefetch,
      fetch,
      ready
    } state ;
    Batch const & batch ;
    std::string name ;
    vl::ImageShape shape ;
    mxArray * array ;
    vl::ErrorCode error ;
    char errorMessage [512] ;
    // True while a reader thread holds this item (see borrowNextItem()).
    bool borrowed ;
    vl::MexTensor cpuArray ;
    vl::MexTensor gpuArray ;
    // Position of this item in Batch::items; also selects its slice of the
    // packed output array.
    int index ;
    size_t outputWidth ;
    size_t outputHeight ;
    size_t outputNumChannels ;
    // Crop rectangle in the source image, decided by Batch::prefetch().
    size_t cropWidth ;
    size_t cropHeight ;
    size_t cropOffsetX ;
    size_t cropOffsetY ;
    bool flip ;
    // Per-channel color jitter parameters sampled in Batch::prefetch().
    float brightnessShift [3] ;
    float contrastShift ;
    float saturationShift ;
    Item(Batch const & batch) ;
    mxArray * relinquishArray() ;
  } ;
  enum ResizeMethod {
    noResize,
    resizeShortestSide,
    fixedSize
  } ;
  enum PackingMethod {
    individualArrays,
    singleArray
  };
  enum CropLocation {
    cropCenter,
    cropRandom
  } ;
  Batch(vl::MexContext & context) ;
  ~Batch() ;
  vl::ErrorCode init() ;
  void finalize() ;
  vl::ErrorCode registerItem(std::string const & name) ;
  size_t getNumberOfItems() const ;
  Item * getItem(int index) ;
  void clear() ;
  void sync() const ;
  vl::ErrorCode prefetch() ;
  mxArray * relinquishArray() ;
  void setGpuMode(bool gpu) ;
  void setPackingMethod(PackingMethod method) ;
  void setResizeMethod(ResizeMethod method, int height, int width) ;
  void setAverage(double average []) ;
  void setAverageImage(float const * image) ;
  void setColorDeviation(double brightness [], double contrast, double saturation) ;
  void setFlipMode(bool x) ;
  void setCropAnisotropy(double minAnisotropy, double maxAnisotropy) ;
  void setCropSize(double minSize, double maxSize) ;
  void setCropLocation(CropLocation location) ;
  PackingMethod getPackingMethod() const ;
  // Worker-side API: block until an item is available (or NULL on quit),
  // and hand a processed item back.
  Item * borrowNextItem() ;
  void returnItem(Item * item) ;
private:
  vl::MexContext & context ;
  // Protects every mutable member below; `mutable` so const members
  // (sync()) can lock it.
  tthread::mutex mutable mutex ;
  tthread::condition_variable mutable waitNextItemToBorrow ;
  tthread::condition_variable mutable waitCompletion ;
  bool quit ;
  typedef std::vector<Item*> items_t ;
  items_t items ;
  // Index of the next item to hand to a worker; counter of items returned
  // in the current pass.
  int nextItem ;
  int numReturnedItems ;
  enum PackingMethod packingMethod ;
  enum ResizeMethod resizeMethod ;
  int resizeHeight ;
  int resizeWidth ;
  bool gpuMode ;
  double average [3] ;
  float * averageImage ;
  double contrastDeviation ;
  double saturationDeviation ;
  // 3x3 covariance-like matrix stored column-major (stride 3).
  double brightnessDeviation [9] ;
  double minCropAnisotropy ;
  double maxCropAnisotropy ;
  double minCropSize ;
  double maxCropSize ;
  CropLocation cropLocation ;
  bool flipMode ;
  // Output storage when packingMethod == singleArray.
  vl::MexTensor cpuPack ;
  vl::MexTensor gpuPack ;
  friend class ReaderTask ;
  int gpuDevice ;
#if ENABLE_GPU
  bool cudaStreamInitialized ;
  cudaStream_t cudaStream ;
  // Pinned host staging buffer for the packed async host-to-device copy.
  float * cpuPinnedPack ;
  size_t cpuPinnedPackSize ;
#endif
} ;
// Create an item bound to its parent batch, in the `ready` state, with no
// error and an empty error message.
Batch::Item::Item(Batch const & batch)
: batch(batch),
  cpuArray(batch.context),
  gpuArray(batch.context),
  borrowed(false),
  error(vl::VLE_Success),
  state(ready),
  flip(false)
{
  // Bug fix: memset's signature is (dst, value, count). The original call
  // memset(errorMessage, sizeof(errorMessage), 0) passed the arguments
  // swapped and therefore wrote zero bytes, leaving errorMessage
  // uninitialized until the first snprintf into it.
  memset(errorMessage, 0, sizeof(errorMessage)) ;
}
// Transfer ownership of this item's decoded image to the caller. Which
// tensor holds the pixels depends on whether the parent batch runs in GPU
// mode.
mxArray * Batch::Item::relinquishArray()
{
  return batch.gpuMode ? gpuArray.relinquish() : cpuArray.relinquish() ;
}
// Transfer ownership of the packed output array (singleArray mode) to the
// caller, picking the GPU or CPU tensor according to the current mode.
mxArray * Batch::relinquishArray()
{
  return gpuMode ? gpuPack.relinquish() : cpuPack.relinquish() ;
}
// Construct an idle batch: quit=true keeps worker threads blocked until
// init() is called, and all configuration falls back to safe defaults.
Batch::Batch(vl::MexContext & context)
: context(context),
  cpuPack(context),
  gpuPack(context),
  quit(true),
  resizeMethod(noResize),
  packingMethod(individualArrays),
  gpuMode(false),
  numReturnedItems(0),
  averageImage(NULL),
  // Bug fix: gpuDevice and (under ENABLE_GPU) cudaStreamInitialized were
  // never initialized here. init() tests cudaStreamInitialized before any
  // assignment to it, so an indeterminate `true` could trigger
  // cudaStreamDestroy() on a garbage stream handle.
  gpuDevice(-1)
#if ENABLE_GPU
  , cudaStreamInitialized(false),
  cpuPinnedPack(NULL),
  cpuPinnedPackSize(0)
#endif
{ }
// Tear everything down; finalize() waits for outstanding items and frees
// the pinned staging buffer.
Batch::~Batch()
{
  finalize() ;
}

// Number of registered items (images) in the current batch.
size_t Batch::getNumberOfItems() const
{
  return items.size() ;
}

// Direct access to an item by position; no bounds checking is performed.
Batch::Item * Batch::getItem(int index)
{
  return items[index] ;
}
// Reset the batch to a clean, running state: stop and drain any previous
// cycle (finalize), restore all option defaults, and release any CUDA
// stream from an earlier GPU-mode run.
vl::ErrorCode Batch::init()
{
  finalize() ;
  LOG(2)<<"beginning batch" ;
  // Re-arm the workers: finalize() set quit=true.
  quit = false ;
  nextItem = 0 ;
  numReturnedItems = 0 ;
  // Restore defaults
  memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
  contrastDeviation = 0. ;
  saturationDeviation = 0. ;
  memset(average, 0, sizeof(average)) ;
  // Safe to drop the pointer: finalize() -> clear() already freed the
  // average image via setAverageImage(NULL).
  averageImage = NULL ;
  cropLocation = cropCenter ;
  minCropSize = 1. ;
  maxCropSize = 1. ;
  minCropAnisotropy = 1. ;
  maxCropAnisotropy = 1. ;
  flipMode = false ;
  packingMethod = individualArrays ;
  resizeMethod = noResize ;
  gpuMode = false ;
  gpuDevice = -1 ;
#if ENABLE_GPU
  // Destroy the stream created by a previous setGpuMode(true).
  if (cudaStreamInitialized) {
    cudaStreamDestroy(cudaStream) ;
    cudaStreamInitialized = false ;
  }
#endif
  return vl::VLE_Success ;
}
// Drain the current batch, release the pinned staging buffer, and tell the
// worker threads to stop waiting for new items (they will see quit==true
// inside borrowNextItem() and return NULL).
void Batch::finalize()
{
  LOG(2)<<"finalizing batch" ;
  // Clear current batch
  clear() ;
  // Release memory
#if ENABLE_GPU
  if (cpuPinnedPack) {
    cudaFreeHost(cpuPinnedPack) ;
    cpuPinnedPack = 0 ;
    cpuPinnedPackSize = 0 ;
  }
#endif
  // Signal waiting threads that we are quitting
  {
    tthread::lock_guard<tthread::mutex> lock(mutex) ;
    quit = true ;
    waitNextItemToBorrow.notify_all() ;
  }
}
// Worker-side: block until there is an unprocessed item to hand out, mark
// it borrowed and return it. Returns NULL when the batch is shutting down
// (quit set by finalize()). The condition-variable wait releases the mutex
// while sleeping and re-acquires it on wake-up.
Batch::Item * Batch::borrowNextItem()
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  while (true) {
    if (quit) { return NULL ; }
    if (nextItem < items.size()) {
      Item * item = items[nextItem++] ;
      item->borrowed = true ;
      return item ;
    }
    waitNextItemToBorrow.wait(mutex) ;
  }
}
// Worker-side: hand a processed item back to the batch. The last item
// returned in a fetch pass triggers the single asynchronous host-to-device
// copy of the whole packed buffer (singleArray + GPU mode). Waiters in
// clear()/sync() are woken via waitCompletion.
void Batch::returnItem(Batch::Item * item)
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  numReturnedItems ++ ;
  if (item->state == Item::fetch &&
      numReturnedItems == items.size() &&
      packingMethod == singleArray &&
      gpuMode) {
#if ENABLE_GPU
    LOG(2) << "push to GPU the pack" ;
    cudaError_t cerror ;
    cerror = cudaMemcpyAsync (gpuPack.getMemory(),
                              cpuPinnedPack,
                              gpuPack.getNumElements() * sizeof(float),
                              cudaMemcpyHostToDevice,
                              cudaStream) ;
    if (cerror != cudaSuccess) {
      item->error = vl::VLE_Cuda ;
      // NOTE(review): "cudaMemcpyAsnyc" is misspelled in this user-visible
      // message; left untouched here since it is runtime output.
      snprintf(item->errorMessage, sizeof(item->errorMessage),
               "cudaMemcpyAsnyc : '%s'", cudaGetErrorString(cerror)) ;
    }
#endif
  }
  item->borrowed = false ;
  item->state = Batch::Item::ready ;
  waitCompletion.notify_all() ;
}
// Install (image != NULL) or clear (image == NULL) the average image that
// is subtracted from every decoded picture. A private copy of the
// resizeHeight x resizeWidth x 3 pixel block is kept, so the caller's
// buffer may be released afterwards. Requires a fixed output size, since
// the average must match every output image.
void Batch::setAverageImage(float const * image)
{
  if (image == NULL) {
    if (averageImage) {
      free(averageImage) ;
      averageImage = NULL ;
    }
    return ;
  }
  assert (resizeMethod == fixedSize) ;
  // Bug fix: release any previously installed average image before
  // allocating the new copy; the original code overwrote the pointer and
  // leaked the old buffer when called twice with a non-NULL image.
  if (averageImage) {
    free(averageImage) ;
  }
  averageImage = (float*)malloc(sizeof(float) * resizeHeight * resizeWidth * 3) ;
  memcpy(averageImage, image, sizeof(float) * resizeHeight * resizeWidth * 3) ;
}
// Discard all items. Workers currently holding an item are allowed to
// finish (we wait on waitCompletion), but no further items are handed out
// because nextItem is pushed past the end of the list first.
void Batch::clear()
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  // Stop threads from getting more tasks. After this any call to borrowItem() by a worker will
  // stop in a waiting state. Thus, we simply wait for all of them to return their items.
  nextItem = (int)items.size() ;
  // Wait for all thread to return their items
  for (int i = 0 ; i < items.size() ; ++i) {
    while (items[i]->borrowed) {
      waitCompletion.wait(mutex) ;
    }
  }
  for (int i = 0 ; i < items.size() ; ++i) {
    delete items[i] ;
  }
  items.clear() ;
  // Clear average image
  setAverageImage(NULL) ;
  // At the end of the current (empty) list
  nextItem = 0 ;
  numReturnedItems = 0 ;
}
// Block until every item of the current pass has been processed and, in GPU
// mode, until all asynchronous copies queued on the CUDA stream have
// completed.
void Batch::sync() const
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  // Wait for threads to complete work for all items.
  // Note that it is not enough to check that threads are all in a
  // "done" state as this does not mean that all work has been done yet.
  // Instead, we look at the number of items returned.
  while (numReturnedItems < items.size()) {
    waitCompletion.wait(mutex) ;
  }
  if (gpuMode) {
#if ENABLE_GPU
    // Ensure the host-to-device copies issued by the workers are finished
    // before the caller consumes the GPU arrays.
    cudaError_t cerror ;
    cerror = cudaStreamSynchronize(cudaStream) ;
    if (cerror != cudaSuccess) {
      LOGERROR << "CUDA error while synchronizing a stream: '" << cudaGetErrorString(cerror) << '\'' ;
    }
#endif
  }
}
// Append a new image to the batch in the `prefetch` state (shape-only read)
// and wake one reader thread to start working on it.
vl::ErrorCode Batch::registerItem(std::string const & name)
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
  Item * newItem = new Item(*this) ;
  newItem->name = name ;
  newItem->state = Item::prefetch ;
  newItem->index = (int)items.size() ;
  items.push_back(newItem) ;
  waitNextItemToBorrow.notify_one() ;
  return vl::VLE_Success ;
}
// Switch GPU mode on or off. When turning it on, record the current CUDA
// device (so worker threads can bind to the same one) and lazily create a
// non-blocking stream used for all asynchronous host-to-device copies.
void Batch::setGpuMode(bool gpu)
{
  tthread::lock_guard<tthread::mutex> lock(mutex) ;
#if ENABLE_GPU
  if (gpu) {
    cudaGetDevice(&gpuDevice) ;
    if (!cudaStreamInitialized) {
      cudaError_t cerror ;
      // Non-blocking: does not synchronize with the legacy default stream.
      cerror = cudaStreamCreateWithFlags(&cudaStream, cudaStreamNonBlocking) ;
      if (cerror != cudaSuccess) {
        LOGERROR
        << "CUDA error while creating a stream '"
        << cudaGetErrorString(cerror) << '\"' ;
      } else {
        cudaStreamInitialized = true ;
      }
    }
  }
#endif
  gpuMode = gpu ;
}
// Select how images are resized: not at all, shortest side to `height`, or
// to a fixed height x width.
void Batch::setResizeMethod(Batch::ResizeMethod method, int height, int width)
{
  resizeMethod = method ;
  resizeHeight = height ;
  resizeWidth = width ;
}

// Select whether results are returned as individual arrays or packed into
// one H x W x 3 x N array.
void Batch::setPackingMethod(Batch::PackingMethod method)
{
  assert(method == individualArrays || method == singleArray) ;
  packingMethod = method ;
}

Batch::PackingMethod Batch::getPackingMethod() const
{
  return packingMethod ;
}

// Per-channel scalar average subtracted from each image (3 doubles).
void Batch::setAverage(double average [])
{
  ::memcpy(this->average, average, sizeof(this->average)) ;
}

// Color jitter parameters: `brightness` is a 3x3 matrix (9 doubles),
// contrast and saturation are scalar deviations.
void Batch::setColorDeviation(double brightness [], double contrast, double saturation)
{
  ::memcpy(brightnessDeviation, brightness, sizeof(brightnessDeviation)) ;
  contrastDeviation = contrast ;
  saturationDeviation = saturation ;
}

// Enable random horizontal flipping of the crops.
void Batch::setFlipMode(bool x)
{
  flipMode = x ;
}

// Range of crop aspect-ratio distortion; (0,0) means "stretch to match the
// input aspect" (see Batch::prefetch()).
void Batch::setCropAnisotropy(double minAnisotropy, double maxAnisotropy)
{
  assert(minAnisotropy <= maxAnisotropy) ;
  assert(0.0 <= minAnisotropy && minAnisotropy <= 1.0) ;
  minCropAnisotropy = minAnisotropy ;
  maxCropAnisotropy = maxAnisotropy ;
}

// Relative crop size range, as fractions of the largest possible crop.
void Batch::setCropSize(double minSize, double maxSize)
{
  assert(minSize <= maxSize) ;
  assert(0.0 <= minSize && minSize <= 1.0) ;
  assert(0.0 <= maxSize && maxSize <= 1.0) ;
  minCropSize = minSize ;
  maxCropSize = maxSize ;
}

// Place crops at the image center or at a random location.
void Batch::setCropLocation(CropLocation location)
{
  assert(location == cropCenter || location == cropRandom) ;
  cropLocation = location ;
}
//void Batch::getItemTransformation(Item * item)
//{
//
//}
// Second phase of a batch cycle. By now the workers have read every image's
// shape (prefetch state); here we: allocate the output storage, sample the
// random crop / flip / color-jitter parameters for each item, and flip the
// items into the `fetch` state so the workers decode the pixels.
vl::ErrorCode Batch::prefetch()
{
  // Wait for reader threads to initialize the shape of the images
  // and then perform the required allocations.
  sync() ;
  // In packing mode, preallocate all memory here.
  if (packingMethod == singleArray) {
    assert(resizeMethod == fixedSize) ;
    vl::TensorShape shape(resizeHeight, resizeWidth, 3, getNumberOfItems()) ;
    if (gpuMode) {
#if ENABLE_GPU
      gpuPack.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
      gpuPack.makePersistent() ;
      // Grow the pinned host staging buffer if the pack no longer fits.
      size_t memSize = shape.getNumElements() * sizeof(float) ;
      if (cpuPinnedPackSize < memSize) {
        if (cpuPinnedPack) {
          cudaFreeHost(cpuPinnedPack) ;
        }
        cudaMallocHost(&cpuPinnedPack, memSize) ;
        cpuPinnedPackSize = memSize ;
      }
#endif
    } else {
      cpuPack.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
      cpuPack.makePersistent() ;
    }
  }
  // Get ready to reprocess all items.
  nextItem = 0 ;
  numReturnedItems = 0 ;
  for (int i = 0 ; i < getNumberOfItems() ; ++ i) {
    Batch::Item * item = getItem(i) ;
    if (item->error == vl::VLE_Success) {
      if (verbosity >= 2) {
        mexPrintf("%20s: %d x %d x %d\n", item->name.c_str(), item->shape.width, item->shape.height, item->shape.depth) ;
      }
    } else {
      mexPrintf("%20s: error '%s'\n", item->name.c_str(), item->errorMessage) ;
    }
    // Determine the shape of (height and width) of the output image. This is either
    // the same as the input image, or with a fixed size for the shortest side,
    // or a fixed size for both sides.
    int outputHeight ;
    int outputWidth ;
    double cropHeight ;
    double cropWidth ;
    int dx ;
    int dy ;
    switch (resizeMethod) {
      case noResize:
        outputHeight = (int)item->shape.height ;
        outputWidth = (int)item->shape.width ;
        break ;
      case resizeShortestSide: {
        // resizeHeight holds the single 'Resize' scalar; the larger scale
        // maps the *shortest* input side to that value.
        double scale1 = (double)resizeHeight / item->shape.width ;
        double scale2 = (double)resizeHeight / item->shape.height ;
        double scale = std::max(scale1, scale2) ;
        outputHeight = std::max(1.0, round(scale * item->shape.height)) ;
        outputWidth = std::max(1.0, round(scale * item->shape.width)) ;
        break ;
      }
      case fixedSize:
        outputHeight = resizeHeight ;
        outputWidth = resizeWidth ;
        break ;
    }
    // Determine the aspect ratio of the crop in the input image.
    {
      double anisotropyRatio = 1.0 ;
      if (minCropAnisotropy == 0 && maxCropAnisotropy == 0) {
        // Stretch crop to have the same shape as the input.
        double inputAspect = (double)item->shape.width / item->shape.height ;
        double outputAspect = (double)outputWidth / outputHeight ;
        anisotropyRatio = inputAspect / outputAspect ;
      } else {
        // Sample the distortion uniformly in [min, max].
        double z = (double)rand() / RAND_MAX ;
        anisotropyRatio = z * (maxCropAnisotropy - minCropAnisotropy) + minCropAnisotropy ;
      }
      cropWidth = outputWidth * sqrt(anisotropyRatio) ;
      cropHeight = outputHeight / sqrt(anisotropyRatio) ;
    }
    // Determine the crop size.
    {
      // `scale` is the largest factor keeping the crop inside the image;
      // `size` shrinks it by a random fraction in [minCropSize, maxCropSize].
      double scale = std::min(item->shape.width / cropWidth,
                              item->shape.height / cropHeight) ;
      double z = (double)rand() / RAND_MAX ;
      double size = z * (maxCropSize - minCropSize) + minCropSize ;
      cropWidth *= scale * size ;
      cropHeight *= scale * size ;
    }
    cropWidth = std::min(round(cropWidth), (double)item->shape.width) ;
    cropHeight = std::min(round(cropHeight), (double)item->shape.height) ;
    // Determine the crop location.
    {
      dx = item->shape.width - cropWidth ;
      dy = item->shape.height - cropHeight ;
      switch (cropLocation) {
        case cropCenter:
          dx /= 2 ;
          dy /= 2 ;
          break ;
        case cropRandom:
          dx = rand() % (dx + 1) ;
          dy = rand() % (dy + 1) ;
          break ;
        default:
          LOGERROR << "cropLocation not set" ;
      }
    }
    // Save.
    item->outputWidth = outputWidth ;
    item->outputHeight = outputHeight ;
    // Packed mode always emits 3 channels; individual arrays keep the
    // source depth.
    item->outputNumChannels = (packingMethod == individualArrays) ? item->shape.depth : 3 ;
    ;
    item->cropWidth = cropWidth ;
    item->cropHeight = cropHeight ;
    item->cropOffsetX = dx ;
    item->cropOffsetY = dy ;
    item->flip = flipMode && (rand() > RAND_MAX/2) ;
    // Color processing.
    item->saturationShift = 1. + saturationDeviation * (2.*(double)rand()/RAND_MAX - 1) ;
    item->contrastShift = 1. + contrastDeviation * (2.*(double)rand()/RAND_MAX - 1.) ;
    {
      int numChannels = item->outputNumChannels ;
      double w [3] ;
      for (int i = 0 ; i < numChannels ; ++i) { w[i] = vl::randn() ; }
      for (int i = 0 ; i < numChannels ; ++i) {
        item->brightnessShift[i] = 0. ;
        for (int j = 0 ; j < numChannels ; ++j) {
          // NOTE(review): a matrix-vector product of brightnessDeviation
          // (column-major 3x3) with the Gaussian vector w would read w[j]
          // here, not w[i]; as written this sums row i and scales by w[i].
          // Confirm the intended formula before changing.
          item->brightnessShift[i] += brightnessDeviation[i + 3*j] * w[i] ;
        }
      }
    }
    LOG(2)
    << "input (" << item->shape.width << " x " << item->shape.height << " x " << item->shape.depth << ") "
    << "output (" << item->outputWidth << " x " << item->outputHeight << " x " << item->outputNumChannels << ") "
    << "crop (" << item->cropWidth << " x " << item->cropHeight << ") "
    << "offset (" << item->cropOffsetX << ", " << item->cropOffsetY << ")" ;
    if (packingMethod == individualArrays) {
      vl::TensorShape shape(outputHeight, outputWidth, item->outputNumChannels, 1) ;
      item->cpuArray.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
      item->cpuArray.makePersistent() ;
      if (gpuMode) {
        item->gpuArray.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
        item->gpuArray.makePersistent() ;
      }
    }
    // Ready to fetch
    {
      tthread::lock_guard<tthread::mutex> lock(mutex) ;
      item->state = Item::fetch ;
      waitNextItemToBorrow.notify_one() ;
    }
  }
  return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* ReaderTask */
/* ---------------------------------------------------------------- */
// One background reader: owns a thread, a vl::ImageReader and two grow-only
// scratch buffers, and repeatedly borrows items from the shared Batch until
// the batch signals shutdown.
class ReaderTask
{
public:
  ReaderTask() ;
  ~ReaderTask() { finalize() ; }
  vl::ErrorCode init(Batch * batch, int index) ;
  void finalize() ;
private:
  // Position of this reader in the global pool (used only for logging).
  int index ;
  Batch * batch ;
  tthread::thread * thread ;
  vl::ImageReader * reader ;
  // Static trampoline handed to tthread; forwards to entryPoint().
  static void threadEntryPoint(void * thing) ;
  void entryPoint() ;
  // Lazily-grown scratch buffer accessor (index selects one of `buffers`).
  void * getBuffer(int index, size_t size) ;
  // CUDA device this thread is currently bound to (compared against
  // Batch::gpuDevice inside the worker loop).
  int gpuDevice ;
private:
  // Non-copyable: the object owns a live thread and heap buffers.
  ReaderTask(ReaderTask const &) ;
  ReaderTask & operator= (ReaderTask const &) ;
  struct Buffer {
    void * memory ;
    size_t size ;
  } buffers [2] ;
} ;
void ReaderTask::threadEntryPoint(void * thing)
{
((ReaderTask*)thing)->entryPoint() ;
}
// Construct an inert reader: no thread, no image reader, empty scratch
// buffers. Real setup happens in init().
ReaderTask::ReaderTask()
: batch(NULL), thread(NULL), reader(NULL)
{
  memset(buffers, 0, sizeof(buffers)) ;
}
// Return a scratch buffer of at least `size` bytes for slot `index`,
// growing it lazily and reusing it across images. The memory is owned by
// the task and released in finalize(). Returns NULL if allocation fails.
void * ReaderTask::getBuffer(int index, size_t size)
{
  if (buffers[index].size < size) {
    void * grown = malloc(size) ;
    if (grown == NULL) {
      // Bug fix: the original freed the old buffer and recorded the new
      // size even when malloc failed, so later (smaller) requests would
      // return a NULL pointer believed to be a valid buffer. Keep the old
      // buffer intact and report the failure instead.
      return NULL ;
    }
    if (buffers[index].memory) {
      free(buffers[index].memory) ;
    }
    buffers[index].memory = grown ;
    buffers[index].size = size ;
  }
  return buffers[index].memory ;
}
// Worker loop: bind to the batch's CUDA device when needed, then repeatedly
// borrow an item and act on its state — `prefetch` reads only the image
// shape, `fetch` decodes, crops/resizes, color-jitters and (in GPU mode)
// uploads the pixels. Exits when borrowNextItem() returns NULL (shutdown).
void ReaderTask::entryPoint()
{
  // Bug fix: log message typo "task staring" -> "task starting".
  LOG(2) << "reader " << index << " task starting" ;
  while (true) {
#if ENABLE_GPU
    // Bind this thread to the same CUDA device the host selected.
    if (batch->gpuMode && batch->gpuDevice != gpuDevice) {
      LOG(2) << "reader " << index << " setting GPU device" ;
      cudaSetDevice(batch->gpuDevice) ;
      cudaGetDevice(&gpuDevice) ;
    }
#endif
    Batch::Item * item = batch->borrowNextItem() ;
    LOG(3) << "borrowed " << item ;
    if (item == NULL) { break ; }
    if (item->error != vl::VLE_Success) {
      batch->returnItem(item) ;
      continue ;
    }
    switch (item->state) {
      case Batch::Item::prefetch: {
        // Shape-only pass: record the image dimensions (and any error).
        item->error = reader->readShape(item->shape, item->name.c_str()) ;
        if (item->error != vl::VLE_Success) {
          snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
        }
        break ;
      }
      case Batch::Item::fetch: {
        // Get the CPU buffer that will hold the pixels.
        float * outputPixels;
        if (batch->getPackingMethod() == Batch::individualArrays) {
          outputPixels = (float*)item->cpuArray.getMemory() ;
        } else {
          if (batch->gpuMode) {
#if ENABLE_GPU
            outputPixels = batch->cpuPinnedPack ;
#else
            snprintf(item->errorMessage, sizeof(item->errorMessage), "GPU support not compiled.") ;
            break;
#endif
          } else {
            outputPixels = (float*)batch->cpuPack.getMemory() ;
          }
          // Offset to this item's slice of the packed H x W x 3 x N array.
          outputPixels += item->outputHeight*item->outputWidth*3*item->index ;
        }
        // Read full image.
        float * inputPixels = (float*)getBuffer(0,
                                                item->shape.height *
                                                item->shape.width *
                                                item->shape.depth * sizeof(float)) ;
        item->error = reader->readPixels(inputPixels, item->name.c_str()) ;
        if (item->error != vl::VLE_Success) {
          snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
          break ;
        }
        // Crop and resize, one axis at a time (the second pass also applies
        // the horizontal flip).
        float * temp = (float*)getBuffer(1,
                                         item->outputHeight *
                                         item->shape.width *
                                         item->shape.depth * sizeof(float)) ;
        vl::impl::imageResizeVertical(temp, inputPixels,
                                      item->outputHeight,
                                      item->shape.height,
                                      item->shape.width,
                                      item->shape.depth,
                                      item->cropHeight,
                                      item->cropOffsetY) ;
        vl::impl::imageResizeVertical(outputPixels, temp,
                                      item->outputWidth,
                                      item->shape.width,
                                      item->outputHeight,
                                      item->shape.depth,
                                      item->cropWidth,
                                      item->cropOffsetX,
                                      item->flip) ;
        // Postprocess colors.
        {
          size_t inputNumChannels = item->shape.depth ;
          size_t K = item->outputNumChannels ;
          size_t n = item->outputHeight*item->outputWidth ;
          if (batch->averageImage) {
            // If there is an average image, then subtract it now.
            // Grayscale images are expanded here to color if needed.
            // Without an average image, they are expanded later.
            for (int k = inputNumChannels ; k < K ; ++k) {
              ::memcpy(outputPixels + n*k, outputPixels, sizeof(float) * n) ;
            }
            vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
            (batch->context,
             n * item->outputNumChannels,
             -1.0f,
             batch->averageImage, 1,
             outputPixels, 1) ;
            inputNumChannels = K ;
          }
          float dv [3] ;
          float * channels [3] ;
          for (int k = 0 ; k < K ; ++k) {
            channels[k] = outputPixels + n * k ;
          }
          // Per-channel additive term combining the scalar average, the
          // brightness shift and (when contrast jitter is active) the
          // channel mean.
          for (int k = 0 ; k < inputNumChannels ; ++k) {
            dv[k] = (1. - 2. * item->contrastShift) *
            (batch->average[k] + item->brightnessShift[k]);
            if (item->contrastShift != 1.) {
              float mu = 0.f ;
              float const * pixel = channels[k] ;
              float const * end = channels[k] + n ;
              while (pixel != end) { mu += *pixel++ ; }
              mu /= n ;
              dv[k] -= (1.0 - item->contrastShift) * mu ;
            }
          }
          {
            float const * end = channels[0] + n ;
            float v [3] ;
            if (K == 3 && inputNumChannels == 3) {
              // Color in, color out: mix each pixel with its channel mean
              // to apply saturation, scaled by the contrast shift.
              float const a = item->contrastShift * item->saturationShift ;
              float const b = item->contrastShift * (1. - item->saturationShift) / K ;
              while (channels[0] != end) {
                float mu = 0.f ;
                v[0] = *channels[0] + dv[0] ; mu += v[0] ;
                v[1] = *channels[1] + dv[1] ; mu += v[1] ;
                v[2] = *channels[2] + dv[2] ; mu += v[2] ;
                *channels[0]++ = a * v[0] + b * mu ;
                *channels[1]++ = a * v[1] + b * mu ;
                *channels[2]++ = a * v[2] + b * mu ;
              }
            } else if (K == 3 && inputNumChannels == 1) {
              // Grayscale in, color out: replicate the single channel.
              float const a = item->contrastShift * item->saturationShift ;
              float const b = item->contrastShift * (1. - item->saturationShift) / K ;
              while (channels[0] != end) {
                float mu = 0.f ;
                v[0] = *channels[0] + dv[0] ; mu += v[0] ;
                v[1] = *channels[0] + dv[1] ; mu += v[1] ;
                v[2] = *channels[0] + dv[2] ; mu += v[2] ;
                *channels[0]++ = a * v[0] + b * mu ;
                *channels[1]++ = a * v[1] + b * mu ;
                *channels[2]++ = a * v[2] + b * mu ;
              }
            } else {
              float const a = item->contrastShift ;
              while (channels[0] != end) {
                // Bug fix: the original statement
                //   *channels[0]++ = a * (*channels[0] + dv[0]) ;
                // reads and increments channels[0] in the same expression,
                // which is unsequenced (undefined behavior before C++17).
                // Split the read from the post-incrementing store.
                float v0 = *channels[0] + dv[0] ;
                *channels[0]++ = a * v0 ;
              }
            }
          }
        }
        // Copy to GPU (individual arrays are uploaded one by one; the
        // packed buffer is uploaded once by Batch::returnItem()).
        if (batch->getPackingMethod() == Batch::individualArrays && batch->gpuMode) {
#if ENABLE_GPU
          cudaError_t cerror ;
          cerror = cudaMemcpyAsync (item->gpuArray.getMemory(),
                                    outputPixels,
                                    item->gpuArray.getNumElements() * sizeof(float),
                                    cudaMemcpyHostToDevice,
                                    batch->cudaStream) ;
          if (cerror != cudaSuccess) {
            item->error = vl::VLE_Cuda ;
            snprintf(item->errorMessage, sizeof(item->errorMessage),
                     "CUDA error while copying memory from host to device: '%s'", cudaGetErrorString(cerror)) ;
            break ;
          }
#endif
        }
        break ;
      }
      case Batch::Item::ready:
        break ;
    }
    batch->returnItem(item) ;
  }
  LOG(2) << "reader " << index << " task quitting" ;
}
// Stop the worker and release all resources: join the thread, free the
// scratch buffers and delete the image reader. NOTE(review): join() only
// returns if the thread has left its loop, i.e. the batch must already have
// signalled quit (Batch::finalize() is called before this in atExit()).
void ReaderTask::finalize()
{
  LOG(2)<<"finalizing reader " << index ;
  if (thread) {
    if (thread->joinable()) {
      thread->join() ;
    }
    delete thread ;
    thread = NULL ;
  }
  for (int i = 0 ; i < sizeof(buffers)/sizeof(Buffer) ; ++i) {
    if (buffers[i].memory) {
      free(buffers[i].memory) ;
      buffers[i].memory = NULL ;
      buffers[i].size = 0 ;
    }
  }
  if (reader) {
    delete reader ;
    reader = NULL ;
  }
  index = -1 ;
  batch = NULL ;
}
// Attach this reader to a batch and start its worker thread.
vl::ErrorCode ReaderTask::init(Batch * batch, int index)
{
  finalize() ;
  this->batch = batch ;
  this->index = index ;
  // `gpuDevice` was never initialized; the worker loop compares it against
  // batch->gpuDevice, so give it a defined "no device" value.
  gpuDevice = -1 ;
  // Bug fix: construct the image reader *before* launching the thread. The
  // original started the thread first, so the new thread could dereference
  // `reader` while it was still NULL (a startup race).
  reader = new vl::ImageReader() ;
  thread = new tthread::thread(threadEntryPoint, this) ;
  return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
// Process-wide state kept alive across MEX invocations: one context, one
// shared batch, and the pool of reader threads (sized by 'NumThreads').
vl::MexContext context ;
Batch batch(context) ;
bool batchIsInitialized = false ;
typedef std::vector<ReaderTask*> readers_t ;
readers_t readers ;
void atExit()
{
if (batchIsInitialized) {
batch.finalize() ;
batchIsInitialized = false ;
}
for (int r = 0 ; r < readers.size() ; ++r) {
readers[r]->finalize() ;
delete readers[r] ;
}
readers.clear() ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
bool prefetch = false ;
bool gpuMode = false ;
int requestedNumThreads = readers.size() ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
Batch::PackingMethod packingMethod = Batch::individualArrays ;
Batch::ResizeMethod resizeMethod = Batch::noResize ;
int resizeWidth = -1 ;
int resizeHeight = -1 ;
vl::ErrorCode error ;
double average [3] = {0.} ;
vl::MexTensor averageImage(context) ;
double brightnessDeviation [9] = {0.} ;
double saturationDeviation = 0. ;
double contrastDeviation = 0. ;
bool flipMode = false ;
Batch::CropLocation cropLocation = Batch::cropCenter ;
double minCropSize = 1.0, maxCropSize = 1.0 ;
double minCropAnisotropy = 1.0, maxCropAnisotropy = 1.0 ;
verbosity = 0 ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 1) {
vlmxError(VLMXE_IllegalArgument, "There is less than one argument.") ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_prefetch :
prefetch = true ;
break ;
case opt_pack :
packingMethod = Batch::singleArray ;
break ;
case opt_gpu :
#ifndef ENABLE_GPU
vlmxError(VLMXE_IllegalArgument, "Not compiled with GPU support.") ;
#endif
gpuMode = true ;
break ;
case opt_num_threads :
requestedNumThreads = (int)mxGetScalar(optarg) ;
break ;
case opt_resize :
if (!vlmxIsPlainVector(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "RESIZE is not a plain vector.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1 :
resizeMethod = Batch::resizeShortestSide ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[0] ;
break ;
case 2 :
resizeMethod = Batch::fixedSize ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[1] ;
break;
default:
vlmxError(VLMXE_IllegalArgument, "RESIZE does not have one or two dimensions.") ;
break ;
}
if (resizeHeight < 1 || resizeWidth < 1) {
vlmxError(VLMXE_IllegalArgument, "An element of RESIZE is smaller than one.") ;
}
break ;
case opt_brightness: {
if (!vlmxIsPlainMatrix(optarg, -1, -1)) {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS is not a plain matrix.") ;
}
size_t n = mxGetNumberOfElements(optarg) ;
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
if (n == 1) {
double x = mxGetPr(optarg)[0] ;
brightnessDeviation[0] = x;
brightnessDeviation[3] = x;
brightnessDeviation[8] = x;
} else if (n == 3) {
double const* x = mxGetPr(optarg) ;
brightnessDeviation[0] = x[0];
brightnessDeviation[3] = x[1];
brightnessDeviation[8] = x[2];
} else if (n == 9) {
memcpy(brightnessDeviation, mxGetPr(optarg), sizeof(brightnessDeviation)) ;
} else {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS does not have 1, 3, or 9 elements.") ;
}
break ;
}
case opt_saturation: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not in the [0,1] range..") ;
}
saturationDeviation = x ;
break ;
}
case opt_contrast: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not in the [0,1] range..") ;
}
contrastDeviation = x ;
break ;
}
case opt_crop_anisotropy: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY is not a plain scalar or vector with two components.") ;
}
minCropAnisotropy = mxGetPr(optarg)[0] ;
maxCropAnisotropy = mxGetPr(optarg)[std::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropAnisotropy < 0.0 || minCropAnisotropy > maxCropAnisotropy) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY values are not in the legal range.") ;
}
break ;
}
case opt_crop_size: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE is not a plain scalar or vector with two components.") ;
}
minCropSize = mxGetPr(optarg)[0] ;
maxCropSize = mxGetPr(optarg)[std::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropSize < 0.0 || minCropSize > maxCropSize || maxCropSize > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE values are not in the legal range.") ;
}
break ;
}
case opt_crop_location: {
if (!vlmxIsString(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION is not a string") ;
}
if (vlmxCompareToStringI(optarg, "random") == 0) {
cropLocation = Batch::cropRandom ;
} else if (vlmxCompareToStringI(optarg, "center") == 0) {
cropLocation = Batch::cropCenter ;
} else {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION value unknown.") ;
}
break ;
}
case opt_subtract_average: {
if (vlmxIsVector(optarg,1) || vlmxIsVector(optarg, 3)) {
size_t n = mxGetNumberOfElements(optarg) ;
switch (mxGetClassID(optarg)) {
case mxSINGLE_CLASS: {
float * x = (float*)mxGetData(optarg) ;
average[0] = x[std::min((size_t)0,n-1)] ;
average[1] = x[std::min((size_t)1,n-1)] ;
average[2] = x[std::min((size_t)2,n-1)] ;
break ;
}
case mxDOUBLE_CLASS: {
double * x = mxGetPr(optarg) ;
average[0] = (float)x[std::min((size_t)0,n-1)] ;
average[1] = (float)x[std::min((size_t)1,n-1)] ;
average[2] = (float)x[std::min((size_t)2,n-1)] ;
break ;
}
default:
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not SINGLE or DOUBLE vector.") ;
}
} else {
if (mxGetClassID(optarg) != mxSINGLE_CLASS ||
mxGetNumberOfDimensions(optarg) > 3) {
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not a SINGLE image of a compatible shape.") ;
}
averageImage.init(optarg) ;
}
break ;
}
case opt_flip: {
flipMode = true ;
break ;
}
}
}
if (averageImage) {
if (resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "Cannot subtract an average image unless RESIZE is used to set the size of the output.") ;
}
if (averageImage.getNumDimensions() != 3 ||
averageImage.getHeight() != resizeHeight ||
averageImage.getWidth() != resizeWidth ||
averageImage.getDepth() !=3) {
vlmxError(VLMXE_IllegalArgument, "The average image is not a RESIZEHEIGHT x RESIZEWIDTH x 3 array.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
if (!mxIsCell(in[IN_FILENAMES])) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES is not a cell array of strings.") ;
}
// If the requested number of threads changes, finalize everything
requestedNumThreads = std::max(requestedNumThreads, 1) ;
if (readers.size() != requestedNumThreads) {
atExit() ; // Delete threads and current batch
}
// Prepare batch.
if (!batchIsInitialized) {
error = batch.init() ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not initialize a batch structure") ;
}
batchIsInitialized = true ;
}
// Prepare reader tasks.
for (int r = readers.size() ; r < requestedNumThreads ; ++r) {
readers.push_back(new ReaderTask()) ;
vl::ErrorCode error = readers[r]->init(&batch, r) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not create the requested number of threads") ;
}
}
// Extract filenames as strings.
bool sameAsPrefeteched = true ;
std::vector<std::string> filenames ;
for (int i = 0 ; i < (int)mxGetNumberOfElements(in[IN_FILENAMES]) ; ++i) {
mxArray* filenameArray = mxGetCell(in[IN_FILENAMES], i) ;
if (!vlmxIsString(filenameArray,-1)) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES contains an entry that is not a string.") ;
}
char filename [512] ;
mxGetString (filenameArray, filename, sizeof(filename)/sizeof(char)) ;
filenames.push_back(std::string(filename)) ;
sameAsPrefeteched &= (i < batch.getNumberOfItems() && batch.getItem(i)->name == filenames[i]) ;
}
// If the list of names is not the same as the prefetched ones,
// start a new cycle.
if (!sameAsPrefeteched) {
batch.clear() ;
// Check compatibility of options
if (packingMethod == Batch::singleArray && resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "PACK must be used in combination with resizing to a fixed size.") ;
}
if (verbosity >= 2) {
mexPrintf("vl_imreadjpeg: gpu mode: %s\n", gpuMode?"yes":"no") ;
mexPrintf("vl_imreadjpeg: crop anisotropy: [%.1g, %.1g]\n",
minCropAnisotropy, maxCropAnisotropy) ;
mexPrintf("vl_imreadjpeg: crop size: [%.1g, %.1g]\n",
minCropSize, maxCropSize) ;
}
batch.setResizeMethod(resizeMethod, resizeHeight, resizeWidth) ;
batch.setPackingMethod(packingMethod) ;
batch.setGpuMode(gpuMode) ;
batch.setFlipMode(flipMode) ;
batch.setCropLocation(cropLocation) ;
batch.setCropAnisotropy(minCropAnisotropy, maxCropAnisotropy) ;
batch.setCropSize(minCropSize, maxCropSize) ;
batch.setColorDeviation(brightnessDeviation,
contrastDeviation,
saturationDeviation) ;
batch.setAverage(average) ;
if (averageImage) {
batch.setAverageImage((float const*)averageImage.getMemory()) ;
}
for (int i = 0 ; i < filenames.size() ; ++ i) {
batch.registerItem(filenames[i]) ;
}
batch.prefetch() ;
}
// Done if prefetching only.
if (prefetch) { return ; }
// Return result.
batch.sync() ;
switch (batch.getPackingMethod()) {
case Batch::singleArray: {
mwSize dims [] = {1,1} ;
out[OUT_IMAGES] = mxCreateCellArray(2, dims) ;
mxSetCell(out[OUT_IMAGES], 0, batch.relinquishArray()) ;
break ;
}
case Batch::individualArrays:
out[OUT_IMAGES] = mxCreateCellArray(mxGetNumberOfDimensions(in[IN_FILENAMES]),
mxGetDimensions(in[IN_FILENAMES])) ;
for (int i = 0 ; i < batch.getNumberOfItems() ; ++i) {
Batch::Item * item = batch.getItem(i) ;
if (item->error != vl::VLE_Success) {
vlmxWarning(VLMXE_Execution, "could not read image '%s' because '%s'",
item->name.c_str(),
item->errorMessage) ;
} else {
mxSetCell(out[OUT_IMAGES], i, item->relinquishArray()) ;
}
}
break ;
}
// Finalize.
batch.clear() ;
}
|
03fbc95193777c56e8fc7427e218575e51f55ce0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "boundary.cuh"
#include "boundary_kernel.cuh"
extern "C"
{
// Ceiling integer division: smallest count of size-b groups covering a items.
// b must be non-zero (unchecked, consistent with the rest of this file).
unsigned int iDivU(unsigned int a, unsigned int b)
{
    unsigned int quotient = a / b;
    if (a % b != 0u) {
        ++quotient;
    }
    return quotient;
}
// Derive a 1-D launch configuration for n work items: the block size is
// clamped to n, and enough blocks are used (ceiling division) to cover all
// items.  For n == 0 the division below is undefined, exactly as in the
// original min/iDivU formulation.
void computeGridSiz(unsigned int n, unsigned int blockSize, unsigned int &numBlocks, unsigned int &numThreads)
{
    numThreads = (n < blockSize) ? n : blockSize;
    // Ceiling division (inlined from iDivU).
    numBlocks = (n % numThreads != 0u) ? (n / numThreads + 1u) : (n / numThreads);
}
// Computes the per-particle boundary contribution (vbi) for every boundary
// particle on the GPU and writes the results back into `vbi`.
//
// boundary_pos   : host array of positions, packed 4 SReal per particle
//                  (reinterpreted as SVec4 by the kernel).
// vbi            : host array of num_boundaries values, overwritten with the
//                  kernel output.
// ir             : radius parameter forwarded to computeVbi.
// num_boundaries : number of boundary particles.
//
// NOTE(review): none of the hipMalloc / hipMemcpy / launch calls below are
// error-checked; a failure would surface only as garbage output.
void updateVbi(SReal* boundary_pos, SReal* vbi, SReal ir, unsigned int num_boundaries)
{
// Device-side copies of the two host buffers.
SReal* d_boundary_pos;
SReal* d_vbi;
hipMalloc((void**)&d_boundary_pos, num_boundaries*sizeof(SReal)*4);
hipMalloc((void**)&d_vbi, num_boundaries*sizeof(SReal));
//hipMemcpy
hipMemcpy(d_vbi, vbi, num_boundaries*sizeof(SReal), hipMemcpyHostToDevice);
hipMemcpy(d_boundary_pos, boundary_pos, num_boundaries*sizeof(SReal)*4, hipMemcpyHostToDevice);
/*//kernel call*/
// 256 threads per block; computeGridSiz performs the ceiling division.
unsigned int numThreads, numBlocks;
computeGridSiz(num_boundaries, 256, numBlocks, numThreads);
hipLaunchKernelGGL(( computeVbi), dim3(numBlocks), dim3(numThreads), 0, 0, (SVec4*)d_boundary_pos, d_vbi, ir,num_boundaries);
/*//transfer back to host mem*/
hipMemcpy(vbi, d_vbi, num_boundaries*sizeof(SReal), hipMemcpyDeviceToHost);
/*//hipFree*/
hipFree(d_boundary_pos);
hipFree(d_vbi);
}
}
| 03fbc95193777c56e8fc7427e218575e51f55ce0.cu | #include "boundary.cuh"
#include "boundary_kernel.cuh"
extern "C"
{
// Ceiling integer division: smallest count of size-b groups covering a items.
// b must be non-zero (unchecked, consistent with the rest of this file).
unsigned int iDivU(unsigned int a, unsigned int b)
{
    unsigned int quotient = a / b;
    if (a % b != 0u) {
        ++quotient;
    }
    return quotient;
}
// Derive a 1-D launch configuration for n work items: the block size is
// clamped to n, and enough blocks are used (ceiling division) to cover all
// items.  For n == 0 the division below is undefined, exactly as in the
// original min/iDivU formulation.
void computeGridSiz(unsigned int n, unsigned int blockSize, unsigned int &numBlocks, unsigned int &numThreads)
{
    numThreads = (n < blockSize) ? n : blockSize;
    // Ceiling division (inlined from iDivU).
    numBlocks = (n % numThreads != 0u) ? (n / numThreads + 1u) : (n / numThreads);
}
// Computes the per-particle boundary contribution (vbi) for every boundary
// particle on the GPU and writes the results back into `vbi`.
//
// boundary_pos   : host array of positions, packed 4 SReal per particle
//                  (reinterpreted as SVec4 by the kernel).
// vbi            : host array of num_boundaries values, overwritten with the
//                  kernel output.
// ir             : radius parameter forwarded to computeVbi.
// num_boundaries : number of boundary particles.
//
// NOTE(review): none of the cudaMalloc / cudaMemcpy / launch calls below are
// error-checked; a failure would surface only as garbage output.
void updateVbi(SReal* boundary_pos, SReal* vbi, SReal ir, unsigned int num_boundaries)
{
// Device-side copies of the two host buffers.
SReal* d_boundary_pos;
SReal* d_vbi;
cudaMalloc((void**)&d_boundary_pos, num_boundaries*sizeof(SReal)*4);
cudaMalloc((void**)&d_vbi, num_boundaries*sizeof(SReal));
//cudaMemcpy
cudaMemcpy(d_vbi, vbi, num_boundaries*sizeof(SReal), cudaMemcpyHostToDevice);
cudaMemcpy(d_boundary_pos, boundary_pos, num_boundaries*sizeof(SReal)*4, cudaMemcpyHostToDevice);
/*//kernel call*/
// 256 threads per block; computeGridSiz performs the ceiling division.
unsigned int numThreads, numBlocks;
computeGridSiz(num_boundaries, 256, numBlocks, numThreads);
computeVbi<<<numBlocks, numThreads>>>((SVec4*)d_boundary_pos, d_vbi, ir,num_boundaries);
/*//transfer back to host mem*/
cudaMemcpy(vbi, d_vbi, num_boundaries*sizeof(SReal), cudaMemcpyDeviceToHost);
/*//cudaFree*/
cudaFree(d_boundary_pos);
cudaFree(d_vbi);
}
|
95c52cba9c7a1e083c434551b6fc5ea10e15aee9.hip | // !!! This is a file automatically generated by hipify!!!
#define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
#define NO_IMPORT_ARRAY
#include <stdio.h>
#include <stdlib.h>
#include "Python.h"
#include "math.h"
#include "numpy/arrayobject.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/* Needs to be compiled as C files because of the Naming problem in Namespace */
#ifdef __cplusplus
extern "C" {
#endif
/*
Calculate the new centers in the received block.
*/
#define THREAD_NUM 128
/* Kernel: label each point with its nearest center, then let thread 0 of
   each block accumulate that block's tile into per-block counters/sums.
   Expects a launch with blockDim.x == THREAD_NUM. */
__global__ void chunk_centers_sum_cuda(double *cu_data,double *cu_centers, int* cu_centers_counter, double* cu_new_centers,
int* cu_data_assigns, int* cluster_size,int *dimension,int *chunk_size)
/*
cu_data : a chunk of points, which are given pointwise.
cu_centers : current centers.
cu_centers_counter : to count how many points are nearest to a given center, count blockwise.
cu_new_centers : to calculate the sum of the points which are nearest to a given center, add blockwise.
cu_data_assigns : the index of the center which is nearest to a given point.
cluster_size : how many clusters are there.
dimension : dimension of the points.
chunk_size : how many points in the chunk.
*/
{
int k = threadIdx.x+blockIdx.x * blockDim.x;
int bid = blockIdx.x;
int i,j;
/* Grid-stride loop over the points of the chunk. */
while (k< (*chunk_size))
{
/*Calculate the index of the center which is nearest to a given point.*/
double min_distance = 1E100;
double distance;
*(cu_data_assigns+k)=0;
for (i = 0; i < *cluster_size; i++)
{
/* Squared Euclidean distance to center i (ties resolved to the
   highest index because of the <= comparison below). */
distance = 0;
for (j = 0; j < *dimension; j++)
{
distance +=(*(cu_data+k*(*dimension)+j)-*(cu_centers+i*(*dimension)+j)) * (*(cu_data+k*(*dimension)+j)-*(cu_centers+i*(*dimension)+j));
}
if (distance <= min_distance)
{
min_distance = distance;
*(cu_data_assigns+k) = i;
}
}
/* NOTE(review): this barrier sits inside a loop whose trip count differs
   across threads when *chunk_size is not a multiple of the total thread
   count; threads that already left the loop never reach it, which is
   undefined behaviour for __syncthreads(). */
__syncthreads();
/*add up cu_centers_counter and cu_new_centers in each block,
in order to avoid IO problem when two kernels try to write one data*/
/* NOTE(review): the accumulation always reads the tile at offset
   bid*THREAD_NUM, which matches k only on the first grid-stride pass;
   when the host caps the grid below ceil(chunk_size/128), later passes
   re-add the first tile instead of the current one.  It also assumes
   blockDim.x == THREAD_NUM. */
if(threadIdx.x == 0)
{
for (i=0 ; i<THREAD_NUM && (k+i)<(*chunk_size); i++)
{
*(cu_centers_counter+bid*(*cluster_size)+*(cu_data_assigns+bid*THREAD_NUM+i))+=1;
for (j = 0; j < *dimension; j++)
{
*(cu_new_centers +bid*(*cluster_size)*(*dimension) +(*(cu_data_assigns+bid*THREAD_NUM+i))* (*dimension) + j) += *(cu_data+(bid*THREAD_NUM+i)*(*dimension)+j);
}
}
//printf("\n%d\n",bid);
//printf("%d %d %d\n", *(cu_centers_counter+bid*(*cluster_size)),*(cu_centers_counter+bid*(*cluster_size)+1),*(cu_centers_counter+bid*(*cluster_size)+2));
}
k+=blockDim.x * gridDim.x;
}
}
/* Assign every point in `data` to its nearest center and return the
 * recomputed centers.
 *
 * data         : chunk_size x dimension C-contiguous double matrix (points).
 * centers      : cluster_size x dimension C-contiguous double matrix.
 * data_assigns : Python list of length >= chunk_size; slot i receives the
 *                index of the center nearest to point i.
 *
 * Returns a new cluster_size x dimension double PyArray holding the updated
 * centers (mean of the assigned points; a center with no points keeps its
 * old value), or NULL with a Python exception set.
 *
 * NOTE(review): the error paths below leak whatever buffers were already
 * allocated; tolerable for a fail-stop extension call, but worth a cleanup
 * pass.  PyInt_FromLong is Python 2 API.
 */
PyObject* kmeans_chunk_center_cuda(PyArrayObject *data, PyArrayObject *centers, PyObject *data_assigns)
{
    /* Verify that exactly one usable GPU is present. */
    int device_count;
    hipError_t device_error;
    device_error = hipGetDeviceCount(&device_count);
    if (device_error != hipSuccess){
        if (device_error == hipErrorNoDevice){
            PyErr_SetString(PyExc_Exception, "No available device detected");
            return NULL;
        }
        if (device_error == hipErrorInsufficientDriver){
            PyErr_SetString(PyExc_Exception, "Compute compacity is not enough");
            return NULL;
        }
    }
    if (device_count >1){
        PyErr_SetString(PyExc_Exception, "Only 1 device is supported currently");
        return NULL;
    }
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);
    /* Problem sizes.  PyArray_DIM returns npy_intp; previously the dims
     * pointer was reinterpreted as int*, which only works on little-endian
     * hosts with dims < 2^31. */
    int cluster_size = (int)PyArray_DIM(centers, 0);
    int dimension = (int)PyArray_DIM(centers, 1);
    int chunk_size = (int)PyArray_DIM(data, 0);
    if (cluster_size<1 || dimension<1 || chunk_size<1){
        PyErr_SetString(PyExc_ValueError, "Paramenters size error");
        return NULL;
    }
    /* One block per THREAD_NUM (=128) points, capped at the device's
     * resident-thread capacity so we do not launch an oversized grid. */
    int BLOCK_NUM = (chunk_size + 127 ) / 128;
    if (BLOCK_NUM > prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount / 128 ) BLOCK_NUM = prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount / 128 ;
    /* Host buffers: per-block partial counters/sums plus per-point labels.
     * BUGFIX: the counter allocation used to be multiplied by 1E100, which
     * overflows the size computation and made malloc fail for any input. */
    int *centers_counter = (int *)malloc(sizeof(int) * BLOCK_NUM * cluster_size);
    double *new_centers = (double *)malloc(sizeof(double) * BLOCK_NUM * cluster_size * dimension);
    int *p_data_assigns = (int *)malloc(sizeof(int) * chunk_size);
    if (centers_counter == NULL || new_centers == NULL || p_data_assigns == NULL){
        PyErr_SetString(PyExc_MemoryError, "RAM Malloc Error");
        return NULL;
    }
    int i, j, k;
    for (i = 0; i < cluster_size * BLOCK_NUM; i++)
    {
        (*(centers_counter + i)) = 0;
    }
    for (i = 0; i < cluster_size * dimension * BLOCK_NUM; i++)
    {
        (*(new_centers + i)) = 0;
    }
    double* p_data = (double *)PyArray_DATA(data);
    double* p_centers = (double *)PyArray_DATA(centers);
    double* cu_data, *cu_centers, *cu_new_centers;
    int* cu_centers_counter, *cu_cluster_size, *cu_dimension, *cu_data_assigns, *cu_chunk_size;
    /* Allocate device buffers and upload the inputs. */
    if (hipMalloc((void**) &cu_data, sizeof(double) * chunk_size * dimension)
        || hipMalloc((void**) &cu_centers, sizeof(double) * cluster_size * dimension)
        || hipMalloc((void**) &cu_centers_counter, sizeof(int) * BLOCK_NUM * cluster_size)
        || hipMalloc((void**) &cu_new_centers, sizeof(double) * BLOCK_NUM * cluster_size * dimension)
        || hipMalloc((void**) &cu_data_assigns, sizeof(int) * chunk_size )
        || hipMalloc((void**) &cu_cluster_size, sizeof(int) *1)
        || hipMalloc((void**) &cu_dimension, sizeof(int) *1)
        || hipMalloc((void**) &cu_chunk_size, sizeof(int) *1)){
        PyErr_SetString(PyExc_MemoryError, "CUDA RAM malloc error");
        return NULL;
    }
    if (hipMemcpy(cu_data, p_data, sizeof(double) * chunk_size * dimension, hipMemcpyHostToDevice)
        || hipMemcpy(cu_centers, p_centers, sizeof(double) * cluster_size * dimension, hipMemcpyHostToDevice)
        || hipMemcpy(cu_centers_counter, centers_counter, sizeof(int)* BLOCK_NUM * cluster_size,hipMemcpyHostToDevice)
        || hipMemcpy(cu_new_centers, new_centers, sizeof(double) * BLOCK_NUM * cluster_size * dimension, hipMemcpyHostToDevice)
        || hipMemcpy(cu_data_assigns, p_data_assigns, sizeof(int) *chunk_size , hipMemcpyHostToDevice)
        || hipMemcpy(cu_cluster_size, &cluster_size, sizeof(int) * 1, hipMemcpyHostToDevice)
        || hipMemcpy(cu_dimension, &dimension, sizeof(int) * 1, hipMemcpyHostToDevice)
        || hipMemcpy(cu_chunk_size, &chunk_size, sizeof(int) * 1, hipMemcpyHostToDevice)){
        PyErr_SetString(PyExc_MemoryError, "Memory copy error, from host to device");
        return NULL;
    }
    /* Launch with BLOCK_NUM blocks of THREAD_NUM threads, then surface any
     * launch-configuration error (launches do not return a status). */
    hipLaunchKernelGGL(( chunk_centers_sum_cuda), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, cu_data,cu_centers,cu_centers_counter,cu_new_centers,cu_data_assigns,cu_cluster_size,cu_dimension,cu_chunk_size);
    hipError_t launch_error = hipGetLastError();
    if (launch_error != hipSuccess){
        PyErr_SetString(PyExc_Exception, hipGetErrorString(launch_error));
        return NULL;
    }
    /* Copy the results back (these copies also synchronize with the kernel)
     * and release the device buffers. */
    if (hipMemcpy(centers_counter, cu_centers_counter,sizeof(int) * BLOCK_NUM *cluster_size, hipMemcpyDeviceToHost)
        || hipMemcpy(new_centers, cu_new_centers, sizeof(double) * BLOCK_NUM* cluster_size * dimension, hipMemcpyDeviceToHost)
        || hipMemcpy(p_data_assigns, cu_data_assigns, sizeof(int) * chunk_size , hipMemcpyDeviceToHost)){
        PyErr_SetString(PyExc_MemoryError, "Memory copy error, from device to host");
        return NULL;
    }
    if (hipFree(cu_data)
        || hipFree(cu_centers)
        || hipFree(cu_centers_counter)
        || hipFree(cu_new_centers)
        || hipFree(cu_data_assigns)
        || hipFree(cu_cluster_size)
        || hipFree(cu_dimension)
        || hipFree(cu_chunk_size)){
        PyErr_SetString(PyExc_MemoryError, "CUDA free memory error");
        return NULL;
    }
    /* Fold the per-block partial results into block 0's slice.
     * BUGFIX: these loops previously started at i = 0, adding block 0's own
     * values onto themselves and double-counting that block. */
    for (i = 1; i < BLOCK_NUM; i++)
    {
        for (j = 0; j < cluster_size; j++)
        {
            *(centers_counter + j) += *(centers_counter + i * cluster_size + j);
        }
    }
    for (i = 1; i < BLOCK_NUM; i++)
    {
        for (j = 0; j < cluster_size; j++)
        {
            for (k = 0; k < dimension; k++)
                *(new_centers + j * dimension + k) += *(new_centers + i * cluster_size * dimension + j * dimension + k);
        }
    }
    /* Turn the sums into means; a center with no assigned points keeps its
     * previous coordinates. */
    for (i = 0; i < cluster_size; i++)
    {
        if (*(centers_counter + i) == 0)
        {
            for (j = 0; j < dimension; j++)
            {
                (*(new_centers + i * dimension + j)) = (*(double*)PyArray_GETPTR2(centers, i, j));
            }
        }
        else
        {
            for (j = 0; j < dimension; j++)
            {
                (*(new_centers + i * dimension + j)) /= (*(centers_counter + i));
            }
        }
    }
    /* Publish the per-point assignments to the caller's Python list. */
    for (i = 0; i < chunk_size; i++)
    {
        PyList_SetItem(data_assigns, i, PyInt_FromLong(*(p_data_assigns + i)));
    }
    /* Copy the new centers into a fresh NumPy array (the malloced buffer is
     * freed below, so the data must be copied, not adopted). */
    PyObject* return_new_centers;
    npy_intp dims[2] = {cluster_size, dimension};
    return_new_centers = PyArray_SimpleNew(2, dims, NPY_DOUBLE);
    if (return_new_centers == NULL){
        PyErr_SetString(PyExc_MemoryError, "Error occurs when creating a new PyArray");
        return NULL;
    }
    void *arr_data = PyArray_DATA((PyArrayObject*)return_new_centers);
    memcpy(arr_data, new_centers, PyArray_ITEMSIZE((PyArrayObject*) return_new_centers) * cluster_size * dimension);
    free(centers_counter);
    free(new_centers);
    free(p_data_assigns);
    return (PyObject*) return_new_centers;
}
#ifdef __cplusplus
}
#endif
| 95c52cba9c7a1e083c434551b6fc5ea10e15aee9.cu | #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API
#define NO_IMPORT_ARRAY
#include <stdio.h>
#include <stdlib.h>
#include "Python.h"
#include "math.h"
#include "numpy/arrayobject.h"
#include <cuda_runtime.h>
#include <cuda.h>
/* Needs to be compiled as C files because of the Naming problem in Namespace */
#ifdef __cplusplus
extern "C" {
#endif
/*
Calculate the new centers in the received block.
*/
#define THREAD_NUM 128
/* Kernel: label each point with its nearest center, then let thread 0 of
   each block accumulate that block's tile into per-block counters/sums.
   Expects a launch with blockDim.x == THREAD_NUM. */
__global__ void chunk_centers_sum_cuda(double *cu_data,double *cu_centers, int* cu_centers_counter, double* cu_new_centers,
int* cu_data_assigns, int* cluster_size,int *dimension,int *chunk_size)
/*
cu_data : a chunk of points, which are given pointwise.
cu_centers : current centers.
cu_centers_counter : to count how many points are nearest to a given center, count blockwise.
cu_new_centers : to calculate the sum of the points which are nearest to a given center, add blockwise.
cu_data_assigns : the index of the center which is nearest to a given point.
cluster_size : how many clusters are there.
dimension : dimension of the points.
chunk_size : how many points in the chunk.
*/
{
int k = threadIdx.x+blockIdx.x * blockDim.x;
int bid = blockIdx.x;
int i,j;
/* Grid-stride loop over the points of the chunk. */
while (k< (*chunk_size))
{
/*Calculate the index of the center which is nearest to a given point.*/
double min_distance = 1E100;
double distance;
*(cu_data_assigns+k)=0;
for (i = 0; i < *cluster_size; i++)
{
/* Squared Euclidean distance to center i (ties resolved to the
   highest index because of the <= comparison below). */
distance = 0;
for (j = 0; j < *dimension; j++)
{
distance +=(*(cu_data+k*(*dimension)+j)-*(cu_centers+i*(*dimension)+j)) * (*(cu_data+k*(*dimension)+j)-*(cu_centers+i*(*dimension)+j));
}
if (distance <= min_distance)
{
min_distance = distance;
*(cu_data_assigns+k) = i;
}
}
/* NOTE(review): this barrier sits inside a loop whose trip count differs
   across threads when *chunk_size is not a multiple of the total thread
   count; threads that already left the loop never reach it, which is
   undefined behaviour for __syncthreads(). */
__syncthreads();
/*add up cu_centers_counter and cu_new_centers in each block,
in order to avoid IO problem when two kernels try to write one data*/
/* NOTE(review): the accumulation always reads the tile at offset
   bid*THREAD_NUM, which matches k only on the first grid-stride pass;
   when the host caps the grid below ceil(chunk_size/128), later passes
   re-add the first tile instead of the current one.  It also assumes
   blockDim.x == THREAD_NUM. */
if(threadIdx.x == 0)
{
for (i=0 ; i<THREAD_NUM && (k+i)<(*chunk_size); i++)
{
*(cu_centers_counter+bid*(*cluster_size)+*(cu_data_assigns+bid*THREAD_NUM+i))+=1;
for (j = 0; j < *dimension; j++)
{
*(cu_new_centers +bid*(*cluster_size)*(*dimension) +(*(cu_data_assigns+bid*THREAD_NUM+i))* (*dimension) + j) += *(cu_data+(bid*THREAD_NUM+i)*(*dimension)+j);
}
}
//printf("\n%d\n",bid);
//printf("%d %d %d\n", *(cu_centers_counter+bid*(*cluster_size)),*(cu_centers_counter+bid*(*cluster_size)+1),*(cu_centers_counter+bid*(*cluster_size)+2));
}
k+=blockDim.x * gridDim.x;
}
}
/* Assign every point in `data` to its nearest center and return the
 * recomputed centers.
 *
 * data         : chunk_size x dimension C-contiguous double matrix (points).
 * centers      : cluster_size x dimension C-contiguous double matrix.
 * data_assigns : Python list of length >= chunk_size; slot i receives the
 *                index of the center nearest to point i.
 *
 * Returns a new cluster_size x dimension double PyArray holding the updated
 * centers (mean of the assigned points; a center with no points keeps its
 * old value), or NULL with a Python exception set.
 *
 * NOTE(review): the error paths below leak whatever buffers were already
 * allocated; tolerable for a fail-stop extension call, but worth a cleanup
 * pass.  PyInt_FromLong is Python 2 API.
 */
PyObject* kmeans_chunk_center_cuda(PyArrayObject *data, PyArrayObject *centers, PyObject *data_assigns)
{
    /* Verify that exactly one usable GPU is present. */
    int device_count;
    cudaError_t device_error;
    device_error = cudaGetDeviceCount(&device_count);
    if (device_error != cudaSuccess){
        if (device_error == cudaErrorNoDevice){
            PyErr_SetString(PyExc_Exception, "No available device detected");
            return NULL;
        }
        if (device_error == cudaErrorInsufficientDriver){
            PyErr_SetString(PyExc_Exception, "Compute compacity is not enough");
            return NULL;
        }
    }
    if (device_count >1){
        PyErr_SetString(PyExc_Exception, "Only 1 device is supported currently");
        return NULL;
    }
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    /* Problem sizes.  PyArray_DIM returns npy_intp; previously the dims
     * pointer was reinterpreted as int*, which only works on little-endian
     * hosts with dims < 2^31. */
    int cluster_size = (int)PyArray_DIM(centers, 0);
    int dimension = (int)PyArray_DIM(centers, 1);
    int chunk_size = (int)PyArray_DIM(data, 0);
    if (cluster_size<1 || dimension<1 || chunk_size<1){
        PyErr_SetString(PyExc_ValueError, "Paramenters size error");
        return NULL;
    }
    /* One block per THREAD_NUM (=128) points, capped at the device's
     * resident-thread capacity so we do not launch an oversized grid. */
    int BLOCK_NUM = (chunk_size + 127 ) / 128;
    if (BLOCK_NUM > prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount / 128 ) BLOCK_NUM = prop.maxThreadsPerMultiProcessor * prop.multiProcessorCount / 128 ;
    /* Host buffers: per-block partial counters/sums plus per-point labels.
     * BUGFIX: the counter allocation used to be multiplied by 1E100, which
     * overflows the size computation and made malloc fail for any input. */
    int *centers_counter = (int *)malloc(sizeof(int) * BLOCK_NUM * cluster_size);
    double *new_centers = (double *)malloc(sizeof(double) * BLOCK_NUM * cluster_size * dimension);
    int *p_data_assigns = (int *)malloc(sizeof(int) * chunk_size);
    if (centers_counter == NULL || new_centers == NULL || p_data_assigns == NULL){
        PyErr_SetString(PyExc_MemoryError, "RAM Malloc Error");
        return NULL;
    }
    int i, j, k;
    for (i = 0; i < cluster_size * BLOCK_NUM; i++)
    {
        (*(centers_counter + i)) = 0;
    }
    for (i = 0; i < cluster_size * dimension * BLOCK_NUM; i++)
    {
        (*(new_centers + i)) = 0;
    }
    double* p_data = (double *)PyArray_DATA(data);
    double* p_centers = (double *)PyArray_DATA(centers);
    double* cu_data, *cu_centers, *cu_new_centers;
    int* cu_centers_counter, *cu_cluster_size, *cu_dimension, *cu_data_assigns, *cu_chunk_size;
    /* Allocate device buffers and upload the inputs. */
    if (cudaMalloc((void**) &cu_data, sizeof(double) * chunk_size * dimension)
        || cudaMalloc((void**) &cu_centers, sizeof(double) * cluster_size * dimension)
        || cudaMalloc((void**) &cu_centers_counter, sizeof(int) * BLOCK_NUM * cluster_size)
        || cudaMalloc((void**) &cu_new_centers, sizeof(double) * BLOCK_NUM * cluster_size * dimension)
        || cudaMalloc((void**) &cu_data_assigns, sizeof(int) * chunk_size )
        || cudaMalloc((void**) &cu_cluster_size, sizeof(int) *1)
        || cudaMalloc((void**) &cu_dimension, sizeof(int) *1)
        || cudaMalloc((void**) &cu_chunk_size, sizeof(int) *1)){
        PyErr_SetString(PyExc_MemoryError, "CUDA RAM malloc error");
        return NULL;
    }
    if (cudaMemcpy(cu_data, p_data, sizeof(double) * chunk_size * dimension, cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_centers, p_centers, sizeof(double) * cluster_size * dimension, cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_centers_counter, centers_counter, sizeof(int)* BLOCK_NUM * cluster_size,cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_new_centers, new_centers, sizeof(double) * BLOCK_NUM * cluster_size * dimension, cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_data_assigns, p_data_assigns, sizeof(int) *chunk_size , cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_cluster_size, &cluster_size, sizeof(int) * 1, cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_dimension, &dimension, sizeof(int) * 1, cudaMemcpyHostToDevice)
        || cudaMemcpy(cu_chunk_size, &chunk_size, sizeof(int) * 1, cudaMemcpyHostToDevice)){
        PyErr_SetString(PyExc_MemoryError, "Memory copy error, from host to device");
        return NULL;
    }
    /* Launch with BLOCK_NUM blocks of THREAD_NUM threads, then surface any
     * launch-configuration error (launches do not return a status). */
    chunk_centers_sum_cuda<<<BLOCK_NUM, THREAD_NUM>>>(cu_data,cu_centers,cu_centers_counter,cu_new_centers,cu_data_assigns,cu_cluster_size,cu_dimension,cu_chunk_size);
    cudaError_t launch_error = cudaGetLastError();
    if (launch_error != cudaSuccess){
        PyErr_SetString(PyExc_Exception, cudaGetErrorString(launch_error));
        return NULL;
    }
    /* Copy the results back (these copies also synchronize with the kernel)
     * and release the device buffers. */
    if (cudaMemcpy(centers_counter, cu_centers_counter,sizeof(int) * BLOCK_NUM *cluster_size, cudaMemcpyDeviceToHost)
        || cudaMemcpy(new_centers, cu_new_centers, sizeof(double) * BLOCK_NUM* cluster_size * dimension, cudaMemcpyDeviceToHost)
        || cudaMemcpy(p_data_assigns, cu_data_assigns, sizeof(int) * chunk_size , cudaMemcpyDeviceToHost)){
        PyErr_SetString(PyExc_MemoryError, "Memory copy error, from device to host");
        return NULL;
    }
    if (cudaFree(cu_data)
        || cudaFree(cu_centers)
        || cudaFree(cu_centers_counter)
        || cudaFree(cu_new_centers)
        || cudaFree(cu_data_assigns)
        || cudaFree(cu_cluster_size)
        || cudaFree(cu_dimension)
        || cudaFree(cu_chunk_size)){
        PyErr_SetString(PyExc_MemoryError, "CUDA free memory error");
        return NULL;
    }
    /* Fold the per-block partial results into block 0's slice.
     * BUGFIX: these loops previously started at i = 0, adding block 0's own
     * values onto themselves and double-counting that block. */
    for (i = 1; i < BLOCK_NUM; i++)
    {
        for (j = 0; j < cluster_size; j++)
        {
            *(centers_counter + j) += *(centers_counter + i * cluster_size + j);
        }
    }
    for (i = 1; i < BLOCK_NUM; i++)
    {
        for (j = 0; j < cluster_size; j++)
        {
            for (k = 0; k < dimension; k++)
                *(new_centers + j * dimension + k) += *(new_centers + i * cluster_size * dimension + j * dimension + k);
        }
    }
    /* Turn the sums into means; a center with no assigned points keeps its
     * previous coordinates. */
    for (i = 0; i < cluster_size; i++)
    {
        if (*(centers_counter + i) == 0)
        {
            for (j = 0; j < dimension; j++)
            {
                (*(new_centers + i * dimension + j)) = (*(double*)PyArray_GETPTR2(centers, i, j));
            }
        }
        else
        {
            for (j = 0; j < dimension; j++)
            {
                (*(new_centers + i * dimension + j)) /= (*(centers_counter + i));
            }
        }
    }
    /* Publish the per-point assignments to the caller's Python list. */
    for (i = 0; i < chunk_size; i++)
    {
        PyList_SetItem(data_assigns, i, PyInt_FromLong(*(p_data_assigns + i)));
    }
    /* Copy the new centers into a fresh NumPy array (the malloced buffer is
     * freed below, so the data must be copied, not adopted). */
    PyObject* return_new_centers;
    npy_intp dims[2] = {cluster_size, dimension};
    return_new_centers = PyArray_SimpleNew(2, dims, NPY_DOUBLE);
    if (return_new_centers == NULL){
        PyErr_SetString(PyExc_MemoryError, "Error occurs when creating a new PyArray");
        return NULL;
    }
    void *arr_data = PyArray_DATA((PyArrayObject*)return_new_centers);
    memcpy(arr_data, new_centers, PyArray_ITEMSIZE((PyArrayObject*) return_new_centers) * cluster_size * dimension);
    free(centers_counter);
    free(new_centers);
    free(p_data_assigns);
    return (PyObject*) return_new_centers;
}
#ifdef __cplusplus
}
#endif
|
781b0aefa5498aaaba23d53e251e22031a7770e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <mex.h>
#include <string.h>
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "../../include/cpp/OperationType.h"
#include "../../include/cuda/ErrorCheckingCuda.cuh"
#include "../../include/cuda/MatrixMain.cuh"
#include "../../include/cuda/NcclMultiplicationEnvironment.cuh"
#include <vector>
using namespace std;
static int initiated = 0;
static bool isAlreadyInitialized=false;
static bool areMatricesDestroyed=false;
enum type_method_f{ no_method, taylor, bernoulli, hermite };
enum eval_method{ PatMey };
/* Base class for evaluating a matrix function (cos/cosh/exp) on the GPU.
 *
 * Holds the matrix powers required by the polynomial approximation (pA),
 * the accumulated result (R), and one-shot flags for the
 * scale -> evaluate -> unscale pipeline driven from mexFunction.
 * Subclasses supply the function-specific power/scale/unscale steps. */
class funcion_matricial {
protected:
int n; /* Matrix size */
NcclMultiplicationEnvironment<double> *ncclMultEnv ; /* multi-GPU GEMM context */
vector< MatrixMain<double>* > pA; /* stored powers of the base matrix */
MatrixMain<double>* R; /* Matrix result */
int scaled; /* one-shot flags: each pipeline stage runs at most once */
int evaluated;
int unscaled;
int nProd; /* number of matrix products performed so far */
eval_method e_method; /* polynomial evaluation scheme (PatMey) */
type_method_f metodo_f; /* coefficient family: taylor/bernoulli/hermite */
public:
funcion_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
~funcion_matricial( );
void createInitialMatrices(int n, const double * A,type_method_f metodo_f, eval_method e_method);
void destroyAllMatrices();
int getN( ) const { return n; }
int getQ( ) const { return pA.size()-1; } /* Returns the order of the PatMey polynomial */
void get( const int i, double *A );
virtual void power( );
//void power( const int i );
double norm1( const int i );
void scale( const int s, const double e );
virtual void scale( const int s ) = 0;
int evaluate( const int m, const double *p );
int eval_PatMey( const int m, const double *p );
virtual void unscale( const int s ) = 0;
void free( const int n );
void finalize( mxArray **plhs );
};
/* cos(A): double-angle based scaling/unscaling (F := 2*F*F - I). */
class cos_matricial : public funcion_matricial {
public:
cos_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
cos_matricial(int m);
void power( );
void scale( const int s );
void unscale( const int s );
};
/* cosh(A): same double-angle recovery as cos (F := 2*F*F - I). */
class cosh_matricial : public funcion_matricial {
public:
cosh_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
void power( );
void scale( const int s );
void unscale( const int s );
};
/* exp(A): scaling-and-squaring recovery (F := F*F). */
class exp_matricial : public funcion_matricial {
public:
exp_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
void power( );
void scale( const int s );
void unscale( const int s );
};
/* Build the NCCL multiplication environment across every visible GPU and
   wrap the caller's n x n matrix A as the first stored power.  Marks the
   module-level isAlreadyInitialized flag so later "init" commands reuse
   this object instead of constructing a new one. */
funcion_matricial::funcion_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : n(n), scaled(0), evaluated(0), unscaled(0), metodo_f(metodo_f), e_method(e_method) {
int gpuSizeSystem;
CUDACHECK(hipGetDeviceCount(&gpuSizeSystem));
ncclMultEnv = new NcclMultiplicationEnvironment<double>(gpuSizeSystem, 0, MultDouble, false);
isAlreadyInitialized=true;
createInitialMatrices(n,A,metodo_f,e_method);
}
/* cos(A): all state setup is delegated to the base-class constructor. */
cos_matricial::cos_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : funcion_matricial(n,metodo_f,e_method,A) {
}
/* cosh(A): all state setup is delegated to the base-class constructor. */
cosh_matricial::cosh_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : funcion_matricial(n,metodo_f,e_method,A) {
}
/* exp(A): all state setup is delegated to the base-class constructor. */
exp_matricial::exp_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : funcion_matricial(n,metodo_f,e_method,A) {
}
/* (Re)initialise the object for a new computation: reset the pipeline
   flags, allocate a fresh result matrix R, and wrap the caller's matrix A
   (without copying) as pA[0].
   NOTE(review): assumes any previous R/pA were already released via
   destroyAllMatrices(); calling this on a live object would leak them.
   The file-scope flag `a` used by cos/cosh::power is not reset here —
   TODO confirm whether re-initialisation is intended to restore it. */
void funcion_matricial::createInitialMatrices(int n, const double * A,type_method_f metodo_f, eval_method e_method)
{
this->n=n;
this->scaled=0;
this->unscaled=0;
this->evaluated=0;
this->metodo_f=metodo_f;
this->e_method=e_method;
R = new MatrixMain<double>(ncclMultEnv, n, n);
pA.push_back( new MatrixMain<double>(ncclMultEnv, n, n, (double*)A) );
nProd = 0;
}
/* Append the next matrix power: pA gains pA[0] * (last stored power),
   incrementing the running matrix-product counter. */
void funcion_matricial::power( ) {
MatrixMain<double> *next = &((*pA[0]) * (*pA.back()));
pA.push_back(next);
++nProd;
}
static bool a = true;
/* For cos with Taylor/Hermite coefficients the powers are built in
   B = A^2: after the first product, the base matrix pA[0] is replaced by
   A^2 so subsequent calls generate B^2, B^3, ...  Controlled by the
   file-scope flag `a`, which is set only once per process.
   NOTE(review): the original pA[0] (wrapping the caller's matrix) is
   dropped here without delete — possible leak; and `a` is never reset on
   re-initialisation.  TODO confirm against MatrixMain ownership rules. */
void cos_matricial::power( ) {
funcion_matricial::power();
if( a && metodo_f != bernoulli ) {
pA[0] = pA[1];
pA.pop_back();
a = false;
}
}
/* Same base-switch as cos_matricial::power: for Taylor/Hermite, replace
   pA[0] with A^2 after the first product so powers of B = A^2 are built.
   NOTE(review): shares the file-scope flag `a` with cos_matricial, and the
   original pA[0] pointer is dropped without delete — possible leak. */
void cosh_matricial::power( ) {
funcion_matricial::power();
if( a && metodo_f != bernoulli ) {
pA[0] = pA[1];
pA.pop_back();
a = false;
}
}
/* exp uses plain powers of A; no base switch is needed. */
void exp_matricial::power( ) {
funcion_matricial::power();
}
/* Copy stored power pA[i] into the caller-provided host buffer A
   (n*n doubles).  Out-of-range indices are ignored silently. */
void funcion_matricial::get( int i, double *A ) {
if( i < 0 || i >= (int)pA.size() ) {
return;
}
pA[i]->getHostMatrixInThisPointer(A);
}
/* 1-norm of stored power pA[i].  Prints a diagnostic and returns 0.0 for
 * an out-of-range index.
 * BUGFIX: the upper bound used to be `i > pA.size()`, which let
 * i == pA.size() through and indexed one element past the end of the
 * vector. */
double funcion_matricial::norm1( const int i ) {
if( i < 0 || i >= (int)pA.size() ) {
printf("There's no MatrixMain %d\n",i);
return 0.0;
}
return pA[i]->norm1();
}
/* Release the last n stored powers (device and host storage).  Silently
   ignored unless 0 < n < pA.size(), i.e. at least one power is always
   kept.  NOTE(review): the parameter shadows the member n (matrix size). */
void funcion_matricial::free( int n ) {
if( !( n>0 && n<pA.size() ) ) return;
for(auto it=pA.end()-n;it<pA.end();it++)
{
(*it)->setDeleteMatrixHostAtDestroyment(true);
delete *it;
}
pA.erase(pA.end()-n, pA.end());
}
/* Divide each stored power pA[idx] by e^(s*(idx+1)), i.e. the effect of
   scaling the base matrix by e^(-s) before the powers were formed.
   Idempotent: a second call is a no-op. */
void funcion_matricial::scale( const int s, const double e ) {
if( scaled ) {
return;
}
for( size_t idx = 0; idx < pA.size(); ++idx ) {
*pA[idx] /= pow( e, s * (int)(idx + 1) );
}
scaled = 1;
}
/* cos scaling base: Bernoulli coefficients work on A itself (base 2);
   the other methods work on B = A^2, so each squaring contributes 4. */
void cos_matricial::scale( const int s ) {
const double base = ( metodo_f == bernoulli ) ? 2.0 : 4.0;
funcion_matricial::scale( s, base );
}
/* cosh scaling base: Bernoulli coefficients work on A itself (base 2);
   the other methods work on B = A^2, so each squaring contributes 4. */
void cosh_matricial::scale( const int s ) {
const double base = ( metodo_f == bernoulli ) ? 2.0 : 4.0;
funcion_matricial::scale( s, base );
}
/* exp scaling: divide A by 2^s (classic scaling-and-squaring). */
void exp_matricial::scale( const int s ) {
funcion_matricial::scale( s, 2.0 );
}
/* Evaluate the approximating polynomial of degree m-1 with coefficients p
 * on the stored powers, leaving the result in R.  Returns the number of
 * matrix products performed, or 0 if already evaluated / the evaluation
 * method is unknown.
 * BUGFIX: the counter is now initialised — previously an unrecognised
 * e_method returned an indeterminate value.
 * NOTE(review): this local deliberately shadows the member nProd. */
int funcion_matricial::evaluate( const int m, const double *p ) {
if( evaluated ) return 0;
int nProd = 0;
switch( e_method ) {
case PatMey:
nProd = eval_PatMey( m, p );
break;
default: cout << "No valid evaluation method " << endl;
}
evaluated = 1;
return nProd;
}
/* Evaluate the degree-(m-1) polynomial on the stored powers with a
   Paterson–Stockmeyer-style scheme ("PatMey"): coefficients are consumed
   from the top down in k = degree/q chunks, with one matrix product
   (R *= pA[q-1]) between chunks.  Returns the number of matrix products.
   Preconditions (unchecked): pA holds q powers and degree is a multiple
   of q so that the chunk walk consumes exactly degree+1 coefficients. */
int funcion_matricial::eval_PatMey( const int m, const double *p ) {
int n = pA[0]->getRowsReal(); /* NOTE(review): unused local (shadows member n) — likely dead code */
int degree = m - 1;
int q = pA.size(); /* number of stored powers = chunk length */
int c = degree + 1; /* 1-based index of the next coefficient to consume */
int k = degree / q; /* number of chunks */
int nProd = 0;
R->setMatrixHostToFullValue( 0.0 ); /* R=zeros(n); */
for( int j = k; j > 0; j-- ) {
/* The top chunk uses q matrix terms; later chunks use q-1 because the
   q-th power is absorbed by the R *= pA[q-1] product between chunks. */
int inic;
if( j == k ) {
inic = q;
} else {
inic = q-1;
}
for( int i = inic; i > 0; i-- ) {
// axpy( p[c-1], pA[i-1], *R ); /* R += p[c] * pA[i]; */
R->axpy(p[c-1],*pA[i-1]);
c = c - 1;
}
/* R = R + p[c] * I; */
*R += p[c-1];
c = c - 1;
if( j != 1 ) {
*R *= *pA[q-1]; /* R = R * pA[q]; */
nProd = nProd + 1;
}
}
return nProd;
}
/* Undo the cos scaling by applying the double-angle identity
   F := 2*F*F - I a total of s times.  The factor 2 is injected through
   setAlphaGemm(2), so the product expression below already computes
   2*F*F.  Idempotent via the unscaled flag. */
void cos_matricial::unscale( const int s ) {
if( unscaled ) return;
R->setAlphaGemm(2);
for( int i=0; i<s; i++ ) {
/* F:=2*F*F-I; */
// *R = 2.0*(*R)*(*R)-1.0;
*R = (*R)*(*R)-1.0;
}
R->setAlphaGemm(1);
unscaled = 1;
}
/* Undo the cosh scaling with the same double-angle identity as cos:
   F := 2*F*F - I applied s times, factor 2 injected via setAlphaGemm(2).
   Idempotent via the unscaled flag. */
void cosh_matricial::unscale( const int s ) {
if( unscaled ) return;
R->setAlphaGemm(2);
for( int i=0; i<s; i++ ) {
/* F:=2*F*F-I; */
// *R = 2.0*(*R)*(*R)-1.0;
*R = (*R)*(*R)-1.0;
}
R->setAlphaGemm(1);
unscaled = 1;
}
/* Undo the exp scaling by repeated squaring: R <- R^(2^s).
   Idempotent via the unscaled flag. */
void exp_matricial::unscale( const int s ) {
if( unscaled ) {
return;
}
for( int step = 0; step < s; ++step ) {
*R = (*R) * (*R); /* each squaring doubles the argument of exp */
}
unscaled = 1;
}
/* Allocate the n x n MATLAB output matrix and copy the result R into it. */
void funcion_matricial::finalize( mxArray **plhs ) {
*plhs = mxCreateDoubleMatrix((mwSize)n, (mwSize)n, mxREAL);
R->getHostMatrixInThisPointer( mxGetPr(*plhs) );
}
/* Delete every stored power and the result matrix, then mark the module
   flag so the destructor does not free them again.
   pA[0] normally wraps the caller-owned MATLAB input, so its host buffer
   is deliberately not freed; every later power owns its host memory.
   NOTE(review): after cos/cosh replace pA[0] with a library-allocated A^2
   (see cos_matricial::power), that host buffer is skipped here too —
   possible leak, confirm against MatrixMain ownership. */
void funcion_matricial::destroyAllMatrices()
{
int i;
for(i =0;i<pA.size();i++)
{
if(i!=0)
{
pA[i]->setDeleteMatrixHostAtDestroyment(true);
}
delete pA[i];
}
pA.clear();
R->setDeleteMatrixHostAtDestroyment(true);
delete R;
areMatricesDestroyed=true;
}
/* Tear down GPU state.  The matrices may already have been released by an
   explicit destroyAllMatrices() (e.g. on re-initialisation); the module
   flag guards against a double free. */
funcion_matricial::~funcion_matricial() {
if(!areMatricesDestroyed)
{
destroyAllMatrices();
}
areMatricesDestroyed=true;
delete ncclMultEnv;
}
funcion_matricial *F;
/* Interface routines */
/* Sanity-check the CUDA environment before first use: at least one GPU
   must be present.  Aborts the MEX call via mexErrMsgIdAndTxt otherwise. */
void initialize() {
/* Check number of GPUs */
int deviceCount;
CUDACHECK(hipGetDeviceCount(&deviceCount));
if( deviceCount<1 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu","Not enough GPUs available.");
}
/*
if( deviceCount>2 ) {
mexPrintf("MATLAB:Warning: Not yet implemented for more than 1 GPU.\n");
}
*/
}
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) {
/* Check for proper number of arguments. */
if(nlhs>1) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:maxlhs","Too many output arguments.");
}
char comando[80];
mxGetString( prhs[0], comando, 80 );
if( strcmp( comando, "init" ) && !initiated && !isAlreadyInitialized ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidCommand","Not yet initiated.");
}
if( !strcmp( comando, "init" ) ) {
if( initiated ) return;
if( nrhs!=4 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","Arguments: cos|exp|cosh, taylor|bernoulli|hermite, matrix ");
}
char funcion[80];
mxGetString( prhs[1], funcion, 80 );
if( !strcmp( funcion, "cos" ) && !strcmp( funcion, "exp" ) && !strcmp( funcion, "cosh" ) ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Function should be exp or cosh.");
}
char pol_method_name[80];
mxGetString( prhs[2], pol_method_name, 80 );
type_method_f metodo_f = no_method;
if( !strcmp( pol_method_name, "taylor" ) ) metodo_f = taylor;
if( !strcmp( pol_method_name, "bernoulli" ) ) metodo_f = bernoulli;
if( !strcmp( pol_method_name, "hermite" ) ) metodo_f = hermite;
if( metodo_f == no_method ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Evaluation method should be taylor, bernoulli or hermite.");
}
eval_method e_method = PatMey;
if( !mxIsDouble(prhs[3]) || mxIsComplex(prhs[3]) ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Matrix must be real.");
}
if( mxGetM(prhs[3]) != mxGetN(prhs[3]) ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Matrix must be square.");
}
initialize();
if(!isAlreadyInitialized)
{
if( !strcmp( funcion, "cos" ) ) {
F = new cos_matricial( mxGetM(prhs[3]), metodo_f, e_method, mxGetPr(prhs[3]) );
}
if( !strcmp( funcion, "cosh" ) ) {
F = new cosh_matricial( mxGetM(prhs[3]), metodo_f, e_method, mxGetPr(prhs[3]) );
}
if( !strcmp( funcion, "exp" ) ) {
F = new exp_matricial( mxGetM(prhs[3]), metodo_f, e_method, mxGetPr(prhs[3]) );
}
}else
{
if(!areMatricesDestroyed)
{
F->destroyAllMatrices();
}
areMatricesDestroyed=false;
if( !strcmp( funcion, "cos" ) ) {
F = reinterpret_cast<cos_matricial*>(F);
}
if( !strcmp( funcion, "cosh" ) ) {
F = reinterpret_cast<cosh_matricial*>(F);
}
if( !strcmp( funcion, "exp" ) ) {
F = reinterpret_cast<exp_matricial*>(F);
}
F->createInitialMatrices( mxGetM(prhs[3]),mxGetPr(prhs[3]),metodo_f,e_method);
}
initiated = 1;
} else if( !strcmp( comando, "power" ) ) {
F->power( );
if( nlhs==1 ) {
F->get( F->getQ(), mxGetPr( plhs[0] = mxCreateDoubleMatrix((mwSize)F->getN(), (mwSize)F->getN(), mxREAL) ) );
}
} else if( !strcmp( comando, "norm1" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","A matrix index (integer) as second argument is required.");
}
*mxGetPr( plhs[0] = mxCreateDoubleMatrix((mwSize)1, (mwSize)1, mxREAL) ) = F->norm1((int) *mxGetPr(prhs[1])-1);
} else if( !strcmp( comando, "scale" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An integer with the scaling as second argument is required.");
}
F->scale((int) *mxGetPr(prhs[1]));
} else if( !strcmp( comando, "evaluate" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An array of coefficients as second argument is required.");
}
*mxGetPr( plhs[0] = mxCreateDoubleMatrix((mwSize)1, (mwSize)1, mxREAL) ) = F->eval_PatMey( mxGetN(prhs[1]), mxGetPr(prhs[1]) );
} else if( !strcmp( comando, "unscale" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An integer with the scaling as second argument is required.");
}
F->unscale((int) *mxGetPr(prhs[1]));
} else if( !strcmp( comando, "free" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An integer with the power as second argument is required.");
}
F->free((int) *mxGetPr(prhs[1]));
} else if( !strcmp( comando, "finalize" ) ) {
F->finalize( &plhs[0] );
F->destroyAllMatrices();
initiated = 0;
} else if( !strcmp( comando, "destroy" ) ) {
if( F!=NULL ) {
delete F;
F = NULL;
}
initiated=0;
isAlreadyInitialized=false;
}
else {
printf("Command unknown\n");
}
}
| 781b0aefa5498aaaba23d53e251e22031a7770e7.cu |
#include <mex.h>
#include <string.h>
#include "cuda.h"
#include "cublas_v2.h"
#include "../../include/cpp/OperationType.h"
#include "../../include/cuda/ErrorCheckingCuda.cuh"
#include "../../include/cuda/MatrixMain.cuh"
#include "../../include/cuda/NcclMultiplicationEnvironment.cuh"
#include <vector>
using namespace std;
static int initiated = 0;
static bool isAlreadyInitialized=false;
static bool areMatricesDestroyed=false;
enum type_method_f{ no_method, taylor, bernoulli, hermite };
enum eval_method{ PatMey };
class funcion_matricial {
protected:
int n; /* Matrix size */
NcclMultiplicationEnvironment<double> *ncclMultEnv ;
vector< MatrixMain<double>* > pA;
MatrixMain<double>* R; /* Matrix result */
int scaled;
int evaluated;
int unscaled;
int nProd;
eval_method e_method;
type_method_f metodo_f;
public:
funcion_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
~funcion_matricial( );
void createInitialMatrices(int n, const double * A,type_method_f metodo_f, eval_method e_method);
void destroyAllMatrices();
int getN( ) const { return n; }
int getQ( ) const { return pA.size()-1; } /* Returns the order of the PatMey polynomial */
void get( const int i, double *A );
virtual void power( );
//void power( const int i );
double norm1( const int i );
void scale( const int s, const double e );
virtual void scale( const int s ) = 0;
int evaluate( const int m, const double *p );
int eval_PatMey( const int m, const double *p );
virtual void unscale( const int s ) = 0;
void free( const int n );
void finalize( mxArray **plhs );
};
class cos_matricial : public funcion_matricial {
public:
cos_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
cos_matricial(int m);
void power( );
void scale( const int s );
void unscale( const int s );
};
class cosh_matricial : public funcion_matricial {
public:
cosh_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
void power( );
void scale( const int s );
void unscale( const int s );
};
class exp_matricial : public funcion_matricial {
public:
exp_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A );
void power( );
void scale( const int s );
void unscale( const int s );
};
funcion_matricial::funcion_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : n(n), scaled(0), evaluated(0), unscaled(0), metodo_f(metodo_f), e_method(e_method) {
int gpuSizeSystem;
CUDACHECK(cudaGetDeviceCount(&gpuSizeSystem));
ncclMultEnv = new NcclMultiplicationEnvironment<double>(gpuSizeSystem, 0, MultDouble, false);
isAlreadyInitialized=true;
createInitialMatrices(n,A,metodo_f,e_method);
}
cos_matricial::cos_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : funcion_matricial(n,metodo_f,e_method,A) {
}
cosh_matricial::cosh_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : funcion_matricial(n,metodo_f,e_method,A) {
}
exp_matricial::exp_matricial( int n, type_method_f metodo_f, eval_method e_method, const double *A ) : funcion_matricial(n,metodo_f,e_method,A) {
}
void funcion_matricial::createInitialMatrices(int n, const double * A,type_method_f metodo_f, eval_method e_method)
{
this->n=n;
this->scaled=0;
this->unscaled=0;
this->evaluated=0;
this->metodo_f=metodo_f;
this->e_method=e_method;
R = new MatrixMain<double>(ncclMultEnv, n, n);
pA.push_back( new MatrixMain<double>(ncclMultEnv, n, n, (double*)A) );
nProd = 0;
}
void funcion_matricial::power( ) {
MatrixMain<double> *auxMult;
auxMult=&((*pA[0]) * (*pA[pA.size()-1]));
pA.push_back(auxMult );
nProd++;
}
static bool a = true;
void cos_matricial::power( ) {
funcion_matricial::power();
if( a && metodo_f != bernoulli ) {
pA[0] = pA[1];
pA.pop_back();
a = false;
}
}
void cosh_matricial::power( ) {
funcion_matricial::power();
if( a && metodo_f != bernoulli ) {
pA[0] = pA[1];
pA.pop_back();
a = false;
}
}
void exp_matricial::power( ) {
funcion_matricial::power();
}
void funcion_matricial::get( int i, double *A ) {
if( !( i>=0 && i<pA.size() ) ) return;
pA[i]->getHostMatrixInThisPointer(A);
}
double funcion_matricial::norm1( const int i ) {
if( i < 0 || i > pA.size() ) {
printf("There's no MatrixMain %d\n",i);
return 0.0;
}
return pA[i]->norm1();
}
void funcion_matricial::free( int n ) {
if( !( n>0 && n<pA.size() ) ) return;
for(auto it=pA.end()-n;it<pA.end();it++)
{
(*it)->setDeleteMatrixHostAtDestroyment(true);
delete *it;
}
pA.erase(pA.end()-n, pA.end());
}
void funcion_matricial::scale( const int s, const double e ) {
if( scaled ) return;
int i = 1;
for( auto it : pA )
{
(*it)/=pow( e, s*(i++));
}
scaled = 1;
}
void cos_matricial::scale( const int s ) {
double e;
if( metodo_f == bernoulli ) e = 2.0;
else e = 4.0;
funcion_matricial::scale( s, e );
}
void cosh_matricial::scale( const int s ) {
double e;
if( metodo_f == bernoulli ) e = 2.0;
else e = 4.0;
funcion_matricial::scale( s, e );
}
void exp_matricial::scale( const int s ) {
funcion_matricial::scale( s, 2.0 );
}
int funcion_matricial::evaluate( const int m, const double *p ) {
if( evaluated ) return 0;
int nProd;
switch( e_method ) {
case PatMey:
nProd = eval_PatMey( m, p );
break;
default: cout << "No valid evaluation method " << endl;
}
evaluated = 1;
return nProd;
}
int funcion_matricial::eval_PatMey( const int m, const double *p ) {
int n = pA[0]->getRowsReal();
int degree = m - 1;
int q = pA.size();
int c = degree + 1;
int k = degree / q;
int nProd = 0;
R->setMatrixHostToFullValue( 0.0 ); /* R=zeros(n); */
for( int j = k; j > 0; j-- ) {
int inic;
if( j == k ) {
inic = q;
} else {
inic = q-1;
}
for( int i = inic; i > 0; i-- ) {
// axpy( p[c-1], pA[i-1], *R ); /* R += p[c] * pA[i]; */
R->axpy(p[c-1],*pA[i-1]);
c = c - 1;
}
/* R = R + p[c] * I; */
*R += p[c-1];
c = c - 1;
if( j != 1 ) {
*R *= *pA[q-1]; /* R = R * pA[q]; */
nProd = nProd + 1;
}
}
return nProd;
}
void cos_matricial::unscale( const int s ) {
if( unscaled ) return;
R->setAlphaGemm(2);
for( int i=0; i<s; i++ ) {
/* F:=2*F*F-I; */
// *R = 2.0*(*R)*(*R)-1.0;
*R = (*R)*(*R)-1.0;
}
R->setAlphaGemm(1);
unscaled = 1;
}
void cosh_matricial::unscale( const int s ) {
if( unscaled ) return;
R->setAlphaGemm(2);
for( int i=0; i<s; i++ ) {
/* F:=2*F*F-I; */
// *R = 2.0*(*R)*(*R)-1.0;
*R = (*R)*(*R)-1.0;
}
R->setAlphaGemm(1);
unscaled = 1;
}
void exp_matricial::unscale( const int s ) {
if( unscaled ) return;
for( int i=0; i<s; i++ ) {
/* F:=F*F; */
*R = (*R)*(*R);
}
unscaled = 1;
}
void funcion_matricial::finalize( mxArray **plhs ) {
*plhs = mxCreateDoubleMatrix((mwSize)n, (mwSize)n, mxREAL);
R->getHostMatrixInThisPointer( mxGetPr(*plhs) );
}
void funcion_matricial::destroyAllMatrices()
{
int i;
for(i =0;i<pA.size();i++)
{
if(i!=0)
{
pA[i]->setDeleteMatrixHostAtDestroyment(true);
}
delete pA[i];
}
pA.clear();
R->setDeleteMatrixHostAtDestroyment(true);
delete R;
areMatricesDestroyed=true;
}
funcion_matricial::~funcion_matricial() {
if(!areMatricesDestroyed)
{
destroyAllMatrices();
}
areMatricesDestroyed=true;
delete ncclMultEnv;
}
funcion_matricial *F;
/* Interface routines */
void initialize() {
/* Check number of GPUs */
int deviceCount;
CUDACHECK(cudaGetDeviceCount(&deviceCount));
if( deviceCount<1 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu","Not enough GPUs available.");
}
/*
if( deviceCount>2 ) {
mexPrintf("MATLAB:Warning: Not yet implemented for more than 1 GPU.\n");
}
*/
}
void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) {
/* Check for proper number of arguments. */
if(nlhs>1) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:maxlhs","Too many output arguments.");
}
char comando[80];
mxGetString( prhs[0], comando, 80 );
if( strcmp( comando, "init" ) && !initiated && !isAlreadyInitialized ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidCommand","Not yet initiated.");
}
if( !strcmp( comando, "init" ) ) {
if( initiated ) return;
if( nrhs!=4 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","Arguments: cos|exp|cosh, taylor|bernoulli|hermite, matrix ");
}
char funcion[80];
mxGetString( prhs[1], funcion, 80 );
if( !strcmp( funcion, "cos" ) && !strcmp( funcion, "exp" ) && !strcmp( funcion, "cosh" ) ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Function should be exp or cosh.");
}
char pol_method_name[80];
mxGetString( prhs[2], pol_method_name, 80 );
type_method_f metodo_f = no_method;
if( !strcmp( pol_method_name, "taylor" ) ) metodo_f = taylor;
if( !strcmp( pol_method_name, "bernoulli" ) ) metodo_f = bernoulli;
if( !strcmp( pol_method_name, "hermite" ) ) metodo_f = hermite;
if( metodo_f == no_method ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Evaluation method should be taylor, bernoulli or hermite.");
}
eval_method e_method = PatMey;
if( !mxIsDouble(prhs[3]) || mxIsComplex(prhs[3]) ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Matrix must be real.");
}
if( mxGetM(prhs[3]) != mxGetN(prhs[3]) ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidInput","Matrix must be square.");
}
initialize();
if(!isAlreadyInitialized)
{
if( !strcmp( funcion, "cos" ) ) {
F = new cos_matricial( mxGetM(prhs[3]), metodo_f, e_method, mxGetPr(prhs[3]) );
}
if( !strcmp( funcion, "cosh" ) ) {
F = new cosh_matricial( mxGetM(prhs[3]), metodo_f, e_method, mxGetPr(prhs[3]) );
}
if( !strcmp( funcion, "exp" ) ) {
F = new exp_matricial( mxGetM(prhs[3]), metodo_f, e_method, mxGetPr(prhs[3]) );
}
}else
{
if(!areMatricesDestroyed)
{
F->destroyAllMatrices();
}
areMatricesDestroyed=false;
if( !strcmp( funcion, "cos" ) ) {
F = reinterpret_cast<cos_matricial*>(F);
}
if( !strcmp( funcion, "cosh" ) ) {
F = reinterpret_cast<cosh_matricial*>(F);
}
if( !strcmp( funcion, "exp" ) ) {
F = reinterpret_cast<exp_matricial*>(F);
}
F->createInitialMatrices( mxGetM(prhs[3]),mxGetPr(prhs[3]),metodo_f,e_method);
}
initiated = 1;
} else if( !strcmp( comando, "power" ) ) {
F->power( );
if( nlhs==1 ) {
F->get( F->getQ(), mxGetPr( plhs[0] = mxCreateDoubleMatrix((mwSize)F->getN(), (mwSize)F->getN(), mxREAL) ) );
}
} else if( !strcmp( comando, "norm1" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","A matrix index (integer) as second argument is required.");
}
*mxGetPr( plhs[0] = mxCreateDoubleMatrix((mwSize)1, (mwSize)1, mxREAL) ) = F->norm1((int) *mxGetPr(prhs[1])-1);
} else if( !strcmp( comando, "scale" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An integer with the scaling as second argument is required.");
}
F->scale((int) *mxGetPr(prhs[1]));
} else if( !strcmp( comando, "evaluate" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An array of coefficients as second argument is required.");
}
*mxGetPr( plhs[0] = mxCreateDoubleMatrix((mwSize)1, (mwSize)1, mxREAL) ) = F->eval_PatMey( mxGetN(prhs[1]), mxGetPr(prhs[1]) );
} else if( !strcmp( comando, "unscale" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An integer with the scaling as second argument is required.");
}
F->unscale((int) *mxGetPr(prhs[1]));
} else if( !strcmp( comando, "free" ) ) {
if( nrhs!=2 ) {
mexErrMsgIdAndTxt("MATLAB:call_gpu:invalidNumInputs","An integer with the power as second argument is required.");
}
F->free((int) *mxGetPr(prhs[1]));
} else if( !strcmp( comando, "finalize" ) ) {
F->finalize( &plhs[0] );
F->destroyAllMatrices();
initiated = 0;
} else if( !strcmp( comando, "destroy" ) ) {
if( F!=NULL ) {
delete F;
F = NULL;
}
initiated=0;
isAlreadyInitialized=false;
}
else {
printf("Command unknown\n");
}
}
|
ea773b14efbbc395a05ff646354107f129d92692.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/** Kernels for use in computing squared euclidean distance matrix
* @file euclid_dist.cu
* @author Alex Kyllo
* @date 2021-02
*/
#include "euclid_dist.cuh"
/** CUDA kernel to compute the squared euclidean norm of matrix X
* @param m Height (rows) of matrix X
* @param k Width (columns) of matrix X
* @param XX a length m vector for the result
*/
__global__ void sq_euclid_norm(const uint m, const uint k, const float *X,
float *XX)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m)
{
for (uint j = 0; j < k; j++)
{
float x = X[i * k + j];
XX[i] += x * x;
}
}
}
/** CUDA kernel to compute the euclidean distance between two Euclidean norm
* vectors XX and YY, i.e. X*X + Y*Y - 2X*Y
* @param m The length of vectors in X
* @param n The length of vectors in Y
* @param XX Squared Euclidean norm of X
* @param YY Squared Euclidean norm of Y
* @param XY 2 * X * Y^T (matrix multiplication result)
* @param D The result euclidean distance matrix with dimensions (m x n)
*/
__global__ void euclid_dist(const uint m, const uint n, const float *XX,
const float *YY, const float *XY, float *D)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m)
{
for (uint j = 0; j < n; j++)
{
D[i * n + j] = XX[i] + YY[j] - (XY[i * n + j]);
}
}
}
| ea773b14efbbc395a05ff646354107f129d92692.cu | /** Kernels for use in computing squared euclidean distance matrix
* @file euclid_dist.cu
* @author Alex Kyllo
* @date 2021-02
*/
#include "euclid_dist.cuh"
/** CUDA kernel to compute the squared euclidean norm of matrix X
* @param m Height (rows) of matrix X
* @param k Width (columns) of matrix X
* @param XX a length m vector for the result
*/
__global__ void sq_euclid_norm(const uint m, const uint k, const float *X,
float *XX)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m)
{
for (uint j = 0; j < k; j++)
{
float x = X[i * k + j];
XX[i] += x * x;
}
}
}
/** CUDA kernel to compute the euclidean distance between two Euclidean norm
* vectors XX and YY, i.e. X*X + Y*Y - 2X*Y
* @param m The length of vectors in X
* @param n The length of vectors in Y
* @param XX Squared Euclidean norm of X
* @param YY Squared Euclidean norm of Y
* @param XY 2 * X * Y^T (matrix multiplication result)
* @param D The result euclidean distance matrix with dimensions (m x n)
*/
__global__ void euclid_dist(const uint m, const uint n, const float *XX,
const float *YY, const float *XY, float *D)
{
uint i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < m)
{
for (uint j = 0; j < n; j++)
{
D[i * n + j] = XX[i] + YY[j] - (XY[i * n + j]);
}
}
}
|
543188079c5ee580055818923f8b637261ddc63d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlarf.cu, normal z -> s, Thu Oct 8 23:05:33 2020
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_slarf_kernel(
int m, const float *dv, const float *dtau,
float *dc, int lddc )
{
if ( !MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform w := v**H * C */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_MUL( MAGMA_S_CONJ( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_S_CONJ(*dtau) * sum[0];
for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if (tx == 0) dc[0] += tmp;
}
}
/******************************************************************************/
__global__
void magma_slarf_smkernel(
int m, int n, float *dv, float *dtau,
float *dc, int lddc )
{
if ( ! MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
float lsum;
/* w := v**H * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
if (j == 0)
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
else
lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CONJ(*dtau) * sum[0][col];
for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
if (j == 0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_slarf_sm(
magma_int_t m, magma_int_t n,
float *dv, float *dtau,
float *dc, magma_int_t lddc,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magma_slarf_smkernel)
, dim3(blocks), dim3(threads), 0, queue->cuda_stream() ,
m, n, dv, dtau, dc, lddc );
}
/***************************************************************************//**
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
*******************************************************************************/
extern "C" magma_int_t
magma_slarf_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dv,
magmaFloat_const_ptr dtau,
magmaFloat_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
hipLaunchKernelGGL(( magma_slarf_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_slarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
| 543188079c5ee580055818923f8b637261ddc63d.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from magmablas/zlarf.cu, normal z -> s, Thu Oct 8 23:05:33 2020
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
/******************************************************************************/
__global__
void magma_slarf_kernel(
int m, const float *dv, const float *dtau,
float *dc, int lddc )
{
if ( !MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform w := v**H * C */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_MUL( MAGMA_S_CONJ( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_S_CONJ(*dtau) * sum[0];
for( int j = m-tx-1; j > 0; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if (tx == 0) dc[0] += tmp;
}
}
/******************************************************************************/
__global__
void magma_slarf_smkernel(
int m, int n, float *dv, float *dtau,
float *dc, int lddc )
{
if ( ! MAGMA_S_EQUAL(*dtau, MAGMA_S_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ float sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
float lsum;
/* w := v**H * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ) {
if (j == 0)
lsum += MAGMA_S_MUL( MAGMA_S_ONE, dc[j] );
else
lsum += MAGMA_S_MUL( MAGMA_S_CONJ( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
float z__1 = - MAGMA_S_CONJ(*dtau) * sum[0][col];
for( int j = m-i-1; j >= 0; j -= BLOCK_SIZEx ) {
if (j == 0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
/******************************************************************************/
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_slarf_sm(
magma_int_t m, magma_int_t n,
float *dv, float *dtau,
float *dc, magma_int_t lddc,
magma_queue_t queue )
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magma_slarf_smkernel
<<< blocks, threads, 0, queue->cuda_stream() >>>
( m, n, dv, dtau, dc, lddc );
}
/***************************************************************************//**
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead tau.
*******************************************************************************/
extern "C" magma_int_t
magma_slarf_gpu(
magma_int_t m, magma_int_t n,
magmaFloat_const_ptr dv,
magmaFloat_const_ptr dtau,
magmaFloat_ptr dC, magma_int_t lddc,
magma_queue_t queue )
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n > 0 ) {
magma_slarf_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, dv, dtau, dC, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_slarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
|
5f132c0f3d40a54e5eddc779ec533d3349d34e23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../spec/book.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define imin(a,b) (a<b?a:b)
const int N = 33 * 1024 *1024;
const int threadsPerBlock = 256;
const int blocksPerGrid =
imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );
__global__ void dot( int *a, int *b, int *c ) {
__shared__ int cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i){
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
}
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
int main( void ) {
int *a, *b, c, *partial_c;
int *dev_a, *dev_b, *dev_partial_c;
struct timespec t_start, t_end;
int i;
// allocate memory on the cpu side
a = (int*)malloc( N*sizeof(int) );
b = (int*)malloc( N*sizeof(int) );
partial_c = (int*)malloc( blocksPerGrid*sizeof(int) );
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_a,
N*sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b,
N*sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_partial_c,
blocksPerGrid*sizeof(int) ) );
// fill in the host memory with data
for (i=0; i<N; i++) {
a[i] = rand()%256;
b[i] = rand()%256;
}
// Get start time event
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR( hipMemcpy( dev_a, a, N*sizeof(int),
hipMemcpyHostToDevice ) );
HANDLE_ERROR( hipMemcpy( dev_b, b, N*sizeof(int),
hipMemcpyHostToDevice ) );
hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b,
dev_partial_c );
//check cuda error
hipError_t status = hipGetLastError();
if ( hipSuccess != status ){
fprintf(stderr, "Error: %s\n", hipGetErrorString(status));
exit(1) ;
}
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR( hipMemcpy( partial_c, dev_partial_c,
blocksPerGrid*sizeof(int),
hipMemcpyDeviceToHost ) );
// Get stop time event
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Compute execution time
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU time: %13f msec\n", elapsedTime);
hipEventDestroy(start);
hipEventDestroy(stop);
// finish up on the CPU side
c = 0;
for (int i=0; i<blocksPerGrid; i++) {
c += partial_c[i];
}
//printf("GPU result is %d\n",c);
// start time
clock_gettime( CLOCK_REALTIME, &t_start);
/*CPU version*/
int dot=0;
for(i=0;i<N;i++){
dot+=a[i]*b[i];
}
// stop time
clock_gettime( CLOCK_REALTIME, &t_end);
// compute and print the elapsed time in millisec
elapsedTime = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
elapsedTime += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
printf("CPU time: %13lf ms\n", elapsedTime);
//printf("CPU result is %d\n",dot);
if(c==dot)
printf("test pass!\n");
else
printf("test fail!\n");
// free memory on the gpu side
HANDLE_ERROR( hipFree( dev_a ) );
HANDLE_ERROR( hipFree( dev_b ) );
HANDLE_ERROR( hipFree( dev_partial_c ) );
// free memory on the cpu side
free( a );
free( b );
free( partial_c );
}
| 5f132c0f3d40a54e5eddc779ec533d3349d34e23.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../spec/book.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define imin(a,b) (a<b?a:b)
const int N = 33 * 1024 *1024;
const int threadsPerBlock = 256;
const int blocksPerGrid =
imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );
__global__ void dot( int *a, int *b, int *c ) {
__shared__ int cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x/2;
while (i != 0) {
if (cacheIndex < i){
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
}
i /= 2;
}
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
// Host driver: computes the dot product of two N-element int vectors on the
// GPU (per-block partials summed on the CPU), times it with CUDA events, then
// recomputes on the CPU with clock_gettime and cross-checks the two results.
// NOTE(review): clock_gettime / struct timespec need <time.h>, which is not
// among the visible includes -- confirm it is provided elsewhere.
// NOTE(review): with N = 33M and products up to 255*255, the int accumulators
// exceed 32-bit range; CPU and GPU wrap identically in practice (addition is
// commutative mod 2^32) so the check still passes, but signed overflow is
// formally UB -- consider long long accumulators.
int main( void ) {
    int *a, *b, c, *partial_c;
    int *dev_a, *dev_b, *dev_partial_c;
    struct timespec t_start, t_end;
    int i;
    // allocate memory on the cpu side
    a = (int*)malloc( N*sizeof(int) );
    b = (int*)malloc( N*sizeof(int) );
    partial_c = (int*)malloc( blocksPerGrid*sizeof(int) );
    // allocate the memory on the GPU
    // (HANDLE_ERROR is defined outside this chunk -- presumably aborts on
    // failure; confirm)
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a,
                              N*sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b,
                              N*sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_partial_c,
                              blocksPerGrid*sizeof(int) ) );
    // fill in the host memory with data
    for (i=0; i<N; i++) {
        a[i] = rand()%256;
        b[i] = rand()%256;
    }
    // Get start time event -- the GPU timing below includes both H2D copies,
    // the kernel, and the D2H copy of the partial sums
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // copy the arrays 'a' and 'b' to the GPU
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N*sizeof(int),
                              cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N*sizeof(int),
                              cudaMemcpyHostToDevice ) );
    dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b,
                                            dev_partial_c );
    //check cuda error (launch-configuration errors surface here)
    cudaError_t status = cudaGetLastError();
    if ( cudaSuccess != status ){
        fprintf(stderr, "Error: %s\n", cudaGetErrorString(status));
        exit(1) ;
    }
    // copy the array 'c' back from the GPU to the CPU
    // (blocking cudaMemcpy also synchronizes with the kernel)
    HANDLE_ERROR( cudaMemcpy( partial_c, dev_partial_c,
                              blocksPerGrid*sizeof(int),
                              cudaMemcpyDeviceToHost ) );
    // Get stop time event
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Compute execution time
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // finish up on the CPU side: sum the per-block partial results
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }
    //printf("GPU result is %d\n",c);
    // start time
    clock_gettime( CLOCK_REALTIME, &t_start);
    /*CPU version*/
    // local `dot` shadows the kernel name inside main -- legal but confusing
    int dot=0;
    for(i=0;i<N;i++){
        dot+=a[i]*b[i];
    }
    // stop time
    clock_gettime( CLOCK_REALTIME, &t_end);
    // compute and print the elapsed time in millisec
    elapsedTime = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
    elapsedTime += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
    printf("CPU time: %13lf ms\n", elapsedTime);
    //printf("CPU result is %d\n",dot);
    if(c==dot)
        printf("test pass!\n");
    else
        printf("test fail!\n");
    // free memory on the gpu side
    HANDLE_ERROR( cudaFree( dev_a ) );
    HANDLE_ERROR( cudaFree( dev_b ) );
    HANDLE_ERROR( cudaFree( dev_partial_c ) );
    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}
|
f49b30a6562b6a4d6e20be6bc1b2c989aa11b7a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <sstream>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/dim.h"
// Device kernel: writes Dim(5, 6) into o[0] so the host-side test can copy it
// back and verify GPU-side construction of Dim.
__global__ void test(paddle::framework::Dim<2>* o) {
  o[0] = paddle::framework::make_dim(5, 6);
}
// Device kernel: exercises Dim's dynamic operator[] on the GPU; stores
// d[1] (== 6) into o[0] for host-side verification.
__global__ void dyn_idx_gpu(int64_t* o) {
  auto d = paddle::framework::make_dim(5, 6);
  o[0] = d[1];
}
// Exercises Dim construction (CPU and GPU), linearize/product, static and
// dynamic element access on both host and device, ex_prefix_mul, and
// reconstruction of a Dim from a flat index.
TEST(Dim, Equality) {
  // construct a Dim on the CPU
  auto a = paddle::framework::make_dim(3, 4);
  EXPECT_EQ(paddle::framework::get<0>(a), 3);
  EXPECT_EQ(paddle::framework::get<1>(a), 4);
  // construct a Dim on the GPU and read it back through thrust
  thrust::device_vector<paddle::framework::Dim<2>> t(2);
  hipLaunchKernelGGL(( test), dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(t.data()));
  a = t[0];
  EXPECT_EQ(paddle::framework::get<0>(a), 5);
  EXPECT_EQ(paddle::framework::get<1>(a), 6);
  // linearization
  auto b = paddle::framework::make_dim(7, 8);
  EXPECT_EQ(paddle::framework::linearize(a, b), 83);
  // product
  EXPECT_EQ(paddle::framework::product(a), 30);
  // mutate a Dim
  paddle::framework::get<1>(b) = 10;
  EXPECT_EQ(paddle::framework::get<0>(b), 7);
  EXPECT_EQ(paddle::framework::get<1>(b), 10);
  // dynamic access
  paddle::framework::get(b, 0) = 8;
  b[1] = 11;
  EXPECT_EQ(paddle::framework::get<0>(b), 8);
  EXPECT_EQ(paddle::framework::get<1>(b), 11);
  EXPECT_EQ(paddle::framework::get(b, 0), 8);
  EXPECT_EQ(b[1], 11);
  // dynamic access on GPU
  thrust::device_vector<int64_t> r(1);
  hipLaunchKernelGGL(( dyn_idx_gpu), dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(r.data()));
  int64_t res = r[0];
  EXPECT_EQ(res, 6);
  // ex_prefix_mul: exclusive prefix products of (3, 4, 5) -> (1, 3, 12)
  paddle::framework::Dim<3> c =
      paddle::framework::ex_prefix_mul(paddle::framework::Dim<3>(3, 4, 5));
  EXPECT_EQ(paddle::framework::get<0>(c), 1);
  EXPECT_EQ(paddle::framework::get<1>(c), 3);
  EXPECT_EQ(paddle::framework::get<2>(c), 12);
  // generate from an index: unflatten linear offsets 14 and 25 in shape (4,5,2)
  auto size = paddle::framework::make_dim(4, 5, 2);
  c = paddle::framework::Dim<3>(14, size);
  EXPECT_EQ(paddle::framework::get<0>(c), 2);
  EXPECT_EQ(paddle::framework::get<1>(c), 3);
  EXPECT_EQ(paddle::framework::get<2>(c), 0);
  c = paddle::framework::Dim<3>(25, size);
  EXPECT_EQ(paddle::framework::get<0>(c), 1);
  EXPECT_EQ(paddle::framework::get<1>(c), 1);
  EXPECT_EQ(paddle::framework::get<2>(c), 1);
}
// Tests containment and equality comparison of Dims.
TEST(Dim, Bool) {
  auto a = paddle::framework::make_dim(3, 4);
  auto b = paddle::framework::make_dim(5, 6);
  auto c = paddle::framework::make_dim(3, 4);
  // in_bounds check -- (3,4) fits inside (5,6), not vice versa
  EXPECT_TRUE(paddle::framework::contained(a, b));
  EXPECT_FALSE(paddle::framework::contained(b, a));
  // comparison: equality is element-wise
  EXPECT_TRUE(a == a);
  EXPECT_FALSE(a == b);
  EXPECT_TRUE(a == c);
}
// Tests operator<< formatting: multi-dimensional Dims print comma-separated,
// a 1-d Dim prints the bare value.
TEST(Dim, Print) {
  {
    std::stringstream ss;
    auto a = paddle::framework::make_dim(2, 3);
    ss << a;
    EXPECT_EQ(ss.str(), "2, 3");
  }
  {
    std::stringstream ss;
    ss << paddle::framework::make_dim(8);
    EXPECT_EQ(ss.str(), "8");
  }
}
| f49b30a6562b6a4d6e20be6bc1b2c989aa11b7a6.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <thrust/device_vector.h>
#include <sstream>
#include "gtest/gtest.h"
#include "paddle/fluid/framework/dim.h"
// Device kernel: stores the dimension (5, 6) at *o so the host test can copy
// it back and check GPU-side Dim construction.
__global__ void test(paddle::framework::Dim<2>* o) {
  *o = paddle::framework::make_dim(5, 6);
}
// Device kernel: builds a (5, 6) Dim and writes its dynamically-indexed
// second component (== 6) to *o for host-side verification.
__global__ void dyn_idx_gpu(int64_t* o) {
  auto dims = paddle::framework::make_dim(5, 6);
  *o = dims[1];
}
// Exercises Dim construction (CPU and GPU), linearize/product, static and
// dynamic element access on both host and device, ex_prefix_mul, and
// reconstruction of a Dim from a flat index.
TEST(Dim, Equality) {
  // construct a Dim on the CPU
  auto a = paddle::framework::make_dim(3, 4);
  EXPECT_EQ(paddle::framework::get<0>(a), 3);
  EXPECT_EQ(paddle::framework::get<1>(a), 4);
  // construct a Dim on the GPU and read it back through thrust
  thrust::device_vector<paddle::framework::Dim<2>> t(2);
  test<<<1, 1>>>(thrust::raw_pointer_cast(t.data()));
  a = t[0];
  EXPECT_EQ(paddle::framework::get<0>(a), 5);
  EXPECT_EQ(paddle::framework::get<1>(a), 6);
  // linearization
  auto b = paddle::framework::make_dim(7, 8);
  EXPECT_EQ(paddle::framework::linearize(a, b), 83);
  // product
  EXPECT_EQ(paddle::framework::product(a), 30);
  // mutate a Dim
  paddle::framework::get<1>(b) = 10;
  EXPECT_EQ(paddle::framework::get<0>(b), 7);
  EXPECT_EQ(paddle::framework::get<1>(b), 10);
  // dynamic access
  paddle::framework::get(b, 0) = 8;
  b[1] = 11;
  EXPECT_EQ(paddle::framework::get<0>(b), 8);
  EXPECT_EQ(paddle::framework::get<1>(b), 11);
  EXPECT_EQ(paddle::framework::get(b, 0), 8);
  EXPECT_EQ(b[1], 11);
  // dynamic access on GPU
  thrust::device_vector<int64_t> r(1);
  dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data()));
  int64_t res = r[0];
  EXPECT_EQ(res, 6);
  // ex_prefix_mul: exclusive prefix products of (3, 4, 5) -> (1, 3, 12)
  paddle::framework::Dim<3> c =
      paddle::framework::ex_prefix_mul(paddle::framework::Dim<3>(3, 4, 5));
  EXPECT_EQ(paddle::framework::get<0>(c), 1);
  EXPECT_EQ(paddle::framework::get<1>(c), 3);
  EXPECT_EQ(paddle::framework::get<2>(c), 12);
  // generate from an index: unflatten linear offsets 14 and 25 in shape (4,5,2)
  auto size = paddle::framework::make_dim(4, 5, 2);
  c = paddle::framework::Dim<3>(14, size);
  EXPECT_EQ(paddle::framework::get<0>(c), 2);
  EXPECT_EQ(paddle::framework::get<1>(c), 3);
  EXPECT_EQ(paddle::framework::get<2>(c), 0);
  c = paddle::framework::Dim<3>(25, size);
  EXPECT_EQ(paddle::framework::get<0>(c), 1);
  EXPECT_EQ(paddle::framework::get<1>(c), 1);
  EXPECT_EQ(paddle::framework::get<2>(c), 1);
}
// Tests containment and equality comparison of Dims.
TEST(Dim, Bool) {
  auto a = paddle::framework::make_dim(3, 4);
  auto b = paddle::framework::make_dim(5, 6);
  auto c = paddle::framework::make_dim(3, 4);
  // in_bounds check -- (3,4) fits inside (5,6), not vice versa
  EXPECT_TRUE(paddle::framework::contained(a, b));
  EXPECT_FALSE(paddle::framework::contained(b, a));
  // comparison: equality is element-wise
  EXPECT_TRUE(a == a);
  EXPECT_FALSE(a == b);
  EXPECT_TRUE(a == c);
}
// Tests operator<< formatting: multi-dimensional Dims print comma-separated,
// a 1-d Dim prints the bare value.
TEST(Dim, Print) {
  {
    std::stringstream ss;
    auto a = paddle::framework::make_dim(2, 3);
    ss << a;
    EXPECT_EQ(ss.str(), "2, 3");
  }
  {
    std::stringstream ss;
    ss << paddle::framework::make_dim(8);
    EXPECT_EQ(ss.str(), "8");
  }
}
|
54fa4f65d500961938af366698b8740af565882b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Scales interleaved value pairs by a per-pair factor: for every i < len,
// data[2*i] and data[2*i+1] are both multiplied by recip[i].
// The flat index i is assembled from a 2D grid of 1D blocks.
// NOTE(review): the pairing suggests `data` holds interleaved two-component
// values (e.g. complex re/im) -- confirm against callers.
extern "C"
__global__ void recipSummation(double* data, double* recip, int len)
{
    const int y = blockIdx.y * gridDim.x * blockDim.x;  // offset from grid row
    const int x = blockIdx.x * blockDim.x;              // offset from block
    const int i = threadIdx.x + x + y;                  // flat pair index
    if (i < len) {
        const int j = 2 * i;
        data[j] *= recip[i];
        data[j + 1] *= recip[i];
    }
} | 54fa4f65d500961938af366698b8740af565882b.cu | #include "includes.h"
// Scales interleaved value pairs by a per-pair factor: for every i < len,
// data[2*i] and data[2*i+1] are both multiplied by recip[i].
// The flat index i is assembled from a 2D grid of 1D blocks.
extern "C"
__global__ void recipSummation(double* data, double* recip, int len)
{
    const int y = blockIdx.y * gridDim.x * blockDim.x;  // offset from grid row
    const int x = blockIdx.x * blockDim.x;              // offset from block
    const int i = threadIdx.x + x + y;                  // flat pair index
    if (i < len) {
        const int j = 2 * i;
        data[j] *= recip[i];
        data[j + 1] *= recip[i];
    }
} |
6b0d01dca117c8db2b9b94ceb0a148db054a8b27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include "../Utils/utils.h"
#include "../Utils/matutils.h"
#include "omp.h"
using namespace std;
// Placeholder kernel for this template: computes local (i, j) and global
// (gi, gj) 2D thread coordinates but performs no work yet.
// NOTE(review): all four locals and the n/a/b/c arguments are currently
// unused -- fill in the actual Julia-set computation here.
__global__ void julia_kernel( int n, double *a, double *b, double *c ){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int gi = threadIdx.y + blockDim.y*blockIdx.y;
    int gj = threadIdx.x + blockDim.x*blockIdx.x;
}
// Host template driver: allocates host/device buffers, copies inputs to the
// device, launches julia_kernel once under CUDA-event timing, copies the
// result back, and releases all resources.
// BUG FIXES vs. the original:
//  - hipMalloc was called on &a (the HOST pointer), clobbering it and leaking
//    the host buffer while leaving a_dev uninitialized;
//  - b_dev and c_dev were never allocated but were passed to the kernel and
//    freed;
//  - host `c` was written by hipMemcpy and freed but never malloc'd;
//  - the elapsed time was read without waiting for the stop event, and the
//    events were never destroyed.
int main( int argc, char** argv ){
    int args_needed = 1;
    if (argc < args_needed + 1 ){
        printf(" Arg number error, needed: %d \n", args_needed);
        return 0;
    }
    // Timers
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // OMP
    int ncpu = 1;
    omp_set_num_threads(ncpu);
    printf(" CUDA - Template \n");
    //Init parameters
    int n = 10;
    // Host Data (NOTE: a and b are left uninitialized by this template)
    double *a = (double *) malloc( sizeof(double)*n );
    double *b = (double *) malloc( sizeof(double)*n );
    double *c = (double *) malloc( sizeof(double)*n );
    // Device Data: allocate all three device buffers
    double *a_dev;
    double *b_dev;
    double *c_dev;
    HANDLE_ERROR( hipMalloc((void **)&a_dev, sizeof(double)*n) );
    HANDLE_ERROR( hipMalloc((void **)&b_dev, sizeof(double)*n) );
    HANDLE_ERROR( hipMalloc((void **)&c_dev, sizeof(double)*n) );
    // Copy Data to Device
    HANDLE_ERROR( hipMemcpy(a_dev, a, sizeof(double) *n , hipMemcpyHostToDevice ) );
    HANDLE_ERROR( hipMemcpy(b_dev, b, sizeof(double) *n , hipMemcpyHostToDevice ) );
    // Kernel Implementation
    float ms = 0.0;
    dim3 block(1,1,1);
    dim3 grid(1,1,1);
    hipEventRecord(start);
    hipLaunchKernelGGL(( julia_kernel), dim3(grid), dim3(block), 0, 0, n, a_dev, b_dev, c_dev);
    hipDeviceSynchronize();
    hipEventRecord(stop);
    // the stop event must complete before its timestamp can be read
    hipEventSynchronize(stop);
    // Retrieve Data from Device
    HANDLE_ERROR( hipMemcpy(c, c_dev, sizeof(double) * n, hipMemcpyDeviceToHost ) );
    ms = 0;
    hipEventElapsedTime(&ms, start, stop);
    printf("GPU SM WHILE Time: %f\n", ms );
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Free memory
    hipFree( a_dev );
    hipFree( b_dev );
    hipFree( c_dev );
    free(a);
    free(b);
    free(c);
    return 0;
}
| 6b0d01dca117c8db2b9b94ceb0a148db054a8b27.cu |
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include "../Utils/utils.h"
#include "../Utils/matutils.h"
#include "omp.h"
using namespace std;
// Placeholder kernel for this template: computes local (i, j) and global
// (gi, gj) 2D thread coordinates but performs no work yet.
// NOTE(review): all four locals and the n/a/b/c arguments are currently
// unused -- fill in the actual Julia-set computation here.
__global__ void julia_kernel( int n, double *a, double *b, double *c ){
    int i = threadIdx.y;
    int j = threadIdx.x;
    int gi = threadIdx.y + blockDim.y*blockIdx.y;
    int gj = threadIdx.x + blockDim.x*blockIdx.x;
}
// Host template driver: allocates host/device buffers, copies inputs to the
// device, launches julia_kernel once under CUDA-event timing, copies the
// result back, and releases all resources.
// BUG FIXES vs. the original:
//  - cudaMalloc was called on &a (the HOST pointer), clobbering it and
//    leaking the host buffer while leaving a_dev uninitialized;
//  - b_dev and c_dev were never allocated but were passed to the kernel and
//    freed;
//  - host `c` was written by cudaMemcpy and freed but never malloc'd;
//  - the elapsed time was read without waiting for the stop event, and the
//    events were never destroyed.
int main( int argc, char** argv ){
    int args_needed = 1;
    if (argc < args_needed + 1 ){
        printf(" Arg number error, needed: %d \n", args_needed);
        return 0;
    }
    // Timers
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // OMP
    int ncpu = 1;
    omp_set_num_threads(ncpu);
    printf(" CUDA - Template \n");
    //Init parameters
    int n = 10;
    // Host Data (NOTE: a and b are left uninitialized by this template)
    double *a = (double *) malloc( sizeof(double)*n );
    double *b = (double *) malloc( sizeof(double)*n );
    double *c = (double *) malloc( sizeof(double)*n );
    // Device Data: allocate all three device buffers
    double *a_dev;
    double *b_dev;
    double *c_dev;
    HANDLE_ERROR( cudaMalloc((void **)&a_dev, sizeof(double)*n) );
    HANDLE_ERROR( cudaMalloc((void **)&b_dev, sizeof(double)*n) );
    HANDLE_ERROR( cudaMalloc((void **)&c_dev, sizeof(double)*n) );
    // Copy Data to Device
    HANDLE_ERROR( cudaMemcpy(a_dev, a, sizeof(double) *n , cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy(b_dev, b, sizeof(double) *n , cudaMemcpyHostToDevice ) );
    // Kernel Implementation
    float ms = 0.0;
    dim3 block(1,1,1);
    dim3 grid(1,1,1);
    cudaEventRecord(start);
    julia_kernel<<<grid, block>>>(n, a_dev, b_dev, c_dev);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    // the stop event must complete before its timestamp can be read
    cudaEventSynchronize(stop);
    // Retrieve Data from Device
    HANDLE_ERROR( cudaMemcpy(c, c_dev, sizeof(double) * n, cudaMemcpyDeviceToHost ) );
    ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    printf("GPU SM WHILE Time: %f\n", ms );
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free memory
    cudaFree( a_dev );
    cudaFree( b_dev );
    cudaFree( c_dev );
    free(a);
    free(b);
    free(c);
    return 0;
}
|
34505f35a456ef9b5766f75abe6aa1081f135168.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/memory/allocation/allocator.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "hipcub/hipcub.hpp"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/gather.cu.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
namespace {
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const kThreadsPerBlock = sizeof(uint64_t) * 8;
static const double kBBoxClipDefault = ::log(1000.0 / 16.0);
// Functor for platform::ForRange: fills out_[i] = start_ + i * delta_, i.e.
// an arithmetic progression (used to build the initial index sequence for
// the radix sort below).
struct RangeInitFunctor {
  int start_;   // first value of the progression
  int delta_;   // step between consecutive values
  int *out_;    // device output buffer
  __device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; }
};
// Sorts the flattened `value` tensor in descending order on the GPU.
// Outputs: value_out (sorted keys) and index_out (original positions of the
// sorted keys). Uses the hipcub radix sort two-phase pattern: the first call
// only queries the temp-storage size, the second performs the sort.
// NOTE(review): no stream argument is passed to SortPairsDescending, so the
// sort runs on the default stream rather than ctx.stream() -- confirm this
// ordering is safe w.r.t. work queued on the context's stream.
template <typename T>
static void SortDescending(const platform::CUDADeviceContext &ctx,
                           const Tensor &value, Tensor *value_out,
                           Tensor *index_out) {
  int num = static_cast<int>(value.numel());
  Tensor index_in_t;
  int *idx_in = index_in_t.mutable_data<int>({num}, ctx.GetPlace());
  platform::ForRange<platform::CUDADeviceContext> for_range(ctx, num);
  // idx_in = [0, 1, ..., num-1]
  for_range(RangeInitFunctor{0, 1, idx_in});
  int *idx_out = index_out->mutable_data<int>({num}, ctx.GetPlace());
  const T *keys_in = value.data<T>();
  T *keys_out = value_out->mutable_data<T>({num}, ctx.GetPlace());
  // Determine temporary device storage requirements
  size_t temp_storage_bytes = 0;
  hipcub::DeviceRadixSort::SortPairsDescending<T, int>(
      nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num);
  // Allocate temporary storage
  auto place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
  auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
  // Run sorting operation
  hipcub::DeviceRadixSort::SortPairsDescending<T, int>(
      d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in,
      idx_out, num);
}
// Functor: decodes anchor boxes with regression deltas (optionally scaled by
// per-anchor variances) into proposal boxes and clips them to the image.
// One invocation handles one selected anchor: index[i] picks the anchor/delta
// row; the decoded, clipped box is written to proposals[i*4 .. i*4+3].
template <typename T>
struct BoxDecodeAndClipFunctor {
  const T *anchor;   // anchors, 4 values per box (xmin, ymin, xmax, ymax)
  const T *deltas;   // regression deltas, 4 per box
  const T *var;      // optional variances, 4 per box (may be null)
  const int *index;  // indices of the anchors to decode
  const T *im_info;  // im_info[0] = image height, im_info[1] = image width
  T *proposals;      // output boxes, 4 values per selected anchor
  BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var,
                          const int *index, const T *im_info, T *proposals)
      : anchor(anchor),
        deltas(deltas),
        var(var),
        index(index),
        im_info(im_info),
        proposals(proposals) {}
  // cap on dw/dh before exp() so width/height cannot blow up
  T bbox_clip_default{static_cast<T>(kBBoxClipDefault)};
  __device__ void operator()(size_t i) {
    int k = index[i] * 4;
    T axmin = anchor[k];
    T aymin = anchor[k + 1];
    T axmax = anchor[k + 2];
    T aymax = anchor[k + 3];
    // anchor center/size (the +1 follows the pixel-inclusive box convention)
    T w = axmax - axmin + 1.0;
    T h = aymax - aymin + 1.0;
    T cx = axmin + 0.5 * w;
    T cy = aymin + 0.5 * h;
    T dxmin = deltas[k];
    T dymin = deltas[k + 1];
    T dxmax = deltas[k + 2];
    T dymax = deltas[k + 3];
    // apply deltas to get the decoded center/size
    T d_cx, d_cy, d_w, d_h;
    if (var) {
      d_cx = cx + dxmin * w * var[k];
      d_cy = cy + dymin * h * var[k + 1];
      d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w;
      d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h;
    } else {
      d_cx = cx + dxmin * w;
      d_cy = cy + dymin * h;
      d_w = exp(Min(dxmax, bbox_clip_default)) * w;
      d_h = exp(Min(dymax, bbox_clip_default)) * h;
    }
    // back to corner representation
    T oxmin = d_cx - d_w * 0.5;
    T oymin = d_cy - d_h * 0.5;
    T oxmax = d_cx + d_w * 0.5 - 1.;
    T oymax = d_cy + d_h * 0.5 - 1.;
    // clip to image bounds [0, W-1] x [0, H-1]
    proposals[i * 4] = Max(Min(oxmin, im_info[1] - 1.), 0.);
    proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - 1.), 0.);
    proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - 1.), 0.);
    proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - 1.), 0.);
  }
  __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; }
  __device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; }
};
// Kernel: filters proposals, keeping boxes whose size (measured in the
// original, unscaled image) is at least min_size and whose center lies inside
// the image. Kept indices are compacted in order into `keep`, the count into
// keep_num[0]. Thread 0 serially compacts each BlockSize-sized chunk via the
// shared keep_index staging buffer.
// NOTE(review): this is designed for a single-block launch (the call site
// uses <<<1, 512>>>). Also, the __syncthreads() calls sit inside a
// CUDA_KERNEL_LOOP whose trip count differs across threads when num is not a
// multiple of the block size; divergent barriers are undefined behavior --
// confirm/fix upstream.
template <typename T, int BlockSize>
static __global__ void FilterBBoxes(const T *bboxes, const T *im_info,
                                    const T min_size, const int num,
                                    int *keep_num, int *keep) {
  T im_h = im_info[0];
  T im_w = im_info[1];
  T im_scale = im_info[2];
  int cnt = 0;  // running kept-box count; only thread 0's copy is meaningful
  __shared__ int keep_index[BlockSize];
  CUDA_KERNEL_LOOP(i, num) {
    keep_index[threadIdx.x] = -1;  // mark this thread's slot empty
    __syncthreads();
    int k = i * 4;
    T xmin = bboxes[k];
    T ymin = bboxes[k + 1];
    T xmax = bboxes[k + 2];
    T ymax = bboxes[k + 3];
    T w = xmax - xmin + 1.0;
    T h = ymax - ymin + 1.0;
    T cx = xmin + w / 2.;
    T cy = ymin + h / 2.;
    // box size in original-image coordinates (undo the resize scale)
    T w_s = (xmax - xmin) / im_scale + 1.;
    T h_s = (ymax - ymin) / im_scale + 1.;
    if (w_s >= min_size && h_s >= min_size && cx <= im_w && cy <= im_h) {
      keep_index[threadIdx.x] = i;
    }
    __syncthreads();
    // thread 0 compacts this chunk's survivors, preserving order
    if (threadIdx.x == 0) {
      int size = (num - i) < BlockSize ? num - i : BlockSize;
      for (int j = 0; j < size; ++j) {
        if (keep_index[j] > -1) {
          keep[cnt++] = keep_index[j];
        }
      }
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    keep_num[0] = cnt;
  }
}
// Intersection-over-union of two boxes given as (xmin, ymin, xmax, ymax),
// using the pixel-inclusive "+1" width/height convention.
static __device__ inline float IoU(const float *a, const float *b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  // clamp to 0 so disjoint boxes yield zero intersection
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float inter_s = width * height;
  float s_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float s_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter_s / (s_a + s_b - inter_s);
}
// NMS bitmask kernel. Boxes are tiled into kThreadsPerBlock (= 64)-sized
// groups. Block (col_start, row_start) caches the col group's boxes in
// shared memory and, for each box of the row group, records a 64-bit mask of
// col-group boxes whose IoU exceeds nms_overlap_thresh. dev_mask holds
// n_boxes * DIVUP(n_boxes, 64) words; the greedy suppression walk happens on
// the host (see NMS). When row == col, a box is only compared against boxes
// after it (start = threadIdx.x + 1), so the mask encodes "suppressed-by-
// earlier-box" relations for score-sorted input.
static __global__ void NMSKernel(const int n_boxes,
                                 const float nms_overlap_thresh,
                                 const float *dev_boxes, uint64_t *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;
  const int row_size =
      min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock);
  const int col_size =
      min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock);
  // stage the col group's boxes once per block
  __shared__ float block_boxes[kThreadsPerBlock * 4];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 4 + 0] =
        dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 0];
    block_boxes[threadIdx.x * 4 + 1] =
        dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 1];
    block_boxes[threadIdx.x * 4 + 2] =
        dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 2];
    block_boxes[threadIdx.x * 4 + 3] =
        dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 3];
  }
  __syncthreads();
  if (threadIdx.x < row_size) {
    const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 4;
    int i = 0;
    uint64_t t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;  // only compare against later boxes in-tile
    }
    for (i = start; i < col_size; i++) {
      if (IoU(cur_box, block_boxes + i * 4) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}
// Greedy non-maximum suppression. `proposals` rows must already be in
// descending-score order. Builds the pairwise-overlap bitmask on the GPU
// (NMSKernel), then walks boxes in order on the host, keeping a box unless a
// previously kept box suppressed it. Kept positions (into the sorted list)
// are copied into keep_out.
// NOTE(review): `sorted_indices` is only used for the size-consistency check;
// the actual ordering is assumed to be baked into `proposals`.
template <typename T>
static void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals,
                const Tensor &sorted_indices, const T nms_threshold,
                Tensor *keep_out) {
  int boxes_num = proposals.dims()[0];
  PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]);
  const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock);
  dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock),
              DIVUP(boxes_num, kThreadsPerBlock));
  dim3 threads(kThreadsPerBlock);
  const T *boxes = proposals.data<T>();
  auto place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
  // one 64-bit suppression word per (box, col-tile) pair
  framework::Vector<uint64_t> mask(boxes_num * col_blocks);
  hipLaunchKernelGGL(( NMSKernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_threshold, boxes,
                                     mask.CUDAMutableData(BOOST_GET_CONST(
                                         platform::CUDAPlace, ctx.GetPlace())));
  // host-side greedy selection: remv accumulates the suppression masks of all
  // boxes kept so far
  std::vector<uint64_t> remv(col_blocks);
  memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
  std::vector<int> keep_vec;
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / kThreadsPerBlock;
    int inblock = i % kThreadsPerBlock;
    if (!(remv[nblock] & (1ULL << inblock))) {
      ++num_to_keep;
      keep_vec.push_back(i);
      uint64_t *p = &mask[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  int *keep = keep_out->mutable_data<int>({num_to_keep}, ctx.GetPlace());
  memory::Copy(place, keep, platform::CPUPlace(), keep_vec.data(),
               sizeof(int) * num_to_keep, ctx.stream());
  ctx.Wait();
}
// Generates RPN proposals for a single image:
//   1) sort anchors by score and keep the top pre_nms_top_n,
//   2) decode deltas into boxes and clip to the image,
//   3) drop boxes smaller than min_size (or with center outside the image),
//   4) run NMS and keep at most post_nms_top_n boxes (skipped if
//      nms_thresh <= 0).
// Returns the (proposals, scores) pair as device tensors.
// NOTE(review): `eta` (adaptive NMS) is accepted but never used here.
template <typename T>
static std::pair<Tensor, Tensor> ProposalForOneImage(
    const platform::CUDADeviceContext &ctx, const Tensor &im_info,
    const Tensor &anchors, const Tensor &variances,
    const Tensor &bbox_deltas,  // [M, 4]
    const Tensor &scores,       // [N, 1]
    int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size,
    float eta) {
  // 1. pre nms
  Tensor scores_sort, index_sort;
  SortDescending<T>(ctx, scores, &scores_sort, &index_sort);
  int num = scores.numel();
  int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel()
                                                                : pre_nms_top_n;
  scores_sort.Resize({pre_nms_num, 1});
  index_sort.Resize({pre_nms_num, 1});
  // 2. box decode and clipping
  Tensor proposals;
  proposals.mutable_data<T>({pre_nms_num, 4}, ctx.GetPlace());
  {
    platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num);
    for_range(BoxDecodeAndClipFunctor<T>{
        anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(),
        index_sort.data<int>(), im_info.data<T>(), proposals.data<T>()});
  }
  // 3. filter (single-block launch; see FilterBBoxes)
  Tensor keep_index, keep_num_t;
  keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace());
  keep_num_t.mutable_data<int>({1}, ctx.GetPlace());
  min_size = ::max(min_size, 1.0f);
  auto stream = ctx.stream();
  hipLaunchKernelGGL(( FilterBBoxes<T, 512>), dim3(1), dim3(512), 0, stream,
      proposals.data<T>(), im_info.data<T>(), min_size, pre_nms_num,
      keep_num_t.data<int>(), keep_index.data<int>());
  int keep_num;
  const auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
  memory::Copy(platform::CPUPlace(), &keep_num, gpu_place,
               keep_num_t.data<int>(), sizeof(int), ctx.stream());
  ctx.Wait();
  keep_index.Resize({keep_num});
  Tensor scores_filter, proposals_filter;
  proposals_filter.mutable_data<T>({keep_num, 4}, ctx.GetPlace());
  scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace());
  GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
  GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
  if (nms_thresh <= 0) {
    return std::make_pair(proposals_filter, scores_filter);
  }
  // 4. nms
  Tensor keep_nms;
  NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms);
  if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
    keep_nms.Resize({post_nms_top_n});
  }
  Tensor scores_nms, proposals_nms;
  proposals_nms.mutable_data<T>({keep_nms.numel(), 4}, ctx.GetPlace());
  scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace());
  GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
  GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
  return std::make_pair(proposals_nms, scores_nms);
}
} // namespace
// CUDA op kernel for generate_proposals: for each image in the batch,
// transposes scores/bbox_deltas from NCHW to NHWC, generates per-image
// proposals via ProposalForOneImage, and concatenates the results into
// RpnRois / RpnRoiProbs. A LoD level records the per-image segment offsets;
// RpnRoisLod (optional output) holds the same offsets as an int64 tensor.
template <typename DeviceContext, typename T>
class CUDAGenerateProposalsKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *scores = context.Input<Tensor>("Scores");
    auto *bbox_deltas = context.Input<Tensor>("BboxDeltas");
    auto *im_info = context.Input<Tensor>("ImInfo");
    auto anchors = GET_DATA_SAFELY(context.Input<Tensor>("Anchors"), "Input",
                                   "Anchors", "GenerateProposals");
    auto variances = GET_DATA_SAFELY(context.Input<Tensor>("Variances"),
                                     "Input", "Variances", "GenerateProposals");
    auto *rpn_rois = context.Output<LoDTensor>("RpnRois");
    auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs");
    int pre_nms_top_n = context.Attr<int>("pre_nms_topN");
    int post_nms_top_n = context.Attr<int>("post_nms_topN");
    float nms_thresh = context.Attr<float>("nms_thresh");
    float min_size = context.Attr<float>("min_size");
    float eta = context.Attr<float>("eta");
    // adaptive NMS (eta < 1) is not implemented on GPU
    PADDLE_ENFORCE_GE(eta, 1.,
                      platform::errors::InvalidArgument(
                          "Not support adaptive NMS. The attribute 'eta' "
                          "should not less than 1. But received eta=[%d]",
                          eta));
    auto &dev_ctx = context.template device_context<DeviceContext>();
    auto scores_dim = scores->dims();
    int64_t num = scores_dim[0];
    int64_t c_score = scores_dim[1];
    int64_t h_score = scores_dim[2];
    int64_t w_score = scores_dim[3];
    auto bbox_dim = bbox_deltas->dims();
    int64_t c_bbox = bbox_dim[1];
    int64_t h_bbox = bbox_dim[2];
    int64_t w_bbox = bbox_dim[3];
    // transpose NCHW -> NHWC so each anchor's 4 deltas / 1 score are contiguous
    Tensor bbox_deltas_swap, scores_swap;
    bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox},
                                     dev_ctx.GetPlace());
    scores_swap.mutable_data<T>({num, h_score, w_score, c_score},
                                dev_ctx.GetPlace());
    math::Transpose<DeviceContext, T, 4> trans;
    std::vector<int> axis = {0, 2, 3, 1};
    trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis);
    trans(dev_ctx, *scores, &scores_swap, axis);
    anchors.Resize({anchors.numel() / 4, 4});
    variances.Resize({variances.numel() / 4, 4});
    // outputs sized for the worst case; shrunk to num_proposals at the end
    rpn_rois->mutable_data<T>({bbox_deltas->numel() / 4, 4},
                              context.GetPlace());
    rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace());
    T *rpn_rois_data = rpn_rois->data<T>();
    T *rpn_roi_probs_data = rpn_roi_probs->data<T>();
    auto place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace());
    auto cpu_place = platform::CPUPlace();
    int64_t num_proposals = 0;
    std::vector<size_t> offset(1, 0);
    std::vector<int64_t> tmp_lod;
    // per-image proposal generation, appended into the shared output buffers
    for (int64_t i = 0; i < num; ++i) {
      Tensor im_info_slice = im_info->Slice(i, i + 1);
      Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1);
      Tensor scores_slice = scores_swap.Slice(i, i + 1);
      bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 4, 4});
      scores_slice.Resize({h_score * w_score * c_score, 1});
      std::pair<Tensor, Tensor> box_score_pair =
          ProposalForOneImage<T>(dev_ctx, im_info_slice, anchors, variances,
                                 bbox_deltas_slice, scores_slice, pre_nms_top_n,
                                 post_nms_top_n, nms_thresh, min_size, eta);
      Tensor &proposals = box_score_pair.first;
      Tensor &scores = box_score_pair.second;
      memory::Copy(place, rpn_rois_data + num_proposals * 4, place,
                   proposals.data<T>(), sizeof(T) * proposals.numel(),
                   dev_ctx.stream());
      memory::Copy(place, rpn_roi_probs_data + num_proposals, place,
                   scores.data<T>(), sizeof(T) * scores.numel(),
                   dev_ctx.stream());
      dev_ctx.Wait();
      num_proposals += proposals.dims()[0];
      offset.emplace_back(num_proposals);
      tmp_lod.push_back(num_proposals);
    }
    // optional tensor-form LoD output
    if (context.HasOutput("RpnRoisLod")) {
      auto *rpn_rois_lod = context.Output<Tensor>("RpnRoisLod");
      rpn_rois_lod->mutable_data<int64_t>({num}, context.GetPlace());
      int64_t *lod_data = rpn_rois_lod->data<int64_t>();
      memory::Copy(place, lod_data, cpu_place, &tmp_lod[0],
                   sizeof(int64_t) * num, dev_ctx.stream());
      rpn_rois_lod->Resize({num});
    }
    framework::LoD lod;
    lod.emplace_back(offset);
    rpn_rois->set_lod(lod);
    rpn_roi_probs->set_lod(lod);
    rpn_rois->Resize({num_proposals, 4});
    rpn_roi_probs->Resize({num_proposals, 1});
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(generate_proposals,
ops::CUDAGenerateProposalsKernel<
paddle::platform::CUDADeviceContext, float>);
| 34505f35a456ef9b5766f75abe6aa1081f135168.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <paddle/fluid/memory/allocation/allocator.h>
#include <stdio.h>
#include <string>
#include <vector>
#include "cub/cub.cuh"
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/gather.cu.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
namespace {
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
int const kThreadsPerBlock = sizeof(uint64_t) * 8;
static const double kBBoxClipDefault = std::log(1000.0 / 16.0);
// Functor for platform::ForRange: fills out_[i] = start_ + i * delta_ (an
// arithmetic progression), used to build the index sequence for the sort.
struct RangeInitFunctor {
  int start_;   // first value of the progression
  int delta_;   // step between consecutive values
  int *out_;    // device output buffer
  __device__ void operator()(size_t i) { out_[i] = start_ + i * delta_; }
};
// Sorts the flattened `value` tensor in descending order on the GPU, writing
// sorted keys to value_out and original positions to index_out. Uses the CUB
// radix-sort two-phase pattern (size query, then sort).
// NOTE(review): no stream is passed to SortPairsDescending, so it runs on the
// default stream rather than ctx.stream() -- confirm ordering is safe.
template <typename T>
static void SortDescending(const platform::CUDADeviceContext &ctx,
                           const Tensor &value, Tensor *value_out,
                           Tensor *index_out) {
  int num = static_cast<int>(value.numel());
  Tensor index_in_t;
  int *idx_in = index_in_t.mutable_data<int>({num}, ctx.GetPlace());
  platform::ForRange<platform::CUDADeviceContext> for_range(ctx, num);
  // idx_in = [0, 1, ..., num-1]
  for_range(RangeInitFunctor{0, 1, idx_in});
  int *idx_out = index_out->mutable_data<int>({num}, ctx.GetPlace());
  const T *keys_in = value.data<T>();
  T *keys_out = value_out->mutable_data<T>({num}, ctx.GetPlace());
  // Determine temporary device storage requirements
  size_t temp_storage_bytes = 0;
  cub::DeviceRadixSort::SortPairsDescending<T, int>(
      nullptr, temp_storage_bytes, keys_in, keys_out, idx_in, idx_out, num);
  // Allocate temporary storage
  auto place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
  auto d_temp_storage = memory::Alloc(place, temp_storage_bytes);
  // Run sorting operation
  cub::DeviceRadixSort::SortPairsDescending<T, int>(
      d_temp_storage->ptr(), temp_storage_bytes, keys_in, keys_out, idx_in,
      idx_out, num);
}
// Functor: decodes anchor boxes with regression deltas (optionally scaled by
// per-anchor variances) into proposal boxes and clips them to the image.
// One invocation handles one selected anchor: index[i] picks the anchor/delta
// row; the decoded, clipped box is written to proposals[i*4 .. i*4+3].
template <typename T>
struct BoxDecodeAndClipFunctor {
  const T *anchor;   // anchors, 4 values per box (xmin, ymin, xmax, ymax)
  const T *deltas;   // regression deltas, 4 per box
  const T *var;      // optional variances, 4 per box (may be null)
  const int *index;  // indices of the anchors to decode
  const T *im_info;  // im_info[0] = image height, im_info[1] = image width
  T *proposals;      // output boxes, 4 values per selected anchor
  BoxDecodeAndClipFunctor(const T *anchor, const T *deltas, const T *var,
                          const int *index, const T *im_info, T *proposals)
      : anchor(anchor),
        deltas(deltas),
        var(var),
        index(index),
        im_info(im_info),
        proposals(proposals) {}
  // cap on dw/dh before exp() so width/height cannot blow up
  T bbox_clip_default{static_cast<T>(kBBoxClipDefault)};
  __device__ void operator()(size_t i) {
    int k = index[i] * 4;
    T axmin = anchor[k];
    T aymin = anchor[k + 1];
    T axmax = anchor[k + 2];
    T aymax = anchor[k + 3];
    // anchor center/size (the +1 follows the pixel-inclusive box convention)
    T w = axmax - axmin + 1.0;
    T h = aymax - aymin + 1.0;
    T cx = axmin + 0.5 * w;
    T cy = aymin + 0.5 * h;
    T dxmin = deltas[k];
    T dymin = deltas[k + 1];
    T dxmax = deltas[k + 2];
    T dymax = deltas[k + 3];
    // apply deltas to get the decoded center/size
    T d_cx, d_cy, d_w, d_h;
    if (var) {
      d_cx = cx + dxmin * w * var[k];
      d_cy = cy + dymin * h * var[k + 1];
      d_w = exp(Min(dxmax * var[k + 2], bbox_clip_default)) * w;
      d_h = exp(Min(dymax * var[k + 3], bbox_clip_default)) * h;
    } else {
      d_cx = cx + dxmin * w;
      d_cy = cy + dymin * h;
      d_w = exp(Min(dxmax, bbox_clip_default)) * w;
      d_h = exp(Min(dymax, bbox_clip_default)) * h;
    }
    // back to corner representation
    T oxmin = d_cx - d_w * 0.5;
    T oymin = d_cy - d_h * 0.5;
    T oxmax = d_cx + d_w * 0.5 - 1.;
    T oymax = d_cy + d_h * 0.5 - 1.;
    // clip to image bounds [0, W-1] x [0, H-1]
    proposals[i * 4] = Max(Min(oxmin, im_info[1] - 1.), 0.);
    proposals[i * 4 + 1] = Max(Min(oymin, im_info[0] - 1.), 0.);
    proposals[i * 4 + 2] = Max(Min(oxmax, im_info[1] - 1.), 0.);
    proposals[i * 4 + 3] = Max(Min(oymax, im_info[0] - 1.), 0.);
  }
  __device__ __forceinline__ T Min(T a, T b) const { return a > b ? b : a; }
  __device__ __forceinline__ T Max(T a, T b) const { return a > b ? a : b; }
};
// Kernel: filters proposals, keeping boxes whose size (measured in the
// original, unscaled image) is at least min_size and whose center lies inside
// the image. Kept indices are compacted in order into `keep`, the count into
// keep_num[0]. Thread 0 serially compacts each BlockSize-sized chunk via the
// shared keep_index staging buffer.
// NOTE(review): designed for a single-block launch; the __syncthreads() calls
// sit inside a CUDA_KERNEL_LOOP whose trip count can differ across threads
// when num is not a multiple of the block size (divergent barrier, UB) --
// confirm/fix upstream.
template <typename T, int BlockSize>
static __global__ void FilterBBoxes(const T *bboxes, const T *im_info,
                                    const T min_size, const int num,
                                    int *keep_num, int *keep) {
  T im_h = im_info[0];
  T im_w = im_info[1];
  T im_scale = im_info[2];
  int cnt = 0;  // running kept-box count; only thread 0's copy is meaningful
  __shared__ int keep_index[BlockSize];
  CUDA_KERNEL_LOOP(i, num) {
    keep_index[threadIdx.x] = -1;  // mark this thread's slot empty
    __syncthreads();
    int k = i * 4;
    T xmin = bboxes[k];
    T ymin = bboxes[k + 1];
    T xmax = bboxes[k + 2];
    T ymax = bboxes[k + 3];
    T w = xmax - xmin + 1.0;
    T h = ymax - ymin + 1.0;
    T cx = xmin + w / 2.;
    T cy = ymin + h / 2.;
    // box size in original-image coordinates (undo the resize scale)
    T w_s = (xmax - xmin) / im_scale + 1.;
    T h_s = (ymax - ymin) / im_scale + 1.;
    if (w_s >= min_size && h_s >= min_size && cx <= im_w && cy <= im_h) {
      keep_index[threadIdx.x] = i;
    }
    __syncthreads();
    // thread 0 compacts this chunk's survivors, preserving order
    if (threadIdx.x == 0) {
      int size = (num - i) < BlockSize ? num - i : BlockSize;
      for (int j = 0; j < size; ++j) {
        if (keep_index[j] > -1) {
          keep[cnt++] = keep_index[j];
        }
      }
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    keep_num[0] = cnt;
  }
}
// Intersection-over-union of two boxes given as [xmin, ymin, xmax, ymax],
// using the "+1" pixel-inclusive width/height convention.
static __device__ inline float IoU(const float *a, const float *b) {
  const float ix0 = max(a[0], b[0]);
  const float ix1 = min(a[2], b[2]);
  const float iy0 = max(a[1], b[1]);
  const float iy1 = min(a[3], b[3]);
  const float iw = max(ix1 - ix0 + 1, 0.f);
  const float ih = max(iy1 - iy0 + 1, 0.f);
  const float inter = iw * ih;
  const float area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  const float area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return inter / (area_a + area_b - inter);
}
// Pairwise suppression-mask kernel for NMS.  The grid is tiled
// (blockIdx.y = "row" tile of boxes, blockIdx.x = "column" tile); each block
// loads its column tile into shared memory and every thread compares one row
// box against all column boxes, recording overlaps > nms_overlap_thresh as a
// 64-bit mask.  dev_mask is [n_boxes, DIVUP(n_boxes, kThreadsPerBlock)].
// Boxes are assumed pre-sorted by descending score.
static __global__ void NMSKernel(const int n_boxes,
                                 const float nms_overlap_thresh,
                                 const float *dev_boxes, uint64_t *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// Clamp tile sizes for the ragged last tiles.
const int row_size =
min(n_boxes - row_start * kThreadsPerBlock, kThreadsPerBlock);
const int col_size =
min(n_boxes - col_start * kThreadsPerBlock, kThreadsPerBlock);
// Stage the column tile (4 floats per box) in shared memory.
__shared__ float block_boxes[kThreadsPerBlock * 4];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 4 + 0] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 0];
block_boxes[threadIdx.x * 4 + 1] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 1];
block_boxes[threadIdx.x * 4 + 2] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 2];
block_boxes[threadIdx.x * 4 + 3] =
dev_boxes[(kThreadsPerBlock * col_start + threadIdx.x) * 4 + 3];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = kThreadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 4;
int i = 0;
uint64_t t = 0;
int start = 0;
// On the diagonal tile only compare against later (lower-scored) boxes so
// each unordered pair is examined once.
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (IoU(cur_box, block_boxes + i * 4) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, kThreadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Host-side greedy NMS driver.  Launches NMSKernel to build the pairwise
// suppression bitmasks, then walks the score-sorted boxes on the CPU: a box
// is kept only if no previously kept box suppresses it.  The kept indices
// are copied into keep_out (resized to the number of survivors).
template <typename T>
static void NMS(const platform::CUDADeviceContext &ctx, const Tensor &proposals,
                const Tensor &sorted_indices, const T nms_threshold,
                Tensor *keep_out) {
int boxes_num = proposals.dims()[0];
PADDLE_ENFORCE_EQ(boxes_num, sorted_indices.dims()[0]);
const int col_blocks = DIVUP(boxes_num, kThreadsPerBlock);
dim3 blocks(DIVUP(boxes_num, kThreadsPerBlock),
DIVUP(boxes_num, kThreadsPerBlock));
dim3 threads(kThreadsPerBlock);
const T *boxes = proposals.data<T>();
auto place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
framework::Vector<uint64_t> mask(boxes_num * col_blocks);
// NOTE(review): this launch goes to the legacy default stream while the rest
// of the operator uses ctx.stream(); the host read of mask[] below presumably
// relies on framework::Vector's device-to-host sync on access — verify the
// ordering if streams are made non-blocking.
NMSKernel<<<blocks, threads>>>(boxes_num, nms_threshold, boxes,
                               mask.CUDAMutableData(BOOST_GET_CONST(
                                   platform::CUDAPlace, ctx.GetPlace())));
// remv accumulates, per 64-box block, the set of already-suppressed boxes.
std::vector<uint64_t> remv(col_blocks);
memset(&remv[0], 0, sizeof(uint64_t) * col_blocks);
std::vector<int> keep_vec;
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / kThreadsPerBlock;
int inblock = i % kThreadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
// Box i survives: record it and mark everything it suppresses.
++num_to_keep;
keep_vec.push_back(i);
uint64_t *p = &mask[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
int *keep = keep_out->mutable_data<int>({num_to_keep}, ctx.GetPlace());
memory::Copy(place, keep, platform::CPUPlace(), keep_vec.data(),
sizeof(int) * num_to_keep, ctx.stream());
ctx.Wait();
}
// Runs the full proposal pipeline for one image:
//   1) sort anchors by score and keep the top pre_nms_top_n,
//   2) decode deltas into boxes and clip them to the image,
//   3) drop boxes smaller than min_size,
//   4) NMS, keeping at most post_nms_top_n.
// Returns (proposals, scores) for the surviving boxes.
// NOTE(review): `eta` (adaptive NMS) is accepted but unused here; the caller
// enforces eta >= 1 so adaptive NMS never triggers.
template <typename T>
static std::pair<Tensor, Tensor> ProposalForOneImage(
    const platform::CUDADeviceContext &ctx, const Tensor &im_info,
    const Tensor &anchors, const Tensor &variances,
    const Tensor &bbox_deltas,  // [M, 4]
    const Tensor &scores,       // [N, 1]
    int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size,
    float eta) {
// 1. pre nms
Tensor scores_sort, index_sort;
SortDescending<T>(ctx, scores, &scores_sort, &index_sort);
int num = scores.numel();
// pre_nms_top_n <= 0 means "keep everything".
int pre_nms_num = (pre_nms_top_n <= 0 || pre_nms_top_n > num) ? scores.numel()
: pre_nms_top_n;
scores_sort.Resize({pre_nms_num, 1});
index_sort.Resize({pre_nms_num, 1});
// 2. box decode and clipping
Tensor proposals;
proposals.mutable_data<T>({pre_nms_num, 4}, ctx.GetPlace());
{
platform::ForRange<platform::CUDADeviceContext> for_range(ctx, pre_nms_num);
for_range(BoxDecodeAndClipFunctor<T>{
anchors.data<T>(), bbox_deltas.data<T>(), variances.data<T>(),
index_sort.data<int>(), im_info.data<T>(), proposals.data<T>()});
}
// 3. filter
Tensor keep_index, keep_num_t;
keep_index.mutable_data<int>({pre_nms_num}, ctx.GetPlace());
keep_num_t.mutable_data<int>({1}, ctx.GetPlace());
min_size = std::max(min_size, 1.0f);
auto stream = ctx.stream();
// Single-block launch: FilterBBoxes compacts serially within one block.
FilterBBoxes<T, 512><<<1, 512, 0, stream>>>(
proposals.data<T>(), im_info.data<T>(), min_size, pre_nms_num,
keep_num_t.data<int>(), keep_index.data<int>());
int keep_num;
const auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
// Bring the survivor count to the host; Wait() makes the async copy visible.
memory::Copy(platform::CPUPlace(), &keep_num, gpu_place,
keep_num_t.data<int>(), sizeof(int), ctx.stream());
ctx.Wait();
keep_index.Resize({keep_num});
Tensor scores_filter, proposals_filter;
proposals_filter.mutable_data<T>({keep_num, 4}, ctx.GetPlace());
scores_filter.mutable_data<T>({keep_num, 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals, keep_index, &proposals_filter);
GPUGather<T>(ctx, scores_sort, keep_index, &scores_filter);
// nms_thresh <= 0 disables NMS entirely.
if (nms_thresh <= 0) {
return std::make_pair(proposals_filter, scores_filter);
}
// 4. nms
Tensor keep_nms;
NMS<T>(ctx, proposals_filter, keep_index, nms_thresh, &keep_nms);
if (post_nms_top_n > 0 && post_nms_top_n < keep_nms.numel()) {
keep_nms.Resize({post_nms_top_n});
}
Tensor scores_nms, proposals_nms;
proposals_nms.mutable_data<T>({keep_nms.numel(), 4}, ctx.GetPlace());
scores_nms.mutable_data<T>({keep_nms.numel(), 1}, ctx.GetPlace());
GPUGather<T>(ctx, proposals_filter, keep_nms, &proposals_nms);
GPUGather<T>(ctx, scores_filter, keep_nms, &scores_nms);
return std::make_pair(proposals_nms, scores_nms);
}
} // namespace
// CUDA kernel for the generate_proposals operator.  For each image in the
// batch: transposes scores/deltas from NCHW to NHWC, runs the per-image
// proposal pipeline (sort, decode, filter, NMS), and concatenates the
// results into RpnRois / RpnRoiProbs with a LoD recording per-image counts.
template <typename DeviceContext, typename T>
class CUDAGenerateProposalsKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
auto *scores = context.Input<Tensor>("Scores");
auto *bbox_deltas = context.Input<Tensor>("BboxDeltas");
auto *im_info = context.Input<Tensor>("ImInfo");
auto anchors = GET_DATA_SAFELY(context.Input<Tensor>("Anchors"), "Input",
"Anchors", "GenerateProposals");
auto variances = GET_DATA_SAFELY(context.Input<Tensor>("Variances"),
"Input", "Variances", "GenerateProposals");
auto *rpn_rois = context.Output<LoDTensor>("RpnRois");
auto *rpn_roi_probs = context.Output<LoDTensor>("RpnRoiProbs");
int pre_nms_top_n = context.Attr<int>("pre_nms_topN");
int post_nms_top_n = context.Attr<int>("post_nms_topN");
float nms_thresh = context.Attr<float>("nms_thresh");
float min_size = context.Attr<float>("min_size");
float eta = context.Attr<float>("eta");
// Adaptive NMS (eta < 1) is not implemented on GPU.
PADDLE_ENFORCE_GE(eta, 1.,
platform::errors::InvalidArgument(
"Not support adaptive NMS. The attribute 'eta' "
"should not less than 1. But received eta=[%d]",
eta));
auto &dev_ctx = context.template device_context<DeviceContext>();
auto scores_dim = scores->dims();
int64_t num = scores_dim[0];
int64_t c_score = scores_dim[1];
int64_t h_score = scores_dim[2];
int64_t w_score = scores_dim[3];
auto bbox_dim = bbox_deltas->dims();
int64_t c_bbox = bbox_dim[1];
int64_t h_bbox = bbox_dim[2];
int64_t w_bbox = bbox_dim[3];
// Transpose NCHW -> NHWC so per-anchor values are contiguous.
Tensor bbox_deltas_swap, scores_swap;
bbox_deltas_swap.mutable_data<T>({num, h_bbox, w_bbox, c_bbox},
dev_ctx.GetPlace());
scores_swap.mutable_data<T>({num, h_score, w_score, c_score},
dev_ctx.GetPlace());
math::Transpose<DeviceContext, T, 4> trans;
std::vector<int> axis = {0, 2, 3, 1};
trans(dev_ctx, *bbox_deltas, &bbox_deltas_swap, axis);
trans(dev_ctx, *scores, &scores_swap, axis);
anchors.Resize({anchors.numel() / 4, 4});
variances.Resize({variances.numel() / 4, 4});
// Allocate outputs at their maximum possible size; shrunk at the end.
rpn_rois->mutable_data<T>({bbox_deltas->numel() / 4, 4},
context.GetPlace());
rpn_roi_probs->mutable_data<T>({scores->numel(), 1}, context.GetPlace());
T *rpn_rois_data = rpn_rois->data<T>();
T *rpn_roi_probs_data = rpn_roi_probs->data<T>();
auto place = BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace());
auto cpu_place = platform::CPUPlace();
int64_t num_proposals = 0;
std::vector<size_t> offset(1, 0);
std::vector<int64_t> tmp_lod;
for (int64_t i = 0; i < num; ++i) {
Tensor im_info_slice = im_info->Slice(i, i + 1);
Tensor bbox_deltas_slice = bbox_deltas_swap.Slice(i, i + 1);
Tensor scores_slice = scores_swap.Slice(i, i + 1);
bbox_deltas_slice.Resize({h_bbox * w_bbox * c_bbox / 4, 4});
scores_slice.Resize({h_score * w_score * c_score, 1});
std::pair<Tensor, Tensor> box_score_pair =
ProposalForOneImage<T>(dev_ctx, im_info_slice, anchors, variances,
bbox_deltas_slice, scores_slice, pre_nms_top_n,
post_nms_top_n, nms_thresh, min_size, eta);
Tensor &proposals = box_score_pair.first;
// NOTE: this local `scores` shadows the input pointer of the same name.
Tensor &scores = box_score_pair.second;
// Append this image's proposals/scores device-to-device.
memory::Copy(place, rpn_rois_data + num_proposals * 4, place,
proposals.data<T>(), sizeof(T) * proposals.numel(),
dev_ctx.stream());
memory::Copy(place, rpn_roi_probs_data + num_proposals, place,
scores.data<T>(), sizeof(T) * scores.numel(),
dev_ctx.stream());
dev_ctx.Wait();
num_proposals += proposals.dims()[0];
offset.emplace_back(num_proposals);
tmp_lod.push_back(num_proposals);
}
// Optional tensor output mirroring the LoD (cumulative proposal counts).
if (context.HasOutput("RpnRoisLod")) {
auto *rpn_rois_lod = context.Output<Tensor>("RpnRoisLod");
rpn_rois_lod->mutable_data<int64_t>({num}, context.GetPlace());
int64_t *lod_data = rpn_rois_lod->data<int64_t>();
memory::Copy(place, lod_data, cpu_place, &tmp_lod[0],
sizeof(int64_t) * num, dev_ctx.stream());
rpn_rois_lod->Resize({num});
}
framework::LoD lod;
lod.emplace_back(offset);
rpn_rois->set_lod(lod);
rpn_roi_probs->set_lod(lod);
rpn_rois->Resize({num_proposals, 4});
rpn_roi_probs->Resize({num_proposals, 1});
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Register the CUDA kernel; only float is instantiated for generate_proposals.
REGISTER_OP_CUDA_KERNEL(generate_proposals,
                        ops::CUDAGenerateProposalsKernel<
                            paddle::platform::CUDADeviceContext, float>);
|
36c8040852f261fb6e92f9dce1726a3ed92594e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/integral_image_op.h"
namespace caffe2 {
namespace {
// First pass of the integral image: running sum across each row.
// Input is (N, C, H, W); output is (N, C, H + 1, W + 1) with the first row
// and column zeroed. One thread handles one entire output row.
__global__ void RowPassKernel(
    int count,
    int rows_out,
    int cols_out,
    int chans,
    const float* in,
    float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, row).
    const int row = index % rows_out;
    const int chan = (index / rows_out) % chans;
    const int batch = index / rows_out / chans;
    // Input is (H, W); output is (H + 1, W + 1).
    const int rows_in = rows_out - 1;
    const int cols_in = cols_out - 1;
    // Input data is shifted (-1, -1) relative to the output, hence row - 1
    // (never dereferenced when row == 0).
    const float* in_row =
        in + cols_in * ((row - 1) + rows_in * (chan + batch * chans));
    float* out_row =
        out + cols_out * (row + rows_out * (chan + batch * chans));
    // First row and first column of the output are all zeros.
    out_row[0] = 0.;
    if (row == 0) {
      for (int j = 1; j < cols_out; ++j) {
        out_row[j] = 0.;
      }
    } else {
      // Prefix sum along the row; input column j - 1 feeds output column j.
      for (int j = 1; j < cols_out; ++j) {
        out_row[j] = out_row[j - 1] + in_row[j - 1];
      }
    }
  }
}
// Row pass of the integral-image gradient: inclusive prefix sum along each
// row of dY.  Input rows are (W + 1) wide; output rows are W wide.
// One thread handles one output row.
__global__ void RowPassGradientKernel(
    int count,
    int rows_out,
    int cols_out,
    int chans,
    const float* in,
    float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, row).
    const int row = index % rows_out;
    const int chan = (index / rows_out) % chans;
    const int batch = index / rows_out / chans;
    // Input is (H + 1, W + 1); output is (H + 1, W).
    const int rows_in = rows_out;
    const int cols_in = cols_out + 1;
    const float* in_row =
        in + cols_in * (row + rows_in * (chan + batch * chans));
    float* out_row =
        out + cols_out * (row + rows_out * (chan + batch * chans));
    // Inclusive running sum over the row.
    out_row[0] = in_row[0];
    for (int j = 1; j < cols_out; ++j) {
      out_row[j] = out_row[j - 1] + in_row[j];
    }
  }
}
// Second pass of the integral image: in-place prefix sum down each column of
// `out` (the result of the row pass). One thread walks one entire column.
__global__ void
ColPassKernel(int count, int rows_out, int cols_out, int chans, float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, column).
    const int col = index % cols_out;
    const int chan = (index / cols_out) % chans;
    const int batch = index / cols_out / chans;
    float* col_data =
        out + col + cols_out * rows_out * (chan + batch * chans);
    // Accumulate down the column; consecutive rows are cols_out apart.
    for (int r = 1; r < rows_out; ++r) {
      col_data[r * cols_out] += col_data[(r - 1) * cols_out];
    }
  }
}
// Column pass of the integral-image gradient: inclusive prefix sum down each
// column, shrinking the (H + 1, W) row-pass buffer to the (H, W) gradient.
// One thread walks one entire column.
__global__ void ColPassGradientKernel(
    int count,
    int rows_out,
    int cols_out,
    int chans,
    const float* in,
    float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, column).
    const int col = index % cols_out;
    const int chan = (index / cols_out) % chans;
    const int batch = index / cols_out / chans;
    // Input is (H + 1, W); output is (H, W).
    const int rows_in = rows_out + 1;
    const int cols_in = cols_out;
    const float* in_col =
        in + col + cols_in * rows_in * (chan + batch * chans);
    float* out_col =
        out + col + cols_out * rows_out * (chan + batch * chans);
    // Inclusive running sum down the column.
    out_col[0] = in_col[0];
    for (int r = 1; r < rows_out; ++r) {
      out_col[r * cols_out] =
          out_col[(r - 1) * cols_out] + in_col[r * cols_in];
    }
  }
}
} // namespace
// Forward pass (HIP): builds the integral image of X.
// X is (N, C, H, W); Y is (N, C, H + 1, W + 1) with zero first row/column.
// Implemented as a row prefix-sum pass followed by an in-place column pass.
template <>
bool IntegralImageOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE(X.dim() == 4, "Only supports 4D tensors for the momement");
// Input is (N, C, H, W)
// Output is (N, C, H + 1, W + 1)
vector<int64_t> out_shape(X.sizes().vec());
out_shape[2] += 1; // H + 1 output size
out_shape[3] += 1; // W + 1 output size
auto* Y = Output(0, out_shape, at::dtype<float>());
const int chans = X.dim32(1);
const int rows_out = Y->dim32(2);
const int cols_out = Y->dim32(3);
// Integral image over rows of input X
// One work item per output row: N * C * (H + 1).
const int row_pass_size = X.dim32(0) * chans * rows_out;
hipLaunchKernelGGL(( RowPassKernel),
dim3(CAFFE_GET_BLOCKS(row_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
row_pass_size,
rows_out,
cols_out,
chans,
X.data<float>(),
Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
// Integral image over columns of the integral image over rows
// One work item per output column: N * C * (W + 1); operates in place on Y.
const int col_pass_size = X.dim32(0) * chans * cols_out;
hipLaunchKernelGGL(( ColPassKernel),
dim3(CAFFE_GET_BLOCKS(col_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
col_pass_size,
rows_out,
cols_out,
chans,
Y->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
// Backward pass (HIP): propagates dY (N, C, H + 1, W + 1) to dX (N, C, H, W)
// via a row prefix-sum pass into a temporary buffer, then a column pass.
template <>
bool IntegralImageGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Original input to "forward" op
auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Row pass reduces shape of dY from (N, C, H + 1, W + 1)
// to (N, C, H + 1, W)
// Col pass reduces shape to (N, C, H, W)
vector<int64_t> row_pass_shape(dY.sizes().vec());
row_pass_shape[3] -= 1;
ReinitializeTensor(&row_pass_buffer_, row_pass_shape, at::dtype<float>().device(CUDA));
const int chans = row_pass_buffer_.dim32(1);
const int rows_out = row_pass_buffer_.dim32(2);
const int cols_out = row_pass_buffer_.dim32(3);
// Integral image over rows of input X
// One work item per buffer row: N * C * (H + 1).
const int row_pass_size = X.dim32(0) * chans * rows_out;
hipLaunchKernelGGL(( RowPassGradientKernel),
dim3(CAFFE_GET_BLOCKS(row_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
row_pass_size,
rows_out,
cols_out,
chans,
dY.data<float>(),
row_pass_buffer_.mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
// Integral image over columns of the integral image over rows
const int col_pass_size = X.dim32(0) * chans * cols_out;
// Note rows_out - 1: the column pass writes the final (H-row) gradient.
hipLaunchKernelGGL(( ColPassGradientKernel),
dim3(CAFFE_GET_BLOCKS(col_pass_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
col_pass_size,
rows_out - 1,
cols_out,
chans,
row_pass_buffer_.data<float>(),
dX->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
// Operator registrations (HIP build reuses the CUDA registration macros).
REGISTER_CUDA_OPERATOR(IntegralImage, IntegralImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
    IntegralImageGradient,
    IntegralImageGradientOp<float, CUDAContext>);
} // namespace caffe2
| 36c8040852f261fb6e92f9dce1726a3ed92594e0.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/integral_image_op.h"
namespace caffe2 {
namespace {
// First pass of the integral image: running sum across each row.
// Input is (N, C, H, W); output is (N, C, H + 1, W + 1) with the first row
// and column zeroed. One thread handles one entire output row.
__global__ void RowPassKernel(
    int count,
    int rows_out,
    int cols_out,
    int chans,
    const float* in,
    float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, row).
    const int row = index % rows_out;
    const int chan = (index / rows_out) % chans;
    const int batch = index / rows_out / chans;
    // Input is (H, W); output is (H + 1, W + 1).
    const int rows_in = rows_out - 1;
    const int cols_in = cols_out - 1;
    // Input data is shifted (-1, -1) relative to the output, hence row - 1
    // (never dereferenced when row == 0).
    const float* in_row =
        in + cols_in * ((row - 1) + rows_in * (chan + batch * chans));
    float* out_row =
        out + cols_out * (row + rows_out * (chan + batch * chans));
    // First row and first column of the output are all zeros.
    out_row[0] = 0.;
    if (row == 0) {
      for (int j = 1; j < cols_out; ++j) {
        out_row[j] = 0.;
      }
    } else {
      // Prefix sum along the row; input column j - 1 feeds output column j.
      for (int j = 1; j < cols_out; ++j) {
        out_row[j] = out_row[j - 1] + in_row[j - 1];
      }
    }
  }
}
// Row pass of the integral-image gradient: inclusive prefix sum along each
// row of dY.  Input rows are (W + 1) wide; output rows are W wide.
// One thread handles one output row.
__global__ void RowPassGradientKernel(
    int count,
    int rows_out,
    int cols_out,
    int chans,
    const float* in,
    float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, row).
    const int row = index % rows_out;
    const int chan = (index / rows_out) % chans;
    const int batch = index / rows_out / chans;
    // Input is (H + 1, W + 1); output is (H + 1, W).
    const int rows_in = rows_out;
    const int cols_in = cols_out + 1;
    const float* in_row =
        in + cols_in * (row + rows_in * (chan + batch * chans));
    float* out_row =
        out + cols_out * (row + rows_out * (chan + batch * chans));
    // Inclusive running sum over the row.
    out_row[0] = in_row[0];
    for (int j = 1; j < cols_out; ++j) {
      out_row[j] = out_row[j - 1] + in_row[j];
    }
  }
}
// Second pass of the integral image: in-place prefix sum down each column of
// `out` (the result of the row pass). One thread walks one entire column.
__global__ void
ColPassKernel(int count, int rows_out, int cols_out, int chans, float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, column).
    const int col = index % cols_out;
    const int chan = (index / cols_out) % chans;
    const int batch = index / cols_out / chans;
    float* col_data =
        out + col + cols_out * rows_out * (chan + batch * chans);
    // Accumulate down the column; consecutive rows are cols_out apart.
    for (int r = 1; r < rows_out; ++r) {
      col_data[r * cols_out] += col_data[(r - 1) * cols_out];
    }
  }
}
// Column pass of the integral-image gradient: inclusive prefix sum down each
// column, shrinking the (H + 1, W) row-pass buffer to the (H, W) gradient.
// One thread walks one entire column.
__global__ void ColPassGradientKernel(
    int count,
    int rows_out,
    int cols_out,
    int chans,
    const float* in,
    float* out) {
  CUDA_1D_KERNEL_LOOP(index, count) {
    // Decompose the linear work-item id into (batch, channel, column).
    const int col = index % cols_out;
    const int chan = (index / cols_out) % chans;
    const int batch = index / cols_out / chans;
    // Input is (H + 1, W); output is (H, W).
    const int rows_in = rows_out + 1;
    const int cols_in = cols_out;
    const float* in_col =
        in + col + cols_in * rows_in * (chan + batch * chans);
    float* out_col =
        out + col + cols_out * rows_out * (chan + batch * chans);
    // Inclusive running sum down the column.
    out_col[0] = in_col[0];
    for (int r = 1; r < rows_out; ++r) {
      out_col[r * cols_out] =
          out_col[(r - 1) * cols_out] + in_col[r * cols_in];
    }
  }
}
} // namespace
// Forward pass (CUDA): builds the integral image of X.
// X is (N, C, H, W); Y is (N, C, H + 1, W + 1) with zero first row/column.
// Implemented as a row prefix-sum pass followed by an in-place column pass.
template <>
bool IntegralImageOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
CAFFE_ENFORCE(X.dim() == 4, "Only supports 4D tensors for the momement");
// Input is (N, C, H, W)
// Output is (N, C, H + 1, W + 1)
vector<int64_t> out_shape(X.sizes().vec());
out_shape[2] += 1; // H + 1 output size
out_shape[3] += 1; // W + 1 output size
auto* Y = Output(0, out_shape, at::dtype<float>());
const int chans = X.dim32(1);
const int rows_out = Y->dim32(2);
const int cols_out = Y->dim32(3);
// Integral image over rows of input X
// One work item per output row: N * C * (H + 1).
const int row_pass_size = X.dim32(0) * chans * rows_out;
RowPassKernel<<<
CAFFE_GET_BLOCKS(row_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
row_pass_size,
rows_out,
cols_out,
chans,
X.data<float>(),
Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Integral image over columns of the integral image over rows
// One work item per output column: N * C * (W + 1); operates in place on Y.
const int col_pass_size = X.dim32(0) * chans * cols_out;
ColPassKernel<<<
CAFFE_GET_BLOCKS(col_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
col_pass_size,
rows_out,
cols_out,
chans,
Y->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
// Backward pass (CUDA): propagates dY (N, C, H + 1, W + 1) to dX (N, C, H, W)
// via a row prefix-sum pass into a temporary buffer, then a column pass.
template <>
bool IntegralImageGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Original input to "forward" op
auto& dY = Input(1); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(
0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to
// "forward" op (aka "gradInput")
// Row pass reduces shape of dY from (N, C, H + 1, W + 1)
// to (N, C, H + 1, W)
// Col pass reduces shape to (N, C, H, W)
vector<int64_t> row_pass_shape(dY.sizes().vec());
row_pass_shape[3] -= 1;
ReinitializeTensor(&row_pass_buffer_, row_pass_shape, at::dtype<float>().device(CUDA));
const int chans = row_pass_buffer_.dim32(1);
const int rows_out = row_pass_buffer_.dim32(2);
const int cols_out = row_pass_buffer_.dim32(3);
// Integral image over rows of input X
// One work item per buffer row: N * C * (H + 1).
const int row_pass_size = X.dim32(0) * chans * rows_out;
RowPassGradientKernel<<<
CAFFE_GET_BLOCKS(row_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
row_pass_size,
rows_out,
cols_out,
chans,
dY.data<float>(),
row_pass_buffer_.mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Integral image over columns of the integral image over rows
const int col_pass_size = X.dim32(0) * chans * cols_out;
// Note rows_out - 1: the column pass writes the final (H-row) gradient.
ColPassGradientKernel<<<
CAFFE_GET_BLOCKS(col_pass_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
col_pass_size,
rows_out - 1,
cols_out,
chans,
row_pass_buffer_.data<float>(),
dX->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
// Operator registrations for the CUDA build.
REGISTER_CUDA_OPERATOR(IntegralImage, IntegralImageOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
    IntegralImageGradient,
    IntegralImageGradientOp<float, CUDAContext>);
} // namespace caffe2
|
3858f399bbd89bc4b5b54f462dd26addbf4e1578.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* CA.cpp
*
* Created on: 21/mar/2014
* Author: davide
*/
#include "CA2D.cuh"
// Checks the result of a HIP runtime call and aborts with file/line context
// on failure.  Wrapped in do { } while (0) so the macro expands to a single
// statement and is safe inside an un-braced if/else (the original bare
// { ... } block would break such call sites).
#define CUDA_CHECK_RETURN(value) do {                                     \
    hipError_t _m_cudaStat = value;                                       \
    if (_m_cudaStat != hipSuccess) {                                      \
        fprintf(stderr, "Error %s at line %d in file %s\n",               \
                hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);      \
        exit(1);                                                          \
    } } while (0)
//numcells=yDim*xDim in constructor CA
void* CA2D::allocateGPUBuffer(void * d_buffer,TYPE type){
switch(type){
case FLOAT:
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer,sizeof(float)*numCells));
break;
case DOUBLE:
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer,sizeof(double)*numCells));
break;
case CHAR:
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer,sizeof(char)*numCells));
break;
case INT:
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer,sizeof(int)*numCells));
break;
case BOOL:
CUDA_CHECK_RETURN(hipMalloc((void**)&d_buffer,sizeof(bool)*numCells));
break;
}
return d_buffer;
}
// Copies a whole substate buffer (numCells elements of the width selected by
// `type`) from device memory back to host memory.  Unrecognized types are a
// no-op, matching the original switch with no default.
void CA2D::copyBufferFromGPU(void* h_to, void* d_from, TYPE type){
	size_t elemSize = 0;
	switch(type){
	case FLOAT:
		elemSize = sizeof(float);
		break;
	case DOUBLE:
		elemSize = sizeof(double);
		break;
	case CHAR:
		elemSize = sizeof(char);
		break;
	case INT:
		elemSize = sizeof(int);
		break;
	case BOOL:
		elemSize = sizeof(bool);
		break;
	}
	if (elemSize > 0) {
		CUDA_CHECK_RETURN(hipMemcpy(h_to, d_from, elemSize * numCells, hipMemcpyDeviceToHost));
	}
}
// Copies a whole substate buffer (numCells elements of the width selected by
// `type`) from host memory into device memory.  Unrecognized types are a
// no-op, matching the original switch with no default.
void CA2D::copyBufferToGPU(void* d_to, void* h_from, TYPE type){
	size_t elemSize = 0;
	switch(type){
	case FLOAT:
		elemSize = sizeof(float);
		break;
	case DOUBLE:
		elemSize = sizeof(double);
		break;
	case CHAR:
		elemSize = sizeof(char);
		break;
	case INT:
		elemSize = sizeof(int);
		break;
	case BOOL:
		elemSize = sizeof(bool);
		break;
	}
	if (elemSize > 0) {
		CUDA_CHECK_RETURN(hipMemcpy(d_to, h_from, elemSize * numCells, hipMemcpyHostToDevice));
	}
}
// Single-thread kernel that wires the separately hipMalloc'ed device arrays
// (substate pointers, type tags, scalar block) into the device-resident
// CA_GPU2D object.  Launched as <<<1, 1>>> from initializeGPUAutomata.
__global__ void initializeDCA(void** d_AllocatedpointerSubstates,TYPE* d_substateTypes,SCALARS_CA_GPU2D* scalarsTOCPY,CA_GPU2D* d_CA){
d_CA->d_substates=d_AllocatedpointerSubstates;
d_CA->d_substateTypes=d_substateTypes;
d_CA->scalars=scalarsTOCPY;
}
// Debug kernel; its printf body is commented out, so it is currently a no-op
// launch used only to exercise the device-side CA object.
__global__ void printValues(CA_GPU2D* d_CA){
//printf("(%i,%i),(%i,%i)\n",((int*)d_CA->d_substates[0])[threadIdx.x],d_CA->d_substateTypes[0],((int*)d_CA->d_substates[1])[threadIdx.x],d_CA->d_substateTypes[1]);
//d_CA->d_substates=d_AllocatedpointerSubstates;
//printf("SCALARS\n yDim=%i, xDim=%i value=%i",d_CA->scalars->yDim,d_CA->scalars->xDim,d_CA->getSubstateValue_INT(Q,threadIdx.x));
}
// Builds the GPU mirror of the automaton:
//   1) allocates the device-side CA_GPU2D object,
//   2) allocates one device buffer per registered substate and copies the
//      host data in,
//   3) uploads the substate type tags and the scalar parameter block,
//   4) links everything into the device object via the initializeDCA kernel.
// Fix over the original: scalars_TOPCOPY was released with free() although it
// was created with new (undefined behavior); it is now released with delete.
void CA2D::initializeGPUAutomata(){
	//allocate GPU_CA on GPU
	CUDA_CHECK_RETURN(hipMalloc(&d_CA,sizeof(CA_GPU2D)));
	d_CA_TOCOPY= new CA_GPU2D();
	// TODO: remove — debug initialization leftovers kept from development.
	//	for(int i=0;i<yDim;i++){
	//		for(int j=0;j<xDim;j++){
	//			if(i%substates[Q]==0){
	//				((bool*)substates[Q])[getLinearIndexNormal(i,j,yDim,xDim)]=true;
	//				((bool*)substates[Q_NEW])[getLinearIndexNormal(i,j,yDim,xDim)]=false;
	//			}
	//		}
	//	}
	//glider
	//	((bool*)substates[Q])[getLinearIndexNormal(5,5,yDim,xDim)]=true;
	//	((bool*)substates[Q])[getLinearIndexNormal(6,5,yDim,xDim)]=true;
	//	((bool*)substates[Q])[getLinearIndexNormal(5,6,yDim,xDim)]=true;
	//	((bool*)substates[Q])[getLinearIndexNormal(6,6,yDim,xDim)]=true;
	//
	//	((bool*)substates[Q])[getLinearIndexNormal(7,7,yDim,xDim)]=true;
	//	((bool*)substates[Q])[getLinearIndexNormal(8,7,yDim,xDim)]=true;
	//	((bool*)substates[Q])[getLinearIndexNormal(7,8,yDim,xDim)]=true;
	//	((bool*)substates[Q])[getLinearIndexNormal(8,8,yDim,xDim)]=true;
	//allocate memory ON GPU
	/*allocate all the substates ON GPU
	substate_size=substate_count=real number of registered buffers -> coherent state of the automata
	checked befor of GPU initialization
	conversion between unsigned int(substateTypes) and TYPE is legal*/
	CUDA_CHECK_RETURN(hipMalloc((void**)&d_CA_TOCOPY->d_substates,sizeof(void*)*substates_size));
	d_subPointer = (void**)malloc(sizeof(void*)*substates_size);
	for(int i=0;i<substates_size;i++){
		d_subPointer[i]=allocateGPUBuffer(d_subPointer[i],(TYPE)substateTypes[i]);
		copyBufferToGPU(d_subPointer[i],substates[i],(TYPE)substateTypes[i]);
	}
	CUDA_CHECK_RETURN(hipMemcpy(d_CA_TOCOPY->d_substates,d_subPointer,sizeof(void*)*substates_size,hipMemcpyHostToDevice));
	//CUDA_CHECK_RETURN(hipFree((void*)(&d_CA_TOCOPY->d_substates[1])));
	//substates type array (allocation and copy, that's a constant array usually)
	CUDA_CHECK_RETURN(hipMalloc((void**)&d_CA_TOCOPY->d_substateTypes,sizeof(TYPE)*substates_size));
	CUDA_CHECK_RETURN(hipMemcpy(d_CA_TOCOPY->d_substateTypes,substateTypes,sizeof(TYPE)*substates_size,hipMemcpyHostToDevice));
	//copyScalars. First create structure to be copied, then allocate memory on GPU->copy structure on GPU->
	//->then link d_CA_TOCOPY->scalars to d_CA->scalars whithin a kernel
	SCALARS_CA_GPU2D* scalars_TOPCOPY = new SCALARS_CA_GPU2D();
	scalars_TOPCOPY->xDim=xDim;
	scalars_TOPCOPY->yDim=yDim;
	scalars_TOPCOPY->stop=stop;
	scalars_TOPCOPY->steps=steps;
	scalars_TOPCOPY->isToroidal=isToroidal;
	scalars_TOPCOPY->substates_size=substates_size;
	scalars_TOPCOPY->numCells=numCells;
	CUDA_CHECK_RETURN(hipMalloc((void**)&d_CA_TOCOPY->scalars,sizeof(SCALARS_CA_GPU2D)));
	CUDA_CHECK_RETURN(hipMemcpy(d_CA_TOCOPY->scalars,scalars_TOPCOPY,sizeof(SCALARS_CA_GPU2D),hipMemcpyHostToDevice));
	delete scalars_TOPCOPY;  // was free(): the object came from new (UB); not needed anymore
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( initializeDCA), dim3(1),dim3(1), 0, 0, d_CA_TOCOPY->d_substates,d_CA_TOCOPY->d_substateTypes,d_CA_TOCOPY->scalars,d_CA);
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( printValues), dim3(1),dim3(10), 0, 0, d_CA);
	hipDeviceSynchronize();
	printDebug("GPU memory allocated");
}
// Releases everything allocated by initializeGPUAutomata: the per-substate
// device buffers, the device-side pointer and type arrays, the scalar block,
// and the host-side bookkeeping.
// Fix over the original: d_CA_TOCOPY->d_substates and ->d_substateTypes (both
// hipMalloc'ed) and the host d_CA_TOCOPY object (new'ed) were never released.
void CA2D::cleanUpGPUAutomata(){
	//FREE MEMORY ON GPU-> remember to free first all the buffers INSIDE the struct
	printDebug("inizio cleanUP GPU");
	for(int i=0;i<substates_size;i++){
		CUDA_CHECK_RETURN(hipFree((void*)((d_subPointer[i]))));
	}
	// Device-side array of substate pointers and the type-tag array.
	CUDA_CHECK_RETURN(hipFree((void*)d_CA_TOCOPY->d_substates));
	CUDA_CHECK_RETURN(hipFree((void*)d_CA_TOCOPY->d_substateTypes));
	//free scalars GPU
	CUDA_CHECK_RETURN(hipFree((void*)d_CA_TOCOPY->scalars));
	//CUDA_CHECK_RETURN(hipFree(d_CA));
	free(d_subPointer);
	// Host-side staging object created with new in initializeGPUAutomata.
	delete d_CA_TOCOPY;
	d_CA_TOCOPY = NULL;
	printDebug("GPU memory freeed");
}
// Returns the number of steps the automaton has been configured/run for.
unsigned long long int CA2D::getSteps() const{
return steps;
}
// Wraps a linear cell index into the toroidal domain [0, yDim*xDim)
// using the project's hd_mod helper.
unsigned int CA2D::getToroidalLinearIndex(unsigned int linearIndex){
return hd_mod(linearIndex,yDim*xDim);
}
// Loads a substate grid from a text file into the host buffer, dispatching
// on the substate's registered element type. Returns the loader's status
// (SUCCESS_OPENING_FILE on success); an unrecognized type leaves the default
// status unchanged.
int CA2D::loadSubstate(SUBSTATE_LABEL substateLabel, const char* const pathToFile){
short int status =SUCCESS_OPENING_FILE;
unsigned int type= substateTypes[substateLabel];
switch(type){
case FLOAT:
status=CA_load_substate_FILE2D(pathToFile,(float*)(substates[substateLabel]),yDim,xDim);
break;
case DOUBLE:
status=CA_load_substate_FILE2D(pathToFile,(double*)(substates[substateLabel]),yDim,xDim);
break;
case CHAR:
status=CA_load_substate_FILE2D(pathToFile,(char*)(substates[substateLabel]),yDim,xDim);
break;
case INT:
status=CA_load_substate_FILE2D(pathToFile,(int*)(substates[substateLabel]),yDim,xDim);
break;
case BOOL:
status=CA_load_substate_FILE2D(pathToFile,(bool*)(substates[substateLabel]),yDim,xDim);
break;
}
return status;
}
// Writes a substate grid from the host buffer to a text file, dispatching on
// the substate's registered element type. Mirrors loadSubstate; returns the
// writer's status.
int CA2D::saveSubstate(SUBSTATE_LABEL substateLabel, const char* const pathToFile){
short int status =SUCCESS_OPENING_FILE;
unsigned int type= substateTypes[substateLabel];
switch(type){
case FLOAT:
status=CA_save_substate_FILE2D(pathToFile,(float*)(substates[substateLabel]),yDim,xDim);
break;
case DOUBLE:
status=CA_save_substate_FILE2D(pathToFile,(double*)(substates[substateLabel]),yDim,xDim);
break;
case CHAR:
status=CA_save_substate_FILE2D(pathToFile,(char*)(substates[substateLabel]),yDim,xDim);
break;
case INT:
status=CA_save_substate_FILE2D(pathToFile,(int*)(substates[substateLabel]),yDim,xDim);
break;
case BOOL:
status=CA_save_substate_FILE2D(pathToFile,(bool*)(substates[substateLabel]),yDim,xDim);
break;
}
return status;
}
// Prints an entire substate grid to stdout (convenience overload covering
// the full yDim x xDim domain).
void CA2D::printSubstate_STDOUT(SUBSTATE_LABEL substateLabel){
printSubstate_STDOUT(substateLabel,yDim,xDim);
}
// Prints the top-left Nrow x Ncol window of a substate grid to stdout,
// dispatching on the substate's registered element type. The window must
// fit inside the domain (asserted).
void CA2D::printSubstate_STDOUT(SUBSTATE_LABEL substateLabel, unsigned int Nrow, unsigned int Ncol){
assert(Nrow<=yDim && Ncol<=xDim );
unsigned int type= substateTypes[substateLabel];
switch(type){
case FLOAT:
CA_print_STDOUT2D((float*)(substates[substateLabel]),Nrow,Ncol);
break;
case DOUBLE:
CA_print_STDOUT2D((double*)(substates[substateLabel]),Nrow,Ncol);
break;
case CHAR:
CA_print_STDOUT2D((char*)(substates[substateLabel]),Nrow,Ncol);
break;
case INT:
CA_print_STDOUT2D((int*)(substates[substateLabel]),Nrow,Ncol);
break;
case BOOL:
CA_print_STDOUT2D((bool*)(substates[substateLabel]),Nrow,Ncol);
break;
}
}
/* ------------------START GET SUBSTATE FAMILY FUNCTION------------------*/
/* Typed per-cell getters, in (row, column) and linear-index addressing.
 * Each getter asserts the label is a valid slot of the substate table and
 * that the substate was registered with the matching type; the (i,j)
 * overloads resolve the cell through the topology-aware getLinearIndex
 * function pointer (toroidal or normal, chosen at construction).
 * BUGFIX: bounds asserts used `substateLabel<=substate_count`, which admits
 * one-past-the-last registered slot; they now check against substates_size,
 * the actual capacity of the substates array (see initialize()). */
bool CA2D::getSubstateValue_BOOL(unsigned int substateLabel,unsigned int i, unsigned int j) const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==BOOL);
return ((bool*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
double CA2D::getSubstateValue_DOUBLE(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==DOUBLE);
return ((double*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
float CA2D::getSubstateValue_FLOAT(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==FLOAT);
return ((float*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
int CA2D::getSubstateValue_INT(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==INT);
return ((int*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
char CA2D::getSubstateValue_CHAR(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==CHAR);
return ((char*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
//mono (linear) index cell representation: no topology handling, raw index
bool CA2D::getSubstateValue_BOOL(unsigned int substateLabel,unsigned int index) const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==BOOL);
return ((bool*)substates[substateLabel])[index];
}
double CA2D::getSubstateValue_DOUBLE(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==DOUBLE);
return ((double*)substates[substateLabel])[index];
}
float CA2D::getSubstateValue_FLOAT(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==FLOAT);
return ((float*)substates[substateLabel])[index];
}
int CA2D::getSubstateValue_INT(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==INT);
return ((int*)substates[substateLabel])[index];
}
char CA2D::getSubstateValue_CHAR(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==CHAR);
return ((char*)substates[substateLabel])[index];
}
/* ------------------END GET SUBSTATE VALUE FAMILY------------------*/
/* ----------------START SET SUBSTATE FAMILY FUNCTION ------------------*/
/* Typed per-cell setters, in (row, column) and linear-index addressing.
 * Mirror of the getter family: label must be a valid slot and the registered
 * type must match.
 * BUGFIX: bounds asserts used `substateLabel<=substate_count` (off by one);
 * they now check against substates_size, the capacity of the table. */
void CA2D::setSubstateValue2D_BOOL(unsigned int substateLabel,unsigned int i, unsigned int j,bool const value) {
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==BOOL);
((bool*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_DOUBLE(unsigned int substateLabel,unsigned int i, unsigned int j, double const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==DOUBLE);
((double*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_FLOAT(unsigned int substateLabel,unsigned int i, unsigned int j,float const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==FLOAT);
((float*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_INT(unsigned int substateLabel,unsigned int i, unsigned int j,int const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==INT);
((int*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_CHAR(unsigned int substateLabel,unsigned int i, unsigned int j,char const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==CHAR);
((char*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
//linear-index variants: raw index, no topology handling
void CA2D::setSubstateValue_BOOL(unsigned int substateLabel,unsigned int index,bool const value) {
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==BOOL);
((bool*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_DOUBLE(unsigned int substateLabel,unsigned int index, double const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==DOUBLE);
((double*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_FLOAT(unsigned int substateLabel,unsigned int index,float const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==FLOAT);
((float*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_INT(unsigned int substateLabel,unsigned int index,int const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==INT);
((int*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_CHAR(unsigned int substateLabel,unsigned int index,char const value){
assert(substateLabel<substates_size);
assert(substateTypes[substateLabel]==CHAR);
((char*)substates[substateLabel])[index]=value;
}
/* ------------------END SET SUBSTATE VALUE FAMILY------------------*/
//Register the host callback that decides when evolution stops.
//(The misspelling "Condiction" is kept: it is the public name callers use.)
void CA2D::registerStopCondictionCallback(bool(*stopCondition_callback)()){
assert(stopCondition_callback!=NULL);
stopCondition=stopCondition_callback;
}
/*It checks whether or not all the callbacks, substates,
* matrices parameter are in coherent state.
* If it works correctly computation may take place
* Return:
* TRUE if everything is OK
* FALSE stop the automata. Finalize memories-> shutdown
*/
//TODO: implement these checks properly (original note, in Italian, said the same)
bool CA2D::checkAutomataStatusBeforeComputation(){
/*at least one substate and one callback have to be provided*/
/*substate number parameter has to match the substate actually added*/
/*function callbacks number parameter has to match the callbacks actually registered*/
/*A stop function has to be provided as callback*/
return true;
}
void CA2D::globalTransitionFunction_MAINLOOP_callback(){
// Evolution loop with a periodic host callback: each step launches every
// elementary-process kernel in order, then every stepsBetweenCallback steps
// the registered callback is invoked.  Wall timing is reported at exit.
// BUGFIX: the kernel-launch line was corrupted by automatic hipify
// translation; restored to a well-formed hipLaunchKernelGGL call.
clock_t start = clock();
while(!stop){
//launch each elementary process over the whole automaton
for(unsigned int k=0;k<elementaryProcesses_size;k++){
hipLaunchKernelGGL(elementaryProcesses[k], dimGrid, blockDim, 0, 0, d_CA);
hipDeviceSynchronize();
}
steps=steps+1;
printf("Step = %llu\n",steps);//steps is unsigned long long -> %llu (was %i, UB)
stop=stopCondition();
//invoke the user callback every stepsBetweenCallback steps
if(steps%stepsBetweenCallback==0){
callback(steps);
}
}
clock_t end = clock();
elapsedTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Step performed = %llu\nElapsed Time=%.4f\n",steps,elapsedTime);
}
void CA2D::globalTransitionFunction_MAINLOOP(){
// Evolution loop without a user callback: each step launches every
// elementary-process kernel in order until the stop condition fires.
// BUGFIX: the kernel-launch line was corrupted by automatic hipify
// translation; restored to a well-formed hipLaunchKernelGGL call.
clock_t start = clock();
while(!stop){
//launch each elementary process over the whole automaton
for(unsigned int k=0;k<elementaryProcesses_size;k++){
hipLaunchKernelGGL(elementaryProcesses[k], dimGrid, blockDim, 0, 0, d_CA);
hipDeviceSynchronize();
}
steps=steps+1;
printf("Step = %llu\n",steps);//steps is unsigned long long -> %llu (was %i, UB)
stop=stopCondition();
}
clock_t end = clock();
elapsedTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Step performed = %llu\nElapsed Time=%.4f\n",steps,elapsedTime);
}
//Entry point for a full run: validate the automaton, then enter the main loop.
void CA2D::globalTransitionFunction(){
if(!checkAutomataStatusBeforeComputation()){
//error are printed out by the function checkAutomataStatusBeforeComputation() directly
cleanup();
exit(-1);
}
globalTransitionFunction_MAINLOOP();
}
//Append a kernel to the ordered list of elementary processes run each step.
void CA2D::registerElementaryProcess( void(*callback)(CA_GPU2D* d_CA ) ){
assert(callback!=NULL && elementaryProcesses_count < elementaryProcesses_size );
elementaryProcesses[elementaryProcesses_count]=callback;
elementaryProcesses_count++;
}
//Record the declared sizes used by initialize() to allocate the tables.
void CA2D::setInitialParameters(unsigned int substates_size,unsigned int transitionFunction_size){
/**
* substates_size = The number of substates of the automaton
* transitionFunction_size = The number of transition functions
* */
this->substates_size=substates_size;
this->elementaryProcesses_size=transitionFunction_size;
}
/**
* It does preliminary allocation of substates arrays
* and function callbacks for elementary processes
* NOTE(review): malloc results are not checked for NULL -- confirm callers
* tolerate allocation failure, or add checks.
* */
void CA2D::initialize(){
assert(elementaryProcesses==0 && substates==0 && substateTypes==0);
elementaryProcesses=(void(**)(CA_GPU2D*))malloc(sizeof(void(*)(CA_GPU2D*))*elementaryProcesses_size);
substates= (void**)malloc(sizeof(void*)*substates_size);
substateTypes =(TYPE*)malloc(sizeof(TYPE)*substates_size);
}
void CA2D::cleanup(){
// Release all host-side buffers owned by the automaton.
// NOTE(review): substates are freed by position 0..substate_count-1 while
// registerSubstate stores them by label -- confirm labels are dense from 0.
printDebug("CLEANUP - START");
unsigned int i=0;
//free all the allocated substates
for(;i<substate_count;i++){
free(substates[i]);
printDebug("FREED");
}
//free(elementaryProcesses);//it is allocated on GPU
free(substates);
free(substateTypes);//allocated in initialize(); was previously leaked
printDebug("CLEANUP - END");
}
//Allocate a zero-initialized host buffer of the given type and register it
//under the given label.
void CA2D::addSubstate(SUBSTATE_LABEL label,TYPE t){
void * substate=NULL;
substate=allocateSubstate(t,substate);
registerSubstate(substate,label,t);
}
//Store an externally-allocated buffer in the substate table, recording its
//type and bumping the registered-substate counter.
void CA2D::registerSubstate(void * buffer,SUBSTATE_LABEL label,TYPE t){
assert( (substate_count < (substates_size)) && (buffer != NULL) && (label < (substates_size)) );
substates[label]=buffer;
substateTypes[label]=t;
substate_count++;
}
//calloc one value per cell of the requested type; returns the buffer
//(unchanged, i.e. the caller's pointer, for an unknown type).
void* CA2D::allocateSubstate(TYPE t,void* buffer){
switch(t){
case FLOAT:
buffer = (float*)calloc(numCells,sizeof(float));
break;
case DOUBLE:
buffer = (double*)calloc(numCells,sizeof(double));
break;
case CHAR:
buffer = (char*)calloc(numCells,sizeof(char));
break;
case INT:
buffer = (int*)calloc(numCells,sizeof(int));
break;
case BOOL:
buffer = (bool*)calloc(numCells,sizeof(bool));
break;
}
//map the correnspondent buffer just created to its type
return buffer;
}
void CA2D::updateDimGrid(){
//Recompute the launch grid as ceil(domain extent / block extent) per axis.
dimGrid.x = xDim / blockDim.x + (xDim % blockDim.x ? 1u : 0u);
dimGrid.y = yDim / blockDim.y + (yDim % blockDim.y ? 1u : 0u);
dimGrid.z = 1;//2D automaton: a single layer of blocks
}
//2D constructor: records the domain size, picks the indexing function
//(toroidal wrap vs. plain row-major) and sets the default launch geometry.
CA2D::CA2D(int yDim,int xDim,bool toroidal){
this->preliminaryCAConstructor();
this->yDim=yDim;
this->xDim=xDim;
this->numCells=yDim*xDim;
this->isToroidal=toroidal;
//choose the (host/device) linear-index function once, at construction
if(isToroidal){
getLinearIndex=hd_getLinearIndexToroidal2D;
}else{
getLinearIndex=hd_getLinearIndexNormal2D;
}
blockDim.x=DEFAULT_BLOCKDIM_X;
blockDim.y=DEFAULT_BLOCKDIM_Y;
blockDim.z=1;
updateDimGrid();
}
/*GET i-th NEIGHBOR INDEX functions MOORE NEIGHBORHOOD
5 | 1 | 8
---|---|---
2 | 0 | 3
---|---|---
6 | 4 | 7
*/
//Linear index of the given Moore neighbor of cell (i,j); neighbor 0 is the
//cell itself (numbering in the diagram above).  Boundary handling is
//delegated to getLinearIndex (toroidal or normal, chosen at construction).
unsigned int CA2D::getNeighborIndex2D_MOORE(unsigned int i, unsigned int j,unsigned int neighbor){
assert(neighbor<9);
switch(neighbor){
case 0:
return getLinearIndex(i,j,yDim,xDim);
case 1:
return getLinearIndex(i-1,j,yDim,xDim);//one row up
case 2:
return getLinearIndex(i,j-1,yDim,xDim);//same row one coloumn left
case 3:
return getLinearIndex(i,j+1,yDim,xDim);//same row one coloumn right
case 4:
return getLinearIndex(i+1,j,yDim,xDim);//same column one row down
case 5:
return getLinearIndex(i-1,j-1,yDim,xDim);//one row up one col left
case 6:
return getLinearIndex(i+1,j-1,yDim,xDim);//one row down one col left
case 7:
return getLinearIndex(i+1,j+1,yDim,xDim);//row down col right
case 8:
return getLinearIndex(i-1,j+1,yDim,xDim);//row up col right
}
return 0;//unreachable while the assert holds; was `return NULL` (pointer constant used for an unsigned result)
}
//Moore neighbor of a cell given by its linear index (same numbering as the
//(i,j) overload's diagram).
//NOTE(review): neighbors are computed by raw offsets, so unlike the (i,j)
//overload this applies no toroidal wrap or row-edge handling -- confirm
//callers use it only for interior cells.
unsigned int CA2D::getNeighborIndex2D_MOORE(unsigned int index,unsigned int neighbor){
assert(neighbor<9);
switch(neighbor){
case 0:
return index;
case 1:
return index-xDim;//one row up
case 2:
return index-1;//same row one coloumn left
case 3:
return index+1;//same row one coloumn right
case 4:
return index+xDim;//same column one row down
case 5:
return index-xDim-1;//one row up one col left
case 6:
return index+xDim-1;//one row down one col left
case 7:
return index+xDim+1;//row down col right
case 8:
return index-xDim+1;//row up col right
}
return 0;//unreachable while the assert holds; was `return NULL` (pointer constant used for an unsigned result)
}
//GETTER AND SETTER
//Width (columns) of the automaton.
unsigned int CA2D::get_xDim() const {
return xDim;
}
//Declared number of elementary processes (capacity of the callback table).
unsigned int CA2D::getElementaryProcessesSize() const {
return elementaryProcesses_size;
}
//Height (rows) of the automaton.
unsigned int CA2D::get_yDim() const {
return yDim;
}
//Declared number of substates (capacity of the substate table).
unsigned int CA2D::getSubstatesSize() const {
return substates_size;
}
//x-extent of the kernel launch block.
unsigned int CA2D::getBlockdimX() const {
return blockDim.x;
}
void CA2D::setBlockdimX(unsigned int dimX) {
// Set the x-extent of the thread block; must be a power of two, otherwise
// the default is restored and a warning is printed.
// BUGFIX: the guard used to test the current blockDim.x (initialized to a
// power-of-two default, hence always true) instead of the requested dimX,
// so any value was silently accepted.
if(isPowerOfTwo(dimX)){
this->blockDim.x = dimX;
}else{
printf("WARNING -> setBlockDimX has to be power of 2 -> dimX=%i",DEFAULT_BLOCKDIM_X);
blockDim.x=DEFAULT_BLOCKDIM_X;
}
updateDimGrid();
}
//y-extent of the kernel launch block.
unsigned int CA2D::getBlockDimY() const {
return this->blockDim.y;
}
void CA2D::setBlockDimY(unsigned int dimY) {
// Set the y-extent of the thread block; must be a power of two, otherwise
// the default is restored and a warning is printed.
// BUGFIX: the guard used to test the current blockDim.y instead of the
// requested dimY, so any value was silently accepted (same defect as
// setBlockdimX).
if(isPowerOfTwo(dimY)){
this->blockDim.y = dimY;
}else{
printf("WARNING -> setBlockDimY has to be power of 2 -> dimY=%i",DEFAULT_BLOCKDIM_Y);
blockDim.y=DEFAULT_BLOCKDIM_Y;
}
updateDimGrid();
}
//Number of evolution steps between two invocations of the user callback.
unsigned int CA2D::getStepsBetweenCopy() const {
return stepsBetweenCallback;
}
void CA2D::setStepsBetweenCopy(unsigned int stepsBetweenCopy) {
this->stepsBetweenCallback = stepsBetweenCopy;
}
unsigned int CA2D::isPowerOfTwo (unsigned int x)
{
//Branch-free test: a power of two has exactly one bit set, so x & (x-1)
//clears the lowest set bit and leaves zero.  Zero is rejected explicitly,
//matching the loop-based original (which returned false for x==0).
return (x != 0u) && ((x & (x - 1u)) == 0u);
}
//END OF GETTER AND SETTER
//Copy every substate buffer back from the device into host memory.
void CA2D::copyBuffersFromGPU(){
printDebug("START offload copy");
for(int i=0;i<substates_size;i++){
copyBufferFromGPU(substates[i],d_subPointer[i],substateTypes[i]);
}
printDebug("END offload copy");
}
/**
* Set default parameters and do normal initialization
* */
void CA2D::preliminaryCAConstructor() {
// Reset every member to a safe default; runs first thing in the constructor.
// BUGFIX: the original began with `assert(yDim <= 0);`, which read the
// still-uninitialized unsigned member yDim and could fire nondeterministically
// in debug builds; the assert has been removed.
this->steps=0;
this->elapsedTime=0.0f;
substates=NULL;
substates_size=0;
substate_count=0;
substateTypes=0;
stopCondition=0;
stop=false;//global transition func main loop ACTIVE
elementaryProcesses=NULL;
elementaryProcesses_size=0;
elementaryProcesses_count=0;
}
bool CA2D::evolveOneStep() {
// Advance the automaton by one step (all elementary processes, in order).
// Returns true if a step was performed, false if the stop flag is already set.
// BUGFIX: the kernel-launch line was corrupted by automatic hipify
// translation; restored to a well-formed hipLaunchKernelGGL call.
if(!stop){
for(unsigned int k=0;k<elementaryProcesses_size;k++){
hipLaunchKernelGGL(elementaryProcesses[k], dimGrid, blockDim, 0, 0, d_CA);
hipDeviceSynchronize();
}
steps=steps+1;
printf("Step = %llu\n",steps);//steps is unsigned long long -> %llu (was %i, UB)
stop=stopCondition();
return true;
}
return false;
}
bool CA2D::evolveKsteps(unsigned int k) {
// Advance the automaton by up to k steps, stopping early when the stop
// condition fires.  Returns true if at least one step was performed.
// BUGFIX: the original incremented k instead of the loop counter i, reused
// k as the inner-loop variable, returned unconditionally after the first
// step, and contained a hipify-corrupted kernel-launch line.
bool anyStep=false;
for(unsigned int i=0;i<k && !stop;i++){
for(unsigned int p=0;p<elementaryProcesses_size;p++){
hipLaunchKernelGGL(elementaryProcesses[p], dimGrid, blockDim, 0, 0, d_CA);
hipDeviceSynchronize();
}
steps=steps+1;
printf("Step = %llu\n",steps);//steps is unsigned long long -> %llu (was %i, UB)
stop=stopCondition();
anyStep=true;
}
return anyStep;
}
//Register the host callback invoked every stepsBetweenCallback steps.
void CA2D::setCallback(void(*call)(unsigned int)){
this->callback=call;
}
| 3858f399bbd89bc4b5b54f462dd26addbf4e1578.cu | /*
* CA.cpp
*
* Created on: 21/mar/2014
* Author: davide
*/
#include "CA2D.cuh"
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
//numcells=yDim*xDim in constructor CA
void* CA2D::allocateGPUBuffer(void * d_buffer,TYPE type){
//Allocate a device buffer holding one value per cell of the given type;
//for an unrecognized type nothing is allocated and the incoming pointer
//is returned unchanged (same contract as the per-case original).
size_t elemSize = 0;
switch(type){
case FLOAT:  elemSize = sizeof(float);  break;
case DOUBLE: elemSize = sizeof(double); break;
case CHAR:   elemSize = sizeof(char);   break;
case INT:    elemSize = sizeof(int);    break;
case BOOL:   elemSize = sizeof(bool);   break;
}
if(elemSize != 0){
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_buffer, elemSize*numCells));
}
return d_buffer;
}
void CA2D::copyBufferFromGPU(void* h_to, void* d_from, TYPE type){
//Device-to-host copy of a whole substate buffer; the byte count depends on
//the element type.  An unrecognized type copies nothing (as the original).
size_t elemSize = 0;
switch(type){
case FLOAT:  elemSize = sizeof(float);  break;
case DOUBLE: elemSize = sizeof(double); break;
case CHAR:   elemSize = sizeof(char);   break;
case INT:    elemSize = sizeof(int);    break;
case BOOL:   elemSize = sizeof(bool);   break;
}
if(elemSize != 0){
CUDA_CHECK_RETURN(cudaMemcpy(h_to, d_from, elemSize*numCells, cudaMemcpyDeviceToHost));
}
}
void CA2D::copyBufferToGPU(void* d_to, void* h_from, TYPE type){
//Host-to-device copy of a whole substate buffer; the byte count depends on
//the element type.  An unrecognized type copies nothing (as the original).
size_t elemSize = 0;
switch(type){
case FLOAT:  elemSize = sizeof(float);  break;
case DOUBLE: elemSize = sizeof(double); break;
case CHAR:   elemSize = sizeof(char);   break;
case INT:    elemSize = sizeof(int);    break;
case BOOL:   elemSize = sizeof(bool);   break;
}
if(elemSize != 0){
CUDA_CHECK_RETURN(cudaMemcpy(d_to, h_from, elemSize*numCells, cudaMemcpyHostToDevice));
}
}
//Device-side linker: stores the pre-allocated substate pointer array, type
//table and scalar block into the device-resident CA_GPU2D (launched <<<1,1>>>).
__global__ void initializeDCA(void** d_AllocatedpointerSubstates,TYPE* d_substateTypes,SCALARS_CA_GPU2D* scalarsTOCPY,CA_GPU2D* d_CA){
d_CA->d_substates=d_AllocatedpointerSubstates;
d_CA->d_substateTypes=d_substateTypes;
d_CA->scalars=scalarsTOCPY;
}
//Debug kernel: body is fully commented out, so this is currently a no-op.
__global__ void printValues(CA_GPU2D* d_CA){
//printf("(%i,%i),(%i,%i)\n",((int*)d_CA->d_substates[0])[threadIdx.x],d_CA->d_substateTypes[0],((int*)d_CA->d_substates[1])[threadIdx.x],d_CA->d_substateTypes[1]);
//d_CA->d_substates=d_AllocatedpointerSubstates;
//printf("SCALARS\n yDim=%i, xDim=%i value=%i",d_CA->scalars->yDim,d_CA->scalars->xDim,d_CA->getSubstateValue_INT(Q,threadIdx.x));
}
void CA2D::initializeGPUAutomata(){
// Mirror the host automaton on the GPU: allocate the device-resident
// CA_GPU2D, upload every substate buffer and the type table, stage the
// scalar parameters, then wire the device pointers together with a
// 1-thread kernel.
// Changes vs. original: dead commented-out test-pattern code removed;
// `free()` on a new-allocated object replaced with `delete` (UB fix);
// deprecated cudaThreadSynchronize replaced with cudaDeviceSynchronize;
// kernel launches now checked with cudaGetLastError.
//allocate GPU_CA on GPU
CUDA_CHECK_RETURN(cudaMalloc(&d_CA,sizeof(CA_GPU2D)));
d_CA_TOCOPY= new CA_GPU2D();
/*allocate all the substates ON GPU
substate_size=substate_count=real number of registered buffers -> coherent state of the automata
checked before GPU initialization
conversion between unsigned int(substateTypes) and TYPE is legal*/
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_CA_TOCOPY->d_substates,sizeof(void*)*substates_size));
d_subPointer = (void**)malloc(sizeof(void*)*substates_size);
for(unsigned int i=0;i<substates_size;i++){
d_subPointer[i]=allocateGPUBuffer(d_subPointer[i],(TYPE)substateTypes[i]);
copyBufferToGPU(d_subPointer[i],substates[i],(TYPE)substateTypes[i]);
}
CUDA_CHECK_RETURN(cudaMemcpy(d_CA_TOCOPY->d_substates,d_subPointer,sizeof(void*)*substates_size,cudaMemcpyHostToDevice));
//substates type array (allocation and copy, that's a constant array usually)
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_CA_TOCOPY->d_substateTypes,sizeof(TYPE)*substates_size));
CUDA_CHECK_RETURN(cudaMemcpy(d_CA_TOCOPY->d_substateTypes,substateTypes,sizeof(TYPE)*substates_size,cudaMemcpyHostToDevice));
//scalar parameters: stage on host, copy to device, then link the struct
//members together inside the initializeDCA kernel
SCALARS_CA_GPU2D* scalars_TOPCOPY = new SCALARS_CA_GPU2D();
scalars_TOPCOPY->xDim=xDim;
scalars_TOPCOPY->yDim=yDim;
scalars_TOPCOPY->stop=stop;
scalars_TOPCOPY->steps=steps;
scalars_TOPCOPY->isToroidal=isToroidal;
scalars_TOPCOPY->substates_size=substates_size;
scalars_TOPCOPY->numCells=numCells;
CUDA_CHECK_RETURN(cudaMalloc((void**)&d_CA_TOCOPY->scalars,sizeof(SCALARS_CA_GPU2D)));
CUDA_CHECK_RETURN(cudaMemcpy(d_CA_TOCOPY->scalars,scalars_TOPCOPY,sizeof(SCALARS_CA_GPU2D),cudaMemcpyHostToDevice));
delete scalars_TOPCOPY;//not needed anymore; was free() on a new-ed object (UB)
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
initializeDCA<<<1,1>>>(d_CA_TOCOPY->d_substates,d_CA_TOCOPY->d_substateTypes,d_CA_TOCOPY->scalars,d_CA);
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
printValues<<<1,10>>>(d_CA);
CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
printDebug("GPU memory allocated");
}
void CA2D::cleanUpGPUAutomata(){
//FREE MEMORY ON GPU-> remember to free first all the buffers INSIDE the struct
printDebug("inizio cleanUP GPU");
for(unsigned int i=0;i<substates_size;i++){
CUDA_CHECK_RETURN(cudaFree((void*)((d_subPointer[i]))));
}
//free the device-side arrays referenced by the staging struct (previously leaked)
CUDA_CHECK_RETURN(cudaFree((void*)d_CA_TOCOPY->d_substates));
CUDA_CHECK_RETURN(cudaFree((void*)d_CA_TOCOPY->d_substateTypes));
//free scalars GPU
CUDA_CHECK_RETURN(cudaFree((void*)d_CA_TOCOPY->scalars));
//CUDA_CHECK_RETURN(cudaFree(d_CA));
delete d_CA_TOCOPY;//host-side staging struct, allocated with new (previously leaked)
free(d_subPointer);
printDebug("GPU memory freeed");
}
//Total number of evolution steps performed so far.
unsigned long long int CA2D::getSteps() const{
return steps;
}
//Wrap a linear cell index into [0, yDim*xDim) for toroidal topology.
unsigned int CA2D::getToroidalLinearIndex(unsigned int linearIndex){
return hd_mod(linearIndex,yDim*xDim);
}
//Load a substate's cells from a 2D file, dispatching on the element type
//the substate was registered with; returns the reader's status code.
int CA2D::loadSubstate(SUBSTATE_LABEL substateLabel, const char* const pathToFile){
short int status =SUCCESS_OPENING_FILE;
unsigned int type= substateTypes[substateLabel];
switch(type){
case FLOAT:
status=CA_load_substate_FILE2D(pathToFile,(float*)(substates[substateLabel]),yDim,xDim);
break;
case DOUBLE:
status=CA_load_substate_FILE2D(pathToFile,(double*)(substates[substateLabel]),yDim,xDim);
break;
case CHAR:
status=CA_load_substate_FILE2D(pathToFile,(char*)(substates[substateLabel]),yDim,xDim);
break;
case INT:
status=CA_load_substate_FILE2D(pathToFile,(int*)(substates[substateLabel]),yDim,xDim);
break;
case BOOL:
status=CA_load_substate_FILE2D(pathToFile,(bool*)(substates[substateLabel]),yDim,xDim);
break;
}
return status;
}
//Save a substate's cells to a 2D file (same type dispatch as loadSubstate).
int CA2D::saveSubstate(SUBSTATE_LABEL substateLabel, const char* const pathToFile){
short int status =SUCCESS_OPENING_FILE;
unsigned int type= substateTypes[substateLabel];
switch(type){
case FLOAT:
status=CA_save_substate_FILE2D(pathToFile,(float*)(substates[substateLabel]),yDim,xDim);
break;
case DOUBLE:
status=CA_save_substate_FILE2D(pathToFile,(double*)(substates[substateLabel]),yDim,xDim);
break;
case CHAR:
status=CA_save_substate_FILE2D(pathToFile,(char*)(substates[substateLabel]),yDim,xDim);
break;
case INT:
status=CA_save_substate_FILE2D(pathToFile,(int*)(substates[substateLabel]),yDim,xDim);
break;
case BOOL:
status=CA_save_substate_FILE2D(pathToFile,(bool*)(substates[substateLabel]),yDim,xDim);
break;
}
return status;
}
//Print the whole substate grid (full yDim x xDim extent) to stdout.
void CA2D::printSubstate_STDOUT(SUBSTATE_LABEL substateLabel){
printSubstate_STDOUT(substateLabel,yDim,xDim);
}
//Print the top-left Nrow x Ncol window of a substate to stdout,
//dispatching on the element type the substate was registered with.
void CA2D::printSubstate_STDOUT(SUBSTATE_LABEL substateLabel, unsigned int Nrow, unsigned int Ncol){
assert(Nrow<=yDim && Ncol<=xDim );
unsigned int type= substateTypes[substateLabel];
switch(type){
case FLOAT:
CA_print_STDOUT2D((float*)(substates[substateLabel]),Nrow,Ncol);
break;
case DOUBLE:
CA_print_STDOUT2D((double*)(substates[substateLabel]),Nrow,Ncol);
break;
case CHAR:
CA_print_STDOUT2D((char*)(substates[substateLabel]),Nrow,Ncol);
break;
case INT:
CA_print_STDOUT2D((int*)(substates[substateLabel]),Nrow,Ncol);
break;
case BOOL:
CA_print_STDOUT2D((bool*)(substates[substateLabel]),Nrow,Ncol);
break;
}
}
/* ------------------START GET SUBSTATE FAMILY FUNCTION------------------*/
/* Typed per-cell getters; the label must name a registered substate of the
 * matching type.  NOTE(review): the bounds assert `substateLabel<=substate_count`
 * admits one past the last registered slot -- looks like an off-by-one,
 * confirm and tighten to `<`. */
bool CA2D::getSubstateValue_BOOL(unsigned int substateLabel,unsigned int i, unsigned int j) const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==BOOL);
return ((bool*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
double CA2D::getSubstateValue_DOUBLE(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==DOUBLE);
return ((double*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
float CA2D::getSubstateValue_FLOAT(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==FLOAT);
return ((float*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
int CA2D::getSubstateValue_INT(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==INT);
return ((int*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
char CA2D::getSubstateValue_CHAR(unsigned int substateLabel,unsigned int i, unsigned int j)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==CHAR);
return ((char*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)];
}
//mono index cell representation
bool CA2D::getSubstateValue_BOOL(unsigned int substateLabel,unsigned int index) const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==BOOL);
return ((bool*)substates[substateLabel])[index];
}
double CA2D::getSubstateValue_DOUBLE(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==DOUBLE);
return ((double*)substates[substateLabel])[index];
}
float CA2D::getSubstateValue_FLOAT(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==FLOAT);
return ((float*)substates[substateLabel])[index];
}
int CA2D::getSubstateValue_INT(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==INT);
return ((int*)substates[substateLabel])[index];
}
char CA2D::getSubstateValue_CHAR(unsigned int substateLabel,unsigned int index)const{
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==CHAR);
return ((char*)substates[substateLabel])[index];
}
/* ------------------END GET SUBSTATE VALUE FAMILY------------------*/
/* ----------------START SET SUBSTATE FAMILY FUNCTION ------------------*/
/* Typed per-cell setters; mirror of the getter family.
 * NOTE(review): same `<=substate_count` bounds assert as the getters --
 * looks like an off-by-one, confirm and tighten to `<`. */
void CA2D::setSubstateValue2D_BOOL(unsigned int substateLabel,unsigned int i, unsigned int j,bool const value) {
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==BOOL);
((bool*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_DOUBLE(unsigned int substateLabel,unsigned int i, unsigned int j, double const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==DOUBLE);
((double*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_FLOAT(unsigned int substateLabel,unsigned int i, unsigned int j,float const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==FLOAT);
((float*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_INT(unsigned int substateLabel,unsigned int i, unsigned int j,int const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==INT);
((int*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
void CA2D::setSubstateValue2D_CHAR(unsigned int substateLabel,unsigned int i, unsigned int j,char const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==CHAR);
((char*)substates[substateLabel])[getLinearIndex(i,j,yDim,xDim)]=value;
}
//linear-index variants: raw index, no topology handling
void CA2D::setSubstateValue_BOOL(unsigned int substateLabel,unsigned int index,bool const value) {
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==BOOL);
((bool*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_DOUBLE(unsigned int substateLabel,unsigned int index, double const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==DOUBLE);
((double*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_FLOAT(unsigned int substateLabel,unsigned int index,float const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==FLOAT);
((float*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_INT(unsigned int substateLabel,unsigned int index,int const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==INT);
((int*)substates[substateLabel])[index]=value;
}
void CA2D::setSubstateValue_CHAR(unsigned int substateLabel,unsigned int index,char const value){
assert(substateLabel<=substate_count);
assert(substateTypes[substateLabel]==CHAR);
((char*)substates[substateLabel])[index]=value;
}
/* ------------------END SET SUBSTATE VALUE FAMILY------------------*/
//Register the host callback that decides when evolution stops.
//(The misspelling "Condiction" is kept: it is the public name callers use.)
void CA2D::registerStopCondictionCallback(bool(*stopCondition_callback)()){
assert(stopCondition_callback!=NULL);
stopCondition=stopCondition_callback;
}
/*It checks whether or not all the callbacks, substates,
* matrices parameter are in coherent state.
* If it works correctly computation may take place
* Return:
* TRUE if everything is OK
* FALSE stop the automata. Finalize memories-> shutdown
*/
//TODO: implement these checks properly (original note, in Italian, said the same)
bool CA2D::checkAutomataStatusBeforeComputation(){
/*at least one substate and one callback have to be provided*/
/*substate number parameter has to match the substate actually added*/
/*function callbacks number parameter has to match the callbacks actually registered*/
/*A stop function has to be provided as callback*/
return true;
}
void CA2D::globalTransitionFunction_MAINLOOP_callback(){
// Evolution loop with a periodic host callback: each step launches every
// elementary-process kernel in order, then every stepsBetweenCallback steps
// the registered callback is invoked.  Wall timing is reported at exit.
// BUGFIX: printf used %i for the unsigned long long `steps` (undefined
// behavior); also replaced deprecated cudaThreadSynchronize.
clock_t start = clock();
while(!stop){
//launch each elementary process over the whole automaton
for(unsigned int k=0;k<elementaryProcesses_size;k++){
(elementaryProcesses[k])<<<dimGrid,blockDim>>>(d_CA);
cudaDeviceSynchronize();
}
steps=steps+1;
printf("Step = %llu\n",steps);
stop=stopCondition();
//invoke the user callback every stepsBetweenCallback steps
if(steps%stepsBetweenCallback==0){
callback(steps);
}
}
clock_t end = clock();
elapsedTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Step performed = %llu\nElapsed Time=%.4f\n",steps,elapsedTime);
}
void CA2D::globalTransitionFunction_MAINLOOP(){
// Evolution loop without a user callback: each step launches every
// elementary-process kernel in order until the stop condition fires.
// BUGFIX: printf used %i for the unsigned long long `steps` (undefined
// behavior); also replaced deprecated cudaThreadSynchronize.
clock_t start = clock();
while(!stop){
//launch each elementary process over the whole automaton
for(unsigned int k=0;k<elementaryProcesses_size;k++){
(elementaryProcesses[k])<<<dimGrid,blockDim>>>(d_CA);
cudaDeviceSynchronize();
}
steps=steps+1;
printf("Step = %llu\n",steps);
stop=stopCondition();
}
clock_t end = clock();
elapsedTime = (double)(end - start) / CLOCKS_PER_SEC;
printf("Step performed = %llu\nElapsed Time=%.4f\n",steps,elapsedTime);
}
void CA2D::globalTransitionFunction(){
    // Entry point for a full simulation run: validates the automaton's
    // configuration, then hands control to the main loop.
    if (checkAutomataStatusBeforeComputation()) {
        globalTransitionFunction_MAINLOOP();
        return;
    }
    // Diagnostics were already printed by the status check; release host
    // buffers and abort the process.
    cleanup();
    exit(-1);
}
void CA2D::registerElementaryProcess( void(*callback)(CA_GPU2D* d_CA ) ){
    // Appends one elementary-process kernel pointer to the execution table.
    // Registration order defines execution order within a simulation step.
    assert(callback != NULL && elementaryProcesses_count < elementaryProcesses_size);
    elementaryProcesses[elementaryProcesses_count++] = callback;
}
void CA2D::setInitialParameters(unsigned int substates_size,unsigned int transitionFunction_size){
    // Records the capacities later used by initialize() for allocation:
    //   substates_size          - number of substates of the automaton
    //   transitionFunction_size - number of elementary processes (kernels)
    this->substates_size = substates_size;
    this->elementaryProcesses_size = transitionFunction_size;
}
/**
* It does preliminary allocation of substates arrays
* and function callbacks for elementary processes
* */
void CA2D::initialize(){
    // Allocates the host-side tables sized by setInitialParameters():
    // the kernel-pointer table, the substate-buffer table, and the
    // substate-type table. Must run once, before any registration.
    assert(elementaryProcesses == 0 && substates == 0 && substateTypes == 0);
    elementaryProcesses = (void (**)(CA_GPU2D*))malloc(elementaryProcesses_size * sizeof(void (*)(CA_GPU2D*)));
    substates = (void**)malloc(substates_size * sizeof(void*));
    substateTypes = (TYPE*)malloc(substates_size * sizeof(TYPE));
}
void CA2D::cleanup(){
    // Releases all host-side buffers owned by the automaton: every
    // registered substate buffer, then the substate tables themselves.
    printDebug("CLEANUP - START");
    unsigned int i=0;
    //free all the allocated substates
    for(;i<substate_count;i++){
        free(substates[i]);
        printDebug("FREED");
    }
    //free(elementaryProcesses);//it is allocated on GPU
    free(substates);
    substates = NULL;
    // BUGFIX: substateTypes is malloc'd in initialize() but was never
    // released here (memory leak).
    free(substateTypes);
    substateTypes = NULL;
    // Reset the count so an accidental second cleanup() does not walk a
    // freed/NULL substate table.
    substate_count = 0;
    printDebug("CLEANUP - END");
}
void CA2D::addSubstate(SUBSTATE_LABEL label,TYPE t){
    // Convenience wrapper: allocate a zero-initialized host buffer of the
    // requested scalar type and register it under the given label.
    void* buffer = allocateSubstate(t, NULL);
    registerSubstate(buffer, label, t);
}
void CA2D::registerSubstate(void * buffer,SUBSTATE_LABEL label,TYPE t){
    // Stores an externally allocated substate buffer in slot `label`;
    // the label doubles as the index into the substate tables.
    assert(substate_count < substates_size);
    assert(buffer != NULL);
    assert(label < substates_size);
    substates[label] = buffer;
    substateTypes[label] = t;
    substate_count++;
}
void* CA2D::allocateSubstate(TYPE t,void* buffer){
    // Allocates a zero-initialized host buffer holding numCells elements of
    // the requested scalar type. If t matches no known TYPE, the incoming
    // buffer is returned untouched (NULL when called from addSubstate).
    size_t elemSize = 0;
    switch (t) {
        case FLOAT:  elemSize = sizeof(float);  break;
        case DOUBLE: elemSize = sizeof(double); break;
        case CHAR:   elemSize = sizeof(char);   break;
        case INT:    elemSize = sizeof(int);    break;
        case BOOL:   elemSize = sizeof(bool);   break;
    }
    if (elemSize > 0) {
        buffer = calloc(numCells, elemSize);
    }
    return buffer;
}
void CA2D::updateDimGrid(){
    // Recomputes the kernel launch grid so that blockDim-sized tiles cover
    // the whole xDim x yDim lattice (ceiling division per axis).
    dimGrid.x = xDim / blockDim.x + (xDim % blockDim.x ? 1 : 0);
    dimGrid.y = yDim / blockDim.y + (yDim % blockDim.y ? 1 : 0);
    dimGrid.z = 1;  // restore default value for a 2D automaton
}
//2D constructor
CA2D::CA2D(int yDim,int xDim,bool toroidal){
// Builds a yDim x xDim automaton. The linear-index helper is chosen once,
// at construction time: the toroidal variant presumably wraps coordinates
// at the borders while the normal one does not -- confirm against the
// hd_getLinearIndex* definitions.
this->preliminaryCAConstructor();
this->yDim=yDim;
this->xDim=xDim;
this->numCells=yDim*xDim;
this->isToroidal=toroidal;
if(isToroidal){
getLinearIndex=hd_getLinearIndexToroidal2D;
}else{
getLinearIndex=hd_getLinearIndexNormal2D;
}
// Default launch configuration; callers may override via setBlockdimX/Y,
// which re-derive the grid just like updateDimGrid() below.
blockDim.x=DEFAULT_BLOCKDIM_X;
blockDim.y=DEFAULT_BLOCKDIM_Y;
blockDim.z=1;
updateDimGrid();
}
/*GET i-th NEIGHBOR INDEX functions MOORE NEIGHBORHOOD
5 | 1 | 8
---|---|---
2 | 0 | 3
---|---|---
6 | 4 | 7
*/
unsigned int CA2D::getNeighborIndex2D_MOORE(unsigned int i, unsigned int j,unsigned int neighbor){
    // Maps a (row i, col j) cell plus a Moore-neighbor ordinal (0..8, see
    // the diagram above) to the linear index of that neighbor.
    // NOTE(review): at the borders i-1 / j-1 wrap around as unsigned
    // values; this is presumably corrected inside the toroidal/normal
    // getLinearIndex implementations -- confirm.
    assert(neighbor<9);
    switch(neighbor){
    case 0:
        return getLinearIndex(i,j,yDim,xDim);    // the cell itself
    case 1:
        return getLinearIndex(i-1,j,yDim,xDim);  // one row up
    case 2:
        return getLinearIndex(i,j-1,yDim,xDim);  // same row, one column left
    case 3:
        return getLinearIndex(i,j+1,yDim,xDim);  // same row, one column right
    case 4:
        return getLinearIndex(i+1,j,yDim,xDim);  // same column, one row down
    case 5:
        return getLinearIndex(i-1,j-1,yDim,xDim);// one row up, one col left
    case 6:
        return getLinearIndex(i+1,j-1,yDim,xDim);// one row down, one col left
    case 7:
        return getLinearIndex(i+1,j+1,yDim,xDim);// row down, col right
    case 8:
        return getLinearIndex(i-1,j+1,yDim,xDim);// row up, col right
    }
    // BUGFIX: the function returns unsigned int, not a pointer; the old
    // `return NULL;` only compiled because NULL expands to 0. Unreachable
    // anyway thanks to the assert above.
    return 0;
}
unsigned int CA2D::getNeighborIndex2D_MOORE(unsigned int index,unsigned int neighbor){
    // Linear-index overload: offsets a precomputed row-major index by the
    // Moore-neighbor ordinal (0..8, same numbering as the diagram above).
    // NOTE(review): no border handling here -- index-xDim-1 etc. wrap as
    // unsigned arithmetic; callers presumably guarantee interior cells or a
    // toroidal layout -- confirm.
    assert(neighbor<9);
    switch(neighbor){
    case 0:
        return index;          // the cell itself
    case 1:
        return index-xDim;     // one row up
    case 2:
        return index-1;        // same row, one column left
    case 3:
        return index+1;        // same row, one column right
    case 4:
        return index+xDim;     // same column, one row down
    case 5:
        return index-xDim-1;   // one row up, one col left
    case 6:
        return index+xDim-1;   // one row down, one col left
    case 7:
        return index+xDim+1;   // row down, col right
    case 8:
        return index-xDim+1;   // row up, col right
    }
    // BUGFIX: returning the pointer constant NULL from an unsigned int
    // function was ill-typed; unreachable thanks to the assert above.
    return 0;
}
//GETTER AND SETTER
// Lattice width (number of columns).
unsigned int CA2D::get_xDim() const {
return xDim;
}
// Capacity of the elementary-process table (set via setInitialParameters,
// not the number actually registered).
unsigned int CA2D::getElementaryProcessesSize() const {
return elementaryProcesses_size;
}
// Lattice height (number of rows).
unsigned int CA2D::get_yDim() const {
return yDim;
}
// Capacity of the substate table (set via setInitialParameters).
unsigned int CA2D::getSubstatesSize() const {
return substates_size;
}
// Current CUDA block width.
unsigned int CA2D::getBlockdimX() const {
return blockDim.x;
}
void CA2D::setBlockdimX(unsigned int dimX) {
    // Sets the CUDA block width (must be a power of two) and refreshes the
    // launch grid accordingly.
    // BUGFIX: validate the REQUESTED value dimX; the old code tested the
    // current blockDim.x, so any dimX (power of two or not) was accepted
    // whenever the previous width happened to be a power of two.
    if (isPowerOfTwo(dimX)) {
        this->blockDim.x = dimX;
    } else {
        // Reject the request: warn and fall back to the default width.
        printf("WARNING -> setBlockDimX has to be power of 2 -> dimX=%i",DEFAULT_BLOCKDIM_X);
        blockDim.x = DEFAULT_BLOCKDIM_X;
    }
    updateDimGrid();
}
// Current CUDA block height.
unsigned int CA2D::getBlockDimY() const {
return this->blockDim.y;
}
void CA2D::setBlockDimY(unsigned int dimY) {
    // Sets the CUDA block height (must be a power of two) and refreshes the
    // launch grid accordingly.
    // BUGFIX: validate the REQUESTED value dimY; the old code tested the
    // current blockDim.y, so invalid requests slipped through whenever the
    // previous height happened to be a power of two.
    if (isPowerOfTwo(dimY)) {
        this->blockDim.y = dimY;
    } else {
        // Reject the request: warn and fall back to the default height.
        printf("WARNING -> setBlockDimY has to be power of 2 -> dimY=%i",DEFAULT_BLOCKDIM_Y);
        blockDim.y = DEFAULT_BLOCKDIM_Y;
    }
    updateDimGrid();
}
// Number of simulation steps between two host-callback invocations
// (used by globalTransitionFunction_MAINLOOP_callback).
unsigned int CA2D::getStepsBetweenCopy() const {
return stepsBetweenCallback;
}
// Sets the callback period.
// NOTE(review): a value of 0 would make the callback modulo in the main
// loop divide by zero -- consider rejecting it.
void CA2D::setStepsBetweenCopy(unsigned int stepsBetweenCopy) {
this->stepsBetweenCallback = stepsBetweenCopy;
}
unsigned int CA2D::isPowerOfTwo (unsigned int x)
{
    // Returns 1 when x is a power of two, else 0 (0 itself is rejected).
    // A power of two has exactly one bit set, so x & (x-1) clears it.
    return (x != 0 && (x & (x - 1)) == 0) ? 1u : 0u;
}
//END OF GETTER AND SETTER
void CA2D::copyBuffersFromGPU(){
    // Copies every substate buffer from its device mirror (d_subPointer)
    // back into the matching host buffer; the recorded substate type is
    // forwarded so the transfer can be sized correctly.
    printDebug("START offload copy");
    for (unsigned int s = 0; s < substates_size; s++) {
        copyBufferFromGPU(substates[s], d_subPointer[s], substateTypes[s]);
    }
    printDebug("END offload copy");
}
/**
* Set default parameters and do normal initialization
* */
void CA2D::preliminaryCAConstructor() {
// Zeroes every member before the constructor assigns real values.
// NOTE(review): this runs from the constructor BEFORE yDim is assigned,
// so the assert below reads an indeterminate member; for an unsigned yDim
// the condition is equivalent to yDim==0. Confirm the intent (likely a
// "not yet initialized" sanity check) or remove it.
assert(yDim <= 0);
this->steps=0;
this->elapsedTime=0.0f;
substates=NULL;
substates_size=0;
substate_count=0;
substateTypes=0;
stopCondition=0;
stop=false;//global transition func main loop ACTIVE
elementaryProcesses=NULL;
elementaryProcesses_size=0;
elementaryProcesses_count=0;
}
bool CA2D::evolveOneStep() {
    // Advances the automaton by exactly one global-transition step.
    // Returns false (doing nothing) once the stop condition has fired.
    if (stop) {
        return false;
    }
    for (unsigned int ep = 0; ep < elementaryProcesses_size; ep++) {
        // Apply each elementary process to the whole lattice, in order.
        (elementaryProcesses[ep])<<<dimGrid,blockDim>>>(d_CA);
        cudaThreadSynchronize();
    }
    steps = steps + 1;
    printf("Step = %i\n", steps);
    stop = stopCondition();
    return true;
}
bool CA2D::evolveKsteps(unsigned int k) {
    // Advances the automaton by up to k steps (fewer when the stop
    // condition fires first). Returns true iff at least one step ran.
    // BUGFIX: the old code reused `k` as the inner loop variable
    // (clobbering the requested step count), incremented `k` instead of
    // `i` in the outer loop, and returned from inside the loop so at most
    // one step was ever performed.
    bool performedAtLeastOne = false;
    for (unsigned int i = 0; i < k && !stop; i++) {
        for (unsigned int ep = 0; ep < elementaryProcesses_size; ep++) {
            // Apply each elementary process to the whole lattice, in order.
            (elementaryProcesses[ep])<<<dimGrid,blockDim>>>(d_CA);
            cudaThreadSynchronize();
        }
        steps = steps + 1;
        printf("Step = %i\n", steps);
        stop = stopCondition();
        performedAtLeastOne = true;
    }
    return performedAtLeastOne;
}
void CA2D::setCallback(void(*call)(unsigned int)){
    // Registers the host callback invoked every stepsBetweenCallback steps
    // by globalTransitionFunction_MAINLOOP_callback(); receives the current
    // step counter.
    callback = call;
}
|
58b0e17b0212cd07312a8e0871828ee789905f75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Element-wise complex multiplication: all three buffers hold interleaved
// (re, im) float pairs. data_l is indexed per (block, thread); data_r is
// indexed without the blockIdx.y term, so one channel of data_r is
// presumably broadcast across the grid's y dimension -- confirm with the
// launch site.
// NOTE(review): there is no bounds check, so the launch configuration must
// cover the buffers exactly.
__global__ void scales_channel_mul_kernel(float *data_l, float *data_r, float *result)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
int one_ch_index = 2 * ((threadIdx.y * blockDim.x) + threadIdx.x + blockIdx.x * blockDim.x * blockDim.y);
// (a+bi)(c+di) = (ac - bd) + (ad + bc)i
result[threadId] = data_l[threadId] * data_r[one_ch_index] - data_l[threadId + 1] * data_r[one_ch_index + 1];
result[threadId + 1] = data_l[threadId] * data_r[one_ch_index + 1] + data_l[threadId + 1] * data_r[one_ch_index];
} | 58b0e17b0212cd07312a8e0871828ee789905f75.cu | #include "includes.h"
// Element-wise complex multiplication (interleaved re/im float pairs);
// CUDA twin of the hip version above. data_r is indexed without blockIdx.y,
// i.e. a single channel is presumably shared across the grid's y dimension
// -- confirm with the launch site.
// NOTE(review): no bounds check; the launch must cover the buffers exactly.
__global__ void scales_channel_mul_kernel(float *data_l, float *data_r, float *result)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
int one_ch_index = 2 * ((threadIdx.y * blockDim.x) + threadIdx.x + blockIdx.x * blockDim.x * blockDim.y);
// (a+bi)(c+di) = (ac - bd) + (ad + bc)i
result[threadId] = data_l[threadId] * data_r[one_ch_index] - data_l[threadId + 1] * data_r[one_ch_index + 1];
result[threadId + 1] = data_l[threadId] * data_r[one_ch_index + 1] + data_l[threadId + 1] * data_r[one_ch_index];
}
0997785872bfc1b17b1fca6e81f22acd961a2e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <utility>
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <vector>
#include <string>
#include <cmath>
#include <map>
#include <hip/hip_runtime.h>
#include <ctime>
#include <cassert>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#define pb push_back
#define all(c) (c).begin(),(c).end()
#include <Windows.h>
#include <MMSystem.h>
#pragma comment(lib, "winmm.lib")
#define _CRTDBG_MAP_ALLOC
#include <crtdbg.h>//to detect host memory leaks
using namespace std;
#define _DTH hipMemcpyDeviceToHost
#define _HTD hipMemcpyHostToDevice
//these can be altered on user depending on data set and type of operation(random test, read from file etc)
#define BLOCK_SIZE 256
#define RANGE 997
#define RANDOM_GSIZE 700
#define FILE_GSIZE 8298//the number of edges in Wiki-Vote.txt if the file test is run
#define INF (1<<22)
#define DO_TEST_RANDOM 1
#define DO_TEST_FROM_FILE 0
//typedef for vector used in path reconstruction
typedef pair<pair<int,int>,int> Piii;
//forward function declarations
bool InitMMTimer(UINT wTimerRes);
void DestroyMMTimer(UINT wTimerRes, bool init);
void _CPU_Floyd(int *G,int *Gpath,int N);
void _showPath(int start,int end,const vector<Piii> &path,const int *D,const int N);
bool _getPath(int curEdge, int nxtEdge,vector<Piii> &path,const int *D, const int *Dpath,const int N);
void _get_full_paths(const int *D, const int *Dpath, const int N);
//CUDA GPU kernel/functions forward declaration
__global__ void _Wake_GPU(int reps);
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N);
void _GPU_Floyd(int *H_G, int *H_Gpath, const int N);
//other optional utility functions
int _read_from_file(int *G,const int N);
void _generateRandomGraph(int *G, int N, int range, int density);
void _generate_result_file(bool success, unsigned int cpu_time, unsigned int gpu_time, int N);
int main(){
    // Driver: builds a random weighted digraph, runs Floyd-Warshall on the
    // CPU and on the GPU, verifies both adjacency and predecessor matrices
    // agree, then lets the user query shortest paths interactively.
    char ch;
    srand(time(NULL));
    if(DO_TEST_RANDOM){//will use the #define(s) to init a random adjacency Matrix of RANDOM_GSIZE size
        const int NumBytes = RANDOM_GSIZE * RANDOM_GSIZE * sizeof(int);
        // Host allocations: pristine input graph plus separate CPU and GPU
        // result/predecessor matrices.
        int *OrigGraph = (int *)malloc(NumBytes);//original adjacency matrix, will NOT be changed
        int *H_G = (int *)malloc(NumBytes);
        int *H_Gpath = (int *)malloc(NumBytes);
        int *D_G = (int *)malloc(NumBytes);
        int *D_Gpath = (int *)malloc(NumBytes);
        _generateRandomGraph(OrigGraph, RANDOM_GSIZE, RANGE, 25);//init graph with values
        cout<<"Successfully created random highly connected graph in adjacency Matrix form with "<<RANDOM_GSIZE*RANDOM_GSIZE<< " elements.\n";
        cout<<"Also created 2 pairs of distinct result Matrices to store the respective results of the CPU results and the GPU results.\n";
        for(int i = 0; i < RANDOM_GSIZE * RANDOM_GSIZE; i++){
            H_G[i] = D_G[i] = OrigGraph[i]; // working copies
            H_Gpath[i] = D_Gpath[i] = -1;   // -1 marks "direct edge" for path reconstruction
        }
        unsigned int cpu_time = 0, gpu_time = 0;
        cout<<"\nFloyd-Warshall on CPU underway:\n";
        UINT wTimerRes = 0;
        bool init = InitMMTimer(wTimerRes);
        DWORD startTime = timeGetTime();
        _CPU_Floyd(H_G, H_Gpath, RANDOM_GSIZE);//serial CPU reference
        DWORD endTime = timeGetTime();
        // BUGFIX: "unsigned int(x)" is not a valid functional cast (the type
        // name is two tokens); use an explicit cast instead.
        cpu_time = (unsigned int)(endTime - startTime);
        printf("CPU Timing: %dms\n", cpu_time);
        DestroyMMTimer(wTimerRes, init);
        //wake up GPU from idle so the timed run is not charged for it
        cout<<"\nFloyd-Warshall on GPU underway:\n";
        hipLaunchKernelGGL(( _Wake_GPU), dim3(1),dim3(BLOCK_SIZE), 0, 0, 32);
        //call host function which copies all info to device and runs the kernels
        wTimerRes = 0;
        init = InitMMTimer(wTimerRes);
        startTime = timeGetTime();
        _GPU_Floyd(D_G, D_Gpath, RANDOM_GSIZE);
        endTime = timeGetTime();
        gpu_time = (unsigned int)(endTime - startTime);
        printf("GPU Timing(including all device-host, host-device copies, device allocations and freeing of device memory): %dms\n\n", gpu_time);
        DestroyMMTimer(wTimerRes, init);
        //compare the device generated result against the host generated result
        cout<<"Verifying results of final adjacency Matrix and Path Matrix.\n";
        int same_adj_Matrix = memcmp(H_G, D_G, NumBytes);
        if(same_adj_Matrix == 0){
            cout<<"Adjacency Matrices Equal!\n";
        }else
            cout<<"Adjacency Matrices Not Equal!\n";
        int same_path_Matrix = memcmp(H_Gpath, D_Gpath, NumBytes);
        if(same_path_Matrix == 0){
            cout<<"Path reconstruction Matrices Equal!\n";
        }else
            cout<<"Path reconstruction Matrices Not Equal!\n";
        _get_full_paths(D_G, D_Gpath, RANDOM_GSIZE);//interactive step-by-step path queries
        _generate_result_file(bool(same_adj_Matrix == 0 && same_path_Matrix == 0), cpu_time, gpu_time, RANDOM_GSIZE);
        free(OrigGraph);
        free(H_G);
        free(H_Gpath);
        free(D_G);
        free(D_Gpath);
    }
    _CrtDumpMemoryLeaks();
    cin>>ch;
    return 0;
}
bool InitMMTimer(UINT wTimerRes){
// Raises the Windows multimedia timer resolution so timeGetTime() is
// accurate; returns false when the device capabilities cannot be queried.
// NOTE(review): wTimerRes is passed BY VALUE, so the resolution computed
// here never reaches the caller; DestroyMMTimer is later invoked with the
// caller's original 0, mismatching this timeBeginPeriod. Fixing it needs a
// reference parameter plus matching changes to the forward declaration.
TIMECAPS tc;
if (timeGetDevCaps(&tc, sizeof(TIMECAPS)) != TIMERR_NOERROR) {return false;}
wTimerRes = min(max(tc.wPeriodMin, 1), tc.wPeriodMax);
timeBeginPeriod(wTimerRes);
return true;
}
void DestroyMMTimer(UINT wTimerRes, bool init){
    // Restores the Windows multimedia timer period, but only when
    // InitMMTimer actually raised it.
    if (!init) {
        return;
    }
    timeEndPeriod(wTimerRes);
}
void _CPU_Floyd(int *G,int *Gpath,int N){
    // Classic O(N^3) Floyd-Warshall on the row-major N x N matrix G.
    // G is relaxed in place; Gpath[i][j] records the last intermediate
    // vertex k that improved the i->j distance (-1 preset by the caller
    // means "direct edge"), enabling path reconstruction later.
    for (int k = 0; k < N; ++k) {
        for (int i = 0; i < N; ++i) {
            for (int j = 0; j < N; ++j) {
                int direct = G[i * N + j];
                int relaxed = G[i * N + k] + G[k * N + j];
                if (relaxed < direct) {
                    G[i * N + j] = relaxed;
                    Gpath[i * N + j] = k;
                }
            }
        }
    }
}
void _showPath(int start,int end,const vector<Piii> &path,const int *D,const int N){
    // Prints the path collected (backwards) by _getPath, front to back,
    // one edge per line, preceded by the total cost D[start][end].
    cout<<"\nHere is the shortest cost path from "<<start<< " to "<<end<<", at a total cost of "<<D[start*N+end]<<".\n";
    for (size_t idx = path.size(); idx-- > 0; ) {
        cout<<"From "<<path[idx].first.first<<" to "<<path[idx].first.second<<" at a cost of "<<path[idx].second<<'\n';
    }
    cout<<'\n';
}
bool _getPath(int curEdge, int nxtEdge,vector<Piii> &path,const int *D, const int *Dpath,const int N){
// Recursively reconstructs the shortest path from curEdge to nxtEdge by
// walking the predecessor matrix backwards; edges are appended to `path`
// in reverse order (callers print it back-to-front). Returns false when
// the pair is unreachable (distance >= INF).
int curIdx=curEdge*N+nxtEdge;
if(D[curIdx]>=INF)return false;
if(Dpath[curIdx]==-1){//end of backwards retracement: direct edge
path.push_back(make_pair(make_pair(curEdge,nxtEdge),D[curIdx]));
return true;
}else{//record last edge cost and move backwards through the predecessor
return _getPath(curEdge,Dpath[curIdx],path,D,Dpath,N);
}
}
void _get_full_paths(const int *D, const int *Dpath, const int N){
// Interactive loop: prompts for a (start, dest) vertex pair, reconstructs
// the shortest path via _getPath and prints it. Any out-of-range input
// (including the documented "negative number to exit") ends the loop.
int start_vertex=-1,end_vertex=-1;
vector<Piii> path;
do{
path.clear();
cout<<"Enter start vertex #:";
cin>>start_vertex;
cout<<"Enter dest vertex(enter negative number to exit) #:";
cin>>end_vertex;
if(start_vertex<0 || start_vertex>=N || end_vertex<0 || end_vertex>=N)return;
if(_getPath(start_vertex, end_vertex,path,D,Dpath,N)){
_showPath(start_vertex,end_vertex,path,D,N);
}else{
cout<<"\nThere does not exist valid a path between "<<start_vertex<<" , and "<<end_vertex<<'\n';
}
}while(1);
}
__global__ void _Wake_GPU(int reps){
    // No-op kernel launched once so subsequently timed kernels do not pay
    // the device wake-up / context initialization cost.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= reps) return;
}
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N){//G will be the adjacency matrix, P will be path matrix
// One Floyd-Warshall relaxation pass through intermediate vertex k.
// Launch layout: gridDim.y == N (one block row per matrix row), and
// blockIdx.x*blockDim.x + threadIdx.x spans the columns of that row.
int col=blockIdx.x*blockDim.x + threadIdx.x;
if(col>=N)return;
int idx=N*blockIdx.y+col;
// G[row][k] is identical for every thread of this block: load it once and
// broadcast through shared memory.
__shared__ int best;
if(threadIdx.x==0)
best=G[N*blockIdx.y+k];
__syncthreads();
// `best` is uniform across the block, so this early exit is non-divergent;
// it also comes AFTER the barrier, keeping __syncthreads() safe.
if(best==INF)return;
int tmp_b=G[k*N+col];
if(tmp_b==INF)return;
int cur=best+tmp_b;
if(cur<G[idx]){
G[idx]=cur;
P[idx]=k;
}
}
void _GPU_Floyd(int *H_G, int *H_Gpath, const int N){
// Host driver: uploads the adjacency and path matrices, runs N dependent
// relaxation passes of _GPU_Floyd_kernel (one per intermediate vertex),
// downloads the results, and frees device memory. Every HIP call's status
// is checked and reported with file/line context.
//allocate device memory and copy graph data from host
int *dG,*dP;
int numBytes=N*N*sizeof(int);
hipError_t err=hipMalloc((int **)&dG,numBytes);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
err=hipMalloc((int **)&dP,numBytes);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
//copy from host to device graph info
err=hipMemcpy(dG,H_G,numBytes,_HTD);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
err=hipMemcpy(dP,H_Gpath,numBytes,_HTD);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
// One block column group per BLOCK_SIZE columns, one grid row per matrix row.
dim3 dimGrid((N+BLOCK_SIZE-1)/BLOCK_SIZE,N);
for(int k=0;k<N;k++){//main loop: pass k must finish before pass k+1 starts
hipLaunchKernelGGL(( _GPU_Floyd_kernel), dim3(dimGrid),dim3(BLOCK_SIZE), 0, 0, k,dG,dP,N);
err = hipDeviceSynchronize();
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
}
//copy back memory
err=hipMemcpy(H_G,dG,numBytes,_DTH);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
err=hipMemcpy(H_Gpath,dP,numBytes,_DTH);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
//free device memory
err=hipFree(dG);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
err=hipFree(dP);
if(err!=hipSuccess){printf("%s in %s at line %d\n",hipGetErrorString(err),__FILE__,__LINE__);}
}
void _generateRandomGraph(int *G,int N,int range, int density){
    // Fills G (N x N, row major) with a random directed graph:
    // G[i][i] = 0, each off-diagonal edge exists with probability roughly
    // density/100 and gets a uniform weight in [1, range]; missing edges
    // are set to INF.
    // BUGFIX: density outside (0,100] made Prange zero or negative, i.e.
    // division by zero at 100/density or UB at rand()%Prange; clamp it.
    if (density < 1) density = 1;
    if (density > 100) density = 100;
    int Prange = (100 / density);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (i == j) { // zero-cost diagonal, no self loops
                G[i * N + j] = 0;
                continue;
            }
            int pr = rand() % Prange;
            // pr==0 with probability ~density/100: create a weighted edge.
            G[i * N + j] = pr == 0 ? ((rand() % range) + 1) : INF;
        }
    }
}
int _read_from_file(int *G,const int N){//reads in edge list from file
// Loads a whitespace-separated "src dst" edge list from Wiki-Vote.txt and
// marks each listed edge with weight 1 in the N x N matrix G. Returns the
// number of edges read.
// NOTE(review): vertex ids are not range-checked against N; a file with
// ids >= N would write out of bounds -- confirm the input guarantees.
int num_edges=0;
ifstream readfile;//enable stream for reading file
readfile.open("Wiki-Vote.txt");
assert(readfile.good());//make sure it found the file and the stream is readable
string line;
int v0,v1;
while(getline(readfile,line)){
istringstream linestream(line);
linestream>>v0>>v1;
G[v0*N+v1]=1;
num_edges++;
}
readfile.close();
return num_edges;
}
void _generate_result_file(bool success, unsigned int cpu_time,unsigned int gpu_time, int N){
// Writes a small summary report (sizes, timings, speedup) to
// Floyd-Warshall_result.txt; only complains on stdout when the CPU and
// GPU results disagreed.
// NOTE(review): gpu_time of 0 ms makes the speedup ratio a float division
// by zero (inf under IEEE) -- harmless for a report, but worth confirming.
if(!success){
cout<<"Error in calculation!\n";
return;
}else{
ofstream myfile;
myfile.open("Floyd-Warshall_result.txt");
myfile<<"Success! The GPU Floyd-Warshall result and the CPU Floyd-Warshall results are identical(both final adjacency matrix and path matrix).\n\n";
myfile<<"N= "<<N<<" , and the total number of elements(for Adjacency Matrix and Path Matrix) was "<<N*N<<" .\n";
myfile<<"Matrices are int full dense format(row major) with a minimum of "<<(N*N)/4<<" valid directed edges.\n\n";
myfile<<"The CPU timing for all was "<<float(cpu_time)/1000.0f<<" seconds, and the GPU timing(including all device memory operations(allocations,copies etc) ) for all was "<<float(gpu_time)/1000.0f<<" seconds.\n";
myfile<<"The GPU result was "<<float(cpu_time)/float(gpu_time)<<" faster than the CPU version.\n";
myfile.close();
}
} | 0997785872bfc1b17b1fca6e81f22acd961a2e3d.cu | #include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <utility>
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <vector>
#include <string>
#include <cmath>
#include <map>
#include <cuda.h>
#include <ctime>
#include <cassert>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define pb push_back
#define all(c) (c).begin(),(c).end()
#include <Windows.h>
#include <MMSystem.h>
#pragma comment(lib, "winmm.lib")
#define _CRTDBG_MAP_ALLOC
#include <crtdbg.h>//to detect host memory leaks
using namespace std;
#define _DTH cudaMemcpyDeviceToHost
#define _HTD cudaMemcpyHostToDevice
//these can be altered on user depending on data set and type of operation(random test, read from file etc)
#define BLOCK_SIZE 256
#define RANGE 997
#define RANDOM_GSIZE 700
#define FILE_GSIZE 8298//the number of edges in Wiki-Vote.txt if the file test is run
#define INF (1<<22)
#define DO_TEST_RANDOM 1
#define DO_TEST_FROM_FILE 0
//typedef for vector used in path reconstruction
typedef pair<pair<int,int>,int> Piii;
//forward function declarations
bool InitMMTimer(UINT wTimerRes);
void DestroyMMTimer(UINT wTimerRes, bool init);
void _CPU_Floyd(int *G,int *Gpath,int N);
void _showPath(int start,int end,const vector<Piii> &path,const int *D,const int N);
bool _getPath(int curEdge, int nxtEdge,vector<Piii> &path,const int *D, const int *Dpath,const int N);
void _get_full_paths(const int *D, const int *Dpath, const int N);
//CUDA GPU kernel/functions forward declaration
__global__ void _Wake_GPU(int reps);
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N);
void _GPU_Floyd(int *H_G, int *H_Gpath, const int N);
//other optional utility functions
int _read_from_file(int *G,const int N);
void _generateRandomGraph(int *G, int N, int range, int density);
void _generate_result_file(bool success, unsigned int cpu_time, unsigned int gpu_time, int N);
int main(){
    // Driver (CUDA twin of the hip version): builds a random weighted
    // digraph, runs Floyd-Warshall on CPU and GPU, verifies both result
    // matrices agree, then lets the user query shortest paths.
    char ch;
    srand(time(NULL));
    if(DO_TEST_RANDOM){//will use the #define(s) to init a random adjacency Matrix of RANDOM_GSIZE size
        const int NumBytes = RANDOM_GSIZE * RANDOM_GSIZE * sizeof(int);
        // Host allocations: pristine input graph plus separate CPU and GPU
        // result/predecessor matrices.
        int *OrigGraph = (int *)malloc(NumBytes);//original adjacency matrix, will NOT be changed
        int *H_G = (int *)malloc(NumBytes);
        int *H_Gpath = (int *)malloc(NumBytes);
        int *D_G = (int *)malloc(NumBytes);
        int *D_Gpath = (int *)malloc(NumBytes);
        _generateRandomGraph(OrigGraph, RANDOM_GSIZE, RANGE, 25);//init graph with values
        cout<<"Successfully created random highly connected graph in adjacency Matrix form with "<<RANDOM_GSIZE*RANDOM_GSIZE<< " elements.\n";
        cout<<"Also created 2 pairs of distinct result Matrices to store the respective results of the CPU results and the GPU results.\n";
        for(int i = 0; i < RANDOM_GSIZE * RANDOM_GSIZE; i++){
            H_G[i] = D_G[i] = OrigGraph[i]; // working copies
            H_Gpath[i] = D_Gpath[i] = -1;   // -1 marks "direct edge" for path reconstruction
        }
        unsigned int cpu_time = 0, gpu_time = 0;
        cout<<"\nFloyd-Warshall on CPU underway:\n";
        UINT wTimerRes = 0;
        bool init = InitMMTimer(wTimerRes);
        DWORD startTime = timeGetTime();
        _CPU_Floyd(H_G, H_Gpath, RANDOM_GSIZE);//serial CPU reference
        DWORD endTime = timeGetTime();
        // BUGFIX: "unsigned int(x)" is not a valid functional cast (the type
        // name is two tokens); use an explicit cast instead.
        cpu_time = (unsigned int)(endTime - startTime);
        printf("CPU Timing: %dms\n", cpu_time);
        DestroyMMTimer(wTimerRes, init);
        //wake up GPU from idle so the timed run is not charged for it
        cout<<"\nFloyd-Warshall on GPU underway:\n";
        _Wake_GPU<<<1,BLOCK_SIZE>>>(32);
        //call host function which copies all info to device and runs the kernels
        wTimerRes = 0;
        init = InitMMTimer(wTimerRes);
        startTime = timeGetTime();
        _GPU_Floyd(D_G, D_Gpath, RANDOM_GSIZE);
        endTime = timeGetTime();
        gpu_time = (unsigned int)(endTime - startTime);
        printf("GPU Timing(including all device-host, host-device copies, device allocations and freeing of device memory): %dms\n\n", gpu_time);
        DestroyMMTimer(wTimerRes, init);
        //compare the device generated result against the host generated result
        cout<<"Verifying results of final adjacency Matrix and Path Matrix.\n";
        int same_adj_Matrix = memcmp(H_G, D_G, NumBytes);
        if(same_adj_Matrix == 0){
            cout<<"Adjacency Matrices Equal!\n";
        }else
            cout<<"Adjacency Matrices Not Equal!\n";
        int same_path_Matrix = memcmp(H_Gpath, D_Gpath, NumBytes);
        if(same_path_Matrix == 0){
            cout<<"Path reconstruction Matrices Equal!\n";
        }else
            cout<<"Path reconstruction Matrices Not Equal!\n";
        _get_full_paths(D_G, D_Gpath, RANDOM_GSIZE);//interactive step-by-step path queries
        _generate_result_file(bool(same_adj_Matrix == 0 && same_path_Matrix == 0), cpu_time, gpu_time, RANDOM_GSIZE);
        free(OrigGraph);
        free(H_G);
        free(H_Gpath);
        free(D_G);
        free(D_Gpath);
    }
    _CrtDumpMemoryLeaks();
    cin>>ch;
    return 0;
}
bool InitMMTimer(UINT wTimerRes){
// Raises the Windows multimedia timer resolution so timeGetTime() is
// accurate; returns false when the device capabilities cannot be queried.
// NOTE(review): wTimerRes is passed BY VALUE, so the resolution computed
// here never reaches the caller; the later DestroyMMTimer therefore calls
// timeEndPeriod with the caller's original 0 -- confirm and consider a
// reference parameter (plus matching forward declaration).
TIMECAPS tc;
if (timeGetDevCaps(&tc, sizeof(TIMECAPS)) != TIMERR_NOERROR) {return false;}
wTimerRes = min(max(tc.wPeriodMin, 1), tc.wPeriodMax);
timeBeginPeriod(wTimerRes);
return true;
}
void DestroyMMTimer(UINT wTimerRes, bool init){
    // Undo timeBeginPeriod, but only if InitMMTimer reported success.
    if (!init) {
        return;
    }
    timeEndPeriod(wTimerRes);
}
void _CPU_Floyd(int *G,int *Gpath,int N){
    // Standard O(N^3) Floyd-Warshall over the row-major N x N matrix G,
    // relaxed in place. Gpath[i][j] records the last intermediate vertex k
    // that improved i->j (the caller presets -1 for "direct edge"), which
    // _getPath later uses for path reconstruction.
    for (int k = 0; k < N; ++k) {
        for (int i = 0; i < N; ++i) {
            for (int j = 0; j < N; ++j) {
                int viaK = G[i * N + k] + G[k * N + j];
                if (viaK < G[i * N + j]) {
                    G[i * N + j] = viaK;
                    Gpath[i * N + j] = k;
                }
            }
        }
    }
}
void _showPath(int start,int end,const vector<Piii> &path,const int *D,const int N){
// Prints the edge list collected (backwards) by _getPath, front to back,
// one edge per line, preceded by the total cost D[start][end].
cout<<"\nHere is the shortest cost path from "<<start<< " to "<<end<<", at a total cost of "<<D[start*N+end]<<".\n";
for(int i=path.size()-1;i>=0;--i){
cout<<"From "<<path[i].first.first<<" to "<<path[i].first.second<<" at a cost of "<<path[i].second<<'\n';
}
cout<<'\n';
}
bool _getPath(int curEdge, int nxtEdge,vector<Piii> &path,const int *D, const int *Dpath,const int N){
// Recursively reconstructs the shortest path from curEdge to nxtEdge by
// walking the predecessor matrix backwards; edges are appended to `path`
// in reverse order (callers print it back-to-front). Returns false when
// the pair is unreachable (distance >= INF).
int curIdx=curEdge*N+nxtEdge;
if(D[curIdx]>=INF)return false;
if(Dpath[curIdx]==-1){//end of backwards retracement: direct edge
path.push_back(make_pair(make_pair(curEdge,nxtEdge),D[curIdx]));
return true;
}else{//record last edge cost and move backwards through the predecessor
return _getPath(curEdge,Dpath[curIdx],path,D,Dpath,N);
}
}
void _get_full_paths(const int *D, const int *Dpath, const int N){
// Interactive loop: prompts for a (start, dest) vertex pair, reconstructs
// the shortest path via _getPath and prints it. Any out-of-range input
// (including the documented "negative number to exit") ends the loop.
int start_vertex=-1,end_vertex=-1;
vector<Piii> path;
do{
path.clear();
cout<<"Enter start vertex #:";
cin>>start_vertex;
cout<<"Enter dest vertex(enter negative number to exit) #:";
cin>>end_vertex;
if(start_vertex<0 || start_vertex>=N || end_vertex<0 || end_vertex>=N)return;
if(_getPath(start_vertex, end_vertex,path,D,Dpath,N)){
_showPath(start_vertex,end_vertex,path,D,N);
}else{
cout<<"\nThere does not exist valid a path between "<<start_vertex<<" , and "<<end_vertex<<'\n';
}
}while(1);
}
__global__ void _Wake_GPU(int reps){
    // No-op kernel launched once so subsequently timed kernels do not pay
    // the device wake-up / context initialization cost.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= reps) return;
}
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N){//G will be the adjacency matrix, P will be path matrix
// One Floyd-Warshall relaxation pass through intermediate vertex k.
// Launch layout: gridDim.y == N (one block row per matrix row), and
// blockIdx.x*blockDim.x + threadIdx.x spans the columns of that row.
int col=blockIdx.x*blockDim.x + threadIdx.x;
if(col>=N)return;
int idx=N*blockIdx.y+col;
// G[row][k] is identical for every thread of this block: load it once and
// broadcast through shared memory.
__shared__ int best;
if(threadIdx.x==0)
best=G[N*blockIdx.y+k];
__syncthreads();
// `best` is uniform across the block, so this early exit is non-divergent;
// it also comes AFTER the barrier, keeping __syncthreads() safe.
if(best==INF)return;
int tmp_b=G[k*N+col];
if(tmp_b==INF)return;
int cur=best+tmp_b;
if(cur<G[idx]){
G[idx]=cur;
P[idx]=k;
}
}
void _GPU_Floyd(int *H_G, int *H_Gpath, const int N){
    // Host driver: uploads the adjacency and path matrices, runs N
    // dependent relaxation passes of _GPU_Floyd_kernel (one per
    // intermediate vertex), downloads the results, and frees device
    // memory. Every CUDA call's status is checked and reported.
    int *dG,*dP;
    int numBytes=N*N*sizeof(int);
    cudaError_t err=cudaMalloc((int **)&dG,numBytes);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaMalloc((int **)&dP,numBytes);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    // Upload the initial matrices.
    err=cudaMemcpy(dG,H_G,numBytes,_HTD);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaMemcpy(dP,H_Gpath,numBytes,_HTD);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    // One block group per BLOCK_SIZE columns, one grid row per matrix row.
    dim3 dimGrid((N+BLOCK_SIZE-1)/BLOCK_SIZE,N);
    for(int k=0;k<N;k++){// pass k must finish before pass k+1 starts
        _GPU_Floyd_kernel<<<dimGrid,BLOCK_SIZE>>>(k,dG,dP,N);
        // BUGFIX: cudaThreadSynchronize() is deprecated; use the
        // equivalent cudaDeviceSynchronize() (as the hip port already does).
        err = cudaDeviceSynchronize();
        if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    }
    // Download the results.
    err=cudaMemcpy(H_G,dG,numBytes,_DTH);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaMemcpy(H_Gpath,dP,numBytes,_DTH);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    // Release device memory.
    err=cudaFree(dG);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err=cudaFree(dP);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
}
void _generateRandomGraph(int *G,int N,int range, int density){
    // Fills G (N x N, row major) with a random directed graph:
    // G[i][i] = 0, each off-diagonal edge exists with probability roughly
    // density/100 and gets a uniform weight in [1, range]; missing edges
    // are set to INF.
    // BUGFIX: density outside (0,100] made Prange zero or negative, i.e.
    // division by zero at 100/density or UB at rand()%Prange; clamp it.
    if (density < 1) density = 1;
    if (density > 100) density = 100;
    int Prange = (100 / density);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (i == j) { // zero-cost diagonal, no self loops
                G[i * N + j] = 0;
                continue;
            }
            int pr = rand() % Prange;
            // pr==0 with probability ~density/100: create a weighted edge.
            G[i * N + j] = pr == 0 ? ((rand() % range) + 1) : INF;
        }
    }
}
int _read_from_file(int *G,const int N){//reads in edge list from file
// Loads a whitespace-separated "src dst" edge list from Wiki-Vote.txt and
// marks each listed edge with weight 1 in the N x N matrix G. Returns the
// number of edges read.
// NOTE(review): vertex ids are not range-checked against N; a file with
// ids >= N would write out of bounds -- confirm the input guarantees.
int num_edges=0;
ifstream readfile;//enable stream for reading file
readfile.open("Wiki-Vote.txt");
assert(readfile.good());//make sure it found the file and the stream is readable
string line;
int v0,v1;
while(getline(readfile,line)){
istringstream linestream(line);
linestream>>v0>>v1;
G[v0*N+v1]=1;
num_edges++;
}
readfile.close();
return num_edges;
}
void _generate_result_file(bool success, unsigned int cpu_time,unsigned int gpu_time, int N){
// Writes a small summary report (sizes, timings, speedup) to
// Floyd-Warshall_result.txt; only complains on stdout when the CPU and
// GPU results disagreed.
// NOTE(review): gpu_time of 0 ms makes the speedup ratio a float division
// by zero (inf under IEEE) -- harmless for a report, but worth confirming.
if(!success){
cout<<"Error in calculation!\n";
return;
}else{
ofstream myfile;
myfile.open("Floyd-Warshall_result.txt");
myfile<<"Success! The GPU Floyd-Warshall result and the CPU Floyd-Warshall results are identical(both final adjacency matrix and path matrix).\n\n";
myfile<<"N= "<<N<<" , and the total number of elements(for Adjacency Matrix and Path Matrix) was "<<N*N<<" .\n";
myfile<<"Matrices are int full dense format(row major) with a minimum of "<<(N*N)/4<<" valid directed edges.\n\n";
myfile<<"The CPU timing for all was "<<float(cpu_time)/1000.0f<<" seconds, and the GPU timing(including all device memory operations(allocations,copies etc) ) for all was "<<float(gpu_time)/1000.0f<<" seconds.\n";
myfile<<"The GPU result was "<<float(cpu_time)/float(gpu_time)<<" faster than the CPU version.\n";
myfile.close();
}
}
e7b984aed519a9a698798f51124cbdcb4bfbbaae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static constexpr int kROISize = 4;
static inline int NumBlocks(const int N) {
  // Grid size for N elements at kNumCUDAThreads per block, capped at
  // kNumMaxinumNumBlocks (the kernels iterate with CUDA_KERNEL_LOOP, so a
  // capped grid still covers all elements).
  const int blocks = (N + kNumCUDAThreads - 1) / kNumCUDAThreads;
  return blocks < kNumMaxinumNumBlocks ? blocks : kNumMaxinumNumBlocks;
}
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
const int width, T y, T x) {
// Samples one height x width channel (row major) at the continuous point
// (y, x) with bilinear interpolation. Points more than one pixel outside
// the image contribute 0; coordinates in (-1, 0] clamp to 0 and ones past
// the last pixel clamp to the last row/column.
if (y < -1.0 || y > height || x < -1.0 || x > width) {
return 0;
}
y = y <= 0 ? 0 : y;
x = x <= 0 ? 0 : x;
int y_low = static_cast<int>(y);
int x_low = static_cast<int>(x);
int y_high;
int x_high;
// At the top/right border collapse both corners onto the last pixel.
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = static_cast<T>(y_low);
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = static_cast<T>(x_low);
} else {
x_high = x_low + 1;
}
// Fractional offsets and the four corner weights (sum to 1).
T ly = y - y_low, lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T v1 = input_data[y_low * width + x_low];
T v2 = input_data[y_low * width + x_high];
T v3 = input_data[y_high * width + x_low];
T v4 = input_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
// Computes the four bilinear weights (w1..w4) and the integer corner
// coordinates used when sampling point (x, y); the backward kernel uses them
// to scatter gradients to the four contributing pixels. For out-of-range
// points the outputs are left untouched (the caller initializes the corners
// to -1 and skips such samples).
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
                                            T y, T x, T* w1, T* w2, T* w3,
                                            T* w4, int* x_low, int* x_high,
                                            int* y_low, int* y_high) {
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return;
  }
  y = y <= 0 ? 0 : y;
  x = x <= 0 ? 0 : x;
  *y_low = static_cast<int>(y);
  *x_low = static_cast<int>(x);
  if (*y_low >= height - 1) {
    // Clamp to the last row, mirroring BilinearInterpolate.
    *y_high = *y_low = height - 1;
    y = static_cast<T>(*y_low);
  } else {
    *y_high = *y_low + 1;
  }
  if (*x_low >= width - 1) {
    *x_high = *x_low = width - 1;
    x = static_cast<T>(*x_low);
  } else {
    *x_high = *x_low + 1;
  }
  // Same weight computation as the forward interpolation.
  T ly = y - *y_low, lx = x - *x_low;
  T hy = 1. - ly, hx = 1. - lx;
  *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
  return;
}
// Forward ROI Align kernel. One thread per output element; the linear index
// i decodes to (n, c, ph, pw) = (roi, channel, pooled row, pooled col). Each
// output value is the average of bilinear samples taken on a
// roi_bin_grid_h x roi_bin_grid_w grid inside the corresponding bin.
template <class T>
__global__ void GPUROIAlignForward(
    const int nthreads, const T* input_data, const T* input_rois,
    const float spatial_scale, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int sampling_ratio, int* roi_batch_id_data, T* output_data,
    const bool continuous_coordinate) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    // Decode the flat index into (pw, ph, c, n).
    int pw = i % pooled_width;
    int ph = (i / pooled_width) % pooled_height;
    int c = (i / pooled_width / pooled_height) % channels;
    int n = i / pooled_width / pooled_height / channels;
    const T* offset_input_rois = input_rois + n * kROISize;
    int roi_batch_ind = roi_batch_id_data[n];
    // "Aligned" (continuous) mode shifts by half a pixel so samples are
    // centered on pixel centers.
    T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0;
    T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
    T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
    T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
    T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
    T roi_width = roi_xmax - roi_xmin;
    T roi_height = roi_ymax - roi_ymin;
    if (!continuous_coordinate) {
      // Legacy behavior: force ROIs to span at least one pixel.
      roi_width = max(roi_width, static_cast<T>(1.));
      roi_height = max(roi_height, static_cast<T>(1.));
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    const T* offset_input_data =
        input_data + (roi_batch_ind * channels + c) * height * width;
    // Sampling grid resolution per bin: the explicit ratio if positive,
    // otherwise adaptive (ceil of the bin size).
    int roi_bin_grid_h = (sampling_ratio > 0)
                             ? sampling_ratio
                             : ceil(roi_height / pooled_height);
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1);
    T output_val = 0;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = roi_ymin + ph * bin_size_h +
                  static_cast<T>(iy + .5f) * bin_size_h /
                      static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_xmin + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w /
                        static_cast<T>(roi_bin_grid_w);
        T val = BilinearInterpolate(offset_input_data, height, width, y, x);
        output_val += val;
      }
    }
    // Average over all samples in this bin.
    output_val /= count;
    output_data[i] = output_val;
  }
}
// Backward ROI Align kernel. One thread per output-gradient element; each
// thread redistributes its out_grad value to the four input pixels that
// contributed to every bilinear sample of its bin, weighted by the forward
// interpolation weights and divided by the per-bin sample count.
// Accumulation into input_grad uses atomics because bins from different
// (possibly overlapping) ROIs can touch the same input pixel.
template <typename T>
__global__ void GPUROIAlignBackward(
    const int nthreads, const T* input_rois, const T* out_grad,
    const int num_rois, const float spatial_scale, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int sampling_ratio, int* roi_batch_id_data,
    T* input_grad, const bool continuous_coordinate) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    // Decode the flat index into (pw, ph, c, n), as in the forward kernel.
    int pw = i % pooled_width;
    int ph = (i / pooled_width) % pooled_height;
    int c = (i / pooled_width / pooled_height) % channels;
    int n = i / pooled_width / pooled_height / channels;
    const T* offset_input_rois = input_rois + n * kROISize;
    int roi_batch_ind = roi_batch_id_data[n];
    T roi_offset = continuous_coordinate ? T(0.5) : 0;
    T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
    T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
    T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
    T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
    T roi_width = roi_xmax - roi_xmin;
    T roi_height = roi_ymax - roi_ymin;
    if (!continuous_coordinate) {
      roi_width = max(roi_width, static_cast<T>(1.));
      roi_height = max(roi_height, static_cast<T>(1.));
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    T* offset_input_grad =
        input_grad + (roi_batch_ind * channels + c) * height * width;
    const T* offset_out_grad =
        out_grad + (n * channels + c) * pooled_height * pooled_width;
    const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
    int roi_bin_grid_h = (sampling_ratio > 0)
                             ? sampling_ratio
                             : ceil(roi_height / pooled_height);
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = roi_bin_grid_h * roi_bin_grid_w;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = roi_ymin + ph * bin_size_h +
                  static_cast<T>(iy + .5f) * bin_size_h /
                      static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_xmin + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w /
                        static_cast<T>(roi_bin_grid_w);
        T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
        // Corners stay -1 for out-of-range samples, which are skipped below.
        int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
        BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
                                    &x_low, &x_high, &y_low, &y_high);
        T diff1 = out_grad_this_bin * w1 / count;
        T diff2 = out_grad_this_bin * w2 / count;
        T diff3 = out_grad_this_bin * w3 / count;
        T diff4 = out_grad_this_bin * w4 / count;
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
                                  diff1);
          platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
                                  diff2);
          platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
                                  diff3);
          platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
                                  diff4);
        }
      }
    }
  }
}
// Host-side forward kernel for the roi_align op: builds a per-ROI batch
// index table on the CPU (from the optional "RoisNum" input or from the
// ROIs' LoD), validates it against the input batch, copies it to the device,
// and launches GPUROIAlignForward.
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<Tensor>("X");
    auto* rois = ctx.Input<LoDTensor>("ROIs");
    auto* out = ctx.Output<Tensor>("Out");
    auto pooled_height = ctx.Attr<int>("pooled_height");
    auto pooled_width = ctx.Attr<int>("pooled_width");
    auto spatial_scale = ctx.Attr<float>("spatial_scale");
    auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
    auto aligned = ctx.Attr<bool>("aligned");
    auto in_dims = in->dims();
    // Input layout is NCHW.
    int batch_size = in_dims[0];
    int channels = in_dims[1];
    int height = in_dims[2];
    int width = in_dims[3];
    int rois_num = rois->dims()[0];
    if (rois_num == 0) return;  // nothing to pool
    int output_size = out->numel();
    int blocks = NumBlocks(output_size);
    int threads = kNumCUDAThreads;
#ifdef WITH_NV_JETSON
    // Jetson devices prefer smaller blocks.
    platform::ChangeThreadNum(ctx.cuda_device_context(), &threads, 256);
#endif
    // CPU-side table mapping each ROI index to its image's batch index.
    Tensor roi_batch_id_list;
    roi_batch_id_list.Resize({rois_num});
    auto cplace = platform::CPUPlace();
    int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
    auto& dev_ctx = ctx.cuda_device_context();
    auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
    if (ctx.HasInput("RoisNum")) {
      // "RoisNum" holds the per-image ROI counts on the device.
      auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
      int rois_batch_size = rois_num_t->numel();
      PADDLE_ENFORCE_EQ(
          rois_batch_size, batch_size,
          platform::errors::InvalidArgument(
              "The rois_batch_size and imgs "
              "batch_size must be the same. But received rois_batch_size = %d, "
              "batch_size = %d",
              rois_batch_size, batch_size));
      std::vector<int> rois_num_list(rois_batch_size);
      memory::Copy(cplace, rois_num_list.data(), gplace,
                   rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
      int start = 0;
      for (int n = 0; n < rois_batch_size; ++n) {
        for (int i = start; i < start + rois_num_list[n]; ++i) {
          roi_batch_id_data[i] = n;
        }
        start += rois_num_list[n];
      }
    } else {
      // Fall back to the LoD attached to the ROIs tensor.
      auto lod = rois->lod();
      PADDLE_ENFORCE_EQ(
          lod.empty(), false,
          platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
                                            "not contain LoD information."));
      auto rois_lod = lod.back();
      int rois_batch_size = rois_lod.size() - 1;
      PADDLE_ENFORCE_EQ(
          rois_batch_size, batch_size,
          platform::errors::InvalidArgument(
              "The batch size of rois and batch size "
              "of images must be the same. But received rois batch size = %d, "
              "and images batch size = %d",
              rois_batch_size, batch_size));
      int rois_num_with_lod = rois_lod[rois_batch_size];
      PADDLE_ENFORCE_EQ(
          rois_num, rois_num_with_lod,
          platform::errors::InvalidArgument(
              "The actual number of rois and the number of rois "
              "provided from Input(RoIsLoD) in RoIAlign must be the same."
              " But received actual number of rois is %d, and the number "
              "of rois from RoIsLoD is %d",
              rois_num, rois_num_with_lod));
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
          roi_batch_id_data[i] = n;
        }
      }
    }
    // Copy the batch-id table to device memory and launch the kernel.
    int bytes = roi_batch_id_list.numel() * sizeof(int);
    auto roi_ptr = memory::Alloc(dev_ctx, bytes);
    int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
    memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
                 dev_ctx.stream());
    hipLaunchKernelGGL(( GPUROIAlignForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
        output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
        height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
        out->mutable_data<T>(ctx.GetPlace()), aligned);
  }
};
// Host-side backward kernel for the roi_align op: rebuilds the per-ROI batch
// index table, zero-initializes the input gradient, and launches
// GPUROIAlignBackward, which accumulates gradients atomically.
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<Tensor>("X");
    auto* rois = ctx.Input<LoDTensor>("ROIs");
    auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto pooled_height = ctx.Attr<int>("pooled_height");
    auto pooled_width = ctx.Attr<int>("pooled_width");
    auto spatial_scale = ctx.Attr<float>("spatial_scale");
    auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
    auto aligned = ctx.Attr<bool>("aligned");
    int rois_num = rois->dims()[0];
    int channels = in->dims()[1];
    int height = in->dims()[2];
    int width = in->dims()[3];
    // No gradient requested for X; nothing to compute.
    if (!in_grad) {
      return;
    }
    // CPU-side table mapping each ROI index to its image's batch index.
    Tensor roi_batch_id_list;
    roi_batch_id_list.Resize({rois_num});
    auto cplace = platform::CPUPlace();
    int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
    auto& dev_ctx = ctx.cuda_device_context();
    auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
    if (ctx.HasInput("RoisNum")) {
      auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
      int rois_batch_size = rois_num_t->numel();
      std::vector<int> rois_num_list(rois_batch_size);
      memory::Copy(cplace, rois_num_list.data(), gplace,
                   rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
      int start = 0;
      // NOTE(review): the loop index is size_t while `start` is int — the
      // forward kernel's equivalent loop uses int; presumably harmless for
      // non-negative counts, but worth unifying.
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = start; i < start + rois_num_list[n]; ++i) {
          roi_batch_id_data[i] = n;
        }
        start += rois_num_list[n];
      }
    } else {
      auto rois_lod = rois->lod().back();
      int rois_batch_size = rois_lod.size() - 1;
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
          roi_batch_id_data[i] = n;
        }
      }
    }
    // Copy the batch-id table to the device.
    auto roi_ptr =
        memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int));
    int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
    int bytes = roi_batch_id_list.numel() * sizeof(int);
    memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
                 dev_ctx.stream());
    // Gradients are accumulated with atomics, so start from zero.
    in_grad->mutable_data<T>(ctx.GetPlace());
    math::SetConstant<Place, T> set_zero;
    set_zero(dev_ctx, in_grad, static_cast<T>(0));
    int output_grad_size = out_grad->numel();
    int blocks = NumBlocks(output_grad_size);
    int threads = kNumCUDAThreads;
    if (output_grad_size > 0) {
      hipLaunchKernelGGL(( GPUROIAlignBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
          output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
          spatial_scale, channels, height, width, pooled_height, pooled_width,
          sampling_ratio, roi_id_data, in_grad->mutable_data<T>(ctx.GetPlace()),
          aligned);
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
| e7b984aed519a9a698798f51124cbdcb4bfbbaae.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/roi_align_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
// Launch configuration: 512 threads per block, grid capped at 4096 blocks
// (CUDA_KERNEL_LOOP in the kernels grid-strides over any remaining work).
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static constexpr int kROISize = 4;

// Ceil-divides N work items into thread blocks, clamped to the block cap.
static inline int NumBlocks(const int N) {
  const int needed = (N + kNumCUDAThreads - 1) / kNumCUDAThreads;
  return needed < kNumMaxinumNumBlocks ? needed : kNumMaxinumNumBlocks;
}
// Bilinearly samples the row-major height x width plane input_data at the
// fractional coordinate (x, y). Points more than one pixel outside the image
// return 0; otherwise coordinates are clamped to the valid range so border
// samples degenerate toward nearest-pixel reads.
template <class T>
__device__ T BilinearInterpolate(const T* input_data, const int height,
                                 const int width, T y, T x) {
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return 0;
  }
  y = y <= 0 ? 0 : y;
  x = x <= 0 ? 0 : x;
  int y_low = static_cast<int>(y);
  int x_low = static_cast<int>(x);
  int y_high;
  int x_high;
  if (y_low >= height - 1) {
    // Clamp to the last row; the fractional part along y becomes 0.
    y_high = y_low = height - 1;
    y = static_cast<T>(y_low);
  } else {
    y_high = y_low + 1;
  }
  if (x_low >= width - 1) {
    // Clamp to the last column; the fractional part along x becomes 0.
    x_high = x_low = width - 1;
    x = static_cast<T>(x_low);
  } else {
    x_high = x_low + 1;
  }
  // l* = fractional distance from the low corner, h* = its complement.
  T ly = y - y_low, lx = x - x_low;
  T hy = 1. - ly, hx = 1. - lx;
  // Values of the four surrounding pixels.
  T v1 = input_data[y_low * width + x_low];
  T v2 = input_data[y_low * width + x_high];
  T v3 = input_data[y_high * width + x_low];
  T v4 = input_data[y_high * width + x_high];
  // Standard bilinear weights; they sum to 1.
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
  return val;
}
// Computes the four bilinear weights (w1..w4) and the integer corner
// coordinates used when sampling point (x, y); the backward kernel uses them
// to scatter gradients to the four contributing pixels. For out-of-range
// points the outputs are left untouched (the caller initializes the corners
// to -1 and skips such samples).
template <class T>
__device__ void BilinearInterpolateGradient(const int height, const int width,
                                            T y, T x, T* w1, T* w2, T* w3,
                                            T* w4, int* x_low, int* x_high,
                                            int* y_low, int* y_high) {
  if (y < -1.0 || y > height || x < -1.0 || x > width) {
    return;
  }
  y = y <= 0 ? 0 : y;
  x = x <= 0 ? 0 : x;
  *y_low = static_cast<int>(y);
  *x_low = static_cast<int>(x);
  if (*y_low >= height - 1) {
    // Clamp to the last row, mirroring BilinearInterpolate.
    *y_high = *y_low = height - 1;
    y = static_cast<T>(*y_low);
  } else {
    *y_high = *y_low + 1;
  }
  if (*x_low >= width - 1) {
    *x_high = *x_low = width - 1;
    x = static_cast<T>(*x_low);
  } else {
    *x_high = *x_low + 1;
  }
  // Same weight computation as the forward interpolation.
  T ly = y - *y_low, lx = x - *x_low;
  T hy = 1. - ly, hx = 1. - lx;
  *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx;
  return;
}
// Forward ROI Align kernel. One thread per output element; the linear index
// i decodes to (n, c, ph, pw) = (roi, channel, pooled row, pooled col). Each
// output value is the average of bilinear samples taken on a
// roi_bin_grid_h x roi_bin_grid_w grid inside the corresponding bin.
template <class T>
__global__ void GPUROIAlignForward(
    const int nthreads, const T* input_data, const T* input_rois,
    const float spatial_scale, const int channels, const int height,
    const int width, const int pooled_height, const int pooled_width,
    const int sampling_ratio, int* roi_batch_id_data, T* output_data,
    const bool continuous_coordinate) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    // Decode the flat index into (pw, ph, c, n).
    int pw = i % pooled_width;
    int ph = (i / pooled_width) % pooled_height;
    int c = (i / pooled_width / pooled_height) % channels;
    int n = i / pooled_width / pooled_height / channels;
    const T* offset_input_rois = input_rois + n * kROISize;
    int roi_batch_ind = roi_batch_id_data[n];
    // "Aligned" (continuous) mode shifts by half a pixel so samples are
    // centered on pixel centers.
    T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0;
    T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
    T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
    T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
    T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
    T roi_width = roi_xmax - roi_xmin;
    T roi_height = roi_ymax - roi_ymin;
    if (!continuous_coordinate) {
      // Legacy behavior: force ROIs to span at least one pixel.
      roi_width = max(roi_width, static_cast<T>(1.));
      roi_height = max(roi_height, static_cast<T>(1.));
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    const T* offset_input_data =
        input_data + (roi_batch_ind * channels + c) * height * width;
    // Sampling grid resolution per bin: the explicit ratio if positive,
    // otherwise adaptive (ceil of the bin size).
    int roi_bin_grid_h = (sampling_ratio > 0)
                             ? sampling_ratio
                             : ceil(roi_height / pooled_height);
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1);
    T output_val = 0;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = roi_ymin + ph * bin_size_h +
                  static_cast<T>(iy + .5f) * bin_size_h /
                      static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_xmin + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w /
                        static_cast<T>(roi_bin_grid_w);
        T val = BilinearInterpolate(offset_input_data, height, width, y, x);
        output_val += val;
      }
    }
    // Average over all samples in this bin.
    output_val /= count;
    output_data[i] = output_val;
  }
}
// Backward ROI Align kernel. One thread per output-gradient element; each
// thread redistributes its out_grad value to the four input pixels that
// contributed to every bilinear sample of its bin, weighted by the forward
// interpolation weights and divided by the per-bin sample count.
// Accumulation into input_grad uses atomics because bins from different
// (possibly overlapping) ROIs can touch the same input pixel.
template <typename T>
__global__ void GPUROIAlignBackward(
    const int nthreads, const T* input_rois, const T* out_grad,
    const int num_rois, const float spatial_scale, const int channels,
    const int height, const int width, const int pooled_height,
    const int pooled_width, const int sampling_ratio, int* roi_batch_id_data,
    T* input_grad, const bool continuous_coordinate) {
  CUDA_KERNEL_LOOP(i, nthreads) {
    // Decode the flat index into (pw, ph, c, n), as in the forward kernel.
    int pw = i % pooled_width;
    int ph = (i / pooled_width) % pooled_height;
    int c = (i / pooled_width / pooled_height) % channels;
    int n = i / pooled_width / pooled_height / channels;
    const T* offset_input_rois = input_rois + n * kROISize;
    int roi_batch_ind = roi_batch_id_data[n];
    T roi_offset = continuous_coordinate ? T(0.5) : 0;
    T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset;
    T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset;
    T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset;
    T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset;
    T roi_width = roi_xmax - roi_xmin;
    T roi_height = roi_ymax - roi_ymin;
    if (!continuous_coordinate) {
      roi_width = max(roi_width, static_cast<T>(1.));
      roi_height = max(roi_height, static_cast<T>(1.));
    }
    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
    T* offset_input_grad =
        input_grad + (roi_batch_ind * channels + c) * height * width;
    const T* offset_out_grad =
        out_grad + (n * channels + c) * pooled_height * pooled_width;
    const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw];
    int roi_bin_grid_h = (sampling_ratio > 0)
                             ? sampling_ratio
                             : ceil(roi_height / pooled_height);
    int roi_bin_grid_w =
        (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
    const T count = roi_bin_grid_h * roi_bin_grid_w;
    for (int iy = 0; iy < roi_bin_grid_h; iy++) {
      const T y = roi_ymin + ph * bin_size_h +
                  static_cast<T>(iy + .5f) * bin_size_h /
                      static_cast<T>(roi_bin_grid_h);
      for (int ix = 0; ix < roi_bin_grid_w; ix++) {
        const T x = roi_xmin + pw * bin_size_w +
                    static_cast<T>(ix + .5f) * bin_size_w /
                        static_cast<T>(roi_bin_grid_w);
        T w1 = 0, w2 = 0, w3 = 0, w4 = 0;
        // Corners stay -1 for out-of-range samples, which are skipped below.
        int x_low = -1, x_high = -1, y_low = -1, y_high = -1;
        BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4,
                                    &x_low, &x_high, &y_low, &y_high);
        T diff1 = out_grad_this_bin * w1 / count;
        T diff2 = out_grad_this_bin * w2 / count;
        T diff3 = out_grad_this_bin * w3 / count;
        T diff4 = out_grad_this_bin * w4 / count;
        if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
          platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low,
                                  diff1);
          platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high,
                                  diff2);
          platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low,
                                  diff3);
          platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high,
                                  diff4);
        }
      }
    }
  }
}
// Host-side forward kernel for the roi_align op: builds a per-ROI batch
// index table on the CPU (from the optional "RoisNum" input or from the
// ROIs' LoD), validates it against the input batch, copies it to the device,
// and launches GPUROIAlignForward.
template <typename Place, typename T>
class GPUROIAlignOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<Tensor>("X");
    auto* rois = ctx.Input<LoDTensor>("ROIs");
    auto* out = ctx.Output<Tensor>("Out");
    auto pooled_height = ctx.Attr<int>("pooled_height");
    auto pooled_width = ctx.Attr<int>("pooled_width");
    auto spatial_scale = ctx.Attr<float>("spatial_scale");
    auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
    auto aligned = ctx.Attr<bool>("aligned");
    auto in_dims = in->dims();
    // Input layout is NCHW.
    int batch_size = in_dims[0];
    int channels = in_dims[1];
    int height = in_dims[2];
    int width = in_dims[3];
    int rois_num = rois->dims()[0];
    if (rois_num == 0) return;  // nothing to pool
    int output_size = out->numel();
    int blocks = NumBlocks(output_size);
    int threads = kNumCUDAThreads;
#ifdef WITH_NV_JETSON
    // Jetson devices prefer smaller blocks.
    platform::ChangeThreadNum(ctx.cuda_device_context(), &threads, 256);
#endif
    // CPU-side table mapping each ROI index to its image's batch index.
    Tensor roi_batch_id_list;
    roi_batch_id_list.Resize({rois_num});
    auto cplace = platform::CPUPlace();
    int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
    auto& dev_ctx = ctx.cuda_device_context();
    auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
    if (ctx.HasInput("RoisNum")) {
      // "RoisNum" holds the per-image ROI counts on the device.
      auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
      int rois_batch_size = rois_num_t->numel();
      PADDLE_ENFORCE_EQ(
          rois_batch_size, batch_size,
          platform::errors::InvalidArgument(
              "The rois_batch_size and imgs "
              "batch_size must be the same. But received rois_batch_size = %d, "
              "batch_size = %d",
              rois_batch_size, batch_size));
      std::vector<int> rois_num_list(rois_batch_size);
      memory::Copy(cplace, rois_num_list.data(), gplace,
                   rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
      int start = 0;
      for (int n = 0; n < rois_batch_size; ++n) {
        for (int i = start; i < start + rois_num_list[n]; ++i) {
          roi_batch_id_data[i] = n;
        }
        start += rois_num_list[n];
      }
    } else {
      // Fall back to the LoD attached to the ROIs tensor.
      auto lod = rois->lod();
      PADDLE_ENFORCE_EQ(
          lod.empty(), false,
          platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does "
                                            "not contain LoD information."));
      auto rois_lod = lod.back();
      int rois_batch_size = rois_lod.size() - 1;
      PADDLE_ENFORCE_EQ(
          rois_batch_size, batch_size,
          platform::errors::InvalidArgument(
              "The batch size of rois and batch size "
              "of images must be the same. But received rois batch size = %d, "
              "and images batch size = %d",
              rois_batch_size, batch_size));
      int rois_num_with_lod = rois_lod[rois_batch_size];
      PADDLE_ENFORCE_EQ(
          rois_num, rois_num_with_lod,
          platform::errors::InvalidArgument(
              "The actual number of rois and the number of rois "
              "provided from Input(RoIsLoD) in RoIAlign must be the same."
              " But received actual number of rois is %d, and the number "
              "of rois from RoIsLoD is %d",
              rois_num, rois_num_with_lod));
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
          roi_batch_id_data[i] = n;
        }
      }
    }
    // Copy the batch-id table to device memory and launch the kernel.
    int bytes = roi_batch_id_list.numel() * sizeof(int);
    auto roi_ptr = memory::Alloc(dev_ctx, bytes);
    int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
    memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
                 dev_ctx.stream());
    GPUROIAlignForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
        output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels,
        height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data,
        out->mutable_data<T>(ctx.GetPlace()), aligned);
  }
};
// Host-side backward kernel for the roi_align op: rebuilds the per-ROI batch
// index table, zero-initializes the input gradient, and launches
// GPUROIAlignBackward, which accumulates gradients atomically.
template <typename Place, typename T>
class GPUROIAlignGradOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* in = ctx.Input<Tensor>("X");
    auto* rois = ctx.Input<LoDTensor>("ROIs");
    auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto pooled_height = ctx.Attr<int>("pooled_height");
    auto pooled_width = ctx.Attr<int>("pooled_width");
    auto spatial_scale = ctx.Attr<float>("spatial_scale");
    auto sampling_ratio = ctx.Attr<int>("sampling_ratio");
    auto aligned = ctx.Attr<bool>("aligned");
    int rois_num = rois->dims()[0];
    int channels = in->dims()[1];
    int height = in->dims()[2];
    int width = in->dims()[3];
    // No gradient requested for X; nothing to compute.
    if (!in_grad) {
      return;
    }
    // CPU-side table mapping each ROI index to its image's batch index.
    Tensor roi_batch_id_list;
    roi_batch_id_list.Resize({rois_num});
    auto cplace = platform::CPUPlace();
    int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace);
    auto& dev_ctx = ctx.cuda_device_context();
    auto gplace = BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace());
    if (ctx.HasInput("RoisNum")) {
      auto* rois_num_t = ctx.Input<Tensor>("RoisNum");
      int rois_batch_size = rois_num_t->numel();
      std::vector<int> rois_num_list(rois_batch_size);
      memory::Copy(cplace, rois_num_list.data(), gplace,
                   rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0);
      int start = 0;
      // NOTE(review): the loop index is size_t while `start` is int — the
      // forward kernel's equivalent loop uses int; presumably harmless for
      // non-negative counts, but worth unifying.
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = start; i < start + rois_num_list[n]; ++i) {
          roi_batch_id_data[i] = n;
        }
        start += rois_num_list[n];
      }
    } else {
      auto rois_lod = rois->lod().back();
      int rois_batch_size = rois_lod.size() - 1;
      for (int n = 0; n < rois_batch_size; ++n) {
        for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
          roi_batch_id_data[i] = n;
        }
      }
    }
    // Copy the batch-id table to the device.
    auto roi_ptr =
        memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int));
    int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr());
    int bytes = roi_batch_id_list.numel() * sizeof(int);
    memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes,
                 dev_ctx.stream());
    // Gradients are accumulated with atomics, so start from zero.
    in_grad->mutable_data<T>(ctx.GetPlace());
    math::SetConstant<Place, T> set_zero;
    set_zero(dev_ctx, in_grad, static_cast<T>(0));
    int output_grad_size = out_grad->numel();
    int blocks = NumBlocks(output_grad_size);
    int threads = kNumCUDAThreads;
    if (output_grad_size > 0) {
      GPUROIAlignBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>(
          output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num,
          spatial_scale, channels, height, width, pooled_height, pooled_width,
          sampling_ratio, roi_id_data, in_grad->mutable_data<T>(ctx.GetPlace()),
          aligned);
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
roi_align,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
roi_align_grad,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>,
ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
|
514427fcf811b1dfa23a22405a8261a5150ffd97.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef MINESWEEPER_CUH
#define MINESWEEPER_CUH
#include "minesweeperUtilsGPU.cuh"
#define FLAG -1
#define COVERED -2
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
enum class TileStatus {
MustBeMine,
MustBeSafe,
Unknown,
};
// True when (x, y) lies outside the [0, width) x [0, height) board.
__device__
inline bool isOutOfRange(int x, int y, int width, int height) {
  return x < 0 || y < 0 || x >= width || y >= height;
}
// Determines the status (Safe, Mine, or Unknown) of the neighbors of tile (x,y)
// Only call this function with (x,y) values that are in bounds
//
// Deduction rules for the covered neighbors of an uncovered clue tile:
//  - clue == adjacent flags: every remaining covered neighbor must be safe;
//  - clue - flags == covered neighbors: every covered neighbor is a mine;
//  - otherwise nothing can be concluded from this tile alone.
__device__
TileStatus determineNeighborStatus(int x, int y, int width, int height, int* output) {
  int idx = y * width + x;
  int numMines = output[idx];
  // If this neighbor is covered, then we have nothing to do
  if (numMines == COVERED)
    return TileStatus::Unknown;
  int flagCount = 0;
  int coveredCount = 0;
  // Count flags and still-covered tiles in the 8-neighborhood of (x, y).
  for (int ny = y - 1; ny <= y + 1; ny++) {
    for (int nx = x - 1; nx <= x + 1; nx++) {
      if (isOutOfRange(nx, ny, width, height) || (nx == x && ny == y))
        continue;
      int nIdx = ny * width + nx;
      if (output[nIdx] == FLAG)
        flagCount += 1;
      else if (output[nIdx] == COVERED)
        coveredCount += 1;
    }
  }
  if (numMines == flagCount)
    return TileStatus::MustBeSafe;
  else if (numMines - flagCount == coveredCount)
    return TileStatus::MustBeMine;
  else
    return TileStatus::Unknown;
}
// One solver pass. Each thread owns one board tile; if the tile is still
// covered, the thread scans its uncovered neighbors for a clue that forces a
// deduction: flag the tile (write -1 and decrement the shared mine counter)
// or click it. Reads of `output` can race with concurrent writes from other
// threads within a pass; the host relaunches the kernel repeatedly, so
// deductions missed in one pass are picked up in a later one.
__global__
void solveMinesweeperBoard(int width, int height, int* numMines, int* output) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int idx = y * width + x;
  if (x >= width || y >= height)
    return;
  // Already figured out this tile, so nothing to do.
  if (output[idx] >= -1)
    return;
  for (int ny = y - 1; ny <= y + 1; ny++) {
    for (int nx = x - 1; nx <= x + 1; nx++) {
      if (isOutOfRange(nx, ny, width, height) || (nx == x && ny == y))
        continue;
      TileStatus status = determineNeighborStatus(nx, ny, width, height, output);
      if (status == TileStatus::MustBeMine) {
        atomicSub(numMines, 1);
        output[idx] = -1;  // mark with FLAG
        return;
      }
      else if (status == TileStatus::MustBeSafe) {
        output[idx] = clickTile(x,y);
        return;
      }
    }
  }
}
// Prints the board to stdout: '.' for covered tiles, 'A' for flagged mines,
// otherwise the tile's stored clue digit.
void printMyBoard(int width, int height, int* output) {
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      const int cell = output[row * width + col];
      if (cell == COVERED) {
        printf(".");
      } else if (cell == FLAG) {
        printf("A");
      } else {
        printf("%d", cell);
      }
    }
    printf("\n");
  }
  printf("\n");
}
// Host driver for the GPU solver. Initializes the board to all-covered with
// the starting tile revealed as safe, then repeatedly launches solver passes
// until every mine is flagged, or the board has not changed for 10000
// consecutive passes (at which point the partial board is dumped and the
// solve is abandoned).
void minesweeperGPU(int width, int height, int numMines, int startX, int startY, int* output) {
  printf("Running GPU solver: simple\n");
  for (int i = 0; i < width*height; ++i) {
    output[i] = COVERED;
  }
  int startIdx = startY * width + startX;
  output[startIdx] = 0;
  // Managed mine counter shared between host (loop condition) and device
  // (atomicSub in the kernel).
  int* numMinesGPU;
  checkCudaError(hipMallocManaged(&numMinesGPU, sizeof(numMines)));
  *numMinesGPU = numMines;
  int xBlockSize = BLOCKSIZE_X;
  int yBlockSize = BLOCKSIZE_Y;
  int xNumBlocks = (width + xBlockSize - 1) / xBlockSize;
  int yNumBlocks = (height + yBlockSize - 1) / yBlockSize;
  int sameCount = 0;
  int numMinesLast = numMines;
  while(*numMinesGPU > 0) {
    if (numMinesLast == *numMinesGPU) {
      sameCount += 1;
    } else {
      // Bug fix: previously the stall counter accumulated over the whole
      // run, so any 10000 unproductive passes — even non-consecutive ones —
      // aborted the solve. Reset it whenever a pass makes progress so only
      // 10000 *consecutive* stalled passes trigger the bail-out.
      sameCount = 0;
    }
    if (sameCount >= 10000) {
      printMyBoard(width, height, output);
      printf("Num Mines Left: %d\n", *numMinesGPU);
      break;
    }
    numMinesLast = *numMinesGPU;
    hipLaunchKernelGGL(( solveMinesweeperBoard), dim3(dim3(xNumBlocks, yNumBlocks, 1)), dim3(dim3(xBlockSize, yBlockSize, 1)), 0, 0, width, height, numMinesGPU, output);
    checkCudaError(hipGetLastError());
    // Synchronize so the host reads an up-to-date *numMinesGPU.
    checkCudaError(hipDeviceSynchronize());
  }
  hipFree(numMinesGPU);
}
#undef FLAG
#undef COVERED
#undef BLOCKSIZE_X
#undef BLOCKSIZE_Y
#endif // MINESWEEPER_CUH
| 514427fcf811b1dfa23a22405a8261a5150ffd97.cu | #ifndef MINESWEEPER_CUH
#define MINESWEEPER_CUH
#include "minesweeperUtilsGPU.cuh"
#define FLAG -1
#define COVERED -2
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
enum class TileStatus {
MustBeMine,
MustBeSafe,
Unknown,
};
// True when (x, y) lies outside the [0, width) x [0, height) board.
__device__
inline bool isOutOfRange(int x, int y, int width, int height) {
  return x < 0 || y < 0 || x >= width || y >= height;
}
// Determines the status (Safe, Mine, or Unknown) of the neighbors of tile (x,y)
// Only call this function with (x,y) values that are in bounds
//
// Deduction rules for the covered neighbors of an uncovered clue tile:
//  - clue == adjacent flags: every remaining covered neighbor must be safe;
//  - clue - flags == covered neighbors: every covered neighbor is a mine;
//  - otherwise nothing can be concluded from this tile alone.
__device__
TileStatus determineNeighborStatus(int x, int y, int width, int height, int* output) {
  int idx = y * width + x;
  int numMines = output[idx];
  // If this neighbor is covered, then we have nothing to do
  if (numMines == COVERED)
    return TileStatus::Unknown;
  int flagCount = 0;
  int coveredCount = 0;
  // Count flags and still-covered tiles in the 8-neighborhood of (x, y).
  for (int ny = y - 1; ny <= y + 1; ny++) {
    for (int nx = x - 1; nx <= x + 1; nx++) {
      if (isOutOfRange(nx, ny, width, height) || (nx == x && ny == y))
        continue;
      int nIdx = ny * width + nx;
      if (output[nIdx] == FLAG)
        flagCount += 1;
      else if (output[nIdx] == COVERED)
        coveredCount += 1;
    }
  }
  if (numMines == flagCount)
    return TileStatus::MustBeSafe;
  else if (numMines - flagCount == coveredCount)
    return TileStatus::MustBeMine;
  else
    return TileStatus::Unknown;
}
// One solver pass. Each thread owns one board tile; if the tile is still
// covered, the thread scans its uncovered neighbors for a clue that forces a
// deduction: flag the tile (write -1 and decrement the shared mine counter)
// or click it. Reads of `output` can race with concurrent writes from other
// threads within a pass; the host relaunches the kernel repeatedly, so
// deductions missed in one pass are picked up in a later one.
__global__
void solveMinesweeperBoard(int width, int height, int* numMines, int* output) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int idx = y * width + x;
  if (x >= width || y >= height)
    return;
  // Already figured out this tile, so nothing to do.
  if (output[idx] >= -1)
    return;
  for (int ny = y - 1; ny <= y + 1; ny++) {
    for (int nx = x - 1; nx <= x + 1; nx++) {
      if (isOutOfRange(nx, ny, width, height) || (nx == x && ny == y))
        continue;
      TileStatus status = determineNeighborStatus(nx, ny, width, height, output);
      if (status == TileStatus::MustBeMine) {
        atomicSub(numMines, 1);
        output[idx] = -1;  // mark with FLAG
        return;
      }
      else if (status == TileStatus::MustBeSafe) {
        output[idx] = clickTile(x,y);
        return;
      }
    }
  }
}
// Host-side debug helper: prints an ASCII rendering of the board to stdout.
// '.' = covered tile, 'A' = flagged tile, otherwise the adjacent-mine digit.
void printMyBoard(int width, int height, int* output) {
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
int value = output[y * width + x];
if (value == COVERED)
printf(".");
else if (value == FLAG)
printf("A");
else
printf("%d", value);
}
printf("\n");
}
printf("\n");
}
// Host driver for the simple GPU minesweeper solver.
//
// Resets the board to all-COVERED, reveals the starting tile, then repeatedly
// launches solveMinesweeperBoard (one thread per tile) until either every mine
// has been flagged or the solver stalls for many *consecutive* launches, in
// which case the board is dumped for debugging and the loop gives up.
//
// NOTE(review): `output` is written by the host here and by the kernel, so it
// must be device-accessible (e.g. managed/zero-copy memory) — confirm at the
// call site.
void minesweeperGPU(int width, int height, int numMines, int startX, int startY, int* output) {
    printf("Running GPU solver: simple\n");
    // Start from a fully covered board.
    for (int i = 0; i < width * height; ++i) {
        output[i] = COVERED;
    }
    // Reveal the starting tile; 0 marks it uncovered with no adjacent mines
    // (the start square is assumed safe).
    int startIdx = startY * width + startX;
    output[startIdx] = 0;
    // Managed counter of unflagged mines, decremented by the kernel via atomicSub.
    int* numMinesGPU;
    checkCudaError(cudaMallocManaged(&numMinesGPU, sizeof(numMines)));
    *numMinesGPU = numMines;
    int xBlockSize = BLOCKSIZE_X;
    int yBlockSize = BLOCKSIZE_Y;
    int xNumBlocks = (width + xBlockSize - 1) / xBlockSize;   // ceil-div
    int yNumBlocks = (height + yBlockSize - 1) / yBlockSize;  // ceil-div
    int sameCount = 0;           // consecutive launches with no progress
    int numMinesLast = numMines;
    while (*numMinesGPU > 0) {
        if (numMinesLast == *numMinesGPU) {
            sameCount += 1;
        } else {
            // Progress was made; only consecutive stalled launches should
            // count toward the give-up threshold.
            sameCount = 0;
        }
        if (sameCount >= 10000) {
            // Solver is stuck: the board is not solvable by per-tile inference.
            printMyBoard(width, height, output);
            printf("Num Mines Left: %d\n", *numMinesGPU);
            break;
        }
        numMinesLast = *numMinesGPU;
        solveMinesweeperBoard<<<dim3(xNumBlocks, yNumBlocks, 1), dim3(xBlockSize, yBlockSize, 1)>>>(width, height, numMinesGPU, output);
        checkCudaError(cudaGetLastError());      // launch-configuration errors
        checkCudaError(cudaDeviceSynchronize()); // execution errors; also makes
                                                 // *numMinesGPU safe to read on host
    }
    checkCudaError(cudaFree(numMinesGPU));
}
#undef FLAG
#undef COVERED
#undef BLOCKSIZE_X
#undef BLOCKSIZE_Y
#endif // MINESWEEPER_CUH
|
74ccd1ebb1f2ac2dd04fbadf92141ddaf427c970.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "gtest/gtest.h"
using xgboost::common::Span;
struct Shard { int id; };
// ReduceShards should apply the functor to every shard and sum the results:
// ids 0..3 give 0 + 1 + 2 + 3 = 6.
TEST(DeviceHelpers, Basic) {
std::vector<Shard> shards (4);
for (int i = 0; i < 4; ++i) {
shards[i].id = i;
}
int sum = dh::ReduceShards<int>(&shards, [](Shard& s) { return s.id ; });
ASSERT_EQ(sum, 6);
}
// Builds a random CSR-style test layout: *row_ptr receives num_rows + 1
// offsets, and *rows receives, for every element, the index of the row that
// owns it. Row lengths are uniform in [0, max_row_size). Uses rand(), so
// call srand() beforehand for reproducibility.
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
thrust::host_vector<int> *row_ptr,
thrust::host_vector<xgboost::bst_uint> *rows) {
row_ptr->resize(num_rows + 1);
int sum = 0;
// One extra iteration (i == num_rows) writes the terminating offset.
for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
(*row_ptr)[i] = sum;
sum += rand() % max_row_size; // NOLINT
if (i < num_rows) {
// Record the owning row index once per element of row i.
for (int j = (*row_ptr)[i]; j < sum; j++) {
(*rows).push_back(i);
}
}
}
}
// Exercises dh::TransformLbs (load-balancing search) over a grid of row
// counts and maximum row sizes: the device lambda writes the owning row index
// for every element, which must match the row indices CreateTestData
// produced on the host.
void TestLbs() {
srand(17);
dh::CubMemory temp_memory;
std::vector<int> test_rows = {4, 100, 1000};
std::vector<int> test_max_row_sizes = {4, 100, 1300};
for (auto num_rows : test_rows) {
for (auto max_row_size : test_max_row_sizes) {
thrust::host_vector<int> h_row_ptr;
thrust::host_vector<xgboost::bst_uint> h_rows;
CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
thrust::device_vector<size_t> row_ptr = h_row_ptr;
thrust::device_vector<int> output_row(h_rows.size());
auto d_output_row = output_row.data();
// idx = flat element index, ridx = owning row (segment) index.
dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
row_ptr.size() - 1, false,
[=] __device__(size_t idx, size_t ridx) {
d_output_row[idx] = ridx;
});
dh::safe_cuda(hipDeviceSynchronize());
ASSERT_TRUE(h_rows == output_row);
}
}
}
TEST(cub_lbs, Test) { TestLbs(); }
// Device sum reduction over 100 ones must be 100 (within float tolerance).
TEST(sumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
// Allocates three spans from a single BulkAllocator block and touches every
// element from the device; a bad allocation would surface as an illegal
// memory access at the synchronize. (Values are uninitialized — only
// accessibility is being tested.)
void TestAllocator() {
int n = 10;
Span<float> a;
Span<int> b;
Span<size_t> c;
dh::BulkAllocator ba;
ba.Allocate(0, &a, n, &b, n, &c, n);
// Should be no illegal memory accesses
dh::LaunchN(0, n, [=] __device__(size_t idx) { c[idx] = a[idx] + b[idx]; });
dh::safe_cuda(hipDeviceSynchronize());
}
// Define the test in a function so we can use device lambda
TEST(bulkAllocator, Test) { TestAllocator(); }
| 74ccd1ebb1f2ac2dd04fbadf92141ddaf427c970.cu |
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/device_vector.h>
#include <xgboost/base.h>
#include "../../../src/common/device_helpers.cuh"
#include "gtest/gtest.h"
using xgboost::common::Span;
struct Shard { int id; };
TEST(DeviceHelpers, Basic) {
std::vector<Shard> shards (4);
for (int i = 0; i < 4; ++i) {
shards[i].id = i;
}
int sum = dh::ReduceShards<int>(&shards, [](Shard& s) { return s.id ; });
ASSERT_EQ(sum, 6);
}
void CreateTestData(xgboost::bst_uint num_rows, int max_row_size,
thrust::host_vector<int> *row_ptr,
thrust::host_vector<xgboost::bst_uint> *rows) {
row_ptr->resize(num_rows + 1);
int sum = 0;
for (xgboost::bst_uint i = 0; i <= num_rows; i++) {
(*row_ptr)[i] = sum;
sum += rand() % max_row_size; // NOLINT
if (i < num_rows) {
for (int j = (*row_ptr)[i]; j < sum; j++) {
(*rows).push_back(i);
}
}
}
}
void TestLbs() {
srand(17);
dh::CubMemory temp_memory;
std::vector<int> test_rows = {4, 100, 1000};
std::vector<int> test_max_row_sizes = {4, 100, 1300};
for (auto num_rows : test_rows) {
for (auto max_row_size : test_max_row_sizes) {
thrust::host_vector<int> h_row_ptr;
thrust::host_vector<xgboost::bst_uint> h_rows;
CreateTestData(num_rows, max_row_size, &h_row_ptr, &h_rows);
thrust::device_vector<size_t> row_ptr = h_row_ptr;
thrust::device_vector<int> output_row(h_rows.size());
auto d_output_row = output_row.data();
dh::TransformLbs(0, &temp_memory, h_rows.size(), dh::Raw(row_ptr),
row_ptr.size() - 1, false,
[=] __device__(size_t idx, size_t ridx) {
d_output_row[idx] = ridx;
});
dh::safe_cuda(cudaDeviceSynchronize());
ASSERT_TRUE(h_rows == output_row);
}
}
}
TEST(cub_lbs, Test) { TestLbs(); }
TEST(sumReduce, Test) {
thrust::device_vector<float> data(100, 1.0f);
dh::CubMemory temp;
auto sum = dh::SumReduction(temp, dh::Raw(data), data.size());
ASSERT_NEAR(sum, 100.0f, 1e-5);
}
void TestAllocator() {
int n = 10;
Span<float> a;
Span<int> b;
Span<size_t> c;
dh::BulkAllocator ba;
ba.Allocate(0, &a, n, &b, n, &c, n);
// Should be no illegal memory accesses
dh::LaunchN(0, n, [=] __device__(size_t idx) { c[idx] = a[idx] + b[idx]; });
dh::safe_cuda(cudaDeviceSynchronize());
}
// Define the test in a function so we can use device lambda
TEST(bulkAllocator, Test) { TestAllocator(); }
|
0b553bc7785c0e5029599173e0816e68a3fddf12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <math.h>
#include <iostream>
#include <string.h>
#include <list>
#include <map>
#include "../libarff/arff_parser.h"
#include "../libarff/arff_data.h"
using namespace std;
struct Neighbor {
float distance;
int cls;
};
struct Instance {
float * attribs;
int cls;
};
// Returns the class label that occurs most often among the given neighbors.
// Ties are broken in favor of the class that first reaches the winning count;
// an empty list yields -1.
int majorityVote(list<Neighbor> & neighbors) {
    std::map<int, int> counts;
    int bestClass = -1;
    int bestCount = 0;
    for (std::list<Neighbor>::iterator it = neighbors.begin(); it != neighbors.end(); ++it) {
        int c = ++counts[it->cls];
        if (c > bestCount) {
            bestCount = c;
            bestClass = it->cls;
        }
    }
    return bestClass;
}
// Host-side KNN selection: walks the flattened numInstances x numInstances
// distance matrix row by row, keeps a sorted list of the k nearest neighbors
// for the current subject instance, and writes its majority-vote class into
// predictions. Row index i/numInstances is the subject, column i%numInstances
// the target. Mutates distances[]: diagonal entries are set to FLT_MAX so an
// instance never counts as its own neighbor.
void predictFromDistances (float * distances, Instance * instances, int numInstances,
int k, int * predictions) {
// Sentinel neighbor: infinite distance, invalid class.
Neighbor neighbor;
neighbor.distance = FLT_MAX;
neighbor.cls = -1;
// Sorted (ascending distance) list of the k best neighbors seen so far.
std::list<Neighbor> neighbors (k, neighbor);
for(int i=0; i < numInstances * numInstances; i++) {
if(i%numInstances == i/numInstances)
distances[i] = FLT_MAX; //subject and target instances are the same.
// Insert in order before the first worse entry, dropping the k+1-th.
for (std::list<Neighbor>::iterator it = neighbors.begin(); it != neighbors.end(); it++) {
if(distances[i] < (*it).distance) {
Neighbor neighbor;
neighbor.distance = distances[i];
neighbor.cls = instances[i%numInstances].cls;
neighbors.insert(it, neighbor);
neighbors.pop_back(); //Remove the last neighbor
break;
}
}
// End of a matrix row: vote, then reset the list for the next subject.
if((i+1)%numInstances == 0) {
predictions[i/numInstances] = majorityVote(neighbors);
neighbors.clear();
for (int x=0; x <k; x++)
{
Neighbor neighbor;
neighbor.distance = FLT_MAX;
neighbor.cls = -1;
neighbors.push_back(neighbor);
}
}
}
}
// Device-side majority vote over a fixed-size array of k neighbors.
// Tallies class frequencies into a temporary k-entry table (device-heap
// malloc) and returns the most frequent class. When every class occurs only
// once the default — neighbors[0].cls, the nearest neighbor — wins, which is
// also the correct tie-break toward the earliest entry.
__device__ int majorityVote(int k, Neighbor * neighbors) {
struct FrequencyMap {
int cls;
int freq;
};
// At most k distinct classes can appear among k neighbors.
FrequencyMap * freqMap = (FrequencyMap *)malloc(sizeof(FrequencyMap)*k);
int maxFrequency = 0;
int mostFrequentClass = neighbors[0].cls; //default, useful when k is 1
int numClasses = 0;
for(int i=0; i <k; i++) {
bool found = false;
// Linear scan of the classes seen so far (k is small).
for(int j=0; j < numClasses; j++) {
if(freqMap[j].cls == neighbors[i].cls) {
found = true;
freqMap[j].freq = freqMap[j].freq + 1;
if(freqMap[j].freq > maxFrequency) {
maxFrequency = freqMap[j].freq;
mostFrequentClass = freqMap[j].cls;
}
break;
}
}
if(!found) {
// First time this class is seen: add it to the table with count 1.
freqMap[numClasses].cls = neighbors[i].cls;
freqMap[numClasses].freq = 1;
numClasses++;
}
}
free(freqMap);
return mostFrequentClass;
}
/*
* Advance CUDA kernel function.
* Each thread simply calculates the distance of one specific instance to another specific one.
* Therefore, this model requires as many threads as the number of elements in the dataset
*/
// Kernel: one thread per (subject, target) instance pair. Thread i computes
// the Euclidean distance between instance i/numInstances and instance
// i%numInstances and writes it to distances[i], filling the flattened
// numInstances x numInstances distance matrix in one launch. Requires
// numInstances^2 threads.
__global__ void advanceCuda(Instance * instances, int numInstances, int numAttribs, float * distances)
{
// Flat global thread id doubles as the matrix index.
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Guard for the partial block at the tail of the grid.
if (i >= numInstances * numInstances) {
return;
}
Instance current_instance = instances[i/numInstances];
Instance target_instance = instances[i%numInstances];
float distance = 0;
// Sum of squared attribute differences.
for(int h = 0; h < numAttribs; h++)
{
float diff = current_instance.attribs[h] - target_instance.attribs[h];
distance += diff * diff;
}
distance = sqrt(distance);
distances[i] = distance;
}
/*
* Basic CUDA kernel function.
* Each threads runs KNN for exactly one instance in the dataset.
* Therefore, this model requires as many threads as the number of elements in the dataset
*/
// Kernel: one thread per dataset instance. Each thread runs the full KNN for
// its instance: it scans all other instances, maintains a sorted k-best
// neighbor array on the device heap, and writes the majority-vote class into
// prediction[i]. Requires numInstances threads.
__global__ void basicCuda(Instance * instances, int numInstances, int numAttribs,
int k, int * prediction)
{
// Flat global thread id = index of the instance this thread classifies.
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= numInstances) {
return;
}
Instance current_instance = instances[i];
// k-best array, ascending by distance; initialized to "no neighbor".
Neighbor * neighbors = (Neighbor *)malloc(sizeof(Neighbor)*k);
for(int p=0; p <k; p++) {
neighbors[p].distance = FLT_MAX;
neighbors[p].cls = -1;
}
for(int j = 0; j < numInstances; j++)
{
// An instance is never its own neighbor.
if(i == j) continue;
// Euclidean distance between instances i and j.
float distance = 0;
for(int h = 0; h < numAttribs; h++)
{
float diff = current_instance.attribs[h] - instances[j].attribs[h];
distance += diff * diff;
}
distance = sqrt(distance);
// Insert before the first worse entry; the reallocation shifts the tail
// down by one and drops the old worst neighbor.
for(int p=0; p <k; p++) {
if(distance < neighbors[p].distance) {
Neighbor neighbor;
neighbor.distance = distance;
neighbor.cls = instances[j].cls;
Neighbor * newNeighbors = (Neighbor *)malloc(sizeof(Neighbor)*k);
for(int q=0, r=0; q <k; q++) {
if(p == q) {
newNeighbors[q] = neighbor;
continue;
}
newNeighbors[q] = neighbors[r++];
}
free(neighbors);
neighbors = newNeighbors;
break;
}
}
}
prediction[i] = majorityVote(k, neighbors);
// Release the device-heap scratch array.
free(neighbors);
}
// GPU KNN, "advanced" variant: one device thread per (subject, target)
// instance pair. The advanceCuda kernel fills an N x N distance matrix on the
// device; the host then selects the k nearest neighbors per instance and
// majority-votes (predictFromDistances).
// Returns a malloc'd array with one predicted class per instance; the caller
// owns and frees it.
int* advanceCudaKNN(ArffData* dataset, int k)
{
    int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int));
    cout << "K is " << k << endl;
    int numElements = dataset->num_instances();
    int numAttribs = dataset->num_attributes() - 1; // last attribute is the class label
    Instance * h_instances = (Instance *)malloc(numElements * sizeof(Instance));
    // One thread per pair -> numElements^2 threads in total.
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements * numElements / threadsPerBlock) + 1;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    // Flatten the ARFF dataset into an array of Instance structs on the host.
    for (int i = 0; i < numElements; i++)
    {
        float * attribs = (float *) malloc (sizeof(float)*numAttribs);
        for (int h = 0; h < numAttribs; h++)
        {
            attribs[h] = dataset->get_instance(i)->get(h)->operator float();
        }
        h_instances[i].attribs = attribs;
        h_instances[i].cls = dataset->get_instance(i)->get(numAttribs)->operator int32();
    }
    // Mirror the instances on the device. The struct copy leaves each
    // `attribs` pointer aimed at host memory, so every one is replaced by a
    // device allocation via a pointer-sized patch memcpy.
    Instance * d_instances;
    hipMalloc(&d_instances, numElements*sizeof(Instance));
    hipMemcpy(d_instances, h_instances, numElements*sizeof(Instance), hipMemcpyHostToDevice);
    for (int i = 0; i < numElements; i++)
    {
        float * d_attribs;
        hipMalloc(&d_attribs, numAttribs*sizeof(float));
        hipMemcpy(d_attribs, h_instances[i].attribs, numAttribs*sizeof(float), hipMemcpyHostToDevice);
        hipMemcpy(&(d_instances[i].attribs), &d_attribs, sizeof(float*), hipMemcpyHostToDevice);
    }
    // N x N distance matrix, filled by the kernel.
    float * d_distances;
    hipMalloc(&d_distances, numElements*numElements*sizeof(float));
    hipLaunchKernelGGL(( advanceCuda), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_instances, numElements, numAttribs, d_distances);
    // Plain `err`, not a variable shadowing the hipError_t type name.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the distances back (hipMemcpy synchronizes with the kernel), then
    // run neighbor selection and voting on the host.
    float * h_distances = (float *) malloc(numElements*numElements*sizeof(float));
    hipMemcpy(h_distances, d_distances, numElements * numElements * sizeof(float), hipMemcpyDeviceToHost);
    predictFromDistances(h_distances, h_instances, numElements, k, predictions);
    // Free host memory.
    for (int i = 0; i < numElements; i++)
    {
        free(h_instances[i].attribs);
    }
    free(h_instances);
    free(h_distances);
    // Free device memory; the per-instance attribs pointers must be read back
    // from the device before they can be hipFree'd.
    hipFree(d_distances);
    Instance * h_d_instances = (Instance *)malloc(numElements * sizeof(Instance));
    hipMemcpy(h_d_instances, d_instances, numElements*sizeof(Instance), hipMemcpyDeviceToHost);
    for (int i = 0; i < numElements; i++)
    {
        hipFree(h_d_instances[i].attribs);
    }
    hipFree(d_instances);
    free(h_d_instances);
    return predictions;
}
// GPU KNN, basic variant: one device thread per instance; each thread runs
// the whole KNN for its instance (basicCuda kernel) and writes its predicted
// class directly into a device array.
// Returns a malloc'd array with one predicted class per instance; the caller
// owns and frees it.
int* basicCudaKNN(ArffData* dataset, int k)
{
    int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int));
    cout << "K is " << k << endl;
    int numElements = dataset->num_instances();
    int numAttribs = dataset->num_attributes() - 1; // last attribute is the class label
    Instance * h_instances = (Instance *)malloc(numElements * sizeof(Instance));
    // One thread per instance.
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements / threadsPerBlock) + 1;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    // Flatten the ARFF dataset into an array of Instance structs on the host.
    for (int i = 0; i < numElements; i++)
    {
        float * attribs = (float *) malloc (sizeof(float)*numAttribs);
        for (int h = 0; h < numAttribs; h++)
        {
            attribs[h] = dataset->get_instance(i)->get(h)->operator float();
        }
        h_instances[i].attribs = attribs;
        h_instances[i].cls = dataset->get_instance(i)->get(numAttribs)->operator int32();
    }
    // Mirror the instances on the device, patching each host attribs pointer
    // with a device allocation (pointer-sized memcpy into the struct field).
    Instance * d_instances;
    hipMalloc(&d_instances, numElements*sizeof(Instance));
    hipMemcpy(d_instances, h_instances, numElements*sizeof(Instance), hipMemcpyHostToDevice);
    for (int i = 0; i < numElements; i++)
    {
        float * d_attribs;
        hipMalloc(&d_attribs, numAttribs*sizeof(float));
        hipMemcpy(d_attribs, h_instances[i].attribs, numAttribs*sizeof(float), hipMemcpyHostToDevice);
        hipMemcpy(&(d_instances[i].attribs), &d_attribs, sizeof(float*), hipMemcpyHostToDevice);
    }
    int * d_predictions;
    hipMalloc(&d_predictions, numElements*sizeof(int));
    hipLaunchKernelGGL(( basicCuda), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_instances, numElements,
        numAttribs, k, d_predictions);
    // Check the launch *before* copying results back, so a failed launch
    // doesn't silently hand the caller garbage predictions.
    // Plain `err`, not a variable shadowing the hipError_t type name.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // hipMemcpy synchronizes with the kernel before copying the predictions.
    hipMemcpy(predictions, d_predictions, numElements * sizeof(int), hipMemcpyDeviceToHost);
    // Free host memory.
    for (int i = 0; i < numElements; i++)
    {
        free(h_instances[i].attribs);
    }
    free(h_instances);
    // Free device memory; the per-instance attribs pointers must be read back
    // from the device before they can be hipFree'd.
    hipFree(d_predictions);
    Instance * h_d_instances = (Instance *)malloc(numElements * sizeof(Instance));
    hipMemcpy(h_d_instances, d_instances, numElements*sizeof(Instance), hipMemcpyDeviceToHost);
    for (int i = 0; i < numElements; i++)
    {
        hipFree(h_d_instances[i].attribs);
    }
    hipFree(d_instances);
    free(h_d_instances);
    return predictions;
}
// Sequential CPU reference implementation of KNN: for every instance, scans
// all other instances, keeps the k nearest in a sorted list, and predicts by
// majority vote. O(n^2 * attribs). Returns a malloc'd array with one
// predicted class per instance; the caller owns and frees it.
int* KNN(ArffData* dataset, int k)
{
int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int));
// Attribute access pattern:
//   float v = dataset->get_instance(i)->get(attr)->operator float();
//   int  c = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
cout << "K is " << k << endl;
for(int i = 0; i < dataset->num_instances(); i++) // for each instance in the dataset
{
// Sentinel neighbor: infinite distance, invalid class.
Neighbor neighbor;
neighbor.distance = FLT_MAX;
neighbor.cls = -1;
// Sorted (ascending distance) list of the k best neighbors so far.
std::list<Neighbor> neighbors (k, neighbor);
for(int j = 0; j < dataset->num_instances(); j++) // target each other instance
{
if(i == j) continue;
// Euclidean distance over all non-class attributes.
float distance = 0;
for(int h = 0; h < dataset->num_attributes() - 1; h++)
{
float diff = dataset->get_instance(i)->get(h)->operator float() - dataset->get_instance(j)->get(h)->operator float();
distance += diff * diff;
}
distance = sqrt(distance);
// Insert in order before the first worse entry; drop the k+1-th.
for (std::list<Neighbor>::iterator it = neighbors.begin(); it != neighbors.end(); it++) {
if(distance < (*it).distance) {
Neighbor neighbor;
neighbor.distance = distance;
neighbor.cls = dataset->get_instance(j)->get(dataset->num_attributes() - 1)->operator int32();
neighbors.insert(it, neighbor);
neighbors.pop_back(); //Remove the last neighbor
break;
}
}
}
predictions[i] = majorityVote(neighbors);
}
return predictions;
}
// Builds a num_classes x num_classes confusion matrix (row = true class,
// column = predicted class) from the per-instance predictions.
// The matrix is calloc'd here; the caller owns and frees it.
int* computeConfusionMatrix(int* predictions, ArffData* dataset)
{
    int numClasses = dataset->num_classes();
    int* matrix = (int*)calloc(numClasses * numClasses, sizeof(int));
    for (int i = 0; i < dataset->num_instances(); i++)
    {
        int actual = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
        matrix[actual * numClasses + predictions[i]]++;
    }
    return matrix;
}
// Fraction of correct predictions: the sum of the confusion-matrix diagonal
// divided by the total number of instances.
float computeAccuracy(int* confusionMatrix, ArffData* dataset)
{
    int numClasses = dataset->num_classes();
    int correct = 0;
    for (int i = 0; i < numClasses; i++)
    {
        correct += confusionMatrix[i * numClasses + i];
    }
    return correct / (float) dataset->num_instances();
}
// Entry point. Usage: ./main <path to dataset> <k> <method>
// where method is "m1" (CPU), "m2" (basic GPU, one thread per instance), or
// anything else (advanced GPU, one thread per instance pair). Parses the
// ARFF dataset, runs the selected classifier, and reports wall-clock time
// and accuracy.
int main(int argc, char *argv[])
{
if(argc != 4)
{
cout << "Usage: ./main <path to dataset> <k> <method>" << endl;
exit(0);
}
// Open the dataset
ArffParser parser(argv[1]);
ArffData *dataset = parser.parse();
// Time the classification (monotonic clock, ns resolution).
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int * predictions;
// Dispatch on the method argument.
if(strcmp(argv[3], "m1") == 0)
predictions = KNN(dataset, atoi(argv[2]));
else if(strcmp(argv[3], "m2") == 0)
predictions = basicCudaKNN(dataset, atoi(argv[2]));
else
predictions = advanceCudaKNN(dataset, atoi(argv[2]));
// Compute the confusion matrix
int* confusionMatrix = computeConfusionMatrix(predictions, dataset);
// Calculate the accuracy
float accuracy = computeAccuracy(confusionMatrix, dataset);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
// Elapsed nanoseconds converted to milliseconds.
uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6;
printf("The KNN classifier for %lu instances required %llu ms CPU time, accuracy was %.4f\n", dataset->num_instances(), (long long unsigned int) diff, accuracy);
//Free the memory
free(predictions);
free(confusionMatrix);
}
| 0b553bc7785c0e5029599173e0816e68a3fddf12.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <math.h>
#include <iostream>
#include <string.h>
#include <list>
#include <map>
#include "../libarff/arff_parser.h"
#include "../libarff/arff_data.h"
using namespace std;
struct Neighbor {
float distance;
int cls;
};
struct Instance {
float * attribs;
int cls;
};
int majorityVote(list<Neighbor> & neighbors) {
std::map<int, int> frequencyMap;
int maxFrequency = 0;
int mostFrequentClass = -1;
//cout << "Neighbors are " << endl;
for (Neighbor neighbor : neighbors)
{
//cout << neighbor.distance << " " << neighbor.cls << endl;
int f = ++frequencyMap[neighbor.cls];
if (f > maxFrequency)
{
maxFrequency = f;
mostFrequentClass = neighbor.cls;
}
}
return mostFrequentClass;
}
void predictFromDistances (float * distances, Instance * instances, int numInstances,
int k, int * predictions) {
//Initialize an empty neighbor
Neighbor neighbor;
neighbor.distance = FLT_MAX;
neighbor.cls = -1;
//List of k neighbors
std::list<Neighbor> neighbors (k, neighbor);
for(int i=0; i < numInstances * numInstances; i++) {
if(i%numInstances == i/numInstances)
distances[i] = FLT_MAX; //subject and target instances are the same.
for (std::list<Neighbor>::iterator it = neighbors.begin(); it != neighbors.end(); it++) {
if(distances[i] < (*it).distance) {
Neighbor neighbor;
neighbor.distance = distances[i];
neighbor.cls = instances[i%numInstances].cls;
neighbors.insert(it, neighbor);
neighbors.pop_back(); //Remove the last neighbor
break;
}
}
if((i+1)%numInstances == 0) {
predictions[i/numInstances] = majorityVote(neighbors);
//Reset the neighbors as we are starting the next instance
neighbors.clear();
for (int x=0; x <k; x++)
{
Neighbor neighbor;
neighbor.distance = FLT_MAX;
neighbor.cls = -1;
neighbors.push_back(neighbor);
}
}
}
}
__device__ int majorityVote(int k, Neighbor * neighbors) {
struct FrequencyMap {
int cls;
int freq;
};
FrequencyMap * freqMap = (FrequencyMap *)malloc(sizeof(FrequencyMap)*k);
int maxFrequency = 0;
int mostFrequentClass = neighbors[0].cls; //default, useful when k is 1
int numClasses = 0;
for(int i=0; i <k; i++) {
bool found = false;
for(int j=0; j < numClasses; j++) {
if(freqMap[j].cls == neighbors[i].cls) {
found = true;
freqMap[j].freq = freqMap[j].freq + 1;
if(freqMap[j].freq > maxFrequency) {
maxFrequency = freqMap[j].freq;
mostFrequentClass = freqMap[j].cls;
}
break;
}
}
if(!found) {
//Encountered this class first time. Add it to the map.
freqMap[numClasses].cls = neighbors[i].cls;
freqMap[numClasses].freq = 1;
numClasses++;
}
}
free(freqMap);
return mostFrequentClass;
}
/*
* Advance CUDA kernel function.
* Each thread simply calculates the distance of one specific instance to another specific one.
* Therefore, this model requires as many threads as the number of elements in the dataset
*/
__global__ void advanceCuda(Instance * instances, int numInstances, int numAttribs, float * distances)
{
//First, compute the thread id and call it i.
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= numInstances * numInstances) {
return;
}
Instance current_instance = instances[i/numInstances];
Instance target_instance = instances[i%numInstances];
float distance = 0;
for(int h = 0; h < numAttribs; h++) // compute the distance between the two instances
{
float diff = current_instance.attribs[h] - target_instance.attribs[h];
distance += diff * diff;
}
distance = sqrt(distance);
distances[i] = distance;
}
/*
* Basic CUDA kernel function.
* Each threads runs KNN for exactly one instance in the dataset.
* Therefore, this model requires as many threads as the number of elements in the dataset
*/
__global__ void basicCuda(Instance * instances, int numInstances, int numAttribs,
int k, int * prediction)
{
//First, compute the thread id and call it i.
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= numInstances) {
return;
}
Instance current_instance = instances[i];
//Array of k neighbors. Initialize them.
Neighbor * neighbors = (Neighbor *)malloc(sizeof(Neighbor)*k);
for(int p=0; p <k; p++) {
neighbors[p].distance = FLT_MAX;
neighbors[p].cls = -1;
}
for(int j = 0; j < numInstances; j++) // target each other instance
{
if(i == j) continue;
float distance = 0;
for(int h = 0; h < numAttribs; h++) // compute the distance between the two instances
{
float diff = current_instance.attribs[h] - instances[j].attribs[h];
distance += diff * diff;
}
distance = sqrt(distance);
for(int p=0; p <k; p++) {
if(distance < neighbors[p].distance) {
Neighbor neighbor;
neighbor.distance = distance;
neighbor.cls = instances[j].cls;
Neighbor * newNeighbors = (Neighbor *)malloc(sizeof(Neighbor)*k);
for(int q=0, r=0; q <k; q++) {
if(p == q) {
newNeighbors[q] = neighbor;
continue;
}
newNeighbors[q] = neighbors[r++];
}
free(neighbors);
neighbors = newNeighbors;
break;
}
}
}
prediction[i] = majorityVote(k, neighbors);
//Free the memory
free(neighbors);
}
int* advanceCudaKNN(ArffData* dataset, int k)
{
// predictions is the array where you have to return the class predicted (integer) for the dataset instances
int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int));
// The following two lines show the syntax to retrieve the attribute values and the class value for a given instance in the dataset
// float attributeValue = dataset->get_instance(instanceIndex)->get(attributeIndex)->operator float();
// int classValue = dataset->get_instance(instanceIndex)->get(dataset->num_attributes() - 1)->operator int32();
// Implement the KNN here, fill the predictions array
cout << "K is " << k << endl;
int numElements = dataset->num_instances();
int numAttribs = dataset->num_attributes() - 1; //-1 because the last attrib is class
Instance * h_instances = (Instance *)malloc(numElements * sizeof(Instance));
// Launch the CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid = (numElements * numElements / threadsPerBlock) + 1;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
//Convert the arf dataset to an array of Instance structure on host
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
float * attribs = (float *) malloc (sizeof(float)*numAttribs);
for(int h = 0; h < numAttribs; h++) // compute the distance between the two instances
{
attribs[h] = dataset->get_instance(i)->get(h)->operator float();
}
h_instances[i].attribs = attribs;
h_instances[i].cls = dataset->get_instance(i)->get(numAttribs)->operator int32();
}
//Make another copy of the instances array from host to device
Instance * d_instances;
cudaMalloc(&d_instances, numElements*sizeof(Instance));
cudaMemcpy(d_instances, h_instances, numElements*sizeof(Instance), cudaMemcpyHostToDevice);
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
float * d_attribs;
cudaMalloc(&d_attribs, numAttribs*sizeof(float));
// Copy up attributes for each instance separately
cudaMemcpy(d_attribs, h_instances[i].attribs, numAttribs*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&(d_instances[i].attribs), &d_attribs, sizeof(float*), cudaMemcpyHostToDevice);
}
//Create an array of numElements X numElements elements on device. Kernel function will
//populate the distances here.
float * d_distances;
cudaMalloc(&d_distances, numElements*numElements*sizeof(float));
//Call kernel
advanceCuda<<<blocksPerGrid, threadsPerBlock>>>(d_instances, numElements, numAttribs, d_distances);
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess) {
fprintf(stderr, "cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
//Create an array of numElements X numElements elements on host.
//We will copy the distance array from the device to this one
float * h_distances = (float *) malloc(numElements*numElements*sizeof(float));
// Copy the device distance vector in device memory to the host distance vector
cudaMemcpy(h_distances, d_distances, numElements * numElements * sizeof(float), cudaMemcpyDeviceToHost);
predictFromDistances(h_distances, h_instances, numElements, k, predictions);
// Free host memory
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
free(h_instances[i].attribs);
}
free(h_instances);
free(h_distances);
// Free device global memory
cudaFree(d_distances);
Instance * h_d_instances = (Instance *)malloc(numElements * sizeof(Instance));
cudaMemcpy(h_d_instances, d_instances, numElements*sizeof(Instance), cudaMemcpyDeviceToHost);
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
cudaFree(h_d_instances[i].attribs);
}
cudaFree(d_instances);
free(h_d_instances);
return predictions;
}
int* basicCudaKNN(ArffData* dataset, int k)
{
// predictions is the array where you have to return the class predicted (integer) for the dataset instances
int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int));
// The following two lines show the syntax to retrieve the attribute values and the class value for a given instance in the dataset
// float attributeValue = dataset->get_instance(instanceIndex)->get(attributeIndex)->operator float();
// int classValue = dataset->get_instance(instanceIndex)->get(dataset->num_attributes() - 1)->operator int32();
// Implement the KNN here, fill the predictions array
cout << "K is " << k << endl;
int numElements = dataset->num_instances();
int numAttribs = dataset->num_attributes() - 1; //-1 because the last attrib is class
Instance * h_instances = (Instance *)malloc(numElements * sizeof(Instance));
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid = (numElements / threadsPerBlock) + 1;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
float * attribs = (float *) malloc (sizeof(float)*numAttribs);
for(int h = 0; h < numAttribs; h++) // compute the distance between the two instances
{
attribs[h] = dataset->get_instance(i)->get(h)->operator float();
}
h_instances[i].attribs = attribs;
h_instances[i].cls = dataset->get_instance(i)->get(numAttribs)->operator int32();
}
Instance * d_instances;
cudaMalloc(&d_instances, numElements*sizeof(Instance));
cudaMemcpy(d_instances, h_instances, numElements*sizeof(Instance), cudaMemcpyHostToDevice);
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
float * d_attribs;
cudaMalloc(&d_attribs, numAttribs*sizeof(float));
// Copy up attributes for each instance separately
cudaMemcpy(d_attribs, h_instances[i].attribs, numAttribs*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(&(d_instances[i].attribs), &d_attribs, sizeof(float*), cudaMemcpyHostToDevice);
}
int * d_predictions;
cudaMalloc(&d_predictions, numElements*sizeof(int));
basicCuda<<<blocksPerGrid, threadsPerBlock>>>(d_instances, numElements,
numAttribs, k, d_predictions);
// Copy the device prediction vector in device memory to the host prediction vector
cudaMemcpy(predictions, d_predictions, numElements * sizeof(int), cudaMemcpyDeviceToHost);
cudaError_t cudaError = cudaGetLastError();
if(cudaError != cudaSuccess) {
fprintf(stderr, "cudaGetLastError() returned %d: %s\n", cudaError, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}
// Free host memory
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
free(h_instances[i].attribs);
}
free(h_instances);
// Free device global memory
cudaFree(d_predictions);
Instance * h_d_instances = (Instance *)malloc(numElements * sizeof(Instance));
cudaMemcpy(h_d_instances, d_instances, numElements*sizeof(Instance), cudaMemcpyDeviceToHost);
for(int i = 0; i < numElements; i++) // for each instance in the dataset
{
cudaFree(h_d_instances[i].attribs);
}
cudaFree(d_instances);
free(h_d_instances);
return predictions;
}
int* KNN(ArffData* dataset, int k)
{
// predictions is the array where you have to return the class predicted (integer) for the dataset instances
int* predictions = (int*)malloc(dataset->num_instances() * sizeof(int));
// The following two lines show the syntax to retrieve the attribute values and the class value for a given instance in the dataset
// float attributeValue = dataset->get_instance(instanceIndex)->get(attributeIndex)->operator float();
// int classValue = dataset->get_instance(instanceIndex)->get(dataset->num_attributes() - 1)->operator int32();
// Implement the KNN here, fill the predictions array
cout << "K is " << k << endl;
for(int i = 0; i < dataset->num_instances(); i++) // for each instance in the dataset
{
//Initialize an empty neighbor
Neighbor neighbor;
neighbor.distance = FLT_MAX;
neighbor.cls = -1;
//List of k neighbors
std::list<Neighbor> neighbors (k, neighbor);
for(int j = 0; j < dataset->num_instances(); j++) // target each other instance
{
if(i == j) continue;
float distance = 0;
for(int h = 0; h < dataset->num_attributes() - 1; h++) // compute the distance between the two instances
{
float diff = dataset->get_instance(i)->get(h)->operator float() - dataset->get_instance(j)->get(h)->operator float();
distance += diff * diff;
}
distance = sqrt(distance);
for (std::list<Neighbor>::iterator it = neighbors.begin(); it != neighbors.end(); it++) {
if(distance < (*it).distance) {
Neighbor neighbor;
neighbor.distance = distance;
neighbor.cls = dataset->get_instance(j)->get(dataset->num_attributes() - 1)->operator int32();
neighbors.insert(it, neighbor);
neighbors.pop_back(); //Remove the last neighbor
break;
}
}
}
predictions[i] = majorityVote(neighbors);
}
return predictions;
}
int* computeConfusionMatrix(int* predictions, ArffData* dataset)
{
int* confusionMatrix = (int*)calloc(dataset->num_classes() * dataset->num_classes(), sizeof(int)); // matrix size numberClasses x numberClasses
for(int i = 0; i < dataset->num_instances(); i++) // for each instance compare the true class and predicted class
{
int trueClass = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
int predictedClass = predictions[i];
confusionMatrix[trueClass*dataset->num_classes() + predictedClass]++;
}
return confusionMatrix;
}
float computeAccuracy(int* confusionMatrix, ArffData* dataset)
{
int successfulPredictions = 0;
for(int i = 0; i < dataset->num_classes(); i++)
{
successfulPredictions += confusionMatrix[i*dataset->num_classes() + i]; // elements in the diagonal are correct predictions
}
return successfulPredictions / (float) dataset->num_instances();
}
int main(int argc, char *argv[])
{
if(argc != 4)
{
cout << "Usage: ./main <path to dataset> <k> <method>" << endl;
exit(0);
}
// Open the dataset
ArffParser parser(argv[1]);
ArffData *dataset = parser.parse();
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int * predictions;
// Get the class predictions
if(strcmp(argv[3], "m1") == 0)
predictions = KNN(dataset, atoi(argv[2]));
else if(strcmp(argv[3], "m2") == 0)
predictions = basicCudaKNN(dataset, atoi(argv[2]));
else
predictions = advanceCudaKNN(dataset, atoi(argv[2]));
// Compute the confusion matrix
int* confusionMatrix = computeConfusionMatrix(predictions, dataset);
// Calculate the accuracy
float accuracy = computeAccuracy(confusionMatrix, dataset);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6;
printf("The KNN classifier for %lu instances required %llu ms CPU time, accuracy was %.4f\n", dataset->num_instances(), (long long unsigned int) diff, accuracy);
//Free the memory
free(predictions);
free(confusionMatrix);
}
|
7adac86f6f5f68575b59bb51f54d409d74dcd6c9.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_random_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include "main.h"
#include <Eigen/CXX11/Tensor>
void test_cuda_random_uniform()
{
Tensor<float, 2> out(72,97);
out.setZero();
std::size_t out_bytes = out.size() * sizeof(float);
float* d_out;
hipMalloc((void**)(&d_out), out_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
gpu_out.device(gpu_device) = gpu_out.random();
assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
// For now we just check thes code doesn't crash.
// TODO: come up with a valid test of randomness
}
void test_cuda_random_normal()
{
Tensor<float, 2> out(72,97);
out.setZero();
std::size_t out_bytes = out.size() * sizeof(float);
float* d_out;
hipMalloc((void**)(&d_out), out_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
Eigen::internal::NormalRandomGenerator<float> gen(true);
gpu_out.device(gpu_device) = gpu_out.random(gen);
assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
}
static void test_complex()
{
Tensor<std::complex<float>, 1> vec(6);
vec.setRandom();
// Fixme: we should check that the generated numbers follow a uniform
// distribution instead.
for (int i = 1; i < 6; ++i) {
VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
}
}
void test_cxx11_tensor_random_cuda()
{
CALL_SUBTEST(test_cuda_random_uniform());
CALL_SUBTEST(test_cuda_random_normal());
CALL_SUBTEST(test_complex());
}
| 7adac86f6f5f68575b59bb51f54d409d74dcd6c9.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_TEST_FUNC cxx11_tensor_random_cuda
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
#define EIGEN_USE_GPU
#include "main.h"
#include <Eigen/CXX11/Tensor>
void test_cuda_random_uniform()
{
Tensor<float, 2> out(72,97);
out.setZero();
std::size_t out_bytes = out.size() * sizeof(float);
float* d_out;
cudaMalloc((void**)(&d_out), out_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
gpu_out.device(gpu_device) = gpu_out.random();
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
// For now we just check thes code doesn't crash.
// TODO: come up with a valid test of randomness
}
void test_cuda_random_normal()
{
Tensor<float, 2> out(72,97);
out.setZero();
std::size_t out_bytes = out.size() * sizeof(float);
float* d_out;
cudaMalloc((void**)(&d_out), out_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97);
Eigen::internal::NormalRandomGenerator<float> gen(true);
gpu_out.device(gpu_device) = gpu_out.random(gen);
assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
}
static void test_complex()
{
Tensor<std::complex<float>, 1> vec(6);
vec.setRandom();
// Fixme: we should check that the generated numbers follow a uniform
// distribution instead.
for (int i = 1; i < 6; ++i) {
VERIFY_IS_NOT_EQUAL(vec(i), vec(i-1));
}
}
void test_cxx11_tensor_random_cuda()
{
CALL_SUBTEST(test_cuda_random_uniform());
CALL_SUBTEST(test_cuda_random_normal());
CALL_SUBTEST(test_complex());
}
|
690269afe8d1347b634100c0b7374125edc01ef7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
__global__ void notDivergent(int n)
//The threads should perform the same work as
//in divergent(), but the threads within a warp
//should not diverge
{
}
__global__ void divergent(int n)
//The threads should perform the same work as
//in notDivergent(), but the threads within
//a warp should be forced to diverge
{
}
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
const int N = 10000, threads = 10000;
hipEvent_t start, stop;
float time;
int nBlocks, nThreads;
nThreads = 512;
nBlocks = (threads + nThreads - 1)/nThreads;
//Set up the timing variables and begin timing
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//The Divergent Kernal
hipLaunchKernelGGL(( divergent), dim3(nBlocks), dim3(nThreads), 0, 0, N);
//Stop timing
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//Compute the Elapsed Time
hipEventElapsedTime(&time, start, stop);
printf("divergent kernel: %f milliseconds\n", time);
//begin new timing
hipEventRecord(start, 0);
//The non-Divergent Kernel
hipLaunchKernelGGL(( notDivergent), dim3(nBlocks), dim3(nThreads), 0, 0, N);
//Stop timing
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//Compute the Kernel Time
hipEventElapsedTime(&time, start, stop);
printf("non-divergent kernel: %f milliseconds\n", time);
return 0;
}
| 690269afe8d1347b634100c0b7374125edc01ef7.cu | // includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
__global__ void notDivergent(int n)
//The threads should perform the same work as
//in divergent(), but the threads within a warp
//should not diverge
{
}
__global__ void divergent(int n)
//The threads should perform the same work as
//in notDivergent(), but the threads within
//a warp should be forced to diverge
{
}
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
const int N = 10000, threads = 10000;
cudaEvent_t start, stop;
float time;
int nBlocks, nThreads;
nThreads = 512;
nBlocks = (threads + nThreads - 1)/nThreads;
//Set up the timing variables and begin timing
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//The Divergent Kernal
divergent<<<nBlocks, nThreads>>>(N);
//Stop timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//Compute the Elapsed Time
cudaEventElapsedTime(&time, start, stop);
printf("divergent kernel: %f milliseconds\n", time);
//begin new timing
cudaEventRecord(start, 0);
//The non-Divergent Kernel
notDivergent<<<nBlocks, nThreads>>>(N);
//Stop timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//Compute the Kernel Time
cudaEventElapsedTime(&time, start, stop);
printf("non-divergent kernel: %f milliseconds\n", time);
return 0;
}
|
fcc3a766aa78a4e3f856135bc52e56c7e19f42d3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "AddIntegers.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr1 = NULL;
hipMalloc(&arr1, XSIZE*YSIZE);
int *arr2 = NULL;
hipMalloc(&arr2, XSIZE*YSIZE);
int num_elements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
AddIntegers), dim3(gridBlock),dim3(threadBlock), 0, 0, arr1,arr2,num_elements);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
AddIntegers), dim3(gridBlock),dim3(threadBlock), 0, 0, arr1,arr2,num_elements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
AddIntegers), dim3(gridBlock),dim3(threadBlock), 0, 0, arr1,arr2,num_elements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fcc3a766aa78a4e3f856135bc52e56c7e19f42d3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "AddIntegers.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *arr1 = NULL;
cudaMalloc(&arr1, XSIZE*YSIZE);
int *arr2 = NULL;
cudaMalloc(&arr2, XSIZE*YSIZE);
int num_elements = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
AddIntegers<<<gridBlock,threadBlock>>>(arr1,arr2,num_elements);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
AddIntegers<<<gridBlock,threadBlock>>>(arr1,arr2,num_elements);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
AddIntegers<<<gridBlock,threadBlock>>>(arr1,arr2,num_elements);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
2bec6237a1fdd8779b57bdbaa79447779a99d16e.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author : Kapil Agarwal
Date : 19 June 2012
Compile : make all_mpi_bandwidth
Help : mpirun -n <no of processes> -host <host ip> ./all_mpi_bandwidth -help
*/
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<mpi.h>
#include<string.h>
#include<sys/time.h>
#include<ctype.h>
void call_finalize()
{
MPI_Finalize();
exit(-1);
}
void safe_call(hipError_t ret, int myrank, int line)
{
if(ret!=hipSuccess)
{
if(myrank == 0)
printf("Error at line %d : %s\n",line,hipGetErrorString(ret));
call_finalize();
}
}
void fill_data(char *arr, int len)
{
int i;
for(i=0;i<len;i++)
{
srand(time(NULL));
arr[i] = (char)(rand()%26 + 97);
}
}
int get_cmd_arg(int argc, char **arg, char *cmp, char *ret)
{
int i,j;
char *pch;
i=0;
for(j=0;j<argc;j++)
{
while(arg[j][i]=='-') { i++; }
if(i!=0)
{
if(pch=strstr(arg[j],cmp))
{
if(strcmp(cmp,"help") == 0)
return 1;
else if(pch=strpbrk(arg[j],"="))
{
strcpy(ret,pch+1);
return 1;
}
}
}
}
return 0;
}
void printSyntax()
{
printf("Syntax : \n\
mpirun -n <no of processes> -host <host ip> ./new_mpi_bandwidth -options\n\
\n\
-help\n\
-mode=MODE pinned,pageable\n\
-start=START\n\
-end=END\n\
-step=STEP\n");
}
int isint(char *str)
{
int i,len;
len = strlen(str);
for(i=0;i<len;i++)
if(!isdigit(str[i]))
return 0;
return 1;
}
int main(int argc, char *argv[])
{
int comm_size, myrank, i, no_of_args, valid_args;
int START, END, STEP, SIZE;
char MODE[10], temp_arg[80];
char myname[MPI_MAX_PROCESSOR_NAME];
int namelen, devcount, device;
char devname[256];
hipDeviceProp_t devprop;
char *h_A, *h_B;
char *d_A, *d_B;
hipEvent_t start, stop;
double time, h2d, d2d, d2h;
float diff;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&comm_size);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// Default
strcpy(MODE,"pageable");
START = 1024;
END = 10240;
STEP = 1024;
no_of_args = argc;
if(get_cmd_arg(argc,argv,"help",temp_arg) == 1)
{
no_of_args--;
if(myrank==0)
printSyntax();
call_finalize();
}
if(get_cmd_arg(argc,argv,"mode",temp_arg) == 1)
{
no_of_args--;
strcpy(MODE,temp_arg);
}
if(no_of_args==4)
{
valid_args = 1;
if(get_cmd_arg(argc,argv,"start",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
START = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"end",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
END = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"step",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
STEP = atoi(temp_arg);
else
valid_args=0;
}
if(((1.0*END-START)/STEP < 1.0))
valid_args=0;
if(valid_args == 0)
{
if(myrank==0)
printf("Enter valid values for start, end and step.\n");
call_finalize();
}
}
else if(no_of_args != 1)
{
if(myrank==0)
printSyntax();
call_finalize();
}
if(myrank == 0)
printf("MODE=%s START=%d END=%d STEP=%d\n",MODE,START,END,STEP);
MPI_Get_processor_name(myname, &namelen);
myname[namelen++] = (char)0;
safe_call(hipGetDeviceCount(&devcount),myrank,__LINE__);
if(devcount > 0)
{
if(strcmp(MODE,"pageable") == 0)
{
for(i = myrank; i < devcount; i+=comm_size)
{
safe_call(hipSetDevice(i),myrank,__LINE__);
safe_call(hipGetDevice(&device),myrank,__LINE__);
if(device == i)
{
safe_call(hipGetDeviceProperties(&devprop,device),myrank,__LINE__);
strcpy(devname,devprop.name);
for(SIZE=START ; SIZE<=END; SIZE+=STEP)
{
h_A = (char *) malloc(SIZE*sizeof(char));
h_B = (char *) malloc(SIZE*sizeof(char));
if(h_A==NULL || h_B==NULL)
{
if(myrank == 0)
printf("Error : host memory allocation, Line : %d\n",myrank,__LINE__);
call_finalize();
}
safe_call(hipMalloc((void **)&d_A, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(hipMalloc((void **)&d_B, SIZE*sizeof(char)),myrank,__LINE__);
fill_data(h_A,SIZE);
safe_call(hipEventCreate(&start),myrank,__LINE__);
safe_call(hipEventCreate(&stop),myrank,__LINE__);
/************************************** Host to Device Starts ***********************************/
safe_call(hipEventRecord(start, 0),myrank,__LINE__);
safe_call(hipMemcpy((void *)d_A, (void *)h_A, SIZE*sizeof(char), hipMemcpyHostToDevice),myrank,__LINE__);
safe_call(hipEventRecord(stop, 0),myrank,__LINE__);
safe_call(hipEventSynchronize(stop),myrank,__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
h2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Host to Device Ends **************************************/
/************************************** Device to Device Starts **********************************/
safe_call(hipEventRecord(start, 0),myrank,__LINE__);
safe_call(hipMemcpy((void *)d_B, (void *)d_A, SIZE*sizeof(char), hipMemcpyDeviceToDevice),myrank,__LINE__);
safe_call(hipEventRecord(stop, 0),myrank,__LINE__);
safe_call(hipEventSynchronize(stop),myrank,__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Device Ends ************************************/
/************************************** Device to Host Starts ************************************/
safe_call(hipEventRecord(start, 0),myrank,__LINE__);
safe_call(hipMemcpy((void *)h_B, (void *)d_B, SIZE*sizeof(char), hipMemcpyDeviceToHost),myrank,__LINE__);
safe_call(hipEventRecord(stop, 0),myrank,__LINE__);
safe_call(hipEventSynchronize(stop),myrank,__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2h = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Host Ends **************************************/
printf("\n\
Process %d : %s\n\
Device %d : %s\n\
Mode : %s\n\
Size of Data : %dB\n\
Host to Device : %fMB/s\n\
Device to Device : %fMB/s\n\
Device to Host : %fMB/s\n", \
myrank,myname,device,devname,MODE,SIZE,h2d,d2d,d2h);
safe_call(hipFree(d_A),myrank,__LINE__);
safe_call(hipFree(d_B),myrank,__LINE__);
free(h_A);
free(h_B);
safe_call(hipEventDestroy(start),myrank,__LINE__);
safe_call(hipEventDestroy(stop),myrank,__LINE__);
}
}
}
}
else if(strcmp(MODE,"pinned") == 0)
{
for(i = myrank; i < devcount; i+=comm_size)
{
safe_call(hipSetDevice(i),myrank,__LINE__);
safe_call(hipGetDevice(&device),myrank,__LINE__);
if(device == i)
{
safe_call(hipGetDeviceProperties(&devprop,device),myrank,__LINE__);
strcpy(devname,devprop.name);
for(SIZE=START ; SIZE<=END; SIZE+=STEP)
{
safe_call(hipHostMalloc((void **)&h_A, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(hipHostMalloc((void **)&h_B, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(hipMalloc((void **)&d_A, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(hipMalloc((void **)&d_B, SIZE*sizeof(char)),myrank,__LINE__);
fill_data(h_A,SIZE);
safe_call(hipEventCreate(&start),myrank,__LINE__);
safe_call(hipEventCreate(&stop),myrank,__LINE__);
/************************************** Host to Device Starts ***********************************/
safe_call(hipEventRecord(start, 0),myrank,__LINE__);
safe_call(hipMemcpyAsync((void *)d_A, (void *)h_A, SIZE*sizeof(char), hipMemcpyHostToDevice),myrank,__LINE__);
safe_call(hipEventRecord(stop, 0),myrank,__LINE__);
safe_call(hipEventSynchronize(stop),myrank,__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
h2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Host to Device Ends **************************************/
/************************************** Device to Device Starts **********************************/
safe_call(hipEventRecord(start, 0),myrank,__LINE__);
safe_call(hipMemcpyAsync((void *)d_B, (void *)d_A, SIZE*sizeof(char), hipMemcpyDeviceToDevice),myrank,__LINE__);
safe_call(hipEventRecord(stop, 0),myrank,__LINE__);
safe_call(hipEventSynchronize(stop),myrank,__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Device Ends ************************************/
/************************************** Device to Host Starts ************************************/
safe_call(hipEventRecord(start, 0),myrank,__LINE__);
safe_call(hipMemcpyAsync((void *)h_B, (void *)d_B, SIZE*sizeof(char), hipMemcpyDeviceToHost),myrank,__LINE__);
safe_call(hipEventRecord(stop, 0),myrank,__LINE__);
safe_call(hipEventSynchronize(stop),myrank,__LINE__);
safe_call(hipEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2h = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Host Ends **************************************/
printf("\n\
Process %d : %s\n\
Device %d : %s\n\
Mode : %s\n\
Size of Data : %dB\n\
Host to Device : %fMB/s\n\
Device to Device : %fMB/s\n\
Device to Host : %fMB/s\n", \
myrank,myname,device,devname,MODE,SIZE,h2d,d2d,d2h);
safe_call(hipFree(d_A),myrank,__LINE__);
safe_call(hipFree(d_B),myrank,__LINE__);
safe_call(hipHostFree(h_A),myrank,__LINE__);
safe_call(hipHostFree(h_B),myrank,__LINE__);
safe_call(hipEventDestroy(start),myrank,__LINE__);
safe_call(hipEventDestroy(stop),myrank,__LINE__);
}
}
}
}
else
{
if(myrank==0)
printf("Memory mode choices : pinned/pageable\n");
}
}
else
{
if(myrank == 0)
printf("No devices found.\n");
}
MPI_Finalize();
return 0;
}
| 2bec6237a1fdd8779b57bdbaa79447779a99d16e.cu | /*
Author : Kapil Agarwal
Date : 19 June 2012
Compile : make all_mpi_bandwidth
Help : mpirun -n <no of processes> -host <host ip> ./all_mpi_bandwidth -help
*/
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<mpi.h>
#include<string.h>
#include<sys/time.h>
#include<ctype.h>
void call_finalize()
{
MPI_Finalize();
exit(-1);
}
void safe_call(cudaError_t ret, int myrank, int line)
{
if(ret!=cudaSuccess)
{
if(myrank == 0)
printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
call_finalize();
}
}
void fill_data(char *arr, int len)
{
int i;
for(i=0;i<len;i++)
{
srand(time(NULL));
arr[i] = (char)(rand()%26 + 97);
}
}
int get_cmd_arg(int argc, char **arg, char *cmp, char *ret)
{
int i,j;
char *pch;
i=0;
for(j=0;j<argc;j++)
{
while(arg[j][i]=='-') { i++; }
if(i!=0)
{
if(pch=strstr(arg[j],cmp))
{
if(strcmp(cmp,"help") == 0)
return 1;
else if(pch=strpbrk(arg[j],"="))
{
strcpy(ret,pch+1);
return 1;
}
}
}
}
return 0;
}
void printSyntax()
{
printf("Syntax : \n\
mpirun -n <no of processes> -host <host ip> ./new_mpi_bandwidth -options\n\
\n\
-help\n\
-mode=MODE pinned,pageable\n\
-start=START\n\
-end=END\n\
-step=STEP\n");
}
int isint(char *str)
{
int i,len;
len = strlen(str);
for(i=0;i<len;i++)
if(!isdigit(str[i]))
return 0;
return 1;
}
int main(int argc, char *argv[])
{
int comm_size, myrank, i, no_of_args, valid_args;
int START, END, STEP, SIZE;
char MODE[10], temp_arg[80];
char myname[MPI_MAX_PROCESSOR_NAME];
int namelen, devcount, device;
char devname[256];
cudaDeviceProp devprop;
char *h_A, *h_B;
char *d_A, *d_B;
cudaEvent_t start, stop;
double time, h2d, d2d, d2h;
float diff;
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&comm_size);
MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// Default
strcpy(MODE,"pageable");
START = 1024;
END = 10240;
STEP = 1024;
no_of_args = argc;
if(get_cmd_arg(argc,argv,"help",temp_arg) == 1)
{
no_of_args--;
if(myrank==0)
printSyntax();
call_finalize();
}
if(get_cmd_arg(argc,argv,"mode",temp_arg) == 1)
{
no_of_args--;
strcpy(MODE,temp_arg);
}
if(no_of_args==4)
{
valid_args = 1;
if(get_cmd_arg(argc,argv,"start",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
START = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"end",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
END = atoi(temp_arg);
else
valid_args=0;
}
if(get_cmd_arg(argc,argv,"step",temp_arg) == 1)
{
no_of_args--;
if(isint(temp_arg))
STEP = atoi(temp_arg);
else
valid_args=0;
}
if(((1.0*END-START)/STEP < 1.0))
valid_args=0;
if(valid_args == 0)
{
if(myrank==0)
printf("Enter valid values for start, end and step.\n");
call_finalize();
}
}
else if(no_of_args != 1)
{
if(myrank==0)
printSyntax();
call_finalize();
}
if(myrank == 0)
printf("MODE=%s START=%d END=%d STEP=%d\n",MODE,START,END,STEP);
MPI_Get_processor_name(myname, &namelen);
myname[namelen++] = (char)0;
safe_call(cudaGetDeviceCount(&devcount),myrank,__LINE__);
if(devcount > 0)
{
if(strcmp(MODE,"pageable") == 0)
{
for(i = myrank; i < devcount; i+=comm_size)
{
safe_call(cudaSetDevice(i),myrank,__LINE__);
safe_call(cudaGetDevice(&device),myrank,__LINE__);
if(device == i)
{
safe_call(cudaGetDeviceProperties(&devprop,device),myrank,__LINE__);
strcpy(devname,devprop.name);
for(SIZE=START ; SIZE<=END; SIZE+=STEP)
{
h_A = (char *) malloc(SIZE*sizeof(char));
h_B = (char *) malloc(SIZE*sizeof(char));
if(h_A==NULL || h_B==NULL)
{
if(myrank == 0)
printf("Error : host memory allocation, Line : %d\n",myrank,__LINE__);
call_finalize();
}
safe_call(cudaMalloc((void **)&d_A, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(cudaMalloc((void **)&d_B, SIZE*sizeof(char)),myrank,__LINE__);
fill_data(h_A,SIZE);
safe_call(cudaEventCreate(&start),myrank,__LINE__);
safe_call(cudaEventCreate(&stop),myrank,__LINE__);
/************************************** Host to Device Starts ***********************************/
safe_call(cudaEventRecord(start, 0),myrank,__LINE__);
safe_call(cudaMemcpy((void *)d_A, (void *)h_A, SIZE*sizeof(char), cudaMemcpyHostToDevice),myrank,__LINE__);
safe_call(cudaEventRecord(stop, 0),myrank,__LINE__);
safe_call(cudaEventSynchronize(stop),myrank,__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
h2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Host to Device Ends **************************************/
/************************************** Device to Device Starts **********************************/
safe_call(cudaEventRecord(start, 0),myrank,__LINE__);
safe_call(cudaMemcpy((void *)d_B, (void *)d_A, SIZE*sizeof(char), cudaMemcpyDeviceToDevice),myrank,__LINE__);
safe_call(cudaEventRecord(stop, 0),myrank,__LINE__);
safe_call(cudaEventSynchronize(stop),myrank,__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Device Ends ************************************/
/************************************** Device to Host Starts ************************************/
safe_call(cudaEventRecord(start, 0),myrank,__LINE__);
safe_call(cudaMemcpy((void *)h_B, (void *)d_B, SIZE*sizeof(char), cudaMemcpyDeviceToHost),myrank,__LINE__);
safe_call(cudaEventRecord(stop, 0),myrank,__LINE__);
safe_call(cudaEventSynchronize(stop),myrank,__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2h = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Host Ends **************************************/
printf("\n\
Process %d : %s\n\
Device %d : %s\n\
Mode : %s\n\
Size of Data : %dB\n\
Host to Device : %fMB/s\n\
Device to Device : %fMB/s\n\
Device to Host : %fMB/s\n", \
myrank,myname,device,devname,MODE,SIZE,h2d,d2d,d2h);
safe_call(cudaFree(d_A),myrank,__LINE__);
safe_call(cudaFree(d_B),myrank,__LINE__);
free(h_A);
free(h_B);
safe_call(cudaEventDestroy(start),myrank,__LINE__);
safe_call(cudaEventDestroy(stop),myrank,__LINE__);
}
}
}
}
else if(strcmp(MODE,"pinned") == 0)
{
for(i = myrank; i < devcount; i+=comm_size)
{
safe_call(cudaSetDevice(i),myrank,__LINE__);
safe_call(cudaGetDevice(&device),myrank,__LINE__);
if(device == i)
{
safe_call(cudaGetDeviceProperties(&devprop,device),myrank,__LINE__);
strcpy(devname,devprop.name);
for(SIZE=START ; SIZE<=END; SIZE+=STEP)
{
safe_call(cudaMallocHost((void **)&h_A, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(cudaMallocHost((void **)&h_B, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(cudaMalloc((void **)&d_A, SIZE*sizeof(char)),myrank,__LINE__);
safe_call(cudaMalloc((void **)&d_B, SIZE*sizeof(char)),myrank,__LINE__);
fill_data(h_A,SIZE);
safe_call(cudaEventCreate(&start),myrank,__LINE__);
safe_call(cudaEventCreate(&stop),myrank,__LINE__);
/************************************** Host to Device Starts ***********************************/
safe_call(cudaEventRecord(start, 0),myrank,__LINE__);
safe_call(cudaMemcpyAsync((void *)d_A, (void *)h_A, SIZE*sizeof(char), cudaMemcpyHostToDevice),myrank,__LINE__);
safe_call(cudaEventRecord(stop, 0),myrank,__LINE__);
safe_call(cudaEventSynchronize(stop),myrank,__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
h2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Host to Device Ends **************************************/
/************************************** Device to Device Starts **********************************/
safe_call(cudaEventRecord(start, 0),myrank,__LINE__);
safe_call(cudaMemcpyAsync((void *)d_B, (void *)d_A, SIZE*sizeof(char), cudaMemcpyDeviceToDevice),myrank,__LINE__);
safe_call(cudaEventRecord(stop, 0),myrank,__LINE__);
safe_call(cudaEventSynchronize(stop),myrank,__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2d = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Device Ends ************************************/
/************************************** Device to Host Starts ************************************/
safe_call(cudaEventRecord(start, 0),myrank,__LINE__);
safe_call(cudaMemcpyAsync((void *)h_B, (void *)d_B, SIZE*sizeof(char), cudaMemcpyDeviceToHost),myrank,__LINE__);
safe_call(cudaEventRecord(stop, 0),myrank,__LINE__);
safe_call(cudaEventSynchronize(stop),myrank,__LINE__);
safe_call(cudaEventElapsedTime(&diff,start,stop),myrank,__LINE__);
time = diff*1.0e-3;
d2h = ( SIZE * sizeof(char) * 2.0 ) / ( 1024 * 1024 * time ) ;
/************************************** Device to Host Ends **************************************/
printf("\n\
Process %d : %s\n\
Device %d : %s\n\
Mode : %s\n\
Size of Data : %dB\n\
Host to Device : %fMB/s\n\
Device to Device : %fMB/s\n\
Device to Host : %fMB/s\n", \
myrank,myname,device,devname,MODE,SIZE,h2d,d2d,d2h);
safe_call(cudaFree(d_A),myrank,__LINE__);
safe_call(cudaFree(d_B),myrank,__LINE__);
safe_call(cudaFreeHost(h_A),myrank,__LINE__);
safe_call(cudaFreeHost(h_B),myrank,__LINE__);
safe_call(cudaEventDestroy(start),myrank,__LINE__);
safe_call(cudaEventDestroy(stop),myrank,__LINE__);
}
}
}
}
else
{
if(myrank==0)
printf("Memory mode choices : pinned/pageable\n");
}
}
else
{
if(myrank == 0)
printf("No devices found.\n");
}
MPI_Finalize();
return 0;
}
|
f8fe539bfdbfdf010f101999a2ab64295fa65098.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <cub/device/device_memcpy.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_ptx.cuh>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/logical.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <numeric>
#include <random>
#include <type_traits>
#include <vector>
#include "test_util.h"
/**
 * @brief Host-side random data generation: fills @p rand_out with @p num_items uniformly
 * distributed integers in [min_rand_val, max_rand_val]. Deterministic for a given @p seed.
 *
 * Only enabled for integral types of at least 16 bits (std::uniform_int_distribution does
 * not support 8-bit types).
 */
template <typename T>
void GenerateRandomData(
  T *rand_out,
  const std::size_t num_items,
  const T min_rand_val = std::numeric_limits<T>::min(),
  const T max_rand_val = std::numeric_limits<T>::max(),
  const std::uint_fast32_t seed = 320981U,
  typename std::enable_if<std::is_integral<T>::value && (sizeof(T) >= 2)>::type * = nullptr)
{
  // Seeded engine + distribution => reproducible sequence of draws
  std::mt19937 engine(seed);
  std::uniform_int_distribution<T> dist(min_rand_val, max_rand_val);

  // One draw per output item, in order (matches a plain indexed loop)
  std::generate_n(rand_out, num_items, [&]() { return dist(engine); });
}
/**
 * @brief Naive baseline kernel for batched memcpy: thread i copies the i-th buffer in its
 * entirety, one byte at a time. Serves as a correctness/performance reference.
 *
 * Expects a 1D launch with at least @p num_buffers threads; surplus threads exit early.
 */
template <typename InputBufferIt,
typename OutputBufferIt,
typename BufferSizeIteratorT,
typename BufferOffsetT>
void __global__ BaselineBatchMemCpyKernel(InputBufferIt input_buffer_it,
OutputBufferIt output_buffer_it,
BufferSizeIteratorT buffer_sizes,
BufferOffsetT num_buffers)
{
// Global thread id doubles as the buffer index
BufferOffsetT gtid = blockDim.x * blockIdx.x + threadIdx.x;
if (gtid >= num_buffers)
{
return;
}
// Byte-wise copy of this thread's whole buffer
for (BufferOffsetT i = 0; i < buffer_sizes[gtid]; i++)
{
reinterpret_cast<uint8_t *>(output_buffer_it[gtid])[i] =
reinterpret_cast<uint8_t *>(input_buffer_it[gtid])[i];
}
}
/**
 * @brief Host-side launcher for BaselineBatchMemCpyKernel: one thread per buffer,
 * 128 threads per block, issued on the default stream.
 */
template <typename InputBufferIt, typename OutputBufferIt, typename BufferSizeIteratorT>
void InvokeBaselineBatchMemcpy(InputBufferIt input_buffer_it,
OutputBufferIt output_buffer_it,
BufferSizeIteratorT buffer_sizes,
uint32_t num_buffers)
{
constexpr uint32_t block_threads = 128U;
// Ceiling division so every buffer gets a thread
uint32_t num_blocks = (num_buffers + block_threads - 1) / block_threads;
hipLaunchKernelGGL(( BaselineBatchMemCpyKernel), dim3(num_blocks), dim3(block_threads), 0, 0, input_buffer_it,
output_buffer_it,
buffer_sizes,
num_buffers);
}
/**
 * @brief Baseline kernel assigning one thread block per buffer, copying in 8-byte
 * (uint64_t) words with a block-stride loop.
 *
 * NOTE(review): only (buffer_sizes[gbid] / 8) * 8 bytes are copied — tail bytes of a
 * buffer whose size is not a multiple of 8 are silently dropped, and both pointers are
 * assumed 8-byte aligned. This helper is not referenced by the tests in this file;
 * confirm any caller only passes aligned, size-multiple-of-8 buffers.
 */
template <typename InputBufferIt,
typename OutputBufferIt,
typename BufferSizeIteratorT,
typename BufferOffsetT>
void __global__ BaselineBatchMemCpyPerBlockKernel(InputBufferIt input_buffer_it,
OutputBufferIt output_buffer_it,
BufferSizeIteratorT buffer_sizes,
BufferOffsetT num_buffers)
{
// One block per buffer
BufferOffsetT gbid = blockIdx.x;
if (gbid >= num_buffers)
{
return;
}
// Block-stride loop over 8-byte words of this block's buffer
for (BufferOffsetT i = threadIdx.x; i < buffer_sizes[gbid] / 8; i += blockDim.x)
{
reinterpret_cast<uint64_t *>(output_buffer_it[gbid])[i] =
reinterpret_cast<uint64_t *>(input_buffer_it[gbid])[i];
}
}
/**
 * @brief Used for generating a shuffled but cohesive sequence of output-buffer offsets for the
 * sequence of input-buffers.
 *
 * The buffers are laid out back-to-back in a random permutation; the returned vector maps
 * buffer i to the byte offset it was assigned within that permuted layout.
 */
template <typename BufferOffsetT, typename ByteOffsetT, typename BufferSizeT>
std::vector<ByteOffsetT> GetShuffledBufferOffsets(const std::vector<BufferSizeT> &buffer_sizes,
                                                  const std::uint_fast32_t seed = 320981U)
{
  const BufferOffsetT num_buffers = static_cast<BufferOffsetT>(buffer_sizes.size());

  // Draw a random permutation: slot j of the output layout holds buffer pmt_idxs[j]
  std::mt19937 rng(seed);
  std::vector<BufferOffsetT> pmt_idxs(num_buffers);
  std::iota(pmt_idxs.begin(), pmt_idxs.end(), static_cast<BufferOffsetT>(0));
  std::shuffle(std::begin(pmt_idxs), std::end(pmt_idxs), rng);

  // Walk the permuted layout front-to-back, assigning each buffer its running byte offset
  std::vector<ByteOffsetT> new_offsets(num_buffers);
  ByteOffsetT running_offset = {};
  for (BufferOffsetT j = 0; j < num_buffers; j++)
  {
    new_offsets[pmt_idxs[j]] = running_offset;
    running_offset += buffer_sizes[pmt_idxs[j]];
  }
  return new_offsets;
}
/**
 * @brief Function object class template that takes an offset and returns an iterator at the given
 * offset relative to a fixed base iterator.
 *
 * @tparam IteratorT The random-access iterator type to be returned
 */
template <typename IteratorT>
struct OffsetToPtrOp
{
// Translates an offset into an iterator: base_it advanced by offset
template <typename T>
__host__ __device__ __forceinline__ IteratorT operator()(T offset) const
{
return base_it + offset;
}
// Base iterator that all offsets are relative to
IteratorT base_it;
};
/**
 * @brief Policy controlling how buffers are placed within the test data segment.
 */
enum class TestDataGen
{
// Random offsets into a data segment
RANDOM,
// Buffers cohesively reside next to each other
CONSECUTIVE
};
/**
 * @brief Runs a single DeviceMemcpy::Batched configuration and verifies the device results
 * against a CPU-side reference built with std::memcpy.
 *
 * @tparam AtomicT The most granular type being copied. All source and destination pointers will be
 * aligned based on this type, the number of bytes being copied will be an integer multiple of this
 * type's size
 * @tparam BufferOffsetT Type used for indexing into the array of buffers
 * @tparam BufferSizeT Type used for indexing into individual bytes of a buffer (large enough to
 * cover the max buffer size)
 * @tparam ByteOffsetT Type used for indexing into bytes over *all* the buffers' sizes
 */
template <typename AtomicT, typename BufferOffsetT, typename BufferSizeT, typename ByteOffsetT>
void RunTest(BufferOffsetT num_buffers,
             BufferSizeT min_buffer_size,
             BufferSizeT max_buffer_size,
             TestDataGen input_gen,
             TestDataGen output_gen)
{
  using SrcPtrT = uint8_t *;

  // Buffer segment data (their offsets and sizes)
  std::vector<BufferSizeT> h_buffer_sizes(num_buffers);
  std::vector<ByteOffsetT> h_buffer_src_offsets(num_buffers);
  std::vector<ByteOffsetT> h_buffer_dst_offsets(num_buffers);

  // Device-side resources
  void *d_in                        = nullptr;
  void *d_out                       = nullptr;
  ByteOffsetT *d_buffer_src_offsets = nullptr;
  ByteOffsetT *d_buffer_dst_offsets = nullptr;
  BufferSizeT *d_buffer_sizes       = nullptr;
  void *d_temp_storage              = nullptr;
  size_t temp_storage_bytes         = 0;

  // Generate the buffer sizes
  GenerateRandomData(h_buffer_sizes.data(), h_buffer_sizes.size(), min_buffer_size, max_buffer_size);

  // Make sure buffer sizes are a multiple of the most granular unit (one AtomicT) being copied
  // (round down)
  for (BufferOffsetT i = 0; i < num_buffers; i++)
  {
    h_buffer_sizes[i] = (h_buffer_sizes[i] / sizeof(AtomicT)) * sizeof(AtomicT);
  }

  // Compute the total bytes to be copied; for CONSECUTIVE layouts also assign the offsets
  ByteOffsetT num_total_bytes = 0;
  for (BufferOffsetT i = 0; i < num_buffers; i++)
  {
    if (input_gen == TestDataGen::CONSECUTIVE)
    {
      h_buffer_src_offsets[i] = num_total_bytes;
    }
    if (output_gen == TestDataGen::CONSECUTIVE)
    {
      h_buffer_dst_offsets[i] = num_total_bytes;
    }
    num_total_bytes += h_buffer_sizes[i];
  }

  // Shuffle input buffer source-offsets
  std::uint_fast32_t shuffle_seed = 320981U;
  if (input_gen == TestDataGen::RANDOM)
  {
    h_buffer_src_offsets = GetShuffledBufferOffsets<BufferOffsetT, ByteOffsetT>(h_buffer_sizes,
                                                                                shuffle_seed);
    shuffle_seed += 42;
  }

  // Shuffle output buffer destination-offsets (different seed than the source shuffle)
  if (output_gen == TestDataGen::RANDOM)
  {
    h_buffer_dst_offsets = GetShuffledBufferOffsets<BufferOffsetT, ByteOffsetT>(h_buffer_sizes,
                                                                                shuffle_seed);
  }

  // Get temporary storage requirements
  CubDebugExit(cub::DeviceMemcpy::Batched(d_temp_storage,
                                          temp_storage_bytes,
                                          static_cast<SrcPtrT *>(nullptr),
                                          static_cast<SrcPtrT *>(nullptr),
                                          d_buffer_sizes,
                                          num_buffers));

  // Check if there's sufficient device memory to run this test
  std::size_t total_required_mem = num_total_bytes +                                  //
                                   num_total_bytes +                                  //
                                   (num_buffers * sizeof(d_buffer_src_offsets[0])) +  //
                                   (num_buffers * sizeof(d_buffer_dst_offsets[0])) +  //
                                   (num_buffers * sizeof(d_buffer_sizes[0])) +        //
                                   temp_storage_bytes;                                //
  if (TotalGlobalMem() < total_required_mem)
  {
    std::cout
      << "Skipping the test due to insufficient device memory\n"                                  //
      << " - Required: " << total_required_mem << " B, available: " << TotalGlobalMem() << " B\n" //
      << " - Skipped test instance: "                                                             //
      << " -> Min. buffer size: " << min_buffer_size << ", max. buffer size: " << max_buffer_size //
      << ", num_buffers: " << num_buffers                                                         //
      << ", in_gen: " << ((input_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE")            //
      << ", out_gen: " << ((output_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE");
    return;
  }

  // Timing events and the stream all work is issued to (destroyed at the end of this
  // function; they were previously leaked on every invocation)
  hipEvent_t events[2];
  CubDebugExit(hipEventCreate(&events[0]));
  CubDebugExit(hipEventCreate(&events[1]));
  hipStream_t stream;
  CubDebugExit(hipStreamCreate(&stream));

  // Allocate device memory
  CubDebugExit(hipMalloc(&d_in, num_total_bytes));
  CubDebugExit(hipMalloc(&d_out, num_total_bytes));
  CubDebugExit(hipMalloc(&d_buffer_src_offsets, num_buffers * sizeof(d_buffer_src_offsets[0])));
  CubDebugExit(hipMalloc(&d_buffer_dst_offsets, num_buffers * sizeof(d_buffer_dst_offsets[0])));
  CubDebugExit(hipMalloc(&d_buffer_sizes, num_buffers * sizeof(d_buffer_sizes[0])));
  CubDebugExit(hipMalloc(&d_temp_storage, temp_storage_bytes));

  // Host-side buffers; h_in is over-allocated to a multiple of the 16-bit alias type used
  // by the random generator (uniform_int_distribution does not support 8-bit types)
  using RandomInitAliasT         = uint16_t;
  std::size_t num_aliased_factor = sizeof(RandomInitAliasT) / sizeof(uint8_t);
  std::size_t num_aliased_units  = CUB_QUOTIENT_CEILING(num_total_bytes, num_aliased_factor);
  std::unique_ptr<uint8_t[]> h_in(new uint8_t[num_aliased_units * num_aliased_factor]);
  std::unique_ptr<uint8_t[]> h_out(new uint8_t[num_total_bytes]);
  std::unique_ptr<uint8_t[]> h_gpu_results(new uint8_t[num_total_bytes]);

  // Fill the host-side source segment with random bits (16 bits at a time)
  GenerateRandomData(reinterpret_cast<RandomInitAliasT *>(h_in.get()), num_aliased_units);

  // Prepare d_buffer_srcs: maps each source offset to a device pointer into d_in
  OffsetToPtrOp<SrcPtrT> src_transform_op{static_cast<SrcPtrT>(d_in)};
  hipcub::TransformInputIterator<SrcPtrT, OffsetToPtrOp<SrcPtrT>, ByteOffsetT *> d_buffer_srcs(
    d_buffer_src_offsets,
    src_transform_op);

  // Prepare d_buffer_dsts: maps each destination offset to a device pointer into d_out
  OffsetToPtrOp<SrcPtrT> dst_transform_op{static_cast<SrcPtrT>(d_out)};
  hipcub::TransformInputIterator<SrcPtrT, OffsetToPtrOp<SrcPtrT>, ByteOffsetT *> d_buffer_dsts(
    d_buffer_dst_offsets,
    dst_transform_op);

  // Prepare random data segment (which serves for the buffer sources)
  CubDebugExit(hipMemcpyAsync(d_in, h_in.get(), num_total_bytes, hipMemcpyHostToDevice, stream));
  // Prepare d_buffer_src_offsets
  CubDebugExit(hipMemcpyAsync(d_buffer_src_offsets,
                              h_buffer_src_offsets.data(),
                              h_buffer_src_offsets.size() * sizeof(h_buffer_src_offsets[0]),
                              hipMemcpyHostToDevice,
                              stream));
  // Prepare d_buffer_dst_offsets
  CubDebugExit(hipMemcpyAsync(d_buffer_dst_offsets,
                              h_buffer_dst_offsets.data(),
                              h_buffer_dst_offsets.size() * sizeof(h_buffer_dst_offsets[0]),
                              hipMemcpyHostToDevice,
                              stream));
  // Prepare d_buffer_sizes
  CubDebugExit(hipMemcpyAsync(d_buffer_sizes,
                              h_buffer_sizes.data(),
                              h_buffer_sizes.size() * sizeof(h_buffer_sizes[0]),
                              hipMemcpyHostToDevice,
                              stream));

  // Record event before algorithm
  CubDebugExit(hipEventRecord(events[0], stream));

  // Invoke device-side algorithm being under test
  CubDebugExit(cub::DeviceMemcpy::Batched(d_temp_storage,
                                          temp_storage_bytes,
                                          d_buffer_srcs,
                                          d_buffer_dsts,
                                          d_buffer_sizes,
                                          num_buffers,
                                          stream));

  // Record event after algorithm
  CubDebugExit(hipEventRecord(events[1], stream));

  // Copy back the output buffer
  CubDebugExit(
    hipMemcpyAsync(h_gpu_results.get(), d_out, num_total_bytes, hipMemcpyDeviceToHost, stream));

  // Make sure results have been copied back to the host
  CubDebugExit(hipStreamSynchronize(stream));

  // CPU-side result generation for verification
  for (BufferOffsetT i = 0; i < num_buffers; i++)
  {
    std::memcpy(h_out.get() + h_buffer_dst_offsets[i],
                h_in.get() + h_buffer_src_offsets[i],
                h_buffer_sizes[i]);
  }

  float duration = 0;
  CubDebugExit(hipEventElapsedTime(&duration, events[0], events[1]));
#ifdef CUB_TEST_BENCHMARK
  size_t stats_src_offsets = sizeof(ByteOffsetT) * num_buffers;
  size_t stats_dst_offsets = sizeof(ByteOffsetT) * num_buffers;
  size_t stats_sizes       = sizeof(BufferSizeT) * num_buffers;
  size_t stats_data_copied = 2 * num_total_bytes;
  std::cout
    << "Min. buffer size: " << min_buffer_size << ", max. buffer size: " << max_buffer_size     //
    << ", num_buffers: " << num_buffers                                                         //
    << ", in_gen: " << ((input_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE")            //
    << ", out_gen: " << ((output_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE")          //
    << ", src size: " << stats_src_offsets << ", dst size: " << stats_dst_offsets               //
    << ", sizes size: " << stats_sizes << ", cpy_data_size: " << stats_data_copied              //
    << ", total: " << (stats_src_offsets + stats_dst_offsets + stats_sizes + stats_data_copied) //
    << ", duration: " << duration                                                               //
    << ", BW: "
    << ((double)(stats_src_offsets + stats_dst_offsets + stats_sizes + stats_data_copied) /
        1000000000.0) /
         (duration / 1000.0)
    << "GB/s \n";
#endif

  // Byte-wise comparison of GPU results against the CPU reference
  // (fixed: the diagnostic previously printed the GPU value under the "CPU" label)
  for (ByteOffsetT i = 0; i < num_total_bytes; i++)
  {
    if (h_gpu_results.get()[i] != h_out.get()[i])
    {
      std::cout << "Mismatch at index " << i
                << ", CPU vs. GPU: " << static_cast<uint16_t>(h_out.get()[i]) << ", "
                << static_cast<uint16_t>(h_gpu_results.get()[i]) << "\n";
    }
    AssertEquals(h_out.get()[i], h_gpu_results.get()[i]);
  }

  // Release device memory, timing events, and the stream
  CubDebugExit(hipFree(d_in));
  CubDebugExit(hipFree(d_out));
  CubDebugExit(hipFree(d_buffer_src_offsets));
  CubDebugExit(hipFree(d_buffer_dst_offsets));
  CubDebugExit(hipFree(d_buffer_sizes));
  CubDebugExit(hipFree(d_temp_storage));
  CubDebugExit(hipEventDestroy(events[0]));
  CubDebugExit(hipEventDestroy(events[1]));
  CubDebugExit(hipStreamDestroy(stream));
}
/**
 * @brief Test kernel wrapping cub::detail::VectorizedCopy: LOGICAL_WARP_SIZE threads
 * cooperatively copy @p copy_size bytes from @p d_in to @p d_out using VectorT-wide stores.
 */
template <int LOGICAL_WARP_SIZE, typename VectorT, typename ByteOffsetT>
__global__ void TestVectorizedCopyKernel(const void *d_in, void *d_out, ByteOffsetT copy_size)
{
cub::detail::VectorizedCopy<LOGICAL_WARP_SIZE, VectorT>(threadIdx.x, d_out, copy_size, d_in);
}
/**
 * @brief Predicate returning true iff the first two members of a tuple compare equal.
 * Used with a zip iterator to compare an input range against an output range element-wise.
 */
struct TupleMemberEqualityOp
{
template <typename T>
__host__ __device__ __forceinline__ bool operator()(T tuple)
{
return thrust::get<0>(tuple) == thrust::get<1>(tuple);
}
};
/**
 * @brief Tests the VectorizedCopy for various aligned and misaligned input and output pointers.
 * @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t)
 */
template <typename VectorT>
void TestVectorizedCopy()
{
constexpr uint32_t threads_per_block = 8;
// Offsets exercise aligned, off-by-one, and just-below-word/vector alignment cases
std::vector<std::size_t> in_offsets{0, 1, sizeof(uint32_t) - 1};
std::vector<std::size_t> out_offsets{0, 1, sizeof(VectorT) - 1};
// Copy sizes cover: empty, single byte, one word, one vector, and multiple iterations
std::vector<std::size_t> copy_sizes{0,
1,
sizeof(uint32_t),
sizeof(VectorT),
2 * threads_per_block * sizeof(VectorT)};
// Exercise the full cross product of (copy size, input offset, output offset)
for (auto copy_sizes_it = std::begin(copy_sizes); copy_sizes_it < std::end(copy_sizes);
copy_sizes_it++)
{
for (auto in_offsets_it = std::begin(in_offsets); in_offsets_it < std::end(in_offsets);
in_offsets_it++)
{
for (auto out_offsets_it = std::begin(out_offsets); out_offsets_it < std::end(out_offsets);
out_offsets_it++)
{
std::size_t in_offset = *in_offsets_it;
std::size_t out_offset = *out_offsets_it;
std::size_t copy_size = *copy_sizes_it;
// Prepare data: input is a known sequence, output pre-filled with a sentinel byte
const std::size_t alloc_size_in = in_offset + copy_size;
const std::size_t alloc_size_out = out_offset + copy_size;
thrust::device_vector<char> data_in(alloc_size_in);
thrust::device_vector<char> data_out(alloc_size_out);
thrust::sequence(data_in.begin(), data_in.end(), static_cast<char>(0));
thrust::fill_n(data_out.begin(), alloc_size_out, static_cast<char>(0x42));
auto d_in = thrust::raw_pointer_cast(data_in.data());
auto d_out = thrust::raw_pointer_cast(data_out.data());
// Single block performs the (possibly misaligned) copy
hipLaunchKernelGGL(( TestVectorizedCopyKernel<threads_per_block, VectorT>)
, dim3(1), dim3(threads_per_block), 0, 0, d_in + in_offset,
d_out + out_offset,
static_cast<int>(copy_size));
// Element-wise comparison of the copied region via a zip iterator
auto zip_it = thrust::make_zip_iterator(data_in.begin() + in_offset,
data_out.begin() + out_offset);
bool success = thrust::all_of(zip_it, zip_it + copy_size, TupleMemberEqualityOp{});
AssertTrue(success);
}
}
}
}
/**
 * @brief Single-thread test kernel: applies a sequence of (bin, increment) updates to a
 * BitPackedCounter and writes the final per-bin counts to @p counts_out.
 */
template <uint32_t NUM_ITEMS, uint32_t MAX_ITEM_VALUE, bool PREFER_POW2_BITS>
__global__ void TestBitPackedCounterKernel(uint32_t *bins,
uint32_t *increments,
uint32_t *counts_out,
uint32_t num_items)
{
using BitPackedCounterT =
cub::detail::BitPackedCounter<NUM_ITEMS, MAX_ITEM_VALUE, PREFER_POW2_BITS>;
BitPackedCounterT counter{};
// Apply all updates sequentially
for (uint32_t i = 0; i < num_items; i++)
{
counter.Add(bins[i], increments[i]);
}
// Unpack the per-bin totals
for (uint32_t i = 0; i < NUM_ITEMS; i++)
{
counts_out[i] = counter.Get(i);
}
}
/**
 * @brief Tests BitPackedCounter that's used for computing the histogram of buffer sizes (i.e.,
 * small, medium, large).
 *
 * Generates a random sequence of (bin, increment) updates, applies them on the device with
 * both counter bit layouts (densely packed and power-of-two packed), and verifies the
 * per-bin totals against host-side reference counters.
 */
template <uint32_t NUM_ITEMS, uint32_t MAX_ITEM_VALUE>
void TestBitPackedCounter(const std::uint_fast32_t seed = 320981U)
{
  constexpr uint32_t min_increment = 0;
  constexpr uint32_t max_increment = 4;
  constexpr double avg_increment   = static_cast<double>(min_increment) +
                                   (static_cast<double>(max_increment - min_increment) / 2.0);
  // Enough updates to (on average) saturate every bin
  std::uint32_t num_increments =
    static_cast<uint32_t>(static_cast<double>(MAX_ITEM_VALUE * NUM_ITEMS) / avg_increment);

  // Host-side reference counters (64-bit to rule out overflow during accumulation)
  std::array<uint64_t, NUM_ITEMS> reference_counters{};
  thrust::host_vector<uint32_t> h_bins(num_increments);
  thrust::host_vector<uint32_t> h_increments(num_increments);

  // Generate random test input data
  GenerateRandomData(thrust::raw_pointer_cast(h_bins.data()),
                     num_increments,
                     0U,
                     NUM_ITEMS - 1U,
                     seed);
  GenerateRandomData(thrust::raw_pointer_cast(h_increments.data()),
                     num_increments,
                     min_increment,
                     max_increment,
                     (seed + 17));

  // Make sure test data does not overflow any of the counters
  for (std::size_t i = 0; i < num_increments; i++)
  {
    // New increment for this bin would overflow => zero this increment
    // (NOTE(review): '>=' conservatively also rejects reaching MAX_ITEM_VALUE exactly)
    if (reference_counters[h_bins[i]] + h_increments[i] >= MAX_ITEM_VALUE)
    {
      h_increments[i] = 0;
    }
    else
    {
      reference_counters[h_bins[i]] += h_increments[i];
    }
  }

  // Device memory
  thrust::device_vector<uint32_t> bins_in(num_increments);
  thrust::device_vector<uint32_t> increments_in(num_increments);
  thrust::device_vector<uint32_t> counts_out(NUM_ITEMS);

  // Initialize device-side test data
  bins_in       = h_bins;
  increments_in = h_increments;

  // Memory for GPU-generated results: one slot per bin. (Fixed: this was previously sized
  // num_increments, a misleading and potentially much larger allocation; only NUM_ITEMS
  // entries are ever produced/used.)
  thrust::host_vector<uint32_t> host_counts(NUM_ITEMS);

  // Reset counters to arbitrary random value
  thrust::fill(counts_out.begin(), counts_out.end(), 814920U);

  // Run tests with densely bit-packed counters
  hipLaunchKernelGGL(( TestBitPackedCounterKernel<NUM_ITEMS, MAX_ITEM_VALUE, false>)
  , dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(bins_in.data()),
  thrust::raw_pointer_cast(increments_in.data()),
  thrust::raw_pointer_cast(counts_out.data()),
  num_increments);

  // Result verification
  host_counts = counts_out;
  for (uint32_t i = 0; i < NUM_ITEMS; i++)
  {
    AssertEquals(reference_counters[i], host_counts[i]);
  }

  // Reset counters to arbitrary random value
  thrust::fill(counts_out.begin(), counts_out.end(), 814920U);

  // Run tests with bit-packed counters, where bit-count is a power-of-two
  hipLaunchKernelGGL(( TestBitPackedCounterKernel<NUM_ITEMS, MAX_ITEM_VALUE, true>)
  , dim3(1), dim3(1), 0, 0, thrust::raw_pointer_cast(bins_in.data()),
  thrust::raw_pointer_cast(increments_in.data()),
  thrust::raw_pointer_cast(counts_out.data()),
  num_increments);

  // Result verification
  host_counts = counts_out;
  for (uint32_t i = 0; i < NUM_ITEMS; i++)
  {
    AssertEquals(reference_counters[i], host_counts[i]);
  }
}
/**
 * @brief Test driver: runs VectorizedCopy, BitPackedCounter, and DeviceMemcpy::Batched
 * tests over a range of buffer-size distributions and offset layouts.
 */
int main(int argc, char **argv)
{
CommandLineArgs args(argc, argv);
// Initialize device
CubDebugExit(args.DeviceInit());
//---------------------------------------------------------------------
// VectorizedCopy tests
//---------------------------------------------------------------------
TestVectorizedCopy<uint32_t>();
TestVectorizedCopy<uint4>();
//---------------------------------------------------------------------
// BitPackedCounter tests
//---------------------------------------------------------------------
TestBitPackedCounter<1, 1>();
TestBitPackedCounter<1, (0x01U << 16)>();
TestBitPackedCounter<4, 1>();
TestBitPackedCounter<4, 2>();
TestBitPackedCounter<4, 255>();
TestBitPackedCounter<4, 256>();
TestBitPackedCounter<8, 1024>();
TestBitPackedCounter<32, 1>();
TestBitPackedCounter<32, 256>();
//---------------------------------------------------------------------
// DeviceMemcpy::Batched tests
//---------------------------------------------------------------------
// The most granular type being copied. Buffers will be aligned and their size will be an
// integer multiple of this type
using AtomicCopyT = uint8_t;
// Type used for indexing into the array of buffers
using BufferOffsetT = uint32_t;
// Type used for indexing into individual bytes of a buffer (large enough to cover the max
// buffer size)
using BufferSizeT = uint32_t;
// Type used for indexing into bytes over *all* the buffers' sizes
using ByteOffsetT = uint32_t;
// Total number of bytes that are targeted to be copied on each run
const BufferOffsetT target_copy_size = 64U << 20;
// The number of randomly sampled buffer-size ranges to test in addition to the fixed ones
constexpr std::size_t num_rnd_buffer_range_tests = 32;
// Each buffer's size will be random within this interval
std::vector<std::pair<std::size_t, std::size_t>> buffer_size_ranges = {{0, 1},
{1, 2},
{0, 16},
{1, 32},
{1, 1024},
{1, 32 * 1024},
{128 * 1024, 256 * 1024},
{target_copy_size,
target_copy_size}};
// Append randomly drawn [begin, end] size ranges (swapped if out of order)
std::mt19937 rng(0);
std::uniform_int_distribution<std::size_t> size_dist(1, 1000000);
for (std::size_t i = 0; i < num_rnd_buffer_range_tests; i++)
{
auto range_begin = size_dist(rng);
auto range_end = size_dist(rng);
if (range_begin > range_end)
{
std::swap(range_begin, range_end);
}
buffer_size_ranges.push_back({range_begin, range_end});
}
for (const auto &buffer_size_range : buffer_size_ranges)
{
// Round sizes up to a multiple of the atomic copy unit
BufferSizeT min_buffer_size =
static_cast<BufferSizeT>(CUB_ROUND_UP_NEAREST(buffer_size_range.first, sizeof(AtomicCopyT)))
BufferSizeT max_buffer_size =
static_cast<BufferSizeT>(CUB_ROUND_UP_NEAREST(buffer_size_range.second,
static_cast<BufferSizeT>(sizeof(AtomicCopyT))));
// Pick the buffer count so each run copies roughly target_copy_size bytes
double average_buffer_size = (min_buffer_size + max_buffer_size) / 2.0;
BufferOffsetT target_num_buffers =
static_cast<BufferOffsetT>(target_copy_size / average_buffer_size);
// Run tests with input buffer being consecutive and output buffers being consecutive
RunTest<AtomicCopyT, BufferOffsetT, BufferSizeT, ByteOffsetT>(target_num_buffers,
min_buffer_size,
max_buffer_size,
TestDataGen::CONSECUTIVE,
TestDataGen::CONSECUTIVE);
// Run tests with input buffer being randomly shuffled and output buffers being randomly
// shuffled
RunTest<AtomicCopyT, BufferOffsetT, BufferSizeT, ByteOffsetT>(target_num_buffers,
min_buffer_size,
max_buffer_size,
TestDataGen::RANDOM,
TestDataGen::RANDOM);
}
//---------------------------------------------------------------------
// DeviceMemcpy::Batched test with 64-bit offsets
//---------------------------------------------------------------------
using ByteOffset64T = uint64_t;
using BufferSize64T = uint64_t;
// A single buffer larger than 2^32 bytes exercises the 64-bit offset path
ByteOffset64T large_target_copy_size =
static_cast<ByteOffset64T>(std::numeric_limits<uint32_t>::max()) + (128ULL * 1024ULL * 1024ULL);
// Make sure min_buffer_size is in fact smaller than max buffer size
constexpr BufferOffsetT single_buffer = 1;
// Run tests with input buffer being consecutive and output buffers being consecutive
RunTest<AtomicCopyT, BufferOffsetT, BufferSize64T, ByteOffset64T>(single_buffer,
large_target_copy_size,
large_target_copy_size,
TestDataGen::CONSECUTIVE,
TestDataGen::CONSECUTIVE);
}
| f8fe539bfdbfdf010f101999a2ab64295fa65098.cu | /******************************************************************************
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <cub/device/device_memcpy.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_ptx.cuh>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/logical.h>
#include <thrust/sequence.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <numeric>
#include <random>
#include <type_traits>
#include <vector>
#include "test_util.h"
/**
 * @brief Host-side random data generation: fills @p rand_out with @p num_items uniformly
 * distributed integers in [min_rand_val, max_rand_val]. Deterministic for a given @p seed.
 *
 * Only enabled for integral types of at least 16 bits (std::uniform_int_distribution does
 * not support 8-bit types).
 */
template <typename T>
void GenerateRandomData(
  T *rand_out,
  const std::size_t num_items,
  const T min_rand_val = std::numeric_limits<T>::min(),
  const T max_rand_val = std::numeric_limits<T>::max(),
  const std::uint_fast32_t seed = 320981U,
  typename std::enable_if<std::is_integral<T>::value && (sizeof(T) >= 2)>::type * = nullptr)
{
  // Seeded engine + distribution => reproducible sequence of draws
  std::mt19937 engine(seed);
  std::uniform_int_distribution<T> dist(min_rand_val, max_rand_val);

  // One draw per output item, in order (matches a plain indexed loop)
  std::generate_n(rand_out, num_items, [&]() { return dist(engine); });
}
/**
 * @brief Naive baseline kernel for batched memcpy: thread i copies the i-th buffer in its
 * entirety, one byte at a time. Serves as a correctness/performance reference.
 *
 * Expects a 1D launch with at least @p num_buffers threads; surplus threads exit early.
 */
template <typename InputBufferIt,
typename OutputBufferIt,
typename BufferSizeIteratorT,
typename BufferOffsetT>
void __global__ BaselineBatchMemCpyKernel(InputBufferIt input_buffer_it,
OutputBufferIt output_buffer_it,
BufferSizeIteratorT buffer_sizes,
BufferOffsetT num_buffers)
{
// Global thread id doubles as the buffer index
BufferOffsetT gtid = blockDim.x * blockIdx.x + threadIdx.x;
if (gtid >= num_buffers)
{
return;
}
// Byte-wise copy of this thread's whole buffer
for (BufferOffsetT i = 0; i < buffer_sizes[gtid]; i++)
{
reinterpret_cast<uint8_t *>(output_buffer_it[gtid])[i] =
reinterpret_cast<uint8_t *>(input_buffer_it[gtid])[i];
}
}
/**
 * @brief Host-side launcher for BaselineBatchMemCpyKernel (one thread per
 * buffer, 128 threads per block).
 */
template <typename InputBufferIt, typename OutputBufferIt, typename BufferSizeIteratorT>
void InvokeBaselineBatchMemcpy(InputBufferIt input_buffer_it,
                               OutputBufferIt output_buffer_it,
                               BufferSizeIteratorT buffer_sizes,
                               uint32_t num_buffers)
{
  constexpr uint32_t block_threads = 128U;
  // Ceiling division so the grid covers all buffers
  uint32_t num_blocks = (num_buffers + block_threads - 1) / block_threads;
  BaselineBatchMemCpyKernel<<<num_blocks, block_threads>>>(input_buffer_it,
                                                           output_buffer_it,
                                                           buffer_sizes,
                                                           num_buffers);
}
/**
 * @brief Baseline batched memcpy that assigns one thread block per buffer and
 * copies in 8-byte words via a block-stride loop.
 *
 * NOTE(review): the loop runs over buffer_sizes[gbid] / 8 words, so any
 * trailing bytes of a buffer whose size is not a multiple of 8 are not
 * copied — callers must supply sizes that are multiples of 8. TODO confirm.
 */
template <typename InputBufferIt,
          typename OutputBufferIt,
          typename BufferSizeIteratorT,
          typename BufferOffsetT>
void __global__ BaselineBatchMemCpyPerBlockKernel(InputBufferIt input_buffer_it,
                                                  OutputBufferIt output_buffer_it,
                                                  BufferSizeIteratorT buffer_sizes,
                                                  BufferOffsetT num_buffers)
{
  // One block per buffer
  BufferOffsetT gbid = blockIdx.x;
  if (gbid >= num_buffers)
  {
    return;
  }
  // Block-stride loop over the buffer, one uint64_t word at a time
  for (BufferOffsetT i = threadIdx.x; i < buffer_sizes[gbid] / 8; i += blockDim.x)
  {
    reinterpret_cast<uint64_t *>(output_buffer_it[gbid])[i] =
      reinterpret_cast<uint64_t *>(input_buffer_it[gbid])[i];
  }
}
/**
* @brief Used for generating a shuffled but cohesive sequence of output-buffer offsets for the
* sequence of input-buffers.
*/
/**
 * @brief Computes one byte offset per buffer such that the buffers tile the
 * data segment contiguously but in a randomly shuffled order. The offset of
 * buffer i is returned at index i of the result.
 */
template <typename BufferOffsetT, typename ByteOffsetT, typename BufferSizeT>
std::vector<ByteOffsetT> GetShuffledBufferOffsets(const std::vector<BufferSizeT> &buffer_sizes,
                                                  const std::uint_fast32_t seed = 320981U)
{
  const BufferOffsetT num_buffers = static_cast<BufferOffsetT>(buffer_sizes.size());

  // Draw a random placement order of the buffers
  std::mt19937 rng(seed);
  std::vector<BufferOffsetT> pmt_idxs(num_buffers);
  std::iota(pmt_idxs.begin(), pmt_idxs.end(), static_cast<BufferOffsetT>(0));
  std::shuffle(std::begin(pmt_idxs), std::end(pmt_idxs), rng);

  // Walk the buffers in their shuffled placement order, handing out
  // consecutive byte offsets and scattering each offset directly to the
  // buffer that occupies that slot
  std::vector<ByteOffsetT> new_offsets(num_buffers);
  ByteOffsetT running_offset = {};
  for (auto permuted_buffer_idx : pmt_idxs)
  {
    new_offsets[permuted_buffer_idx] = running_offset;
    running_offset += buffer_sizes[permuted_buffer_idx];
  }
  return new_offsets;
}
/**
* @brief Function object class template that takes an offset and returns an iterator at the given
* offset relative to a fixed base iterator.
*
* @tparam IteratorT The random-access iterator type to be returned
*/
template <typename IteratorT>
struct OffsetToPtrOp
{
  // Returns an iterator advanced by `offset` elements past the fixed base
  template <typename T>
  __host__ __device__ __forceinline__ IteratorT operator()(T offset) const
  {
    return base_it + offset;
  }
  // Base iterator that all offsets are applied to
  IteratorT base_it;
};
// How buffer offsets are laid out within the backing data segment
enum class TestDataGen
{
  // Random offsets into a data segment
  RANDOM,
  // Buffers cohesively reside next to each other
  CONSECUTIVE
};
/**
 * @brief Runs a single DeviceMemcpy::Batched test instance: generates random
 * buffer sizes and (consecutive or shuffled) source/destination offsets,
 * performs the batched copy on the device, and verifies the result against a
 * host-side reference copy.
 *
 * @tparam AtomicT The most granular type being copied. All source and destination pointers will be
 * aligned based on this type, the number of bytes being copied will be an integer multiple of this
 * type's size
 * @tparam BufferOffsetT Type used for indexing into the array of buffers
 * @tparam BufferSizeT Type used for indexing into individual bytes of a buffer (large enough to
 * cover the max buffer size)
 * @tparam ByteOffsetT Type used for indexing into bytes over *all* the buffers' sizes
 */
template <typename AtomicT, typename BufferOffsetT, typename BufferSizeT, typename ByteOffsetT>
void RunTest(BufferOffsetT num_buffers,
             BufferSizeT min_buffer_size,
             BufferSizeT max_buffer_size,
             TestDataGen input_gen,
             TestDataGen output_gen)
{
  using SrcPtrT = uint8_t *;

  // Buffer segment data (their offsets and sizes)
  std::vector<BufferSizeT> h_buffer_sizes(num_buffers);
  std::vector<ByteOffsetT> h_buffer_src_offsets(num_buffers);
  std::vector<ByteOffsetT> h_buffer_dst_offsets(num_buffers);

  // Device-side resources
  void *d_in                        = nullptr;
  void *d_out                       = nullptr;
  ByteOffsetT *d_buffer_src_offsets = nullptr;
  ByteOffsetT *d_buffer_dst_offsets = nullptr;
  BufferSizeT *d_buffer_sizes       = nullptr;
  void *d_temp_storage              = nullptr;
  size_t temp_storage_bytes         = 0;

  // Generate the buffer sizes
  GenerateRandomData(h_buffer_sizes.data(), h_buffer_sizes.size(), min_buffer_size, max_buffer_size);

  // Make sure buffer sizes are a multiple of the most granular unit (one AtomicT) being copied
  // (round down)
  for (BufferOffsetT i = 0; i < num_buffers; i++)
  {
    h_buffer_sizes[i] = (h_buffer_sizes[i] / sizeof(AtomicT)) * sizeof(AtomicT);
  }

  // Compute the total bytes to be copied and, for the CONSECUTIVE layouts,
  // assign offsets as a running prefix sum of the buffer sizes
  ByteOffsetT num_total_bytes = 0;
  for (BufferOffsetT i = 0; i < num_buffers; i++)
  {
    if (input_gen == TestDataGen::CONSECUTIVE)
    {
      h_buffer_src_offsets[i] = num_total_bytes;
    }
    if (output_gen == TestDataGen::CONSECUTIVE)
    {
      h_buffer_dst_offsets[i] = num_total_bytes;
    }
    num_total_bytes += h_buffer_sizes[i];
  }

  // Shuffle input buffer source-offsets
  std::uint_fast32_t shuffle_seed = 320981U;
  if (input_gen == TestDataGen::RANDOM)
  {
    h_buffer_src_offsets = GetShuffledBufferOffsets<BufferOffsetT, ByteOffsetT>(h_buffer_sizes,
                                                                                shuffle_seed);
    shuffle_seed += 42;
  }

  // Shuffle output buffer destination-offsets
  if (output_gen == TestDataGen::RANDOM)
  {
    h_buffer_dst_offsets = GetShuffledBufferOffsets<BufferOffsetT, ByteOffsetT>(h_buffer_sizes,
                                                                                shuffle_seed);
  }

  // Get temporary storage requirements
  CubDebugExit(cub::DeviceMemcpy::Batched(d_temp_storage,
                                          temp_storage_bytes,
                                          static_cast<SrcPtrT *>(nullptr),
                                          static_cast<SrcPtrT *>(nullptr),
                                          d_buffer_sizes,
                                          num_buffers));

  // Check if there's sufficient device memory to run this test
  std::size_t total_required_mem = num_total_bytes +
                                   num_total_bytes +
                                   (num_buffers * sizeof(d_buffer_src_offsets[0])) +
                                   (num_buffers * sizeof(d_buffer_dst_offsets[0])) +
                                   (num_buffers * sizeof(d_buffer_sizes[0])) +
                                   temp_storage_bytes;
  if (TotalGlobalMem() < total_required_mem)
  {
    std::cout
      << "Skipping the test due to insufficient device memory\n"
      << " - Required: " << total_required_mem << " B, available: " << TotalGlobalMem() << " B\n"
      << " - Skipped test instance: "
      << " -> Min. buffer size: " << min_buffer_size << ", max. buffer size: " << max_buffer_size
      << ", num_buffers: " << num_buffers
      << ", in_gen: " << ((input_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE")
      << ", out_gen: " << ((output_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE");
    return;
  }

  // Timing events and a dedicated stream for the copy under test
  // (now error-checked; previously the return codes were ignored)
  cudaEvent_t events[2];
  CubDebugExit(cudaEventCreate(&events[0]));
  CubDebugExit(cudaEventCreate(&events[1]));
  cudaStream_t stream;
  CubDebugExit(cudaStreamCreate(&stream));

  // Allocate device memory
  CubDebugExit(cudaMalloc(&d_in, num_total_bytes));
  CubDebugExit(cudaMalloc(&d_out, num_total_bytes));
  CubDebugExit(cudaMalloc(&d_buffer_src_offsets, num_buffers * sizeof(d_buffer_src_offsets[0])));
  CubDebugExit(cudaMalloc(&d_buffer_dst_offsets, num_buffers * sizeof(d_buffer_dst_offsets[0])));
  CubDebugExit(cudaMalloc(&d_buffer_sizes, num_buffers * sizeof(d_buffer_sizes[0])));
  CubDebugExit(cudaMalloc(&d_temp_storage, temp_storage_bytes));

  // Populate the data source with random data, generated 16 bits at a time
  using RandomInitAliasT         = uint16_t;
  std::size_t num_aliased_factor = sizeof(RandomInitAliasT) / sizeof(uint8_t);
  std::size_t num_aliased_units  = CUB_QUOTIENT_CEILING(num_total_bytes, num_aliased_factor);
  std::unique_ptr<uint8_t[]> h_in(new uint8_t[num_aliased_units * num_aliased_factor]);
  std::unique_ptr<uint8_t[]> h_out(new uint8_t[num_total_bytes]);
  std::unique_ptr<uint8_t[]> h_gpu_results(new uint8_t[num_total_bytes]);

  // Generate random bits for the source-data segment
  GenerateRandomData(reinterpret_cast<RandomInitAliasT *>(h_in.get()), num_aliased_units);

  // Prepare d_buffer_srcs: iterator mapping a byte offset to a source pointer
  OffsetToPtrOp<SrcPtrT> src_transform_op{static_cast<SrcPtrT>(d_in)};
  cub::TransformInputIterator<SrcPtrT, OffsetToPtrOp<SrcPtrT>, ByteOffsetT *> d_buffer_srcs(
    d_buffer_src_offsets,
    src_transform_op);

  // Prepare d_buffer_dsts: same, for destination pointers
  OffsetToPtrOp<SrcPtrT> dst_transform_op{static_cast<SrcPtrT>(d_out)};
  cub::TransformInputIterator<SrcPtrT, OffsetToPtrOp<SrcPtrT>, ByteOffsetT *> d_buffer_dsts(
    d_buffer_dst_offsets,
    dst_transform_op);

  // Prepare random data segment (which serves for the buffer sources)
  CubDebugExit(cudaMemcpyAsync(d_in, h_in.get(), num_total_bytes, cudaMemcpyHostToDevice, stream));
  // Prepare d_buffer_src_offsets
  CubDebugExit(cudaMemcpyAsync(d_buffer_src_offsets,
                               h_buffer_src_offsets.data(),
                               h_buffer_src_offsets.size() * sizeof(h_buffer_src_offsets[0]),
                               cudaMemcpyHostToDevice,
                               stream));
  // Prepare d_buffer_dst_offsets
  CubDebugExit(cudaMemcpyAsync(d_buffer_dst_offsets,
                               h_buffer_dst_offsets.data(),
                               h_buffer_dst_offsets.size() * sizeof(h_buffer_dst_offsets[0]),
                               cudaMemcpyHostToDevice,
                               stream));
  // Prepare d_buffer_sizes
  CubDebugExit(cudaMemcpyAsync(d_buffer_sizes,
                               h_buffer_sizes.data(),
                               h_buffer_sizes.size() * sizeof(h_buffer_sizes[0]),
                               cudaMemcpyHostToDevice,
                               stream));

  // Record event before algorithm
  CubDebugExit(cudaEventRecord(events[0], stream));

  // Invoke device-side algorithm being under test
  CubDebugExit(cub::DeviceMemcpy::Batched(d_temp_storage,
                                          temp_storage_bytes,
                                          d_buffer_srcs,
                                          d_buffer_dsts,
                                          d_buffer_sizes,
                                          num_buffers,
                                          stream));

  // Record event after algorithm
  CubDebugExit(cudaEventRecord(events[1], stream));

  // Copy back the output buffer
  CubDebugExit(
    cudaMemcpyAsync(h_gpu_results.get(), d_out, num_total_bytes, cudaMemcpyDeviceToHost, stream));

  // Make sure results have been copied back to the host
  CubDebugExit(cudaStreamSynchronize(stream));

  // CPU-side result generation for verification
  for (BufferOffsetT i = 0; i < num_buffers; i++)
  {
    std::memcpy(h_out.get() + h_buffer_dst_offsets[i],
                h_in.get() + h_buffer_src_offsets[i],
                h_buffer_sizes[i]);
  }

  float duration = 0;
  CubDebugExit(cudaEventElapsedTime(&duration, events[0], events[1]));

#ifdef CUB_TEST_BENCHMARK
  size_t stats_src_offsets = sizeof(ByteOffsetT) * num_buffers;
  size_t stats_dst_offsets = sizeof(ByteOffsetT) * num_buffers;
  size_t stats_sizes       = sizeof(BufferSizeT) * num_buffers;
  size_t stats_data_copied = 2 * num_total_bytes;
  std::cout
    << "Min. buffer size: " << min_buffer_size << ", max. buffer size: " << max_buffer_size
    << ", num_buffers: " << num_buffers
    << ", in_gen: " << ((input_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE")
    << ", out_gen: " << ((output_gen == TestDataGen::RANDOM) ? "SHFL" : "CONSECUTIVE")
    << ", src size: " << stats_src_offsets << ", dst size: " << stats_dst_offsets
    << ", sizes size: " << stats_sizes << ", cpy_data_size: " << stats_data_copied
    << ", total: " << (stats_src_offsets + stats_dst_offsets + stats_sizes + stats_data_copied)
    << ", duration: " << duration
    << ", BW: "
    << ((double)(stats_src_offsets + stats_dst_offsets + stats_sizes + stats_data_copied) /
        1000000000.0) /
         (duration / 1000.0)
    << "GB/s \n";
#endif

  // Byte-wise verification of the device results against the host reference
  for (ByteOffsetT i = 0; i < num_total_bytes; i++)
  {
    if (h_gpu_results.get()[i] != h_out.get()[i])
    {
      std::cout << "Mismatch at index " << i
                << ", CPU vs. GPU: " << static_cast<uint16_t>(h_gpu_results.get()[i]) << ", "
                << static_cast<uint16_t>(h_out.get()[i]) << "\n";
    }
    AssertEquals(h_out.get()[i], h_gpu_results.get()[i]);
  }

  CubDebugExit(cudaFree(d_in));
  CubDebugExit(cudaFree(d_out));
  CubDebugExit(cudaFree(d_buffer_src_offsets));
  CubDebugExit(cudaFree(d_buffer_dst_offsets));
  CubDebugExit(cudaFree(d_buffer_sizes));
  CubDebugExit(cudaFree(d_temp_storage));
  // Release the timing events and the stream (the original test leaked these)
  CubDebugExit(cudaEventDestroy(events[0]));
  CubDebugExit(cudaEventDestroy(events[1]));
  CubDebugExit(cudaStreamDestroy(stream));
}
// Thin kernel wrapper so the device-side VectorizedCopy helper can be driven
// from the host tests; every thread of the launch participates in the copy.
template <int LOGICAL_WARP_SIZE, typename VectorT, typename ByteOffsetT>
__global__ void TestVectorizedCopyKernel(const void *d_in, void *d_out, ByteOffsetT copy_size)
{
  cub::detail::VectorizedCopy<LOGICAL_WARP_SIZE, VectorT>(threadIdx.x, d_out, copy_size, d_in);
}
// Predicate for zip-iterator ranges: true iff the tuple's first two members
// compare equal (used to check copied bytes against their source bytes).
struct TupleMemberEqualityOp
{
  template <typename T>
  __host__ __device__ __forceinline__ bool operator()(T tuple)
  {
    return thrust::get<0>(tuple) == thrust::get<1>(tuple);
  }
};
/**
* @brief Tests the VectorizedCopy for various aligned and misaligned input and output pointers.
* @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t)
*/
/**
 * @brief Tests VectorizedCopy over the cross product of aligned and
 * misaligned input/output pointers and several copy sizes.
 * @tparam VectorT The vector type used for vectorized stores (i.e., one of uint4, uint2, uint32_t)
 */
template <typename VectorT>
void TestVectorizedCopy()
{
  constexpr uint32_t threads_per_block = 8;

  // Offsets hit aligned, one-past-aligned, and maximally misaligned pointers;
  // copy sizes cover empty, sub-word, word, vector, and multi-iteration copies
  const std::vector<std::size_t> in_offsets{0, 1, sizeof(uint32_t) - 1};
  const std::vector<std::size_t> out_offsets{0, 1, sizeof(VectorT) - 1};
  const std::vector<std::size_t> copy_sizes{0,
                                            1,
                                            sizeof(uint32_t),
                                            sizeof(VectorT),
                                            2 * threads_per_block * sizeof(VectorT)};

  for (const std::size_t copy_size : copy_sizes)
  {
    for (const std::size_t in_offset : in_offsets)
    {
      for (const std::size_t out_offset : out_offsets)
      {
        // Input gets a recognizable byte sequence, output a poison pattern
        const std::size_t alloc_size_in  = in_offset + copy_size;
        const std::size_t alloc_size_out = out_offset + copy_size;
        thrust::device_vector<char> data_in(alloc_size_in);
        thrust::device_vector<char> data_out(alloc_size_out);
        thrust::sequence(data_in.begin(), data_in.end(), static_cast<char>(0));
        thrust::fill_n(data_out.begin(), alloc_size_out, static_cast<char>(0x42));

        char *d_in  = thrust::raw_pointer_cast(data_in.data());
        char *d_out = thrust::raw_pointer_cast(data_out.data());
        TestVectorizedCopyKernel<threads_per_block, VectorT>
          <<<1, threads_per_block>>>(d_in + in_offset,
                                     d_out + out_offset,
                                     static_cast<int>(copy_size));

        // Verify each copied byte matches its source byte
        auto zip_it = thrust::make_zip_iterator(data_in.begin() + in_offset,
                                                data_out.begin() + out_offset);
        AssertTrue(thrust::all_of(zip_it, zip_it + copy_size, TupleMemberEqualityOp{}));
      }
    }
  }
}
// Drives BitPackedCounter on the device: applies all (bin, increment) pairs to
// a single counter instance, then writes every bin's final count to counts_out.
// Intended to be launched with a single thread (<<<1, 1>>>).
template <uint32_t NUM_ITEMS, uint32_t MAX_ITEM_VALUE, bool PREFER_POW2_BITS>
__global__ void TestBitPackedCounterKernel(uint32_t *bins,
                                           uint32_t *increments,
                                           uint32_t *counts_out,
                                           uint32_t num_items)
{
  using BitPackedCounterT =
    cub::detail::BitPackedCounter<NUM_ITEMS, MAX_ITEM_VALUE, PREFER_POW2_BITS>;
  BitPackedCounterT counter{};
  // Accumulate every increment into its bin
  for (uint32_t i = 0; i < num_items; i++)
  {
    counter.Add(bins[i], increments[i]);
  }
  // Export the per-bin totals for host-side verification
  for (uint32_t i = 0; i < NUM_ITEMS; i++)
  {
    counts_out[i] = counter.Get(i);
  }
}
/**
* @brief Tests BitPackedCounter that's used for computing the histogram of buffer sizes (i.e.,
* small, medium, large).
*/
template <uint32_t NUM_ITEMS, uint32_t MAX_ITEM_VALUE>
void TestBitPackedCounter(const std::uint_fast32_t seed = 320981U)
{
  // Increments are drawn from [min_increment, max_increment]; avg_increment
  // sizes the test so counters approach (but never reach) saturation
  constexpr uint32_t min_increment = 0;
  constexpr uint32_t max_increment = 4;
  constexpr double avg_increment = static_cast<double>(min_increment) +
                                   (static_cast<double>(max_increment - min_increment) / 2.0);
  std::uint32_t num_increments =
    static_cast<uint32_t>(static_cast<double>(MAX_ITEM_VALUE * NUM_ITEMS) / avg_increment);

  // Test input data
  // NOTE(review): std::array is used without a direct #include <array>; it is
  // currently pulled in transitively — confirm when touching the include list.
  std::array<uint64_t, NUM_ITEMS> reference_counters{};
  thrust::host_vector<uint32_t> h_bins(num_increments);
  thrust::host_vector<uint32_t> h_increments(num_increments);

  // Generate random test input data (bins in [0, NUM_ITEMS) and increments)
  GenerateRandomData(thrust::raw_pointer_cast(h_bins.data()),
                     num_increments,
                     0U,
                     NUM_ITEMS - 1U,
                     seed);
  GenerateRandomData(thrust::raw_pointer_cast(h_increments.data()),
                     num_increments,
                     min_increment,
                     max_increment,
                     (seed + 17));

  // Make sure test data does not overflow any of the counters
  for (std::size_t i = 0; i < num_increments; i++)
  {
    // New increment for this bin would overflow => zero this increment
    if (reference_counters[h_bins[i]] + h_increments[i] >= MAX_ITEM_VALUE)
    {
      h_increments[i] = 0;
    }
    else
    {
      reference_counters[h_bins[i]] += h_increments[i];
    }
  }

  // Device memory
  thrust::device_vector<uint32_t> bins_in(num_increments);
  thrust::device_vector<uint32_t> increments_in(num_increments);
  thrust::device_vector<uint32_t> counts_out(NUM_ITEMS);

  // Initialize device-side test data
  bins_in = h_bins;
  increments_in = h_increments;

  // Memory for GPU-generated results (re-sized to NUM_ITEMS by assignment below)
  thrust::host_vector<uint32_t> host_counts(num_increments);

  // Reset counters to arbitrary random value
  thrust::fill(counts_out.begin(), counts_out.end(), 814920U);

  // Run tests with densely bit-packed counters
  TestBitPackedCounterKernel<NUM_ITEMS, MAX_ITEM_VALUE, false>
    <<<1, 1>>>(thrust::raw_pointer_cast(bins_in.data()),
               thrust::raw_pointer_cast(increments_in.data()),
               thrust::raw_pointer_cast(counts_out.data()),
               num_increments);

  // Result verification against the host-side reference counters
  host_counts = counts_out;
  for (uint32_t i = 0; i < NUM_ITEMS; i++)
  {
    AssertEquals(reference_counters[i], host_counts[i]);
  }

  // Reset counters to arbitrary random value
  thrust::fill(counts_out.begin(), counts_out.end(), 814920U);

  // Run tests with bit-packed counters, where bit-count is a power-of-two
  TestBitPackedCounterKernel<NUM_ITEMS, MAX_ITEM_VALUE, true>
    <<<1, 1>>>(thrust::raw_pointer_cast(bins_in.data()),
               thrust::raw_pointer_cast(increments_in.data()),
               thrust::raw_pointer_cast(counts_out.data()),
               num_increments);

  // Result verification
  host_counts = counts_out;
  for (uint32_t i = 0; i < NUM_ITEMS; i++)
  {
    AssertEquals(reference_counters[i], host_counts[i]);
  }
}
// Test driver: exercises VectorizedCopy, BitPackedCounter, and
// DeviceMemcpy::Batched over a range of buffer-size distributions.
int main(int argc, char **argv)
{
  CommandLineArgs args(argc, argv);

  // Initialize device
  CubDebugExit(args.DeviceInit());

  //---------------------------------------------------------------------
  // VectorizedCopy tests
  //---------------------------------------------------------------------
  TestVectorizedCopy<uint32_t>();
  TestVectorizedCopy<uint4>();

  //---------------------------------------------------------------------
  // BitPackedCounter tests
  //---------------------------------------------------------------------
  TestBitPackedCounter<1, 1>();
  TestBitPackedCounter<1, (0x01U << 16)>();
  TestBitPackedCounter<4, 1>();
  TestBitPackedCounter<4, 2>();
  TestBitPackedCounter<4, 255>();
  TestBitPackedCounter<4, 256>();
  TestBitPackedCounter<8, 1024>();
  TestBitPackedCounter<32, 1>();
  TestBitPackedCounter<32, 256>();

  //---------------------------------------------------------------------
  // DeviceMemcpy::Batched tests
  //---------------------------------------------------------------------
  // The most granular type being copied. Buffers will be aligned and their size be an integer
  // multiple of this type
  using AtomicCopyT = uint8_t;

  // Type used for indexing into the array of buffers
  using BufferOffsetT = uint32_t;

  // Type used for indexing into individual bytes of a buffer (large enough to cover the max buffer
  // size)
  using BufferSizeT = uint32_t;

  // Type used for indexing into bytes over *all* the buffers' sizes
  using ByteOffsetT = uint32_t;

  // Total number of bytes that are targeted to be copied on each run
  const BufferOffsetT target_copy_size = 64U << 20;

  // The number of randomly drawn buffer-size intervals tested in addition to the fixed ones below
  constexpr std::size_t num_rnd_buffer_range_tests = 32;

  // Each buffer's size will be random within this interval
  std::vector<std::pair<std::size_t, std::size_t>> buffer_size_ranges = {{0, 1},
                                                                         {1, 2},
                                                                         {0, 16},
                                                                         {1, 32},
                                                                         {1, 1024},
                                                                         {1, 32 * 1024},
                                                                         {128 * 1024, 256 * 1024},
                                                                         {target_copy_size,
                                                                          target_copy_size}};
  // Append the randomly drawn [min, max] size intervals
  std::mt19937 rng(0);
  std::uniform_int_distribution<std::size_t> size_dist(1, 1000000);
  for (std::size_t i = 0; i < num_rnd_buffer_range_tests; i++)
  {
    auto range_begin = size_dist(rng);
    auto range_end = size_dist(rng);
    if (range_begin > range_end)
    {
      std::swap(range_begin, range_end);
    }
    buffer_size_ranges.push_back({range_begin, range_end});
  }

  for (const auto &buffer_size_range : buffer_size_ranges)
  {
    // Align the interval bounds to AtomicCopyT and pick a buffer count that
    // approximates target_copy_size total bytes
    BufferSizeT min_buffer_size =
      static_cast<BufferSizeT>(CUB_ROUND_UP_NEAREST(buffer_size_range.first, sizeof(AtomicCopyT)));
    BufferSizeT max_buffer_size =
      static_cast<BufferSizeT>(CUB_ROUND_UP_NEAREST(buffer_size_range.second,
                                                    static_cast<BufferSizeT>(sizeof(AtomicCopyT))));
    double average_buffer_size = (min_buffer_size + max_buffer_size) / 2.0;
    BufferOffsetT target_num_buffers =
      static_cast<BufferOffsetT>(target_copy_size / average_buffer_size);

    // Run tests with input buffer being consecutive and output buffers being consecutive
    RunTest<AtomicCopyT, BufferOffsetT, BufferSizeT, ByteOffsetT>(target_num_buffers,
                                                                  min_buffer_size,
                                                                  max_buffer_size,
                                                                  TestDataGen::CONSECUTIVE,
                                                                  TestDataGen::CONSECUTIVE);

    // Run tests with input buffer being randomly shuffled and output buffers being randomly
    // shuffled
    RunTest<AtomicCopyT, BufferOffsetT, BufferSizeT, ByteOffsetT>(target_num_buffers,
                                                                  min_buffer_size,
                                                                  max_buffer_size,
                                                                  TestDataGen::RANDOM,
                                                                  TestDataGen::RANDOM);
  }

  //---------------------------------------------------------------------
  // DeviceMemcpy::Batched test with 64-bit offsets
  //---------------------------------------------------------------------
  using ByteOffset64T = uint64_t;
  using BufferSize64T = uint64_t;
  // A single buffer slightly larger than what 32-bit byte offsets can address
  ByteOffset64T large_target_copy_size =
    static_cast<ByteOffset64T>(std::numeric_limits<uint32_t>::max()) + (128ULL * 1024ULL * 1024ULL);
  // Make sure min_buffer_size is in fact smaller than max buffer size
  constexpr BufferOffsetT single_buffer = 1;

  // Run tests with input buffer being consecutive and output buffers being consecutive
  RunTest<AtomicCopyT, BufferOffsetT, BufferSize64T, ByteOffset64T>(single_buffer,
                                                                    large_target_copy_size,
                                                                    large_target_copy_size,
                                                                    TestDataGen::CONSECUTIVE,
                                                                    TestDataGen::CONSECUTIVE);
}
|
18aae18fa8e28e736914b3dd39e16e7f0718e277.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <time.h>
#include "fs_initializer.cu.h"
#include "neural_network.hh"
#include "linear_layer.hh"
#include "relu_activation.hh"
#include "sigmoid_activation.hh"
#include "nn_exception.hh"
#include "bce_cost.hh"
#include "coordinates_dataset.hh"
#include <ctime>
#include <cstdio>
#include <iostream>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
float computeAccuracy(const Matrix& predictions, const Matrix& targets);
// Raises the device-side malloc heap limit to 1 GiB so in-kernel allocations
// do not fail during the run.
void init_device_app()
{
  hipDeviceSetLimit(hipLimitMallocHeapSize, 1 << 30);
}
int main() {
	// GPUFS setup: pick the device from the GPUDEVICE env var (default 0)
	int device = 0;
	char* gpudev = getenv("GPUDEVICE");
	if (gpudev != NULL)
		device = atoi(gpudev);
	hipSetDevice(device);

	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, device);
	printf("Running on device %d: \"%s\"\n", device, deviceProp.name);

	// Bring up the GPUFS runtime state and raise the device heap limit
	volatile GPUGlobals* gpuGlobals;
	initializer(&gpuGlobals);
	init_device_app();

	srand( time(NULL) );

	// 10M-sample synthetic dataset split into 21 batches
	CoordinatesDataset dataset(10000000, 21);
	BCECost bce_cost;

	// 2 -> 30 -> 1 network with ReLU hidden layer and sigmoid output; the
	// first layer is handed gpuGlobals so it can stream data through GPUFS
	NeuralNetwork nn;
	nn.addLayer(new LinearLayer("linear_1", Shape(2, 30), gpuGlobals));
	nn.addLayer(new ReLUActivation("relu_1"));
	nn.addLayer(new LinearLayer("linear_2", Shape(30, 1)));
	nn.addLayer(new SigmoidActivation("sigmoid_output"));

	printf("Beginning training\n");
	// network training
	Matrix Y;
	auto t1 = Clock::now();
	for (int epoch = 0; epoch < 1001; epoch++) {
		float cost = 0.0;
		// Not concerned with actually training, moreso evaluating effect
		// of using gpufs for loading input matrix on runtime performance,
		// so just using junk (backprop/cost lines intentionally disabled)
		for (int batch = 0; batch < dataset.getNumOfBatches() - 1; batch++) {
			Y = nn.forward("input", Shape(10000000, 2)); // dataset.getBatches().at(batch));
			//nn.backprop(Y, dataset.getTargets().at(batch));
			//cost += bce_cost.cost(Y, dataset.getTargets().at(batch));
		}

		// Report (zero) cost and elapsed wall time every 100 epochs
		if (epoch % 100 == 0) {
			std::cout << "Epoch: " << epoch
					<< ", Cost: " << cost / dataset.getNumOfBatches()
					<< ", Time (s): "
					<< (std::chrono::duration_cast<std::chrono::milliseconds>(Clock::now() - t1).count()) / 1000.0
					<< std::endl;
			t1 = Clock::now();
		}
	}

	return 0;
}
/**
 * @brief Computes the fraction of thresholded predictions (cutoff 0.5) that
 * match the targets.
 *
 * @param predictions network outputs; predictions.shape.x entries are compared
 * @param targets     expected 0/1 labels, same length
 * @return accuracy in [0, 1]; 0 when there are no predictions
 */
float computeAccuracy(const Matrix& predictions, const Matrix& targets) {
	int m = predictions.shape.x;
	// Guard against an empty prediction set (avoids division by zero)
	if (m == 0) {
		return 0.0f;
	}
	int correct_predictions = 0;

	for (int i = 0; i < m; i++) {
		// Threshold the sigmoid output at 0.5 to obtain a hard class label
		float prediction = predictions[i] > 0.5 ? 1 : 0;
		if (prediction == targets[i]) {
			correct_predictions++;
		}
	}

	return static_cast<float>(correct_predictions) / m;
}
| 18aae18fa8e28e736914b3dd39e16e7f0718e277.cu | #include <iostream>
#include <time.h>
#include "fs_initializer.cu.h"
#include "neural_network.hh"
#include "linear_layer.hh"
#include "relu_activation.hh"
#include "sigmoid_activation.hh"
#include "nn_exception.hh"
#include "bce_cost.hh"
#include "coordinates_dataset.hh"
#include <ctime>
#include <cstdio>
#include <iostream>
#include <chrono>
typedef std::chrono::high_resolution_clock Clock;
float computeAccuracy(const Matrix& predictions, const Matrix& targets);
// Raises the device-side malloc heap limit to 1 GiB so in-kernel allocations
// do not fail during the run.
void init_device_app()
{
  cudaDeviceSetLimit(cudaLimitMallocHeapSize, 1 << 30);
}
int main() {
	// GPUFS setup: pick the device from the GPUDEVICE env var (default 0)
	int device = 0;
	char* gpudev = getenv("GPUDEVICE");
	if (gpudev != NULL)
		device = atoi(gpudev);
	cudaSetDevice(device);

	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, device);
	printf("Running on device %d: \"%s\"\n", device, deviceProp.name);

	// Bring up the GPUFS runtime state and raise the device heap limit
	volatile GPUGlobals* gpuGlobals;
	initializer(&gpuGlobals);
	init_device_app();

	srand( time(NULL) );

	// 10M-sample synthetic dataset split into 21 batches
	CoordinatesDataset dataset(10000000, 21);
	BCECost bce_cost;

	// 2 -> 30 -> 1 network with ReLU hidden layer and sigmoid output; the
	// first layer is handed gpuGlobals so it can stream data through GPUFS
	NeuralNetwork nn;
	nn.addLayer(new LinearLayer("linear_1", Shape(2, 30), gpuGlobals));
	nn.addLayer(new ReLUActivation("relu_1"));
	nn.addLayer(new LinearLayer("linear_2", Shape(30, 1)));
	nn.addLayer(new SigmoidActivation("sigmoid_output"));

	printf("Beginning training\n");
	// network training
	Matrix Y;
	auto t1 = Clock::now();
	for (int epoch = 0; epoch < 1001; epoch++) {
		float cost = 0.0;
		// Not concerned with actually training, moreso evaluating effect
		// of using gpufs for loading input matrix on runtime performance,
		// so just using junk (backprop/cost lines intentionally disabled)
		for (int batch = 0; batch < dataset.getNumOfBatches() - 1; batch++) {
			Y = nn.forward("input", Shape(10000000, 2)); // dataset.getBatches().at(batch));
			//nn.backprop(Y, dataset.getTargets().at(batch));
			//cost += bce_cost.cost(Y, dataset.getTargets().at(batch));
		}

		// Report (zero) cost and elapsed wall time every 100 epochs
		if (epoch % 100 == 0) {
			std::cout << "Epoch: " << epoch
					<< ", Cost: " << cost / dataset.getNumOfBatches()
					<< ", Time (s): "
					<< (std::chrono::duration_cast<std::chrono::milliseconds>(Clock::now() - t1).count()) / 1000.0
					<< std::endl;
			t1 = Clock::now();
		}
	}

	return 0;
}
/**
 * @brief Computes the fraction of thresholded predictions (cutoff 0.5) that
 * match the targets.
 *
 * @param predictions network outputs; predictions.shape.x entries are compared
 * @param targets     expected 0/1 labels, same length
 * @return accuracy in [0, 1]; 0 when there are no predictions
 */
float computeAccuracy(const Matrix& predictions, const Matrix& targets) {
	int m = predictions.shape.x;
	// Guard against an empty prediction set (avoids division by zero)
	if (m == 0) {
		return 0.0f;
	}
	int correct_predictions = 0;

	for (int i = 0; i < m; i++) {
		// Threshold the sigmoid output at 0.5 to obtain a hard class label
		float prediction = predictions[i] > 0.5 ? 1 : 0;
		if (prediction == targets[i]) {
			correct_predictions++;
		}
	}

	return static_cast<float>(correct_predictions) / m;
}
|
73299810d59e4332714c6b005fb57142adf5b74c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
//ldoc on
/**
* ## Implementation
*
* The actually work of computing the fluxes and speeds is done
* by local (`static`) helper functions that take as arguments
* pointers to all the individual fields. This is helpful to the
* compilers, since by specifying the `restrict` keyword, we are
* promising that we will not access the field data through the
* wrong pointer. This lets the compiler do a better job with
* vectorization.
*/
static const float g = 9.8;
// Computes the x-direction (f*) and y-direction (g*) shallow-water fluxes for
// every cell: mass fluxes are the momenta, momentum fluxes combine advection
// with the 0.5*g*h^2 pressure term.
static
void shallow2dv_flux(float* __restrict__ fh,
                     float* __restrict__ fhu,
                     float* __restrict__ fhv,
                     float* __restrict__ gh,
                     float* __restrict__ ghu,
                     float* __restrict__ ghv,
                     const float* __restrict__ h,
                     const float* __restrict__ hu,
                     const float* __restrict__ hv,
                     float g,
                     int ncell)
{
    // Mass fluxes equal the momenta component-wise
    memcpy(fh, hu, ncell * sizeof(float));
    memcpy(gh, hv, ncell * sizeof(float));
    for (int i = 0; i < ncell; ++i) {
        const float inv_h    = 1.0f / h[i];
        const float pressure = (0.5f * g) * h[i] * h[i];
        const float uv_flux  = hu[i] * hv[i] * inv_h;
        fhu[i] = hu[i] * hu[i] * inv_h + pressure;
        fhv[i] = uv_flux;
        ghu[i] = uv_flux;
        ghv[i] = hv[i] * hv[i] * inv_h + pressure;
    }
}
// Updates cxy[0]/cxy[1] with the maximum characteristic wave speeds
// |u| + sqrt(g*h) and |v| + sqrt(g*h) over this thread's slice of cells.
__device__
static
void shallow2dv_speed(float* __restrict__ cxy,
                      const float* __restrict__ h,
                      const float* __restrict__ hu,
                      const float* __restrict__ hv,
                      float g,
                      int ncell)
{
    // Running maxima, seeded from the current cxy contents
    float cx = cxy[0];
    float cy = cxy[1];
    //for (int i = 0; i < ncell; ++i) {
    // NOTE(review): threadIdx.x is unsigned, so for thread 0 the start index
    // (threadIdx.x-1)*... wraps to a huge value and that thread processes no
    // cells; the partitioning looks like it assumes 1-based thread indices —
    // confirm against the intended launch configuration.
    for (int i = (threadIdx.x-1)*(ncell-1+blockDim.x)/blockDim.x; i < min(threadIdx.x*(ncell-1+blockDim.x)/blockDim.x,ncell); ++i) {
        float hi = h[i];
        float inv_hi = 1.0f/h[i];
        float root_gh = sqrtf(g * hi);
        float cxi = fabsf(hu[i] * inv_hi) + root_gh;
        float cyi = fabsf(hv[i] * inv_hi) + root_gh;
        if (cx < cxi) cx = cxi;
        if (cy < cyi) cy = cyi;
    }
    // NOTE(review): all participating threads write cxy without any reduction
    // or synchronization here — verify callers handle this.
    cxy[0] = cx;
    cxy[1] = cy;
}
// Computes F(U) and G(U) for all three fields (h, hu, hv), which are stored as
// consecutive planes of stride field_stride inside U, FU, and GU.
void shallow2d_flux(float* FU, float* GU, const float* U,
                    int ncell, int field_stride)
{
    shallow2dv_flux(FU, FU+field_stride, FU+2*field_stride,
                    GU, GU+field_stride, GU+2*field_stride,
                    U, U +field_stride, U +2*field_stride,
                    g, ncell);
}
// Kernel wrapper: folds the per-field planes of U into shallow2dv_speed to
// update cxy with the maximum wave speeds over all cells.
__global__
void shallow2d_speed(float* cxy, const float* U,
                     int ncell, int field_stride)
{
    shallow2dv_speed(cxy, U, U+field_stride, U+2*field_stride, g, ncell);
} | 73299810d59e4332714c6b005fb57142adf5b74c.cu | #include <string.h>
#include <math.h>
//ldoc on
/**
* ## Implementation
*
* The actually work of computing the fluxes and speeds is done
* by local (`static`) helper functions that take as arguments
* pointers to all the individual fields. This is helpful to the
* compilers, since by specifying the `restrict` keyword, we are
* promising that we will not access the field data through the
* wrong pointer. This lets the compiler do a better job with
* vectorization.
*/
// Computes the x-direction (f*) and y-direction (g*) shallow-water fluxes for
// every cell: mass fluxes are the momenta, momentum fluxes combine advection
// with the 0.5*g*h^2 pressure term.
static
void shallow2dv_flux(float* __restrict__ fh,
                     float* __restrict__ fhu,
                     float* __restrict__ fhv,
                     float* __restrict__ gh,
                     float* __restrict__ ghu,
                     float* __restrict__ ghv,
                     const float* __restrict__ h,
                     const float* __restrict__ hu,
                     const float* __restrict__ hv,
                     float g,
                     int ncell)
{
    // Mass fluxes equal the momenta component-wise
    memcpy(fh, hu, ncell * sizeof(float));
    memcpy(gh, hv, ncell * sizeof(float));
    for (int i = 0; i < ncell; ++i) {
        const float inv_h    = 1.0f / h[i];
        const float pressure = (0.5f * g) * h[i] * h[i];
        const float uv_flux  = hu[i] * hv[i] * inv_h;
        fhu[i] = hu[i] * hu[i] * inv_h + pressure;
        fhv[i] = uv_flux;
        ghu[i] = uv_flux;
        ghv[i] = hv[i] * hv[i] * inv_h + pressure;
    }
}
// Updates cxy[0]/cxy[1] with the maximum characteristic wave speeds
// |u| + sqrt(g*h) and |v| + sqrt(g*h) over this thread's slice of cells.
__device__
static
void shallow2dv_speed(float* __restrict__ cxy,
                      const float* __restrict__ h,
                      const float* __restrict__ hu,
                      const float* __restrict__ hv,
                      float g,
                      int ncell)
{
    // Running maxima, seeded from the current cxy contents
    float cx = cxy[0];
    float cy = cxy[1];
    //for (int i = 0; i < ncell; ++i) {
    // NOTE(review): threadIdx.x is unsigned, so for thread 0 the start index
    // (threadIdx.x-1)*... wraps to a huge value and that thread processes no
    // cells; the partitioning looks like it assumes 1-based thread indices —
    // confirm against the intended launch configuration.
    for (int i = (threadIdx.x-1)*(ncell-1+blockDim.x)/blockDim.x; i < min(threadIdx.x*(ncell-1+blockDim.x)/blockDim.x,ncell); ++i) {
        float hi = h[i];
        float inv_hi = 1.0f/h[i];
        float root_gh = sqrtf(g * hi);
        float cxi = fabsf(hu[i] * inv_hi) + root_gh;
        float cyi = fabsf(hv[i] * inv_hi) + root_gh;
        if (cx < cxi) cx = cxi;
        if (cy < cyi) cy = cyi;
    }
    // NOTE(review): all participating threads write cxy without any reduction
    // or synchronization here — verify callers handle this.
    cxy[0] = cx;
    cxy[1] = cy;
}
// Computes F(U) and G(U) for all three fields (h, hu, hv), which are stored as
// consecutive planes of stride field_stride inside U, FU, and GU.
void shallow2d_flux(float* FU, float* GU, const float* U,
                    int ncell, int field_stride)
{
    shallow2dv_flux(FU, FU+field_stride, FU+2*field_stride,
                    GU, GU+field_stride, GU+2*field_stride,
                    U, U +field_stride, U +2*field_stride,
                    g, ncell);
}
// Kernel wrapper: folds the per-field planes of U into shallow2dv_speed to
// update cxy with the maximum wave speeds over all cells.
__global__
void shallow2d_speed(float* cxy, const float* U,
                     int ncell, int field_stride)
{
    shallow2dv_speed(cxy, U, U+field_stride, U+2*field_stride, g, ncell);
}
|
c5be43b87ee05d295869c7fe5ff6fd97f5ec626e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SDSC SCC Training - GPU Computing and Programming
// May 3, 2019
// Andreas Goetz (agoetz@sdsc.edu)
// Hello World Program in CUDA C
//
// Contains a function that is executed on the device (GPU)
//
#include<stdio.h>
// Empty device kernel; exists only to demonstrate launching code on the GPU.
__global__ void my_kernel(void){
}
// Launches the (empty) kernel on a single thread, then prints from the host.
int main(void) {
  hipLaunchKernelGGL(( my_kernel), dim3(1),dim3(1), 0, 0, );
  printf("Hello World!\n");
  return 0;
}
| c5be43b87ee05d295869c7fe5ff6fd97f5ec626e.cu | // SDSC SCC Training - GPU Computing and Programming
// May 3, 2019
// Andreas Goetz (agoetz@sdsc.edu)
// Hello World Program in CUDA C
//
// Contains a function that is executed on the device (GPU)
//
#include<stdio.h>
// Empty device kernel; exists only to demonstrate launching code on the GPU.
__global__ void my_kernel(void){
}
// Launches the (empty) kernel on a single thread, then prints from the host.
int main(void) {
  my_kernel<<<1,1>>>();
  printf("Hello World!\n");
  return 0;
}
|
7ee0dbad4083380577bfde4db114d3dd7ecfc69e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlobpcg_shift.cu normal z -> s, Fri Jan 30 19:00:29 2015
*/
#include "common_magma.h"
// Shift kernel for Block-LOBPCG: each thread reads x[threadIdx.x] into a
// register and, for lanes with idx >= shift, writes the value back `shift`
// positions to the left, compacting the vector set after `shift` residuals
// converged.
// NOTE(review): `row` is computed from the block index but never used when
// addressing x, so every block appears to operate on the same first
// num_vecs entries -- confirm this matches the intended memory layout.
// NOTE(review): __syncthreads() sits inside divergent branches
// (if(row<num_rows) and if(idx > shift-1)); a barrier must be reached by
// every thread of the block, so this is only safe if the conditions are
// uniform across the block.
__global__ void
magma_slobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
float * x )
{
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
// Read before any lane overwrites its slot.
float tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in/out]
x magmaFloat_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Host launcher for magma_slobpcg_shift_kernel: one thread per vector
// (num_vecs threads per block); the grid is sized roughly
// sqrt(num_rows) x ceil(num_rows/dimgrid1) so dimgrid1*dimgrid2 >= num_rows.
extern "C" magma_int_t
magma_slobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloat_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
// NOTE(review): on over-large requests the code only prints a message and
// still launches the kernel; consider returning an error code instead.
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
// Dynamic shared-memory bytes passed to the launch. NOTE(review): the
// kernel declares no extern shared array, so Ms only reserves space.
int Ms = num_threads * sizeof( float );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = (int) sqrt( (float) num_rows);
int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
hipLaunchKernelGGL(( magma_slobpcg_shift_kernel), dim3(grid), dim3(block), Ms, queue ,
num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
| 7ee0dbad4083380577bfde4db114d3dd7ecfc69e.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlobpcg_shift.cu normal z -> s, Fri Jan 30 19:00:29 2015
*/
#include "common_magma.h"
// Shift kernel for Block-LOBPCG: each thread reads x[threadIdx.x] into a
// register and, for lanes with idx >= shift, writes the value back `shift`
// positions to the left, compacting the vector set after `shift` residuals
// converged.
// NOTE(review): `row` is computed from the block index but never used when
// addressing x, so every block appears to operate on the same first
// num_vecs entries -- confirm this matches the intended memory layout.
// NOTE(review): __syncthreads() sits inside divergent branches
// (if(row<num_rows) and if(idx > shift-1)); a barrier must be reached by
// every thread of the block, so this is only safe if the conditions are
// uniform across the block.
__global__ void
magma_slobpcg_shift_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
float * x )
{
int idx = threadIdx.x ; // thread in row
int row = blockIdx.y * gridDim.x + blockIdx.x; // global block index
if( row<num_rows){
// Read before any lane overwrites its slot.
float tmp = x[idx];
__syncthreads();
if( idx > shift-1 ){
idx-=shift;
x[idx] = tmp;
__syncthreads();
}
}
}
/**
Purpose
-------
For a Block-LOBPCG, the set of residuals (entries consecutive in memory)
shrinks and the vectors are shifted in case shift residuals drop below
threshold. The memory layout of x is:
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
x = | x1[2] x2[2] x3[2] | = x1[0] x2[0] x3[0] x1[1] x2[1] x3[1] x1[2] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
shift magma_int_t
shift number
@param[in/out]
x magmaFloat_ptr
input/output vector x
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
// Host launcher for magma_slobpcg_shift_kernel: one thread per vector
// (num_vecs threads per block); the grid is sized roughly
// sqrt(num_rows) x ceil(num_rows/dimgrid1) so dimgrid1*dimgrid2 >= num_rows.
extern "C" magma_int_t
magma_slobpcg_shift(
magma_int_t num_rows,
magma_int_t num_vecs,
magma_int_t shift,
magmaFloat_ptr x,
magma_queue_t queue )
{
magma_int_t num_threads = num_vecs;
// every thread handles one row containing the
// NOTE(review): on over-large requests the code only prints a message and
// still launches the kernel; consider returning an error code instead.
if ( num_threads > 1024 )
printf("error: too many threads requested.\n");
// Dynamic shared-memory bytes passed to the launch. NOTE(review): the
// kernel declares no extern shared array, so Ms only reserves space.
int Ms = num_threads * sizeof( float );
if ( Ms > 1024*8 )
printf("error: too much shared memory requested.\n");
dim3 block( num_threads, 1, 1 );
int dimgrid1 = (int) sqrt( (float) num_rows);
int dimgrid2 = (num_rows + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
magma_slobpcg_shift_kernel<<< grid, block, Ms, queue >>>
( num_rows, num_vecs, shift, x );
return MAGMA_SUCCESS;
}
|
2c2c6e8b23ae24d55e029b8618e6a1db04a917ac.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"
}
#include "luaT.h"
#include "THH.h"
#include "hip/hip_runtime.h"
#include "npp.h"
#define TB 128
#define NPP_CALL(x) {const NppStatus a = (x); if (a != NPP_SUCCESS) {printf("\nNPP Error: (err_num=%d) \n", a);} }
// Retrieve cutorch's global THCState by calling cutorch.getState() via the
// Lua stack. Pops both the cutorch table and the returned userdata, leaving
// the stack balanced.
THCState* getCutorchState(lua_State* L)
{
lua_getglobal(L, "cutorch");
lua_getfield(L, -1, "getState");
lua_call(L, 0, 1);
THCState *state = (THCState*) lua_touserdata(L, -1);
lua_pop(L, 2);
return state;
}
// Raise a Lua error if any previously issued HIP call has failed.
// hipPeekAtLastError() reads the sticky error state without clearing it.
void checkCudaError(lua_State *L) {
    const hipError_t status = hipPeekAtLastError();
    if (status == hipSuccess)
        return;
    luaL_error(L, hipGetErrorString(status));
}
// Allocate a fresh device tensor with the same shape as `x`.
// The contents are uninitialized; the caller owns the result.
THCudaTensor *gpu_new_tensor_like(THCState *state, THCudaTensor *x)
{
    THCudaTensor *result = THCudaTensor_new(state);
    THCudaTensor_resizeAs(state, result, x);
    return result;
}
// Allocate a fresh host tensor with the same shape as `x`.
// `state` is unused (kept for signature symmetry with gpu_new_tensor_like).
THFloatTensor *cpu_new_tensor_like(THCState *state, THFloatTensor *x)
{
    THFloatTensor *result = THFloatTensor_new();
    THFloatTensor_resizeAs(result, x);
    return result;
}
// Thresholded copy kernel: out[i] = 0 when img[i] < threshold, otherwise
// out[i] = img[i]. Thread 0 additionally dumps the launch parameters via
// device printf (debug aid). height/width are currently informational only.
__global__ void depth_filter(float *img, float*out, int size, int height, int width, int threshold)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;

    if (id == 0)
    {
        // Debug trace emitted by a single thread.
        printf("Size is %d\n", size);
        printf("Height is %d\n", height);
        printf("Width is %d\n", width);
        printf("Threshold is %d\n", threshold);
        printf("Img first value is %f\n", img[0]);
    }

    // Bounds guard: the grid may be larger than the data.
    if (id >= size)
        return;

    const float v = img[id];
    out[id] = (v < threshold) ? 0.0f : v;
}
// Lua binding: depth_filter(img: CudaTensor, threshold: number) -> CudaTensor.
// Allocates an output tensor shaped like img and launches the depth_filter
// kernel over every element, TB threads per block.
// NOTE(review): threshold is truncated to int, and size(.,2)/size(.,3) assume
// a 4-D tensor layout -- confirm with the Lua callers.
int depth_filter(lua_State *L)
{
printf("Entering depth_filter\n");
THCState *state = getCutorchState(L);
printf("Got the state\n");
THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
printf("Got the img\n");
int threshold = luaL_checknumber(L, 2);
printf("Got the threshold\n");
THCudaTensor *out = gpu_new_tensor_like(state, img);
printf("Made the out tensor\n");
hipLaunchKernelGGL(( depth_filter), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0,
THCudaTensor_data(state, img),
THCudaTensor_data(state, out),
THCudaTensor_nElement(state, out),
THCudaTensor_size(state, out, 2),
THCudaTensor_size(state, out, 3),
threshold);
printf("Executed depth_filter\n");
checkCudaError(L);
printf("Checked cuda error\n");
luaT_pushudata(L, out, "torch.CudaTensor");
printf("Pushed data\n");
return 1;
}
// Host-side debug helper: prints element 1 of `input`.
// NOTE(review): callers in this file pass tensor storage pointers; if that
// storage lives on the device, this host dereference is invalid -- confirm
// before use.
__host__ void host_erode(float* input)
{
printf("Accessing input in host, called from global. Element 0: %f\n", input[1]);
}
// Device-side debug kernel: prints element 1 of `input` from the GPU.
// Every launched thread prints, so launch with <<<1,1>>> only.
__global__ void glob_erode(float* input)
{
printf("Test accesing inside a global method to an element of the img. Element 0: %f\n", input[1]);
//host_erode(input);
}
// Lua binding: erode(img: FloatTensor, kernel: CudaTensor) -> FloatTensor.
// Applies a 3x3 cross-shaped NPP erosion to the interior of img (a border of
// half_kernel_width pixels is excluded via the ROI).
// NOTE(review): the `kernel` argument is only used for its sizes -- the mask
// values are hard-coded below (see the commented-out copy loop).
// NOTE(review): several printf calls pass long/size_t arguments to %d/%f
// conversions -- format/argument mismatch on LP64 platforms.
// NOTE(review): nppiErode_32f_C1R receives host (THFloatTensor) pointers;
// NPP image primitives expect device pointers -- verify this ever worked.
int erode(lua_State *L)
{
printf("Entered to ERODE method\n");
THCState *state = getCutorchState(L);
THFloatTensor *img = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor");
THCudaTensor *kernel = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THFloatTensor *out = cpu_new_tensor_like(state, img);
printf("Parameters read correctly\n");
long half_kernel_width = (kernel->size[0] - 1) / 2;
long int y = img->size[0];
long int x = img->size[1];
long int c = img->size[2];
printf("Dimensions retrieved correctly: y:%d, x:%d, c:%d, half kernel:%d\n", y, x, c, half_kernel_width);
// ROI shrinks the processed area so the mask never reads out of bounds.
NppiSize oSizeRoi;
oSizeRoi.width = x - (half_kernel_width * 2);
oSizeRoi.height = y - (half_kernel_width * 2);
//oSizeRoi.width = 3;
//oSizeRoi.height = 1;
printf("ROI size generated: width:%d, height:%d\n", oSizeRoi.width, oSizeRoi.height);
NppiSize oMaskSize;
oMaskSize.width = kernel->size[0];
oMaskSize.height = kernel->size[1];
printf("Mask size generated: width:%d, height:%d\n", oMaskSize.width, oMaskSize.height);
// Anchor at the mask center.
NppiPoint oAnchor;
oAnchor.x = half_kernel_width;
oAnchor.y = half_kernel_width;
printf("Anchor point generated: x:%d, y:%d\n", oAnchor.x, oAnchor.y);
printf("Test accesing to an element of the img. Element 0: %f\n", (img->storage->data)[1]);
printf("Test size of the img data. Element 0: %f\n", sizeof(&(img->storage->data)));
printf("Test size to an element of the img. Element 0: %d\n", sizeof(typeof(&(img->storage->data)[1])));
printf("Test accesing in a different way to an element of the img. Element 0: %f\n", (THFloatTensor_data(img))[1]);
//glob_erode<<<1,1>>>(THCudaTensor_data(state, img));
//checkCudaError(L);
//host_erode(img->storage->data);
//checkCudaError(L);
// Offset the source pointer to the first interior pixel.
// NOTE(review): pDst is computed but unused; the NPP call writes from the
// base of out->storage->data instead.
Npp32f *pSrc = img->storage->data + img->stride[0] * half_kernel_width + img->stride[1] * half_kernel_width;
Npp32f *pDst = out->storage->data + out->stride[0] * half_kernel_width + out->stride[1] * half_kernel_width;
// Hard-coded 3x3 cross structuring element.
Npp8u pMask[9];
/*float *kernel_data = THCudaTensor_data(state, kernel);
for(int i = 0; i < oMaskSize.width; i++)
{
printf("Reading kernel data %f\n", kernel_data[i]);
pMask[i] = static_cast<unsigned int>(kernel_data[i]);
}*/
pMask[0] = 0;
pMask[1] = 1;
pMask[2] = 0;
pMask[3] = 1;
pMask[4] = 1;
pMask[5] = 1;
pMask[6] = 0;
pMask[7] = 1;
pMask[8] = 0;
printf("Checking step sizes: %d, %d\n", sizeof(Npp32f) * img->stride[0], sizeof(Npp32f) * out->stride[1]);
//NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[2] - 2), out->storage->data, sizeof(Npp32f) * img->stride[2], oSizeRoi, pMask, oMaskSize, oAnchor));
// NOTE(review): source line step uses (stride[0] - 2) while destination uses
// stride[0]; NPP steps are in bytes -- confirm the -2 is intentional.
NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[0] - 2), out->storage->data, sizeof(Npp32f) * img->stride[0], oSizeRoi, pMask, oMaskSize, oAnchor));
printf("nppiErode executed...\n");
checkCudaError(L);
printf("Test accesing to an element of the out. Element: %f\n", (out->storage->data)[1242]);
luaT_pushudata(L, out, "torch.FloatTensor");
return 1;
}
// Experimental variant of erode operating on CudaTensors (despite the name).
// NOTE(review): host_erode() below dereferences img->storage->data on the
// host; for a CudaTensor that storage is device memory, so this read is
// invalid -- confirm before enabling this path.
// NOTE(review): several printf calls pass pointers (&(...)[1]) and
// long/size_t values to %f/%d conversions -- format/argument mismatches.
// NOTE(review): this function is not registered in the `funcs` table below.
int cpu_erode(lua_State *L)
{
printf("Entered to ERODE method\n");
THCState *state = getCutorchState(L);
THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
THCudaTensor *kernel = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *out = gpu_new_tensor_like(state, img);
printf("Parameters read correctly\n");
long half_kernel_width = (kernel->size[0] - 1) / 2;
long int y = img->size[2];
long int x = img->size[3];
long int c = img->size[1];
printf("Dimensions retrieved correctly: y:%d, x:%d, c:%d, half kernel:%d\n", y, x, c, half_kernel_width);
// ROI shrinks the processed area so the mask never reads out of bounds.
NppiSize oSizeRoi;
oSizeRoi.width = x - (half_kernel_width * 2);
oSizeRoi.height = y - (half_kernel_width * 2);
//oSizeRoi.width = 3;
//oSizeRoi.height = 1;
printf("ROI size generated: width:%d, height:%d\n", oSizeRoi.width, oSizeRoi.height);
NppiSize oMaskSize;
oMaskSize.width = kernel->size[0];
oMaskSize.height = kernel->size[1];
printf("Mask size generated: width:%d, height:%d\n", oMaskSize.width, oMaskSize.height);
// Anchor at the mask center.
NppiPoint oAnchor;
oAnchor.x = half_kernel_width;
oAnchor.y = half_kernel_width;
printf("Anchor point generated: x:%d, y:%d\n", oAnchor.x, oAnchor.y);
printf("Test accesing to an element of the img. Element 0: %f\n", &(img->storage->data)[1]);
printf("Test size of the img data. Element 0: %f\n", sizeof(&(img->storage->data)));
printf("Test size to an element of the img. Element 0: %d\n", sizeof(typeof(&(img->storage->data)[1])));
//printf("Test accesing in a different way to an element of the img. Element 0: %f\n", &(THCudaTensor_data(state, img))[1]);
//glob_erode<<<1,1>>>(THCudaTensor_data(state, img));
//checkCudaError(L);
host_erode(img->storage->data);
//checkCudaError(L);
// Offset the source pointer to the first interior pixel.
// NOTE(review): pDst is computed but unused; the NPP call writes through
// THCudaTensor_data(state, out) instead.
Npp32f *pSrc = img->storage->data + img->stride[2] * half_kernel_width + img->stride[3] * half_kernel_width;
Npp32f *pDst = out->storage->data + out->stride[0] * half_kernel_width + out->stride[1] * half_kernel_width;
// Hard-coded 3x3 cross structuring element; the `kernel` values are ignored.
Npp8u pMask[9];
/*float *kernel_data = THCudaTensor_data(state, kernel);
for(int i = 0; i < oMaskSize.width; i++)
{
printf("Reading kernel data %f\n", kernel_data[i]);
pMask[i] = static_cast<unsigned int>(kernel_data[i]);
}*/
pMask[0] = 0;
pMask[1] = 1;
pMask[2] = 0;
pMask[3] = 1;
pMask[4] = 1;
pMask[5] = 1;
pMask[6] = 0;
pMask[7] = 1;
pMask[8] = 0;
printf("Checking step sizes: %d, %d\n", sizeof(Npp32f) * img->stride[2], sizeof(Npp32f) * out->stride[3]);
//NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[2] - 2), out->storage->data, sizeof(Npp32f) * img->stride[2], oSizeRoi, pMask, oMaskSize, oAnchor));
// NOTE(review): source line step uses (stride[2] - 2) while destination uses
// stride[2]; NPP steps are in bytes -- confirm the -2 is intentional.
NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[2] - 2), THCudaTensor_data(state, out), sizeof(Npp32f) * img->stride[2], oSizeRoi, pMask, oMaskSize, oAnchor));
printf("nppiErode executed...\n");
checkCudaError(L);
printf("Test accesing to an element of the out. Element: %f\n", &(out->storage->data)[1242]);
luaT_pushudata(L, out, "torch.CudaTensor");
return 1;
}
// Lua registration table mapping Lua-visible names to C implementations.
// NOTE(review): cpu_erode is defined above but not exported here -- confirm
// that is intentional.
static const struct luaL_Reg funcs[] = {
{"depth_filter", depth_filter},
{"erode", erode},
{NULL, NULL}
};
extern "C" int luaopen_libgdcutils(lua_State *L) {
srand(42);
luaL_openlib(L, "gdcutils", funcs, 0);
return 1;
} | 2c2c6e8b23ae24d55e029b8618e6a1db04a917ac.cu | extern "C" {
#include "lua.h"
#include "lualib.h"
#include "lauxlib.h"
}
#include "luaT.h"
#include "THC.h"
#include "cuda_runtime.h"
#include "npp.h"
#define TB 128
#define NPP_CALL(x) {const NppStatus a = (x); if (a != NPP_SUCCESS) {printf("\nNPP Error: (err_num=%d) \n", a);} }
// Retrieve cutorch's global THCState by calling cutorch.getState() via the
// Lua stack. Pops both the cutorch table and the returned userdata, leaving
// the stack balanced.
THCState* getCutorchState(lua_State* L)
{
lua_getglobal(L, "cutorch");
lua_getfield(L, -1, "getState");
lua_call(L, 0, 1);
THCState *state = (THCState*) lua_touserdata(L, -1);
lua_pop(L, 2);
return state;
}
// Raise a Lua error if any previously issued CUDA call has failed.
// cudaPeekAtLastError() reads the sticky error state without clearing it.
void checkCudaError(lua_State *L) {
    const cudaError_t status = cudaPeekAtLastError();
    if (status == cudaSuccess)
        return;
    luaL_error(L, cudaGetErrorString(status));
}
// Allocate a fresh device tensor with the same shape as `x`.
// The contents are uninitialized; the caller owns the result.
THCudaTensor *gpu_new_tensor_like(THCState *state, THCudaTensor *x)
{
    THCudaTensor *result = THCudaTensor_new(state);
    THCudaTensor_resizeAs(state, result, x);
    return result;
}
// Allocate a fresh host tensor with the same shape as `x`.
// `state` is unused (kept for signature symmetry with gpu_new_tensor_like).
THFloatTensor *cpu_new_tensor_like(THCState *state, THFloatTensor *x)
{
    THFloatTensor *result = THFloatTensor_new();
    THFloatTensor_resizeAs(result, x);
    return result;
}
// Thresholded copy kernel: out[i] = 0 when img[i] < threshold, otherwise
// out[i] = img[i]. Thread 0 additionally dumps the launch parameters via
// device printf (debug aid). height/width are currently informational only.
__global__ void depth_filter(float *img, float*out, int size, int height, int width, int threshold)
{
    const int id = blockIdx.x * blockDim.x + threadIdx.x;

    if (id == 0)
    {
        // Debug trace emitted by a single thread.
        printf("Size is %d\n", size);
        printf("Height is %d\n", height);
        printf("Width is %d\n", width);
        printf("Threshold is %d\n", threshold);
        printf("Img first value is %f\n", img[0]);
    }

    // Bounds guard: the grid may be larger than the data.
    if (id >= size)
        return;

    const float v = img[id];
    out[id] = (v < threshold) ? 0.0f : v;
}
// Lua binding: depth_filter(img: CudaTensor, threshold: number) -> CudaTensor.
// Allocates an output tensor shaped like img and launches the depth_filter
// kernel over every element, TB threads per block.
// NOTE(review): threshold is truncated to int, and size(.,2)/size(.,3) assume
// a 4-D tensor layout -- confirm with the Lua callers.
int depth_filter(lua_State *L)
{
printf("Entering depth_filter\n");
THCState *state = getCutorchState(L);
printf("Got the state\n");
THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
printf("Got the img\n");
int threshold = luaL_checknumber(L, 2);
printf("Got the threshold\n");
THCudaTensor *out = gpu_new_tensor_like(state, img);
printf("Made the out tensor\n");
depth_filter<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>(
THCudaTensor_data(state, img),
THCudaTensor_data(state, out),
THCudaTensor_nElement(state, out),
THCudaTensor_size(state, out, 2),
THCudaTensor_size(state, out, 3),
threshold);
printf("Executed depth_filter\n");
checkCudaError(L);
printf("Checked cuda error\n");
luaT_pushudata(L, out, "torch.CudaTensor");
printf("Pushed data\n");
return 1;
}
// Host-side debug helper: prints element 1 of `input`.
// NOTE(review): callers in this file pass tensor storage pointers; if that
// storage lives on the device, this host dereference is invalid -- confirm
// before use.
__host__ void host_erode(float* input)
{
printf("Accessing input in host, called from global. Element 0: %f\n", input[1]);
}
// Device-side debug kernel: prints element 1 of `input` from the GPU.
// Every launched thread prints, so launch with <<<1,1>>> only.
__global__ void glob_erode(float* input)
{
printf("Test accesing inside a global method to an element of the img. Element 0: %f\n", input[1]);
//host_erode(input);
}
// Lua binding: erode(img: FloatTensor, kernel: CudaTensor) -> FloatTensor.
// Applies a 3x3 cross-shaped NPP erosion to the interior of img (a border of
// half_kernel_width pixels is excluded via the ROI).
// NOTE(review): the `kernel` argument is only used for its sizes -- the mask
// values are hard-coded below (see the commented-out copy loop).
// NOTE(review): several printf calls pass long/size_t arguments to %d/%f
// conversions -- format/argument mismatch on LP64 platforms.
// NOTE(review): nppiErode_32f_C1R receives host (THFloatTensor) pointers;
// NPP image primitives expect device pointers -- verify this ever worked.
int erode(lua_State *L)
{
printf("Entered to ERODE method\n");
THCState *state = getCutorchState(L);
THFloatTensor *img = (THFloatTensor*)luaT_checkudata(L, 1, "torch.FloatTensor");
THCudaTensor *kernel = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THFloatTensor *out = cpu_new_tensor_like(state, img);
printf("Parameters read correctly\n");
long half_kernel_width = (kernel->size[0] - 1) / 2;
long int y = img->size[0];
long int x = img->size[1];
long int c = img->size[2];
printf("Dimensions retrieved correctly: y:%d, x:%d, c:%d, half kernel:%d\n", y, x, c, half_kernel_width);
// ROI shrinks the processed area so the mask never reads out of bounds.
NppiSize oSizeRoi;
oSizeRoi.width = x - (half_kernel_width * 2);
oSizeRoi.height = y - (half_kernel_width * 2);
//oSizeRoi.width = 3;
//oSizeRoi.height = 1;
printf("ROI size generated: width:%d, height:%d\n", oSizeRoi.width, oSizeRoi.height);
NppiSize oMaskSize;
oMaskSize.width = kernel->size[0];
oMaskSize.height = kernel->size[1];
printf("Mask size generated: width:%d, height:%d\n", oMaskSize.width, oMaskSize.height);
// Anchor at the mask center.
NppiPoint oAnchor;
oAnchor.x = half_kernel_width;
oAnchor.y = half_kernel_width;
printf("Anchor point generated: x:%d, y:%d\n", oAnchor.x, oAnchor.y);
printf("Test accesing to an element of the img. Element 0: %f\n", (img->storage->data)[1]);
printf("Test size of the img data. Element 0: %f\n", sizeof(&(img->storage->data)));
printf("Test size to an element of the img. Element 0: %d\n", sizeof(typeof(&(img->storage->data)[1])));
printf("Test accesing in a different way to an element of the img. Element 0: %f\n", (THFloatTensor_data(img))[1]);
//glob_erode<<<1,1>>>(THCudaTensor_data(state, img));
//checkCudaError(L);
//host_erode(img->storage->data);
//checkCudaError(L);
// Offset the source pointer to the first interior pixel.
// NOTE(review): pDst is computed but unused; the NPP call writes from the
// base of out->storage->data instead.
Npp32f *pSrc = img->storage->data + img->stride[0] * half_kernel_width + img->stride[1] * half_kernel_width;
Npp32f *pDst = out->storage->data + out->stride[0] * half_kernel_width + out->stride[1] * half_kernel_width;
// Hard-coded 3x3 cross structuring element.
Npp8u pMask[9];
/*float *kernel_data = THCudaTensor_data(state, kernel);
for(int i = 0; i < oMaskSize.width; i++)
{
printf("Reading kernel data %f\n", kernel_data[i]);
pMask[i] = static_cast<unsigned int>(kernel_data[i]);
}*/
pMask[0] = 0;
pMask[1] = 1;
pMask[2] = 0;
pMask[3] = 1;
pMask[4] = 1;
pMask[5] = 1;
pMask[6] = 0;
pMask[7] = 1;
pMask[8] = 0;
printf("Checking step sizes: %d, %d\n", sizeof(Npp32f) * img->stride[0], sizeof(Npp32f) * out->stride[1]);
//NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[2] - 2), out->storage->data, sizeof(Npp32f) * img->stride[2], oSizeRoi, pMask, oMaskSize, oAnchor));
// NOTE(review): source line step uses (stride[0] - 2) while destination uses
// stride[0]; NPP steps are in bytes -- confirm the -2 is intentional.
NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[0] - 2), out->storage->data, sizeof(Npp32f) * img->stride[0], oSizeRoi, pMask, oMaskSize, oAnchor));
printf("nppiErode executed...\n");
checkCudaError(L);
printf("Test accesing to an element of the out. Element: %f\n", (out->storage->data)[1242]);
luaT_pushudata(L, out, "torch.FloatTensor");
return 1;
}
// Experimental variant of erode operating on CudaTensors (despite the name).
// NOTE(review): host_erode() below dereferences img->storage->data on the
// host; for a CudaTensor that storage is device memory, so this read is
// invalid -- confirm before enabling this path.
// NOTE(review): several printf calls pass pointers (&(...)[1]) and
// long/size_t values to %f/%d conversions -- format/argument mismatches.
// NOTE(review): this function is not registered in the `funcs` table below.
int cpu_erode(lua_State *L)
{
printf("Entered to ERODE method\n");
THCState *state = getCutorchState(L);
THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor");
THCudaTensor *kernel = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *out = gpu_new_tensor_like(state, img);
printf("Parameters read correctly\n");
long half_kernel_width = (kernel->size[0] - 1) / 2;
long int y = img->size[2];
long int x = img->size[3];
long int c = img->size[1];
printf("Dimensions retrieved correctly: y:%d, x:%d, c:%d, half kernel:%d\n", y, x, c, half_kernel_width);
// ROI shrinks the processed area so the mask never reads out of bounds.
NppiSize oSizeRoi;
oSizeRoi.width = x - (half_kernel_width * 2);
oSizeRoi.height = y - (half_kernel_width * 2);
//oSizeRoi.width = 3;
//oSizeRoi.height = 1;
printf("ROI size generated: width:%d, height:%d\n", oSizeRoi.width, oSizeRoi.height);
NppiSize oMaskSize;
oMaskSize.width = kernel->size[0];
oMaskSize.height = kernel->size[1];
printf("Mask size generated: width:%d, height:%d\n", oMaskSize.width, oMaskSize.height);
// Anchor at the mask center.
NppiPoint oAnchor;
oAnchor.x = half_kernel_width;
oAnchor.y = half_kernel_width;
printf("Anchor point generated: x:%d, y:%d\n", oAnchor.x, oAnchor.y);
printf("Test accesing to an element of the img. Element 0: %f\n", &(img->storage->data)[1]);
printf("Test size of the img data. Element 0: %f\n", sizeof(&(img->storage->data)));
printf("Test size to an element of the img. Element 0: %d\n", sizeof(typeof(&(img->storage->data)[1])));
//printf("Test accesing in a different way to an element of the img. Element 0: %f\n", &(THCudaTensor_data(state, img))[1]);
//glob_erode<<<1,1>>>(THCudaTensor_data(state, img));
//checkCudaError(L);
host_erode(img->storage->data);
//checkCudaError(L);
// Offset the source pointer to the first interior pixel.
// NOTE(review): pDst is computed but unused; the NPP call writes through
// THCudaTensor_data(state, out) instead.
Npp32f *pSrc = img->storage->data + img->stride[2] * half_kernel_width + img->stride[3] * half_kernel_width;
Npp32f *pDst = out->storage->data + out->stride[0] * half_kernel_width + out->stride[1] * half_kernel_width;
// Hard-coded 3x3 cross structuring element; the `kernel` values are ignored.
Npp8u pMask[9];
/*float *kernel_data = THCudaTensor_data(state, kernel);
for(int i = 0; i < oMaskSize.width; i++)
{
printf("Reading kernel data %f\n", kernel_data[i]);
pMask[i] = static_cast<unsigned int>(kernel_data[i]);
}*/
pMask[0] = 0;
pMask[1] = 1;
pMask[2] = 0;
pMask[3] = 1;
pMask[4] = 1;
pMask[5] = 1;
pMask[6] = 0;
pMask[7] = 1;
pMask[8] = 0;
printf("Checking step sizes: %d, %d\n", sizeof(Npp32f) * img->stride[2], sizeof(Npp32f) * out->stride[3]);
//NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[2] - 2), out->storage->data, sizeof(Npp32f) * img->stride[2], oSizeRoi, pMask, oMaskSize, oAnchor));
// NOTE(review): source line step uses (stride[2] - 2) while destination uses
// stride[2]; NPP steps are in bytes -- confirm the -2 is intentional.
NPP_CALL(nppiErode_32f_C1R(pSrc, sizeof(Npp32f) * (img->stride[2] - 2), THCudaTensor_data(state, out), sizeof(Npp32f) * img->stride[2], oSizeRoi, pMask, oMaskSize, oAnchor));
printf("nppiErode executed...\n");
checkCudaError(L);
printf("Test accesing to an element of the out. Element: %f\n", &(out->storage->data)[1242]);
luaT_pushudata(L, out, "torch.CudaTensor");
return 1;
}
// Lua registration table mapping Lua-visible names to C implementations.
// NOTE(review): cpu_erode is defined above but not exported here -- confirm
// that is intentional.
static const struct luaL_Reg funcs[] = {
{"depth_filter", depth_filter},
{"erode", erode},
{NULL, NULL}
};
// Module entry point invoked by Lua's require("libgdcutils"): seeds the C RNG
// with a fixed value (reproducibility) and registers the function table under
// the global name "gdcutils".
extern "C" int luaopen_libgdcutils(lua_State *L) {
srand(42);
luaL_openlib(L, "gdcutils", funcs, 0);
return 1;
}
a957cbd360f8faa8553dfc23dac59a683e26ca8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "utils.h"
#define SIZE 1e6
#define THREADS 256
#define BLOCKS MIN(32, (SIZE + THREADS - 1)/ THREADS)
// Block-level sum reduction: each thread accumulates a grid-stride partial
// sum of `array` into a register, parks it in shared memory, and the block
// tree-reduces it; thread 0 writes the block total to result[blockIdx.x].
// The halving tree reduction assumes blockDim.x (THREADS) is a power of two.
__global__ void sum(int *array, long *result) {
__shared__ long cache[THREADS];
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int cacheIndex = threadIdx.x;
long acum = 0;
// Grid-stride loop: correct for any grid size relative to SIZE.
while (tid < SIZE) {
acum += array[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = acum;
__syncthreads(); // all partial sums visible before reducing
int i = blockDim.x / 2;
while (i > 0) {
if (cacheIndex < i) {
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads(); // barrier outside the divergent branch: all threads reach it
i /= 2;
}
if (cacheIndex == 0) {
result[blockIdx.x] = cache[cacheIndex];
}
}
// Driver: fills a SIZE-element host array, copies it to the device, times N
// kernel launches, then reduces the per-block partial sums on the host.
// NOTE(review): the timing loop does not synchronize after each launch;
// unless start_timer/stop_timer (utils.h) synchronize internally, ms measures
// launch overhead rather than kernel runtime -- confirm.
// NOTE(review): no HIP API or launch error checking is performed.
int main(int argc, char* argv[]) {
int i, *array, *d_a;
long *results, *d_r;
double ms;
array = (int*) malloc( SIZE * sizeof(int) );
fill_array(array, SIZE);
display_array("array", array);
results = (long*) malloc( BLOCKS * sizeof(long) );
hipMalloc( (void**) &d_a, SIZE * sizeof(int) );
hipMalloc( (void**) &d_r, BLOCKS * sizeof(long) );
hipMemcpy(d_a, array, SIZE * sizeof(int), hipMemcpyHostToDevice);
printf("Starting...\n");
ms = 0;
for (i = 1; i <= N; i++) {
start_timer();
hipLaunchKernelGGL(( sum), dim3(BLOCKS), dim3(THREADS), 0, 0, d_a, d_r);
ms += stop_timer();
}
// Blocking copy: also synchronizes with the last kernel launch.
hipMemcpy(results, d_r, BLOCKS * sizeof(long), hipMemcpyDeviceToHost);
long acum = 0;
for (i = 0; i < BLOCKS; i++) {
acum += results[i];
}
printf("sum = %li\n", acum);
printf("avg time = %.5lf\n", (ms / N));
hipFree(d_r);
hipFree(d_a);
free(array);
free(results);
return 0;
}
| a957cbd360f8faa8553dfc23dac59a683e26ca8f.cu | #include <stdio.h>
#include <stdlib.h>
#include "utils.h"
#define SIZE 1e6
#define THREADS 256
#define BLOCKS MIN(32, (SIZE + THREADS - 1)/ THREADS)
// Block-level sum reduction: each thread accumulates a grid-stride partial
// sum of `array` into a register, parks it in shared memory, and the block
// tree-reduces it; thread 0 writes the block total to result[blockIdx.x].
// The halving tree reduction assumes blockDim.x (THREADS) is a power of two.
__global__ void sum(int *array, long *result) {
__shared__ long cache[THREADS];
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
int cacheIndex = threadIdx.x;
long acum = 0;
// Grid-stride loop: correct for any grid size relative to SIZE.
while (tid < SIZE) {
acum += array[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = acum;
__syncthreads(); // all partial sums visible before reducing
int i = blockDim.x / 2;
while (i > 0) {
if (cacheIndex < i) {
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads(); // barrier outside the divergent branch: all threads reach it
i /= 2;
}
if (cacheIndex == 0) {
result[blockIdx.x] = cache[cacheIndex];
}
}
// Driver: fills a SIZE-element host array, copies it to the device, times N
// kernel launches, then reduces the per-block partial sums on the host.
// NOTE(review): the timing loop does not synchronize after each launch;
// unless start_timer/stop_timer (utils.h) synchronize internally, ms measures
// launch overhead rather than kernel runtime -- confirm.
// NOTE(review): no CUDA API or launch error checking is performed.
int main(int argc, char* argv[]) {
int i, *array, *d_a;
long *results, *d_r;
double ms;
array = (int*) malloc( SIZE * sizeof(int) );
fill_array(array, SIZE);
display_array("array", array);
results = (long*) malloc( BLOCKS * sizeof(long) );
cudaMalloc( (void**) &d_a, SIZE * sizeof(int) );
cudaMalloc( (void**) &d_r, BLOCKS * sizeof(long) );
cudaMemcpy(d_a, array, SIZE * sizeof(int), cudaMemcpyHostToDevice);
printf("Starting...\n");
ms = 0;
for (i = 1; i <= N; i++) {
start_timer();
sum<<<BLOCKS, THREADS>>> (d_a, d_r);
ms += stop_timer();
}
// Blocking copy: also synchronizes with the last kernel launch.
cudaMemcpy(results, d_r, BLOCKS * sizeof(long), cudaMemcpyDeviceToHost);
long acum = 0;
for (i = 0; i < BLOCKS; i++) {
acum += results[i];
}
printf("sum = %li\n", acum);
printf("avg time = %.5lf\n", (ms / N));
cudaFree(d_r);
cudaFree(d_a);
free(array);
free(results);
return 0;
}
|
f6b187ce06bab340586a1796eb92c32f82428868.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DeviceFunctions.cuh"
// Stackless BVH broad-phase traversal: one query per leaf. Each leaf walks
// the tree using DFS escape links (getDfsNextNode) plus child links,
// appending every overlapping leaf pair (as base-object index pairs) to
// `list`.
// NOTE(review): atomicInc wraps the pair counter back to 0 once it reaches
// listCapacity, silently overwriting earlier pairs on overflow -- confirm
// capacity is always sufficient.
__global__ void TraverseStackless(const DeviceBVH* bvh, uint2* list, uint* listSize, const uint size, const uint listCapacity)
{
uint i = threadIdx.x + blockIdx.x * blockDim.x;
// Grid-stride loop over leaf indices.
while (i < size)
{
uint currLeaf = i;
DeviceAABB& queryAABB = bvh->LeafNodesAABBs[currLeaf];
bool collides;
bool traverseRightChild = true;
uint curr = bvh->getDfsNextNode(currLeaf ^ IS_LEAF);
// Start the collision detection
while (curr < size - 1)
{
// Descend: right child by default, left child after an internal overlap.
curr = (traverseRightChild) ? bvh->InternalNodesChildren[curr].y : bvh->InternalNodesChildren[curr].x;
collides = queryAABB.Collide(bvh->GetNodeAABB(curr));
if (collides)
{
if (curr & IS_LEAF)
{
// Overlapping leaf pair: append base-object indices.
uint index = atomicInc(listSize, listCapacity);
list[index].x = bvh->GetBaseObjectIdx(currLeaf);
list[index].y = bvh->GetBaseObjectIdx(curr ^ IS_LEAF);
}
else
{
// Overlapping internal node: keep descending via its left child.
traverseRightChild = false;
continue;
}
}
// No overlap (or leaf already handled): jump to the DFS escape node.
curr = bvh->getDfsNextNode(curr);
traverseRightChild = true;
}
i += blockDim.x * gridDim.x;
}
}
// Single-thread kernel: store `value` at dest[0] on the device, avoiding a
// separate host-to-device copy just to reset a counter.
__global__ void Initialize(uint value, uint* dest)
{
    *dest = value;
}
namespace CUDALBVH
{
// Launch TraverseStackless over all leaves and return the collision pairs
// copied back to the host.
// - collisionArraySize: IN capacity of the device pair buffer,
//   OUT number of pairs actually recorded.
// - Returned buffer is allocated with new[]; the caller owns it and must
//   delete[] it.
// NOTE(review): the `stackless` parameter is ignored, and no HIP error
// checking is performed on allocations/copies.
DEVICECUDALBVH::CollisionPair* GetCollisionList(uint& collisionArraySize, DeviceBVH*& bvh, uint size, bool stackless)
{
int blockSize = 0; // The launch configurator returned block size
int gridSize = 0; // The actual grid size needed, based on input size
GetMaximumOccupancyForFunction(gridSize, blockSize, size, TraverseStackless);
uint2* deviceCollisionList;
hipMalloc(&deviceCollisionList, sizeof(uint2) * collisionArraySize);
uint* collisionListSize;
hipMalloc(&collisionListSize, sizeof(uint));
// Zero the device-side pair counter before traversal.
Initialize << <1, 1 >> >(0, collisionListSize);
TraverseStackless << <gridSize, blockSize >> > (bvh, deviceCollisionList, collisionListSize, size, collisionArraySize);
hipMemcpy(&collisionArraySize, collisionListSize, sizeof(uint), hipMemcpyKind::hipMemcpyDeviceToHost);
DEVICECUDALBVH::CollisionPair* hostArray = new DEVICECUDALBVH::CollisionPair[collisionArraySize];
hipMemcpy(&hostArray[0], deviceCollisionList, sizeof(uint2) * collisionArraySize, hipMemcpyKind::hipMemcpyDeviceToHost);
hipFree(collisionListSize);
hipFree(deviceCollisionList);
return &hostArray[0];
}
}
| f6b187ce06bab340586a1796eb92c32f82428868.cu | #include "DeviceFunctions.cuh"
// Stackless BVH broad-phase traversal: one query per leaf. Each leaf walks
// the tree using DFS escape links (getDfsNextNode) plus child links,
// appending every overlapping leaf pair (as base-object index pairs) to
// `list`.
// NOTE(review): atomicInc wraps the pair counter back to 0 once it reaches
// listCapacity, silently overwriting earlier pairs on overflow -- confirm
// capacity is always sufficient.
__global__ void TraverseStackless(const DeviceBVH* bvh, uint2* list, uint* listSize, const uint size, const uint listCapacity)
{
uint i = threadIdx.x + blockIdx.x * blockDim.x;
// Grid-stride loop over leaf indices.
while (i < size)
{
uint currLeaf = i;
DeviceAABB& queryAABB = bvh->LeafNodesAABBs[currLeaf];
bool collides;
bool traverseRightChild = true;
uint curr = bvh->getDfsNextNode(currLeaf ^ IS_LEAF);
// Start the collision detection
while (curr < size - 1)
{
// Descend: right child by default, left child after an internal overlap.
curr = (traverseRightChild) ? bvh->InternalNodesChildren[curr].y : bvh->InternalNodesChildren[curr].x;
collides = queryAABB.Collide(bvh->GetNodeAABB(curr));
if (collides)
{
if (curr & IS_LEAF)
{
// Overlapping leaf pair: append base-object indices.
uint index = atomicInc(listSize, listCapacity);
list[index].x = bvh->GetBaseObjectIdx(currLeaf);
list[index].y = bvh->GetBaseObjectIdx(curr ^ IS_LEAF);
}
else
{
// Overlapping internal node: keep descending via its left child.
traverseRightChild = false;
continue;
}
}
// No overlap (or leaf already handled): jump to the DFS escape node.
curr = bvh->getDfsNextNode(curr);
traverseRightChild = true;
}
i += blockDim.x * gridDim.x;
}
}
// Single-thread kernel: store `value` at dest[0] on the device, avoiding a
// separate host-to-device copy just to reset a counter.
__global__ void Initialize(uint value, uint* dest)
{
    *dest = value;
}
namespace CUDALBVH
{
// Launch TraverseStackless over all leaves and return the collision pairs
// copied back to the host.
// - collisionArraySize: IN capacity of the device pair buffer,
//   OUT number of pairs actually recorded.
// - Returned buffer is allocated with new[]; the caller owns it and must
//   delete[] it.
// NOTE(review): the `stackless` parameter is ignored, and no CUDA error
// checking is performed on allocations/copies.
DEVICECUDALBVH::CollisionPair* GetCollisionList(uint& collisionArraySize, DeviceBVH*& bvh, uint size, bool stackless)
{
int blockSize = 0; // The launch configurator returned block size
int gridSize = 0; // The actual grid size needed, based on input size
GetMaximumOccupancyForFunction(gridSize, blockSize, size, TraverseStackless);
uint2* deviceCollisionList;
cudaMalloc(&deviceCollisionList, sizeof(uint2) * collisionArraySize);
uint* collisionListSize;
cudaMalloc(&collisionListSize, sizeof(uint));
// Zero the device-side pair counter before traversal.
Initialize << <1, 1 >> >(0, collisionListSize);
TraverseStackless << <gridSize, blockSize >> > (bvh, deviceCollisionList, collisionListSize, size, collisionArraySize);
cudaMemcpy(&collisionArraySize, collisionListSize, sizeof(uint), cudaMemcpyKind::cudaMemcpyDeviceToHost);
DEVICECUDALBVH::CollisionPair* hostArray = new DEVICECUDALBVH::CollisionPair[collisionArraySize];
cudaMemcpy(&hostArray[0], deviceCollisionList, sizeof(uint2) * collisionArraySize, cudaMemcpyKind::cudaMemcpyDeviceToHost);
cudaFree(collisionListSize);
cudaFree(deviceCollisionList);
return &hostArray[0];
}
}
|
a70b6133392bec67aea9681a63f9c906f1a9063b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <vector>
#include <tuple>
#include "dali/kernels/scratch.h"
#include "dali/core/tensor_shape.h"
#include "dali/kernels/common/copy.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/kernels/test/kernel_test_utils.h"
#include "dali/kernels/imgproc/pointwise/multiply_add_gpu.h"
namespace dali {
namespace kernels {
namespace multiply_add {
namespace test {
namespace {
constexpr size_t kNdims = 3;
/**
* Rounding to nearest even (like GPU does it)
*/
/**
 * Round-to-nearest conversion for integral outputs, matching GPU rounding:
 * std::nearbyint honours the current rounding mode, which defaults to
 * round-to-nearest-even.
 */
template <class In, class Out>
std::enable_if_t<std::is_integral<Out>::value, Out> custom_round(float val) {
  const float nearest = std::nearbyint(val);
  return static_cast<Out>(nearest);
}
/** Pass-through overload for floating-point outputs: no rounding applied. */
template <class In, class Out>
std::enable_if_t<!std::is_integral<Out>::value, Out> custom_round(float val) {
  const Out result = val;
  return result;
}
} // namespace
// Typed gtest fixture for MultiplyAddGpu: generates a random host dataset,
// computes the CPU reference (custom_round matches GPU round-to-nearest-even
// for integral outputs), and uploads the input to the device.
// NOTE(review): input_device_ and output_ are never freed -- a TearDown with
// hipFree would avoid leaking device memory across test cases.
template <class InputOutputTypes>
class MultiplyAddGpuTest : public ::testing::Test {
using In = typename InputOutputTypes::In;
using Out = typename InputOutputTypes::Out;
public:
MultiplyAddGpuTest() {
input_host_.resize(dataset_size());
}
void SetUp() final {
std::mt19937_64 rng;
UniformRandomFill(input_host_, rng, 0., 10.);
// Reference must be computed after the random fill, before upload.
calc_output(0);
CUDA_CALL(hipMalloc(&input_device_, sizeof(In) * dataset_size()));
CUDA_CALL(hipMemcpy(input_device_, input_host_.data(), input_host_.size() * sizeof(In),
hipMemcpyDefault));
CUDA_CALL(hipMalloc(&output_, dataset_size() * sizeof(Out)));
CUDA_CALL(hipDeviceSynchronize());
verify_test();
}
In *input_device_;
Out *output_;
std::vector<In> input_host_;
std::vector<Out> ref_output_;
// One sample of HWC shape 480x640x3; addend/multiplier per sample.
std::vector<TensorShape<kNdims>> shapes_ = {{480, 640, 3}};
std::vector<float> addends_ = {4};
std::vector<float> multipliers_ = {3};
// Sanity-check that the per-sample vectors stay in sync.
void verify_test() {
assert(shapes_.size() == addends_.size());
assert(addends_.size() == multipliers_.size());
assert(dataset_size() == input_host_.size());
assert(dataset_size() == ref_output_.size());
}
// CPU reference: out = round(in * multiplier + addend) for sample `idx`.
void calc_output(int idx) {
for (auto in : input_host_) {
ref_output_.push_back(custom_round<In, Out>(in * multipliers_[idx] + addends_[idx]));
}
}
// Total element count across all sample shapes.
// NOTE(review): accumulates in int before returning size_t -- would
// overflow for very large datasets.
size_t dataset_size() {
int ret = 0;
for (auto sh : shapes_) {
ret += volume(sh);
}
return ret;
}
};
using TestTypes = std::tuple<int8_t, float>;
/* Cause the line below takes RIDICULOUSLY long time to compile */
// using TestTypes = std::tuple<uint8_t, int8_t, uint16_t, int16_t, int32_t, float>;
INPUT_OUTPUT_TYPED_TEST_SUITE(MultiplyAddGpuTest, TestTypes);
namespace {
template <class GtestTypeParam>
using TheKernel = MultiplyAddGpu
<typename GtestTypeParam::Out, typename GtestTypeParam::In, kNdims>;
} // namespace
// Static check that TheKernel conforms to the DALI kernel API.
TYPED_TEST(MultiplyAddGpuTest, check_kernel) {
  check_kernel<TheKernel<TypeParam>>();
}
// Verifies that Kernel::Setup reports one output sample per input sample
// and that each inferred output shape matches the input shape.
TYPED_TEST(MultiplyAddGpuTest, setup_test) {
  TheKernel<TypeParam> kernel;
  KernelContext ctx;
  InListGPU<typename TypeParam::In, kNdims> in(this->input_device_, this->shapes_);
  auto reqs = kernel.Setup(ctx, in, this->addends_, this->multipliers_);
  ASSERT_EQ(this->shapes_.size(), static_cast<size_t>(reqs.output_shapes[0].num_samples()))
      << "Kernel::Setup provides incorrect shape";
  for (size_t i = 0; i < this->shapes_.size(); i++) {
    EXPECT_EQ(this->shapes_[i], reqs.output_shapes[0][i])
        << "Kernel::Setup provides incorrect shape";
  }
}
// Runs the kernel end-to-end (Setup -> scratch allocation -> Run) and
// compares the device result against the CPU reference, element by element.
TYPED_TEST(MultiplyAddGpuTest, run_test) {
  TheKernel<TypeParam> kernel;
  KernelContext c;
  InListGPU<typename TypeParam::In, kNdims> in(this->input_device_, this->shapes_);
  OutListGPU<typename TypeParam::Out, kNdims> out(this->output_,
                                                  TensorListShape<kNdims>(this->shapes_));
  auto reqs = kernel.Setup(c, in, this->addends_, this->multipliers_);
  // Provide scratch memory sized according to the kernel's own requirements.
  ScratchpadAllocator sa;
  sa.Reserve(reqs.scratch_sizes);
  auto scratchpad = sa.GetScratchpad();
  c.scratchpad = &scratchpad;
  kernel.Run(c, out, in, this->addends_, this->multipliers_);
  CUDA_CALL(hipDeviceSynchronize());
  auto res = copy<AllocType::Host>(out[0]);
  ASSERT_EQ(static_cast<int>(this->ref_output_.size()), res.first.num_elements());
  for (size_t i = 0; i < this->ref_output_.size(); i++) {
    EXPECT_FLOAT_EQ(this->ref_output_[i], res.second.get()[i]) << "Failed for index " << i;
  }
}
// Checks that CreateSampleDescriptors fills per-sample pointers, pitches and
// multiply-add coefficients correctly for the first sample.
TYPED_TEST(MultiplyAddGpuTest, sample_descriptors) {
  using InType = typename TypeParam::In;
  using OutType = typename TypeParam::Out;
  InListGPU<InType, kNdims> in(this->input_device_, this->shapes_);
  OutListGPU<OutType, kNdims> out(this->output_, TensorListShape<3>(this->shapes_));
  std::vector<SampleDescriptor<OutType, InType, kNdims-1>> res(in.num_samples());
  CreateSampleDescriptors(make_span(res), out, in, this->addends_, this->multipliers_);
  EXPECT_EQ(this->input_device_, res[0].in);
  EXPECT_EQ(this->output_, res[0].out);
  // 640 (width) * 3 (channels) = 1920 elements per row of the {480,640,3} sample.
  ivec<kNdims - 2> ref_pitch = {1920};
  EXPECT_EQ(ref_pitch, res[0].in_pitch);
  EXPECT_EQ(ref_pitch, res[0].out_pitch);
  EXPECT_EQ(this->addends_[0], res[0].addend);
  EXPECT_EQ(this->multipliers_[0], res[0].multiplier);
}
} // namespace test
} // namespace multiply_add
} // namespace kernels
} // namespace dali
| a70b6133392bec67aea9681a63f9c906f1a9063b.cu | // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>
#include <vector>
#include <tuple>
#include "dali/kernels/scratch.h"
#include "dali/core/tensor_shape.h"
#include "dali/kernels/common/copy.h"
#include "dali/test/tensor_test_utils.h"
#include "dali/kernels/test/kernel_test_utils.h"
#include "dali/kernels/imgproc/pointwise/multiply_add_gpu.h"
namespace dali {
namespace kernels {
namespace multiply_add {
namespace test {
namespace {
constexpr size_t kNdims = 3;
/**
* Rounding to nearest even (like GPU does it)
*/
template <class In, class Out>
std::enable_if_t<std::is_integral<Out>::value, Out> custom_round(float value) {
  // Integral outputs: round with the current FP rounding mode
  // (round-to-nearest-even by default), matching GPU rounding behavior.
  const float rounded = std::nearbyint(value);
  return static_cast<Out>(rounded);
}
template <class In, class Out>
std::enable_if_t<!std::is_integral<Out>::value, Out> custom_round(float value) {
  // Floating-point outputs are passed through; no rounding is required.
  return static_cast<Out>(value);
}
} // namespace
template <class InputOutputTypes>
class MultiplyAddGpuTest : public ::testing::Test {
  using In = typename InputOutputTypes::In;
  using Out = typename InputOutputTypes::Out;

 public:
  MultiplyAddGpuTest() {
    input_host_.resize(dataset_size());
  }

  // Fills the host input with uniform random data, computes the CPU
  // reference output, and uploads the input to the device.
  void SetUp() final {
    std::mt19937_64 rng;
    UniformRandomFill(input_host_, rng, 0., 10.);
    calc_output(0);
    CUDA_CALL(cudaMalloc(&input_device_, sizeof(In) * dataset_size()));
    CUDA_CALL(cudaMemcpy(input_device_, input_host_.data(), input_host_.size() * sizeof(In),
                         cudaMemcpyDefault));
    CUDA_CALL(cudaMalloc(&output_, dataset_size() * sizeof(Out)));
    CUDA_CALL(cudaDeviceSynchronize());
    verify_test();
  }

  // Release device buffers. The original leaked them across the many
  // instantiations of this typed fixture; cudaFree(nullptr) is a no-op.
  void TearDown() final {
    CUDA_CALL(cudaFree(input_device_));
    CUDA_CALL(cudaFree(output_));
  }

  In *input_device_ = nullptr;   // device-side copy of the input
  Out *output_ = nullptr;        // device-side output buffer
  std::vector<In> input_host_;   // host-side input data
  std::vector<Out> ref_output_;  // CPU reference result
  std::vector<TensorShape<kNdims>> shapes_ = {{480, 640, 3}};
  std::vector<float> addends_ = {4};
  std::vector<float> multipliers_ = {3};

  // Sanity-checks that per-sample parameter vectors and buffers agree in size.
  void verify_test() {
    assert(shapes_.size() == addends_.size());
    assert(addends_.size() == multipliers_.size());
    assert(dataset_size() == input_host_.size());
    assert(dataset_size() == ref_output_.size());
  }

  // Appends the reference output for sample `idx`:
  // out = custom_round(in * multiplier + addend).
  void calc_output(int idx) {
    for (auto in : input_host_) {
      ref_output_.push_back(custom_round<In, Out>(in * multipliers_[idx] + addends_[idx]));
    }
  }

  // Total number of elements across all sample shapes.
  // Accumulate in size_t: the original summed into an int, narrowing the
  // result of volume() and risking overflow for large tensors.
  size_t dataset_size() {
    size_t ret = 0;
    for (auto sh : shapes_) {
      ret += volume(sh);
    }
    return ret;
  }
};
using TestTypes = std::tuple<int8_t, float>;
/* Cause the line below takes RIDICULOUSLY long time to compile */
// using TestTypes = std::tuple<uint8_t, int8_t, uint16_t, int16_t, int32_t, float>;
INPUT_OUTPUT_TYPED_TEST_SUITE(MultiplyAddGpuTest, TestTypes);
namespace {
template <class GtestTypeParam>
using TheKernel = MultiplyAddGpu
<typename GtestTypeParam::Out, typename GtestTypeParam::In, kNdims>;
} // namespace
// Static check that TheKernel conforms to the DALI kernel API.
TYPED_TEST(MultiplyAddGpuTest, check_kernel) {
  check_kernel<TheKernel<TypeParam>>();
}
// Verifies that Kernel::Setup reports one output sample per input sample
// and that each inferred output shape matches the input shape.
TYPED_TEST(MultiplyAddGpuTest, setup_test) {
  TheKernel<TypeParam> kernel;
  KernelContext ctx;
  InListGPU<typename TypeParam::In, kNdims> in(this->input_device_, this->shapes_);
  auto reqs = kernel.Setup(ctx, in, this->addends_, this->multipliers_);
  ASSERT_EQ(this->shapes_.size(), static_cast<size_t>(reqs.output_shapes[0].num_samples()))
      << "Kernel::Setup provides incorrect shape";
  for (size_t i = 0; i < this->shapes_.size(); i++) {
    EXPECT_EQ(this->shapes_[i], reqs.output_shapes[0][i])
        << "Kernel::Setup provides incorrect shape";
  }
}
// Runs the kernel end-to-end (Setup -> scratch allocation -> Run) and
// compares the device result against the CPU reference, element by element.
TYPED_TEST(MultiplyAddGpuTest, run_test) {
  TheKernel<TypeParam> kernel;
  KernelContext c;
  InListGPU<typename TypeParam::In, kNdims> in(this->input_device_, this->shapes_);
  OutListGPU<typename TypeParam::Out, kNdims> out(this->output_,
                                                  TensorListShape<kNdims>(this->shapes_));
  auto reqs = kernel.Setup(c, in, this->addends_, this->multipliers_);
  // Provide scratch memory sized according to the kernel's own requirements.
  ScratchpadAllocator sa;
  sa.Reserve(reqs.scratch_sizes);
  auto scratchpad = sa.GetScratchpad();
  c.scratchpad = &scratchpad;
  kernel.Run(c, out, in, this->addends_, this->multipliers_);
  CUDA_CALL(cudaDeviceSynchronize());
  auto res = copy<AllocType::Host>(out[0]);
  ASSERT_EQ(static_cast<int>(this->ref_output_.size()), res.first.num_elements());
  for (size_t i = 0; i < this->ref_output_.size(); i++) {
    EXPECT_FLOAT_EQ(this->ref_output_[i], res.second.get()[i]) << "Failed for index " << i;
  }
}
// Checks that CreateSampleDescriptors fills per-sample pointers, pitches and
// multiply-add coefficients correctly for the first sample.
TYPED_TEST(MultiplyAddGpuTest, sample_descriptors) {
  using InType = typename TypeParam::In;
  using OutType = typename TypeParam::Out;
  InListGPU<InType, kNdims> in(this->input_device_, this->shapes_);
  OutListGPU<OutType, kNdims> out(this->output_, TensorListShape<3>(this->shapes_));
  std::vector<SampleDescriptor<OutType, InType, kNdims-1>> res(in.num_samples());
  CreateSampleDescriptors(make_span(res), out, in, this->addends_, this->multipliers_);
  EXPECT_EQ(this->input_device_, res[0].in);
  EXPECT_EQ(this->output_, res[0].out);
  // 640 (width) * 3 (channels) = 1920 elements per row of the {480,640,3} sample.
  ivec<kNdims - 2> ref_pitch = {1920};
  EXPECT_EQ(ref_pitch, res[0].in_pitch);
  EXPECT_EQ(ref_pitch, res[0].out_pitch);
  EXPECT_EQ(this->addends_[0], res[0].addend);
  EXPECT_EQ(this->multipliers_[0], res[0].multiplier);
}
} // namespace test
} // namespace multiply_add
} // namespace kernels
} // namespace dali
|
7eebccfdede41934f2520af61442bf77693505c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <stdio.h>
#include <iostream>
struct DataElement
{
char *name;
int value;
};
// Device side of the managed-memory round trip: print the element as seen
// on the GPU, then mutate both fields so the host can observe the change.
__global__
void Kernel(DataElement *elem) {
  printf("On device: name=%s, value=%d\n", elem->name, elem->value);
  elem->name[0] = 'd';
  elem->value++;
}
// Launches Kernel on `stream` with a single thread. The launch is
// asynchronous: the caller must synchronize before touching `elem` on the
// host (note the deliberately disabled device-wide sync below).
void launch(DataElement *elem, hipStream_t &stream) {
  hipLaunchKernelGGL(( Kernel), dim3(1), dim3(1), 0, stream , elem);
  //hipDeviceSynchronize();
}
// One managed-memory round trip: allocate a DataElement and its string in
// managed memory, run the kernel on `stream`, then print and free on the host.
void iteration(hipStream_t &stream)
{
  DataElement *e;
  hipMallocManaged((void**)&e, sizeof(DataElement));
  e->value = 10;
  hipMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) );
  strcpy(e->name, "hello");
  launch(e, stream);
  // Bug fix: launch() is asynchronous, so without this the host printf and
  // the frees below race with the kernel on the managed allocations (and on
  // pre-Pascal-class hardware, host access to managed memory while a kernel
  // is in flight is illegal).
  hipStreamSynchronize(stream);
  printf("On host: name=%s, value=%d\n", e->name, e->value);
  hipFree(e->name);
  hipFree(e);
}
// Enumerates all HIP devices and, on each, runs the managed-memory round
// trip ten times, each time on a freshly created stream.
int main(void)
{
  hipError_t err;
  int count = 0;
  err = hipGetDeviceCount(&count);
  // Bug fix: the original ignored this status; a failed query left count at 0
  // silently. Report and bail out like the other error paths below.
  if (err != hipSuccess) {
    std::cout << "error getting device count, #=" << hipGetErrorString(err) << std::endl;
    return 1;
  }
  std::cout << count << " devices found." << std::endl;
  for (int d=0;d<count;d++) {
    err = hipSetDevice(d);
    if (err != hipSuccess) {
      std::cout << "error setting device, #=" << hipGetErrorString(err) << std::endl;
    }
    hipDeviceProp_t deviceProp;
    err = hipGetDeviceProperties(&deviceProp, d);
    if (err != hipSuccess) {
      std::cout << "error getting device properties, #=" << hipGetErrorString(err) << std::endl;
    }
    std::cout << "Using device " << d << ", name: " << deviceProp.name << std::endl;
    for (int s = 0 ; s < 10 ; s++) {
      hipStream_t stream;
      err = hipStreamCreate(&stream);
      if (err != hipSuccess) {
        std::cout << "error in stream creation, #=" << hipGetErrorString(err) << std::endl;
      }
      iteration(stream);
      hipStreamDestroy(stream);
    }
  }
  return 0;
}
| 7eebccfdede41934f2520af61442bf77693505c4.cu | #include <string.h>
#include <stdio.h>
#include <iostream>
struct DataElement
{
char *name;
int value;
};
// Device side of the managed-memory round trip: print the element as seen
// on the GPU, then mutate both fields so the host can observe the change.
__global__
void Kernel(DataElement *elem) {
  printf("On device: name=%s, value=%d\n", elem->name, elem->value);
  elem->name[0] = 'd';
  elem->value++;
}
// Launches Kernel on `stream` with a single thread. The launch is
// asynchronous: the caller must synchronize before touching `elem` on the
// host (note the deliberately disabled device-wide sync below).
void launch(DataElement *elem, cudaStream_t &stream) {
  Kernel<<< 1, 1, 0, stream >>>(elem);
  //cudaDeviceSynchronize();
}
// One managed-memory round trip: allocate a DataElement and its string in
// managed memory, run the kernel on `stream`, then print and free on the host.
void iteration(cudaStream_t &stream)
{
  DataElement *e;
  cudaMallocManaged((void**)&e, sizeof(DataElement));
  e->value = 10;
  cudaMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) );
  strcpy(e->name, "hello");
  launch(e, stream);
  // Bug fix: launch() is asynchronous, so without this the host printf and
  // the frees below race with the kernel on the managed allocations (and on
  // pre-Pascal hardware, host access to managed memory while a kernel is in
  // flight is illegal).
  cudaStreamSynchronize(stream);
  printf("On host: name=%s, value=%d\n", e->name, e->value);
  cudaFree(e->name);
  cudaFree(e);
}
// Enumerates all CUDA devices and, on each, runs the managed-memory round
// trip ten times, each time on a freshly created stream.
int main(void)
{
  cudaError_t err;
  int count = 0;
  err = cudaGetDeviceCount(&count);
  // Bug fix: the original ignored this status; a failed query left count at 0
  // silently. Report and bail out like the other error paths below.
  if (err != cudaSuccess) {
    std::cout << "error getting device count, #=" << cudaGetErrorString(err) << std::endl;
    return 1;
  }
  std::cout << count << " devices found." << std::endl;
  for (int d=0;d<count;d++) {
    err = cudaSetDevice(d);
    if (err != cudaSuccess) {
      std::cout << "error setting device, #=" << cudaGetErrorString(err) << std::endl;
    }
    cudaDeviceProp deviceProp;
    err = cudaGetDeviceProperties(&deviceProp, d);
    if (err != cudaSuccess) {
      std::cout << "error getting device properties, #=" << cudaGetErrorString(err) << std::endl;
    }
    std::cout << "Using device " << d << ", name: " << deviceProp.name << std::endl;
    for (int s = 0 ; s < 10 ; s++) {
      cudaStream_t stream;
      err = cudaStreamCreate(&stream);
      if (err != cudaSuccess) {
        std::cout << "error in stream creation, #=" << cudaGetErrorString(err) << std::endl;
      }
      iteration(stream);
      cudaStreamDestroy(stream);
    }
  }
  return 0;
}
|
f0f1434e3aeb8185cd24a8dcef8a4c72ecacd40c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"Device_Matrix.cuh"
// Default constructor: a 1x1 single-channel matrix backed by one float of
// (uninitialized) device storage.
Device_Matrix::Device_Matrix(){
  width_ = 1;
  height_ = 1;
  channel_ = 1;
  depth_ = 1;
  rows_ = 1;
  cols_ = 1;
  size_ = sizeof(float);
  CHECK(hipMalloc((void**)&data_,size_));
}
// Copy constructor: deep-copies the device buffer and all shape metadata.
Device_Matrix::Device_Matrix(const Device_Matrix& obj){
  rows_ = obj.rows_;
  cols_ = obj.cols_;
  size_ = obj.size_;
  CHECK(hipMalloc((void**)&data_, size_));
  CHECK(hipMemcpy(data_, obj.data_, size_, hipMemcpyDeviceToDevice));
  width_ = obj.width_;
  // Bug fix: the original assigned obj.cols_ to height_ and obj.channel_ to
  // depth_, corrupting the logical shape of the copy.
  height_ = obj.height_;
  channel_ = obj.channel_;
  depth_ = obj.depth_;
}
Device_Matrix::Device_Matrix(unsigned int rows, unsigned int cols, INITIALIZER init, float a, float b){
rows_ = rows;
cols_ = cols;
size_ = rows * cols * sizeof(float);
CHECK(hipMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 11);
break;
}
CHECK(hipDeviceSynchronize());
width_ = cols;
height_ = rows;
channel_ = 1;
depth_ = 1;
}
Device_Matrix::Device_Matrix(unsigned int height, unsigned int width, unsigned int channel, unsigned int depth, INITIALIZER init, float a, float b){
width_ = width;
height_ = height;
channel_ = channel;
depth_ = depth;
rows_ = height;
cols_ = width * channel * depth;
size_ = rows_ * cols_ * sizeof(float);
CHECK(hipMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
}
CHECK(hipDeviceSynchronize());
}
// Constructs a rows x cols matrix and uploads `host_data` (rows*cols floats)
// to the device. The caller retains ownership of `host_data`.
Device_Matrix::Device_Matrix(unsigned int rows, unsigned int cols, float* host_data){
  rows_ = rows;
  cols_ = cols;
  size_ = cols * rows * sizeof(float);
  CHECK(hipMalloc((void**)&data_, size_));
  CHECK(hipMemcpy(data_, host_data, size_, hipMemcpyHostToDevice));
  width_ = cols;
  height_ = rows;
  channel_ = 1;
  depth_ = 1;
}
// Re-initializes this matrix as a deep copy of `obj`, releasing any
// previously owned device buffer first.
void Device_Matrix::initialize(Device_Matrix& obj){
  if (data_ != NULL){
    CHECK(hipFree(data_));
  }
  rows_ = obj.rows_;
  cols_ = obj.cols_;
  size_ = obj.size_;
  CHECK(hipMalloc((void**)&data_, size_));
  CHECK(hipMemcpy(data_, obj.data_, size_, hipMemcpyDeviceToDevice));
  width_ = obj.width_;
  // Bug fix: the original assigned obj.cols_ to height_ and obj.channel_ to
  // depth_, corrupting the logical shape of the copy.
  height_ = obj.height_;
  channel_ = obj.channel_;
  depth_ = obj.depth_;
}
void Device_Matrix::initialize(unsigned int rows, unsigned int cols, INITIALIZER init, float a, float b){
if (data_ != NULL){
CHECK(hipFree(data_));
}
rows_ = rows;
cols_ = cols;
size_ = rows * cols * sizeof(float);
CHECK(hipMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
hipLaunchKernelGGL(( set_array_2D) , dim3(grid),dim3(block) , 0, 0, data_,rows_,cols_,0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state,sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(),rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
}
CHECK(hipDeviceSynchronize());
width_ = cols;
height_ = rows;
channel_ = 1;
depth_ = 1;
}
void Device_Matrix::initialize(unsigned int height, unsigned int width, unsigned int channel, unsigned int depth, INITIALIZER init, float a, float b){
if (data_ != NULL){
CHECK(hipFree(data_));
}
width_ = width;
height_ = height;
channel_ = channel;
depth_ = depth;
rows_ = height;
cols_ = width * channel * depth;
size_ = rows_ * cols_ * sizeof(float);
CHECK(hipMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
hiprandState_t *state;
CHECK(hipMalloc((hiprandState_t **)&state, sizeof(hiprandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(hipDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(hipDeviceSynchronize());
hipFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
}
CHECK(hipDeviceSynchronize());
}
// NOTE(review): unimplemented stub -- intended to load matrix contents from
// `file_name`; currently a no-op.
void Device_Matrix::read_data(string file_name){
}
// NOTE(review): unimplemented stub -- intended to store matrix contents to
// `file_name`; currently a no-op.
void Device_Matrix::write_data(string file_name){
}
//////////////////////////// operator ///////////////////
// Copy assignment: deep-copies `obj` into this matrix, guarding against
// self-assignment and releasing the previously owned device buffer.
Device_Matrix& Device_Matrix::operator = (const Device_Matrix& obj){
  if (&obj == this) return *this;
  if (data_ != NULL){
    CHECK(hipFree(data_));
  }
  rows_ = obj.rows_;
  cols_ = obj.cols_;
  size_ = obj.size_;
  CHECK(hipMalloc((void**)&data_, size_));
  CHECK(hipMemcpy(data_, obj.data_, size_, hipMemcpyDeviceToDevice));
  width_ = obj.width_;
  // Bug fix: the original assigned obj.cols_ to height_ and obj.channel_ to
  // depth_, corrupting the logical shape after assignment.
  height_ = obj.height_;
  channel_ = obj.channel_;
  depth_ = obj.depth_;
  return *this;
}
// NOTE(review): the arithmetic operators below are unimplemented stubs --
// each returns *this unchanged without performing any computation.
Device_Matrix& Device_Matrix::operator * (const Device_Matrix& obj){
  return *this;
}
Device_Matrix& Device_Matrix::operator + (const Device_Matrix& obj){
  return *this;
}
Device_Matrix& Device_Matrix::operator - (const Device_Matrix& obj){
  return *this;
}
void Device_Matrix::print(bool All_status){
if (data_ == NULL){
cout << "This Matrix doesn't have data" << endl;
}
else{
cout << "------------- Device_Matrix -------------" << endl;
cout << "rows : " << rows_ << ", cols : " << cols_ << endl;
cout << "width : " << width_ << ", height : " << height_ << ",channel : " << channel_ << ", depth : " << depth_ << endl;
cout << "data_adress : " << data_ << endl;
if (All_status){
float* tmp_data;
tmp_data = (float*)malloc(size_);
hipMemcpy(tmp_data,data_,size_,hipMemcpyDeviceToHost);
//
for (int i = 0; i < rows_; i++){
for (int j = 0; j < cols_; j++){
unsigned int idx = j * rows_ + i;
cout <<setw(5)<< tmp_data[idx] << ",";
}
cout << endl;
}
free(tmp_data);
}
cout << "--------------------------------------------" << endl;
}
} | f0f1434e3aeb8185cd24a8dcef8a4c72ecacd40c.cu | #include"Device_Matrix.cuh"
// Default constructor: a 1x1 single-channel matrix backed by one float of
// (uninitialized) device storage.
Device_Matrix::Device_Matrix(){
  width_ = 1;
  height_ = 1;
  channel_ = 1;
  depth_ = 1;
  rows_ = 1;
  cols_ = 1;
  size_ = sizeof(float);
  CHECK(cudaMalloc((void**)&data_,size_));
}
// Copy constructor: deep-copies the device buffer and all shape metadata.
Device_Matrix::Device_Matrix(const Device_Matrix& obj){
  rows_ = obj.rows_;
  cols_ = obj.cols_;
  size_ = obj.size_;
  CHECK(cudaMalloc((void**)&data_, size_));
  CHECK(cudaMemcpy(data_, obj.data_, size_, cudaMemcpyDeviceToDevice));
  width_ = obj.width_;
  // Bug fix: the original assigned obj.cols_ to height_ and obj.channel_ to
  // depth_, corrupting the logical shape of the copy.
  height_ = obj.height_;
  channel_ = obj.channel_;
  depth_ = obj.depth_;
}
Device_Matrix::Device_Matrix(unsigned int rows, unsigned int cols, INITIALIZER init, float a, float b){
rows_ = rows;
cols_ = cols;
size_ = rows * cols * sizeof(float);
CHECK(cudaMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 11);
break;
}
CHECK(cudaDeviceSynchronize());
width_ = cols;
height_ = rows;
channel_ = 1;
depth_ = 1;
}
Device_Matrix::Device_Matrix(unsigned int height, unsigned int width, unsigned int channel, unsigned int depth, INITIALIZER init, float a, float b){
width_ = width;
height_ = height;
channel_ = channel;
depth_ = depth;
rows_ = height;
cols_ = width * channel * depth;
size_ = rows_ * cols_ * sizeof(float);
CHECK(cudaMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
}
CHECK(cudaDeviceSynchronize());
}
// Constructs a rows x cols matrix and uploads `host_data` (rows*cols floats)
// to the device. The caller retains ownership of `host_data`.
Device_Matrix::Device_Matrix(unsigned int rows, unsigned int cols, float* host_data){
  rows_ = rows;
  cols_ = cols;
  size_ = cols * rows * sizeof(float);
  CHECK(cudaMalloc((void**)&data_, size_));
  CHECK(cudaMemcpy(data_, host_data, size_, cudaMemcpyHostToDevice));
  width_ = cols;
  height_ = rows;
  channel_ = 1;
  depth_ = 1;
}
// Re-initializes this matrix as a deep copy of `obj`, releasing any
// previously owned device buffer first.
void Device_Matrix::initialize(Device_Matrix& obj){
  if (data_ != NULL){
    CHECK(cudaFree(data_));
  }
  rows_ = obj.rows_;
  cols_ = obj.cols_;
  size_ = obj.size_;
  CHECK(cudaMalloc((void**)&data_, size_));
  CHECK(cudaMemcpy(data_, obj.data_, size_, cudaMemcpyDeviceToDevice));
  width_ = obj.width_;
  // Bug fix: the original assigned obj.cols_ to height_ and obj.channel_ to
  // depth_, corrupting the logical shape of the copy.
  height_ = obj.height_;
  channel_ = obj.channel_;
  depth_ = obj.depth_;
}
void Device_Matrix::initialize(unsigned int rows, unsigned int cols, INITIALIZER init, float a, float b){
if (data_ != NULL){
CHECK(cudaFree(data_));
}
rows_ = rows;
cols_ = cols;
size_ = rows * cols * sizeof(float);
CHECK(cudaMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D <<<grid,block >>>(data_,rows_,cols_,0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state,sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(),rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
}
CHECK(cudaDeviceSynchronize());
width_ = cols;
height_ = rows;
channel_ = 1;
depth_ = 1;
}
void Device_Matrix::initialize(unsigned int height, unsigned int width, unsigned int channel, unsigned int depth, INITIALIZER init, float a, float b){
if (data_ != NULL){
CHECK(cudaFree(data_));
}
width_ = width;
height_ = height;
channel_ = channel;
depth_ = depth;
rows_ = height;
cols_ = width * channel * depth;
size_ = rows_ * cols_ * sizeof(float);
CHECK(cudaMalloc((void**)&data_, size_));
dim3 block(BLOCK_X, BLOCK_Y);
dim3 grid((cols_ + block.x - 1) / block.x, (rows_ + block.y - 1) / block.y);
switch (init){
case ZERO:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
case CONSTANT:
set_array_2D << <grid, block >> >(data_, rows_, cols_, a);
break;
case IDENTITY:
set_array_identity_2D << <grid, block >> >(data_, rows_, cols_);
break;
case RANDOM:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, 0, 1.0f, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case GAUSSIAN:{
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_normal2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case UNIF_DISTRIB:
{
if (a >= b){
cout << "DEVICE_MATRIX ERROR : UNIF_DISTRIB: b should be larger than a" << endl;
}
std::random_device rd;
curandState_t *state;
CHECK(cudaMalloc((curandState_t **)&state, sizeof(curandState_t) * rows_ * cols_));
rand_init2D << <grid, block >> > (rd(), rows_, cols_, state);
CHECK(cudaDeviceSynchronize());
initialize_uniform2D << <grid, block >> >(data_, rows_, cols_, a, b, state);
CHECK(cudaDeviceSynchronize());
cudaFree(state);
break;
}
case STEP:
set_array_step_2D << < grid, block >> > (data_, rows_, cols_);
break;
case PLANE:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
default:
set_array_2D << <grid, block >> >(data_, rows_, cols_, 0);
break;
}
CHECK(cudaDeviceSynchronize());
}
// Load matrix contents from a file into device memory.
// NOTE(review): not implemented yet -- body is intentionally empty.
void Device_Matrix::read_data(string file_name){
}
// Write matrix contents from device memory out to a file.
// NOTE(review): not implemented yet -- body is intentionally empty.
void Device_Matrix::write_data(string file_name){
}
//////////////////////////// operator ///////////////////
// Deep-copy assignment: frees this matrix's device buffer, allocates a
// fresh one of obj's size, and copies obj's device data into it
// (device-to-device). Self-assignment is a no-op.
Device_Matrix& Device_Matrix::operator = (const Device_Matrix& obj){
    if (&obj == this) return *this;
    if (data_ != NULL){
        CHECK(cudaFree(data_));
    }
    rows_ = obj.rows_;
    cols_ = obj.cols_;
    size_ = obj.size_;
    CHECK(cudaMalloc((void**)&data_, size_));
    CHECK(cudaMemcpy(data_, obj.data_, size_, cudaMemcpyDeviceToDevice));
    width_ = obj.width_;
    height_ = obj.height_;   // bug fix: previously copied obj.cols_
    channel_ = obj.channel_;
    depth_ = obj.depth_;     // bug fix: previously copied obj.channel_
    return *this;
}
// Matrix multiplication operator.
// NOTE(review): unimplemented stub -- currently returns *this unchanged.
Device_Matrix& Device_Matrix::operator * (const Device_Matrix& obj){
return *this;
}
// Element-wise addition operator.
// NOTE(review): unimplemented stub -- currently returns *this unchanged.
Device_Matrix& Device_Matrix::operator + (const Device_Matrix& obj){
return *this;
}
// Element-wise subtraction operator.
// NOTE(review): unimplemented stub -- currently returns *this unchanged.
Device_Matrix& Device_Matrix::operator - (const Device_Matrix& obj){
return *this;
}
// Print matrix metadata to stdout. When All_status is true, also copy the
// device buffer back to the host and print every element. Data is stored
// column-major: element (row i, col j) lives at index j*rows_ + i.
void Device_Matrix::print(bool All_status){
    if (data_ == NULL){
        cout << "This Matrix doesn't have data" << endl;
    }
    else{
        cout << "------------- Device_Matrix -------------" << endl;
        cout << "rows : " << rows_ << ", cols : " << cols_ << endl;
        cout << "width : " << width_ << ", height : " << height_ << ",channel : " << channel_ << ", depth : " << depth_ << endl;
        cout << "data_adress : " << data_ << endl;
        if (All_status){
            float* tmp_data = (float*)malloc(size_);
            if (tmp_data == NULL){
                // was unchecked: a failed malloc would crash in cudaMemcpy
                cout << "DEVICE_MATRIX ERROR : print: host allocation failed" << endl;
            }
            else{
                // was unchecked; CHECK is used for every other CUDA call in this file
                CHECK(cudaMemcpy(tmp_data, data_, size_, cudaMemcpyDeviceToHost));
                // display the column-major data
                for (int i = 0; i < rows_; i++){
                    for (int j = 0; j < cols_; j++){
                        unsigned int idx = j * rows_ + i;
                        cout << setw(5) << tmp_data[idx] << ",";
                    }
                    cout << endl;
                }
                free(tmp_data);
            }
        }
        cout << "--------------------------------------------" << endl;
    }
}
17377895ee46af9825db93fa749f6ea354513bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* dedisperse_gpu.cu
* Functions for coherent dedispersion in CUDA/GPUs
* Paul Demorest, 2009/05
*/
#include "dedisperse_gpu.h"
/* CUDA kernel to convert bytes to floats. Also splits incoming
* data into two polarizations (assuming polns are interleaved
* in the raw data).
*/
/* Unpack interleaved 8-bit complex dual-pol samples into two float2 arrays.
 * Each 32-bit input word holds (x.re, x.im, y.re, y.im) as signed bytes;
 * pol X goes to outx[i], pol Y to outy[i]. Grid-stride loop, so any launch
 * configuration covers all n samples.
 * Fix: index and stride are size_t (were int) so blocks*threads products
 * and n > INT_MAX cannot overflow. */
__global__ void byte_to_float_2pol_complex(
    unsigned short *in, float2 *outx, float2 *outy,
    size_t n) {
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    const size_t tId = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    char4 *in_8bit = (char4 *)in;  // reinterpret as 4 packed signed bytes
    for (size_t i = tId; i < n; i += stride) {
        outx[i].x = __int2float_rn(in_8bit[i].x);
        outx[i].y = __int2float_rn(in_8bit[i].y);
        outy[i].x = __int2float_rn(in_8bit[i].z);
        outy[i].y = __int2float_rn(in_8bit[i].w);
    }
}
/* CUDA kernel for inplace complex vector (elementwise) multiplication:
*
* Does a[i] *= b[i] for 0<i<n.
*
* a can contain multiple vectors to be multiplied by b, each
* should run in its own thread block, ie:
* vec_mult_complex<<<n_vector,64>>>(a,b,n_element_per_vector);
* where a has n_element_per_vector*n_vector entries, and b
* has n_element.
*/
#if 1
/* In-place complex multiply: for each of gridDim.x row-vectors stored
 * back-to-back in a, do a[row][i] *= b[i]. gridDim.y splits each row into
 * chunks worked on by separate blocks.
 * Fix: chunk size is now ceil(nelem / gridDim.y). The old floor division
 * silently skipped the last (nelem % gridDim.y) elements of every row when
 * gridDim.y did not divide nelem. Current callers pass power-of-two sizes,
 * so the tail was empty in practice, but the kernel is now safe for any
 * nelem/gridDim.y combination. */
__global__ void vector_multiply_complex(float2 *a, const float2 *b, unsigned nelem) {
    const unsigned nelem_chunk = (nelem + gridDim.y - 1) / gridDim.y;  // ceil-div
    const unsigned elem0 = blockIdx.y * nelem_chunk;
    const unsigned elem1 = elem0 + nelem_chunk > nelem ? nelem : elem0 + nelem_chunk;
    float2 *ptr = &a[blockIdx.x*nelem];
    float2 tmp;
    for (unsigned i = elem0 + threadIdx.x; i < elem1; i += blockDim.x) {
        tmp.x = ptr[i].x*b[i].x - ptr[i].y*b[i].y;  // complex multiply, real part
        tmp.y = ptr[i].y*b[i].x + ptr[i].x*b[i].y;  // imaginary part
        ptr[i] = tmp;
    }
}
#endif
#if 0
/* Disabled (#if 0) single-axis variant: one thread block per row-vector,
 * no gridDim.y chunking. Kept for reference/benchmark comparison against
 * the enabled version above. */
__global__ void vector_multiply_complex(float2 *a, const float2 *b,
unsigned nelem) {
float2 *ptr = &a[blockIdx.x*nelem];
float2 tmp;
for (int i=threadIdx.x; i<nelem; i+=blockDim.x) {
tmp.x = ptr[i].x*b[i].x - ptr[i].y*b[i].y;
tmp.y = ptr[i].y*b[i].x + ptr[i].x*b[i].y;
ptr[i] = tmp;
}
}
#endif
/* Full-stokes detection "in-place"
* vx and vy arrays are voltage data. Output total power
* terms go into vx, and cross terms in vy. n is total number
* of data points.
* TODO: check signs, etc
* Also, if we're folding on the GPU it probably makes more sense
* to combine the two operations.
*/
/* Full-Stokes-style detection in place. On return, vx holds the two
 * total-power terms (|X|^2 in .x, |Y|^2 in .y) and vy holds the cross
 * terms (Re(X*conj(Y)) in .x, Im(X*conj(Y)) in .y). n is the total
 * number of complex samples. Grid-stride loop. */
__global__ void detect_4pol(float2 *vx, float2 *vy, size_t n) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        const float2 x = vx[idx];
        const float2 y = vy[idx];
        float2 power, cross;
        power.x = x.x*x.x + x.y*x.y;
        power.y = y.x*y.x + y.y*y.y;
        cross.x = x.x*y.x + x.y*y.y;
        cross.y = x.x*y.y - x.y*y.x;
        vx[idx] = power;
        vy[idx] = cross;
    }
}
/* Expand the original input out so that FFTs will overlap */
/* Duplicate the input stream so consecutive FFT windows overlap.
 * Each output window of fft_len samples is copied from an input offset
 * that advances by only (fft_len - overlap) samples, so neighbouring
 * windows share `overlap` samples at their edges. All quantities below
 * are in bytes (4 bytes per sample: 8-bit complex, 2 pols).
 * NOTE(review): assumes npts_per_block >= fft_len; if not, the size_t
 * subtraction in the loop bound underflows -- confirm callers guarantee
 * this. */
void expand_overlap(dedispersion_setup *s) {
    const size_t bytes_per_sample = 4; // 8-bit complex, 2 pol
    const size_t bytes_per_fft = bytes_per_sample * s->fft_len;
    const size_t bytes_overlap = bytes_per_sample * s->overlap;
    const size_t bytes_total = bytes_per_sample * s->npts_per_block;
    const size_t bytes_step = bytes_per_fft - bytes_overlap;
    size_t icount = 0, ocount = 0;
    while (icount <= bytes_total - bytes_per_fft) {
        hipMemcpy(s->overlap_gpu + ocount, s->tbuf_gpu + icount,
                  bytes_per_fft, hipMemcpyDeviceToDevice);
        icount += bytes_step;
        ocount += bytes_per_fft;
    }
}
/* Transfer the dedispersed data back to the main system memory
* while simultaneously collapsing the overlap (ie, removing
* invalid points at FFT edge).
*/
// Copy results GPU->host, dropping the overlap/2 contaminated samples at
// each FFT edge so only the valid centre of each transform is kept.
void transfer_collapse_overlap(dedispersion_setup *s) {
/* At this point, databuf0 holds total-power terms (AA, BB)
* and databuf1 holds poln cross-terms (Re, Im(AB)).
*/
const int valid_samp_per_fft = s->fft_len - s->overlap;
unsigned ifft;
// TODO Think about the best way to organize this data for later
// TODO Make sure we're getting the right part of the FFT
for (ifft=0; ifft<s->nfft_per_block; ifft++) {
// Each memcpy transfers a single FFT's worth of valid data
// from 2 (out of 4 total) polns.
// Source skips overlap/2 complex samples at the leading edge; size is
// valid_samp_per_fft float2 elements (2 floats each).
// NOTE(review): the host-side layout appears to be 4 blocks of
// valid_samp_per_fft samples per FFT (offsets 4*ifft+0 and 4*ifft+2) --
// confirm result_host's element type/stride against dedisperse_gpu.h.
hipMemcpy(s->result_host + (4*ifft+0)*valid_samp_per_fft,
s->databuf0_gpu + ifft*s->fft_len + s->overlap/2,
valid_samp_per_fft * sizeof(float) * 2,
hipMemcpyDeviceToHost);
hipMemcpy(s->result_host + (4*ifft+2)*valid_samp_per_fft,
s->databuf1_gpu + ifft*s->fft_len + s->overlap/2,
valid_samp_per_fft * sizeof(float) * 2,
hipMemcpyDeviceToHost);
}
}
/* Fills in the freq-domain chirp, given the input params.
* Assumes memory has already been allocated. If fft_len has not
* been changed, this func can be called again to change the
* DM, freq, etc currently being applied. Formula is taken
* from ASP's CalcChirp.
*/
// Build the frequency-domain dedispersion chirp for every channel on the
// host, then upload it to s->chirp_gpu[0] (one fft_len-long chirp per
// channel, contiguous). Can be called again after changing DM/freqs as
// long as fft_len is unchanged.
void init_chirp(dedispersion_setup *s) {
    // Alloc temporary host memory for the full (nchan x fft_len) chirp table
    float2 *chirp_host;
    size_t chirp_size = sizeof(float2) * s->fft_len * s->nchan;
    // TODO check that this isn't too big
    printf("sizeof(chirp_gpu) = %d MB\n", (int)(chirp_size >> 20));
    chirp_host = (float2 *)malloc(chirp_size);
    if (chirp_host == NULL) {
        // was unchecked: a failed malloc would segfault in the fill loop below
        printf("init_chirp error: malloc of %d MB failed\n",
                (int)(chirp_size >> 20));
        return;
    }
    double dmfac = s->dm*2.0*M_PI/(2.41e-10*(1.0+s->earth_z4/1.0e4));
    double band_sign = (s->bw<0.0) ? -1.0 : 1.0;
    int i, ichan;
    for (ichan=0; ichan<s->nchan; ichan++) {
        for (i=0; i<s->fft_len; i++) {
            // Frequency offset of FFT bin i, wrapped into (-bw/2, bw/2]
            double dfreq = (double)i * s->bw / (double)s->fft_len;
            if (i>s->fft_len/2) dfreq -= s->bw;
            double freqfac = dfreq / s->freq[ichan];
            freqfac = freqfac * freqfac / (s->freq[ichan] + dfreq);
            double arg = band_sign * dmfac * freqfac;
            // This makes Ingrid happy, but I have no idea where this
            // particular formula comes from.
            // double taper = 1.0/sqrt(1.0 + pow(fabs(dfreq)/(0.47*s->bw),80));
            double taper = 1.0;
            // Conjugate chirp, pre-scaled by 1/fft_len so the inverse FFT
            // needs no separate normalization pass
            chirp_host[ichan*s->fft_len + i].x =
                (float)(cos(arg)*taper/(double)s->fft_len);
            chirp_host[ichan*s->fft_len + i].y =
                -1.0*(float)(sin(arg)*taper/(double)s->fft_len);
        }
    }
    // Transfer the values to the gpu, free host memory
    hipMemcpy(s->chirp_gpu[0], chirp_host, chirp_size, hipMemcpyHostToDevice);
    free(chirp_host);
}
/* Initialize all necessary memory, etc for doing dedispersion
* at the given params. In the struct, the following MUST be
* filled in:
* rf, bw, dm, npts_per_block, npol
* Optionally, fft_len and overlap can be specified as well. If
* either of these is set to 0, it will be computed automatically
* from the input params.
* TODO: more error checking
*/
// Allocate all GPU/pinned-host buffers, derive fft_len/overlap when not
// supplied, build the chirp table, and plan the FFT. See the comment block
// above for required fields in s.
// Fixes: the hipHostMalloc and hipfftPlan1d results were assigned to
// locals (rv, fft_rv) but never inspected; failures are now reported.
extern "C"
void init_dedispersion(dedispersion_setup *s) {
    // Find lowest freq
    int i;
    double f_chan_lo_mhz = s->freq[0];
    for (i=1; i<s->nchan; i++)
        if (s->freq[i] < f_chan_lo_mhz)
            f_chan_lo_mhz = s->freq[i];
    printf("rf=%f bw=%f dm=%f freq_lo=%f\n", s->rf, s->bw, s->dm,
            f_chan_lo_mhz);
    // Calc various parameters
    double f_lo_ghz = (f_chan_lo_mhz - fabs(s->bw)/2.0)/1.0e3;
    double f_hi_ghz = (f_chan_lo_mhz + fabs(s->bw)/2.0)/1.0e3;
    // Dispersion sweep across the band in us, then converted to samples
    double chirp_len_samples = 4150. * s->dm *
        (1.0/(f_lo_ghz*f_lo_ghz) - 1.0/(f_hi_ghz*f_hi_ghz));
    printf("Chirp length = %f us\n", chirp_len_samples);
    chirp_len_samples *= fabs(s->bw);
    printf("Chirp length = %f samples\n", chirp_len_samples);
    if (s->overlap==0 && chirp_len_samples!=0.0) {
        // Do nearest power of 2 for now. Find out what's optimal
        // Also find out what values don't work.
        s->overlap=1;
        while (s->overlap<chirp_len_samples) s->overlap <<= 1;
    }
    if (s->fft_len==0) {
        // Rough optimization based on testing w/ CUDA 2.3
        // Could make a "dedispersion plan" that tests?
        s->fft_len = 16*1024;
        if (s->overlap <= 1024) s->fft_len = 32*1024; // previously 16
        else if (s->overlap <= 2048) s->fft_len = 64*1024;
        else if (s->overlap <= 16*1024) s->fft_len = 128*1024;
        else if (s->overlap <= 64*1024) s->fft_len = 256*1024;
        while (s->fft_len < 2.0*s->overlap) s->fft_len *= 2;
        if (s->fft_len > 8*1024*1024) {
            printf("init_dedispersion error: FFT length too large! (%d)\n",
                    s->fft_len);
            s->fft_len = 8*1024*1024;
        }
    }
    printf("fft_len=%d overlap=%d\n", s->fft_len, s->overlap); fflush(stdout);
    // Figure out how many overlapped FFTs fit in one input block
    s->nfft_per_block = 1;
    int npts_used = s->fft_len;
    while(npts_used <= s->npts_per_block) {
        s->nfft_per_block++;
        npts_used = s->nfft_per_block*(s->fft_len-s->overlap) + s->overlap;
    }
    s->nfft_per_block--;
    npts_used = s->nfft_per_block*(s->fft_len-s->overlap) + s->overlap;
    // Allocate memory
    const size_t bytes_per_sample = 4; // 8-bit complex 2-pol
    const size_t bytes_in = bytes_per_sample * s->npts_per_block;
    const size_t bytes_tot = bytes_per_sample * s->fft_len*s->nfft_per_block;
    const size_t bytes_databuf = sizeof(float2)*s->fft_len*s->nfft_per_block;
    const size_t bytes_chirp = sizeof(float2)*s->fft_len*s->nchan;
    size_t total_gpu_mem = 0;
    printf("npts_per_block=%d nfft_per_block=%d npts_used=%d diff=%d\n",
            s->npts_per_block, s->nfft_per_block, npts_used,
            s->npts_per_block - npts_used);
    fflush(stdout);
    hipError_t rv = hipHostMalloc((void**)&(s->tbuf_host), bytes_in, hipHostMallocWriteCombined);
    if (rv != hipSuccess)  // was silently ignored; a failure would crash later
        printf("init_dedispersion error: hipHostMalloc failed (%s)\n",
                hipGetErrorString(rv));
    hipMalloc((void**)&s->tbuf_gpu, bytes_in);
    total_gpu_mem += bytes_in;
    hipMalloc((void**)&s->overlap_gpu, bytes_tot);
    total_gpu_mem += bytes_tot;
    hipMalloc((void**)&s->databuf0_gpu, 2 * bytes_databuf);
    // databuf1 aliases the second half of the databuf0 allocation
    s->databuf1_gpu = s->databuf0_gpu + s->fft_len*s->nfft_per_block;
    total_gpu_mem += 2*bytes_databuf;
    hipMalloc((void**)&s->chirp_gpu[0], bytes_chirp);
    total_gpu_mem += bytes_chirp;
    // Per-channel chirp pointers into the single chirp allocation
    for (i=0; i<s->nchan; i++) s->chirp_gpu[i] = s->chirp_gpu[0] + i*s->fft_len;
    printf("total_gpu_mem = %d MB\n", (int)(total_gpu_mem >> 20)); // cast: %d vs size_t
    hipDeviceSynchronize();
    // Init chirp function
    init_chirp(s);
    hipDeviceSynchronize();
    // Plan FFT
    // nfft_per_block is only for 1 pol, hence the factor of 2 here
    hipfftResult fft_rv = hipfftPlan1d(&s->plan, s->fft_len, HIPFFT_C2C, 2*s->nfft_per_block);
    if (fft_rv != HIPFFT_SUCCESS)  // was silently ignored (fft_rv unused)
        printf("init_dedispersion error: hipfftPlan1d failed (%d)\n", (int)fft_rv);
    hipDeviceSynchronize();
    // Zero out fold buffers (call init_fold() to set them up)
    s->fold_phase = NULL;
    s->fold_step = NULL;
    s->foldbuf_gpu = NULL;
    // Zero out ds buffer
    s->dsbuf_gpu = NULL;
    // Zero out timers
    memset(&s->time, 0, sizeof(dedispersion_times));
    // Check errors
    hipDeviceSynchronize();
}
/* Actually do the dedispersion */
/* TODO: add benchmarking info */
/* Run one data block through the full coherent-dedispersion pipeline:
 * host->GPU copy, overlap expansion, byte->float unpack, forward FFT,
 * chirp multiply for channel ichan, inverse FFT (plus detection and
 * GPU->host transfer when DETECT_AND_TRANSFER is enabled). Per-stage
 * times are accumulated into s->time via paired events t[k], t[k+1].
 * NOTE(review): `out` is unused here; results remain in
 * s->databuf0_gpu/databuf1_gpu unless DETECT_AND_TRANSFER is set. */
extern "C"
void dedisperse(dedispersion_setup *s, int ichan,
const unsigned char *in, float *out) {
hipfftResult fft_rv;
/* Various sizes */
const size_t bytes_per_sample = 4; // 8-bit complex 2-pol
const size_t bytes_in = bytes_per_sample * s->npts_per_block;
const size_t npts_tot = s->fft_len*s->nfft_per_block;
/* Benchmarking stuff
* Do we want to create these each time?
*/
#define NT 12
hipEvent_t t[NT];
int it;
for (it=0; it<NT; it++) hipEventCreate(&t[it]);
it=0;
/* copy input data to transfer buffer */
memcpy(s->tbuf_host, in, bytes_in);
/* t[0] and t[1] are recorded back-to-back so the timing loop below can
* start its pairwise intervals at t[1]. */
hipEventRecord(t[it], 0); it++;
hipEventRecord(t[it], 0); it++;
/* Copy data to GPU */
hipMemcpy(s->tbuf_gpu, s->tbuf_host, bytes_in, hipMemcpyHostToDevice);
hipEventRecord(t[it], 0); it++;
/* Expand overlap */
expand_overlap(s);
hipEventRecord(t[it], 0); it++;
/* Convert to floating point */
hipLaunchKernelGGL(( byte_to_float_2pol_complex), dim3(16),dim3(128), 0, 0, (unsigned short *)s->overlap_gpu, s->databuf0_gpu, s->databuf1_gpu, npts_tot);
hipEventRecord(t[it], 0); it++;
/* Forward FFT */
fft_rv = hipfftExecC2C(s->plan, s->databuf0_gpu, s->databuf0_gpu, HIPFFT_FORWARD);
hipEventRecord(t[it], 0); it++;
//printf("fft1 = %d\n", fft_rv);
/* Multiply by chirp; gridDim.y splits each FFT into 4096-element chunks */
dim3 gd(2*s->nfft_per_block, s->fft_len/4096, 1);
hipLaunchKernelGGL(( vector_multiply_complex), dim3(gd),dim3(64), 0, 0, s->databuf0_gpu,
s->chirp_gpu[ichan], s->fft_len);
hipEventRecord(t[it], 0); it++;
/* Inverse FFT */
fft_rv = hipfftExecC2C(s->plan, s->databuf0_gpu, s->databuf0_gpu, HIPFFT_BACKWARD);
hipEventRecord(t[it], 0); it++;
//printf("fft2 = %d\n", fft_rv);
#define DETECT_AND_TRANSFER 0
#if DETECT_AND_TRANSFER
/* Detect */
hipLaunchKernelGGL(( detect_4pol), dim3(32),dim3(64), 0, 0, s->databuf0_gpu, s->databuf1_gpu, npts_tot);
hipEventRecord(t[it], 0); it++;
/* Re-quantize to 8 bit?? */
/* Transfer data back, removing non-valid (overlapped) FFT edges */
transfer_collapse_overlap(s);
hipEventRecord(t[it], 0); it++;
#endif
/* Final event: synchronizing on it flushes the whole pipeline */
hipEventRecord(t[it], 0);
hipEventSynchronize(t[it]);
/* Compute timers: interval t[k]..t[k+1] corresponds to the stage
* recorded between those two events above (order must match). */
float ttmp;
it=1;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.transfer_to_gpu += ttmp;
s->time.total2 += ttmp;
it++;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.overlap += ttmp;
s->time.total2 += ttmp;
it++;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.bit_to_float += ttmp;
s->time.total2 += ttmp;
it++;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.fft += ttmp;
s->time.total2 += ttmp;
it++;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.xmult += ttmp;
s->time.total2 += ttmp;
it++;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.fft += ttmp;
s->time.total2 += ttmp;
it++;
#if DETECT_AND_TRANSFER
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.detect += ttmp;
s->time.total2 += ttmp;
it++;
hipEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.transfer_to_host += ttmp;
s->time.total2 += ttmp;
it++;
#endif
hipEventElapsedTime(&ttmp, t[0], t[it+1]);
s->time.total += ttmp;
/* Only (fft_len - overlap) samples per FFT are valid output */
int nvalid = s->nfft_per_block*(s->fft_len-s->overlap);
s->time.nsamp_tot += nvalid;
for (it=0; it<NT; it++) hipEventDestroy(t[it]);
}
/* Actually just unpack the data */
/* Unpack-only path: transfer one block to the GPU, expand the FFT overlap,
 * and convert bytes to floats -- no FFT / chirp / detection. Timing mirrors
 * dedisperse().
 * Fix: the timer section was copy/pasted from dedisperse() and kept reading
 * events t[5]..t[7] that this function never records, feeding garbage into
 * s->time.fft, s->time.xmult and s->time.total; those reads are removed and
 * the total now spans t[0]..t[5] (the last recorded event). The unused
 * fft_rv local is also gone.
 * NOTE(review): ichan and out are unused here, as in the original. */
extern "C"
void unpack(dedispersion_setup *s, int ichan,
    const unsigned char *in, float *out) {
    /* Various sizes */
    const size_t bytes_per_sample = 4; // 8-bit complex 2-pol
    const size_t bytes_in = bytes_per_sample * s->npts_per_block;
    const size_t npts_tot = s->fft_len*s->nfft_per_block;
    /* Benchmarking events (NT matches dedisperse(); only t[0..5] are used) */
#define NT 12
    hipEvent_t t[NT];
    int it;
    for (it=0; it<NT; it++) hipEventCreate(&t[it]);
    it=0;
    /* copy input data to transfer buffer */
    memcpy(s->tbuf_host, in, bytes_in);
    /* t[0]/t[1] recorded back-to-back so pairwise intervals start at t[1] */
    hipEventRecord(t[it], 0); it++;
    hipEventRecord(t[it], 0); it++;
    /* Copy data to GPU */
    hipMemcpy(s->tbuf_gpu, s->tbuf_host, bytes_in, hipMemcpyHostToDevice);
    hipEventRecord(t[it], 0); it++;
    /* Expand overlap */
    expand_overlap(s);
    hipEventRecord(t[it], 0); it++;
    /* Convert to floating point */
    hipLaunchKernelGGL(( byte_to_float_2pol_complex), dim3(16),dim3(128), 0, 0, (unsigned short *)s->overlap_gpu, s->databuf0_gpu, s->databuf1_gpu, npts_tot);
    hipEventRecord(t[it], 0); it++;
    /* Final event; synchronizing on it flushes the pipeline */
    hipEventRecord(t[it], 0);
    hipEventSynchronize(t[it]);
    /* Compute timers from recorded events t[1]..t[5] only */
    float ttmp;
    it=1;
    hipEventElapsedTime(&ttmp, t[it], t[it+1]);
    s->time.transfer_to_gpu += ttmp;
    s->time.total2 += ttmp;
    it++;
    hipEventElapsedTime(&ttmp, t[it], t[it+1]);
    s->time.overlap += ttmp;
    s->time.total2 += ttmp;
    it++;
    hipEventElapsedTime(&ttmp, t[it], t[it+1]);
    s->time.bit_to_float += ttmp;
    s->time.total2 += ttmp;
    it++;
    hipEventElapsedTime(&ttmp, t[0], t[it+1]);
    s->time.total += ttmp;
    int nvalid = s->nfft_per_block*(s->fft_len-s->overlap);
    s->time.nsamp_tot += nvalid;
    for (it=0; it<NT; it++) hipEventDestroy(t[it]);
}
}
/* Free any resources associated with dedispersion */
/* Release all GPU and pinned-host resources from init_dedispersion().
 * databuf1_gpu is not freed separately because it aliases the second half
 * of the databuf0_gpu allocation; likewise chirp_gpu[1..] alias
 * chirp_gpu[0]. */
extern "C"
void free_dedispersion(dedispersion_setup *s) {
hipDeviceSynchronize(); // Need?
hipHostFree(s->tbuf_host);
hipFree(s->tbuf_gpu);
hipFree(s->overlap_gpu);
hipFree(s->databuf0_gpu);
hipFree(s->chirp_gpu[0]);
hipDeviceReset();
}
/* Helper macros: print one timer field as a percentage of total time,
 * long form (stdout) and short form (appended to the open FILE* f). */
#define print_percent(var) \
printf(" %7.2f%% %s\n", 100.0*s->time.var/s->time.total, #var)
#define print_percent_short(var) \
fprintf(f, "%.3f ", s->time.var/s->time.total)
/* Print the accumulated per-stage timing breakdown to stdout and append a
 * one-line machine-readable summary to dedisp_timing.dat. */
void print_timing_report(dedispersion_setup *s) {
/* Print to screen */
printf("\n");
printf("Total time = %6.1f s (%.4f ns/samp)\n",
s->time.total/1e3, 1e6*s->time.total/(double)s->time.nsamp_tot);
printf("Total2 time = %6.1f s (%.4f ns/samp)\n",
s->time.total2/1e3, 1e6*s->time.total2/(double)s->time.nsamp_tot);
//printf(" %f ns/sample\n", 1e6*s->time.total/(double)s->time.nsamp_tot);
print_percent(transfer_to_gpu);
print_percent(overlap);
print_percent(bit_to_float);
print_percent(fft);
print_percent(xmult);
#if DETECT_AND_TRANSFER
print_percent(detect);
#endif
print_percent(fold_mem);
print_percent(fold_blocks);
print_percent(fold_combine);
print_percent(downsample);
print_percent(transfer_to_host);
/* print short version to file (opened in append mode) */
FILE *f = fopen("dedisp_timing.dat", "a");
fprintf(f, "%7d %6d %.4e %.4e ", s->fft_len, s->overlap,
s->time.total/(double)s->time.nsamp_tot,
s->gp->drop_frac_tot);
print_percent_short(transfer_to_gpu);
print_percent_short(overlap);
print_percent_short(bit_to_float);
print_percent_short(fft);
print_percent_short(xmult);
print_percent_short(fold_mem);
print_percent_short(fold_blocks);
print_percent_short(fold_combine);
print_percent_short(downsample);
print_percent_short(transfer_to_host);
fprintf(f, "\n");
fclose(f);
}
| 17377895ee46af9825db93fa749f6ea354513bb1.cu | /* dedisperse_gpu.cu
* Functions for coherent dedispersion in CUDA/GPUs
* Paul Demorest, 2009/05
*/
#include "dedisperse_gpu.h"
/* CUDA kernel to convert bytes to floats. Also splits incoming
* data into two polarizations (assuming polns are interleaved
* in the raw data).
*/
/* Unpack interleaved 8-bit complex dual-pol samples into two float2 arrays.
 * Each 32-bit input word holds (x.re, x.im, y.re, y.im) as signed bytes;
 * pol X goes to outx[i], pol Y to outy[i]. Grid-stride loop, so any launch
 * configuration covers all n samples.
 * Fix: index and stride are size_t (were int) so blocks*threads products
 * and n > INT_MAX cannot overflow. */
__global__ void byte_to_float_2pol_complex(
    unsigned short *in, float2 *outx, float2 *outy,
    size_t n) {
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    const size_t tId = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    char4 *in_8bit = (char4 *)in;  // reinterpret as 4 packed signed bytes
    for (size_t i = tId; i < n; i += stride) {
        outx[i].x = __int2float_rn(in_8bit[i].x);
        outx[i].y = __int2float_rn(in_8bit[i].y);
        outy[i].x = __int2float_rn(in_8bit[i].z);
        outy[i].y = __int2float_rn(in_8bit[i].w);
    }
}
/* CUDA kernel for inplace complex vector (elementwise) multiplication:
*
* Does a[i] *= b[i] for 0<i<n.
*
* a can contain multiple vectors to be multiplied by b, each
* should run in its own thread block, ie:
* vec_mult_complex<<<n_vector,64>>>(a,b,n_element_per_vector);
* where a has n_element_per_vector*n_vector entries, and b
* has n_element.
*/
#if 1
/* In-place complex multiply: for each of gridDim.x row-vectors stored
 * back-to-back in a, do a[row][i] *= b[i]. gridDim.y splits each row into
 * chunks worked on by separate blocks.
 * Fix: chunk size is now ceil(nelem / gridDim.y). The old floor division
 * silently skipped the last (nelem % gridDim.y) elements of every row when
 * gridDim.y did not divide nelem. Current callers pass power-of-two sizes,
 * so the tail was empty in practice, but the kernel is now safe for any
 * nelem/gridDim.y combination. */
__global__ void vector_multiply_complex(float2 *a, const float2 *b, unsigned nelem) {
    const unsigned nelem_chunk = (nelem + gridDim.y - 1) / gridDim.y;  // ceil-div
    const unsigned elem0 = blockIdx.y * nelem_chunk;
    const unsigned elem1 = elem0 + nelem_chunk > nelem ? nelem : elem0 + nelem_chunk;
    float2 *ptr = &a[blockIdx.x*nelem];
    float2 tmp;
    for (unsigned i = elem0 + threadIdx.x; i < elem1; i += blockDim.x) {
        tmp.x = ptr[i].x*b[i].x - ptr[i].y*b[i].y;  // complex multiply, real part
        tmp.y = ptr[i].y*b[i].x + ptr[i].x*b[i].y;  // imaginary part
        ptr[i] = tmp;
    }
}
#endif
#if 0
/* Disabled (#if 0) single-axis variant: one thread block per row-vector,
 * no gridDim.y chunking. Kept for reference/benchmark comparison against
 * the enabled version above. */
__global__ void vector_multiply_complex(float2 *a, const float2 *b,
unsigned nelem) {
float2 *ptr = &a[blockIdx.x*nelem];
float2 tmp;
for (int i=threadIdx.x; i<nelem; i+=blockDim.x) {
tmp.x = ptr[i].x*b[i].x - ptr[i].y*b[i].y;
tmp.y = ptr[i].y*b[i].x + ptr[i].x*b[i].y;
ptr[i] = tmp;
}
}
#endif
/* Full-stokes detection "in-place"
* vx and vy arrays are voltage data. Output total power
* terms go into vx, and cross terms in vy. n is total number
* of data points.
* TODO: check signs, etc
* Also, if we're folding on the GPU it probably makes more sense
* to combine the two operations.
*/
/* Full-Stokes-style detection in place. On return, vx holds the two
 * total-power terms (|X|^2 in .x, |Y|^2 in .y) and vy holds the cross
 * terms (Re(X*conj(Y)) in .x, Im(X*conj(Y)) in .y). n is the total
 * number of complex samples. Grid-stride loop. */
__global__ void detect_4pol(float2 *vx, float2 *vy, size_t n) {
    const int step = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += step) {
        const float2 x = vx[idx];
        const float2 y = vy[idx];
        float2 power, cross;
        power.x = x.x*x.x + x.y*x.y;
        power.y = y.x*y.x + y.y*y.y;
        cross.x = x.x*y.x + x.y*y.y;
        cross.y = x.x*y.y - x.y*y.x;
        vx[idx] = power;
        vy[idx] = cross;
    }
}
/* Expand the original input out so that FFTs will overlap */
/* Duplicate the input stream so consecutive FFT windows overlap.
 * Each output window of fft_len samples is copied from an input offset
 * that advances by only (fft_len - overlap) samples, so neighbouring
 * windows share `overlap` samples at their edges. All quantities below
 * are in bytes (4 bytes per sample: 8-bit complex, 2 pols).
 * NOTE(review): assumes npts_per_block >= fft_len; if not, the size_t
 * subtraction in the loop bound underflows -- confirm callers guarantee
 * this. */
void expand_overlap(dedispersion_setup *s) {
    const size_t bytes_per_sample = 4; // 8-bit complex, 2 pol
    const size_t bytes_per_fft = bytes_per_sample * s->fft_len;
    const size_t bytes_overlap = bytes_per_sample * s->overlap;
    const size_t bytes_total = bytes_per_sample * s->npts_per_block;
    const size_t bytes_step = bytes_per_fft - bytes_overlap;
    size_t icount = 0, ocount = 0;
    while (icount <= bytes_total - bytes_per_fft) {
        cudaMemcpy(s->overlap_gpu + ocount, s->tbuf_gpu + icount,
                   bytes_per_fft, cudaMemcpyDeviceToDevice);
        icount += bytes_step;
        ocount += bytes_per_fft;
    }
}
/* Transfer the dedispersed data back to the main system memory
* while simultaneously collapsing the overlap (ie, removing
* invalid points at FFT edge).
*/
// Copy results GPU->host, dropping the overlap/2 contaminated samples at
// each FFT edge so only the valid centre of each transform is kept.
void transfer_collapse_overlap(dedispersion_setup *s) {
/* At this point, databuf0 holds total-power terms (AA, BB)
* and databuf1 holds poln cross-terms (Re, Im(AB)).
*/
const int valid_samp_per_fft = s->fft_len - s->overlap;
unsigned ifft;
// TODO Think about the best way to organize this data for later
// TODO Make sure we're getting the right part of the FFT
for (ifft=0; ifft<s->nfft_per_block; ifft++) {
// Each memcpy transfers a single FFT's worth of valid data
// from 2 (out of 4 total) polns.
// Source skips overlap/2 complex samples at the leading edge; size is
// valid_samp_per_fft float2 elements (2 floats each).
// NOTE(review): the host-side layout appears to be 4 blocks of
// valid_samp_per_fft samples per FFT (offsets 4*ifft+0 and 4*ifft+2) --
// confirm result_host's element type/stride against dedisperse_gpu.h.
cudaMemcpy(s->result_host + (4*ifft+0)*valid_samp_per_fft,
s->databuf0_gpu + ifft*s->fft_len + s->overlap/2,
valid_samp_per_fft * sizeof(float) * 2,
cudaMemcpyDeviceToHost);
cudaMemcpy(s->result_host + (4*ifft+2)*valid_samp_per_fft,
s->databuf1_gpu + ifft*s->fft_len + s->overlap/2,
valid_samp_per_fft * sizeof(float) * 2,
cudaMemcpyDeviceToHost);
}
}
/* Fills in the freq-domain chirp, given the input params.
* Assumes memory has already been allocated. If fft_len has not
* been changed, this func can be called again to change the
* DM, freq, etc currently being applied. Formula is taken
* from ASP's CalcChirp.
*/
// Build the frequency-domain dedispersion chirp for every channel on the
// host, then upload it to s->chirp_gpu[0] (one fft_len-long chirp per
// channel, contiguous). Can be called again after changing DM/freqs as
// long as fft_len is unchanged.
void init_chirp(dedispersion_setup *s) {
    // Alloc temporary host memory for the full (nchan x fft_len) chirp table
    float2 *chirp_host;
    size_t chirp_size = sizeof(float2) * s->fft_len * s->nchan;
    // TODO check that this isn't too big
    printf("sizeof(chirp_gpu) = %d MB\n", (int)(chirp_size >> 20));
    chirp_host = (float2 *)malloc(chirp_size);
    if (chirp_host == NULL) {
        // was unchecked: a failed malloc would segfault in the fill loop below
        printf("init_chirp error: malloc of %d MB failed\n",
                (int)(chirp_size >> 20));
        return;
    }
    double dmfac = s->dm*2.0*M_PI/(2.41e-10*(1.0+s->earth_z4/1.0e4));
    double band_sign = (s->bw<0.0) ? -1.0 : 1.0;
    int i, ichan;
    for (ichan=0; ichan<s->nchan; ichan++) {
        for (i=0; i<s->fft_len; i++) {
            // Frequency offset of FFT bin i, wrapped into (-bw/2, bw/2]
            double dfreq = (double)i * s->bw / (double)s->fft_len;
            if (i>s->fft_len/2) dfreq -= s->bw;
            double freqfac = dfreq / s->freq[ichan];
            freqfac = freqfac * freqfac / (s->freq[ichan] + dfreq);
            double arg = band_sign * dmfac * freqfac;
            // This makes Ingrid happy, but I have no idea where this
            // particular formula comes from.
            // double taper = 1.0/sqrt(1.0 + pow(fabs(dfreq)/(0.47*s->bw),80));
            double taper = 1.0;
            // Conjugate chirp, pre-scaled by 1/fft_len so the inverse FFT
            // needs no separate normalization pass
            chirp_host[ichan*s->fft_len + i].x =
                (float)(cos(arg)*taper/(double)s->fft_len);
            chirp_host[ichan*s->fft_len + i].y =
                -1.0*(float)(sin(arg)*taper/(double)s->fft_len);
        }
    }
    // Transfer the values to the gpu, free host memory
    cudaMemcpy(s->chirp_gpu[0], chirp_host, chirp_size, cudaMemcpyHostToDevice);
    free(chirp_host);
}
/* Initialize all necessary memory, etc for doing dedispersion
* at the given params. In the struct, the following MUST be
* filled in:
* rf, bw, dm, npts_per_block, npol
* Optionally, fft_len and overlap can be specified as well. If
* either of these is set to 0, it will be computed automatically
* from the input params.
* TODO: more error checking
*/
// Allocate all GPU/pinned-host buffers, derive fft_len/overlap when not
// supplied, build the chirp table, and plan the FFT. See the comment block
// above for required fields in s.
// Fixes: the cudaHostAlloc and cufftPlan1d results were assigned to locals
// (rv, fft_rv) but never inspected; failures are now reported. Deprecated
// cudaThreadSynchronize() calls are replaced by cudaDeviceSynchronize().
extern "C"
void init_dedispersion(dedispersion_setup *s) {
    // Find lowest freq
    int i;
    double f_chan_lo_mhz = s->freq[0];
    for (i=1; i<s->nchan; i++)
        if (s->freq[i] < f_chan_lo_mhz)
            f_chan_lo_mhz = s->freq[i];
    printf("rf=%f bw=%f dm=%f freq_lo=%f\n", s->rf, s->bw, s->dm,
            f_chan_lo_mhz);
    // Calc various parameters
    double f_lo_ghz = (f_chan_lo_mhz - fabs(s->bw)/2.0)/1.0e3;
    double f_hi_ghz = (f_chan_lo_mhz + fabs(s->bw)/2.0)/1.0e3;
    // Dispersion sweep across the band in us, then converted to samples
    double chirp_len_samples = 4150. * s->dm *
        (1.0/(f_lo_ghz*f_lo_ghz) - 1.0/(f_hi_ghz*f_hi_ghz));
    printf("Chirp length = %f us\n", chirp_len_samples);
    chirp_len_samples *= fabs(s->bw);
    printf("Chirp length = %f samples\n", chirp_len_samples);
    if (s->overlap==0 && chirp_len_samples!=0.0) {
        // Do nearest power of 2 for now. Find out what's optimal
        // Also find out what values don't work.
        s->overlap=1;
        while (s->overlap<chirp_len_samples) s->overlap <<= 1;
    }
    if (s->fft_len==0) {
        // Rough optimization based on testing w/ CUDA 2.3
        // Could make a "dedispersion plan" that tests?
        s->fft_len = 16*1024;
        if (s->overlap <= 1024) s->fft_len = 32*1024; // previously 16
        else if (s->overlap <= 2048) s->fft_len = 64*1024;
        else if (s->overlap <= 16*1024) s->fft_len = 128*1024;
        else if (s->overlap <= 64*1024) s->fft_len = 256*1024;
        while (s->fft_len < 2.0*s->overlap) s->fft_len *= 2;
        if (s->fft_len > 8*1024*1024) {
            printf("init_dedispersion error: FFT length too large! (%d)\n",
                    s->fft_len);
            s->fft_len = 8*1024*1024;
        }
    }
    printf("fft_len=%d overlap=%d\n", s->fft_len, s->overlap); fflush(stdout);
    // Figure out how many overlapped FFTs fit in one input block
    s->nfft_per_block = 1;
    int npts_used = s->fft_len;
    while(npts_used <= s->npts_per_block) {
        s->nfft_per_block++;
        npts_used = s->nfft_per_block*(s->fft_len-s->overlap) + s->overlap;
    }
    s->nfft_per_block--;
    npts_used = s->nfft_per_block*(s->fft_len-s->overlap) + s->overlap;
    // Allocate memory
    const size_t bytes_per_sample = 4; // 8-bit complex 2-pol
    const size_t bytes_in = bytes_per_sample * s->npts_per_block;
    const size_t bytes_tot = bytes_per_sample * s->fft_len*s->nfft_per_block;
    const size_t bytes_databuf = sizeof(float2)*s->fft_len*s->nfft_per_block;
    const size_t bytes_chirp = sizeof(float2)*s->fft_len*s->nchan;
    size_t total_gpu_mem = 0;
    printf("npts_per_block=%d nfft_per_block=%d npts_used=%d diff=%d\n",
            s->npts_per_block, s->nfft_per_block, npts_used,
            s->npts_per_block - npts_used);
    fflush(stdout);
    cudaError_t rv = cudaHostAlloc((void**)&(s->tbuf_host), bytes_in, cudaHostAllocWriteCombined);
    if (rv != cudaSuccess)  // was silently ignored; a failure would crash later
        printf("init_dedispersion error: cudaHostAlloc failed (%s)\n",
                cudaGetErrorString(rv));
    cudaMalloc((void**)&s->tbuf_gpu, bytes_in);
    total_gpu_mem += bytes_in;
    cudaMalloc((void**)&s->overlap_gpu, bytes_tot);
    total_gpu_mem += bytes_tot;
    cudaMalloc((void**)&s->databuf0_gpu, 2 * bytes_databuf);
    // databuf1 aliases the second half of the databuf0 allocation
    s->databuf1_gpu = s->databuf0_gpu + s->fft_len*s->nfft_per_block;
    total_gpu_mem += 2*bytes_databuf;
    cudaMalloc((void**)&s->chirp_gpu[0], bytes_chirp);
    total_gpu_mem += bytes_chirp;
    // Per-channel chirp pointers into the single chirp allocation
    for (i=0; i<s->nchan; i++) s->chirp_gpu[i] = s->chirp_gpu[0] + i*s->fft_len;
    printf("total_gpu_mem = %d MB\n", (int)(total_gpu_mem >> 20)); // cast: %d vs size_t
    cudaDeviceSynchronize();
    // Init chirp function
    init_chirp(s);
    cudaDeviceSynchronize();
    // Plan FFT
    // nfft_per_block is only for 1 pol, hence the factor of 2 here
    cufftResult fft_rv = cufftPlan1d(&s->plan, s->fft_len, CUFFT_C2C, 2*s->nfft_per_block);
    if (fft_rv != CUFFT_SUCCESS)  // was silently ignored (fft_rv unused)
        printf("init_dedispersion error: cufftPlan1d failed (%d)\n", (int)fft_rv);
    cudaDeviceSynchronize();
    // Zero out fold buffers (call init_fold() to set them up)
    s->fold_phase = NULL;
    s->fold_step = NULL;
    s->foldbuf_gpu = NULL;
    // Zero out ds buffer
    s->dsbuf_gpu = NULL;
    // Zero out timers
    memset(&s->time, 0, sizeof(dedispersion_times));
    // Check errors
    cudaDeviceSynchronize();
}
/* Actually do the dedispersion */
/* TODO: add benchmarking info */
/* Run one data block through the full coherent-dedispersion pipeline:
 * host->GPU copy, overlap expansion, byte->float unpack, forward FFT,
 * chirp multiply for channel ichan, inverse FFT (plus detection and
 * GPU->host transfer when DETECT_AND_TRANSFER is enabled). Per-stage
 * times are accumulated into s->time via paired events t[k], t[k+1].
 * NOTE(review): `out` is unused here; results remain in
 * s->databuf0_gpu/databuf1_gpu unless DETECT_AND_TRANSFER is set. */
extern "C"
void dedisperse(dedispersion_setup *s, int ichan,
const unsigned char *in, float *out) {
cufftResult fft_rv;
/* Various sizes */
const size_t bytes_per_sample = 4; // 8-bit complex 2-pol
const size_t bytes_in = bytes_per_sample * s->npts_per_block;
const size_t npts_tot = s->fft_len*s->nfft_per_block;
/* Benchmarking stuff
* Do we want to create these each time?
*/
#define NT 12
cudaEvent_t t[NT];
int it;
for (it=0; it<NT; it++) cudaEventCreate(&t[it]);
it=0;
/* copy input data to transfer buffer */
memcpy(s->tbuf_host, in, bytes_in);
/* t[0] and t[1] are recorded back-to-back so the timing loop below can
* start its pairwise intervals at t[1]. */
cudaEventRecord(t[it], 0); it++;
cudaEventRecord(t[it], 0); it++;
/* Copy data to GPU */
cudaMemcpy(s->tbuf_gpu, s->tbuf_host, bytes_in, cudaMemcpyHostToDevice);
cudaEventRecord(t[it], 0); it++;
/* Expand overlap */
expand_overlap(s);
cudaEventRecord(t[it], 0); it++;
/* Convert to floating point */
byte_to_float_2pol_complex<<<16,128>>>((unsigned short *)s->overlap_gpu, s->databuf0_gpu, s->databuf1_gpu, npts_tot);
cudaEventRecord(t[it], 0); it++;
/* Forward FFT */
fft_rv = cufftExecC2C(s->plan, s->databuf0_gpu, s->databuf0_gpu, CUFFT_FORWARD);
cudaEventRecord(t[it], 0); it++;
//printf("fft1 = %d\n", fft_rv);
/* Multiply by chirp; gridDim.y splits each FFT into 4096-element chunks */
dim3 gd(2*s->nfft_per_block, s->fft_len/4096, 1);
vector_multiply_complex<<<gd,64>>>(s->databuf0_gpu,
s->chirp_gpu[ichan], s->fft_len);
cudaEventRecord(t[it], 0); it++;
/* Inverse FFT */
fft_rv = cufftExecC2C(s->plan, s->databuf0_gpu, s->databuf0_gpu, CUFFT_INVERSE);
cudaEventRecord(t[it], 0); it++;
//printf("fft2 = %d\n", fft_rv);
#define DETECT_AND_TRANSFER 0
#if DETECT_AND_TRANSFER
/* Detect */
detect_4pol<<<32,64>>>(s->databuf0_gpu, s->databuf1_gpu, npts_tot);
cudaEventRecord(t[it], 0); it++;
/* Re-quantize to 8 bit?? */
/* Transfer data back, removing non-valid (overlapped) FFT edges */
transfer_collapse_overlap(s);
cudaEventRecord(t[it], 0); it++;
#endif
/* Final event: synchronizing on it flushes the whole pipeline */
cudaEventRecord(t[it], 0);
cudaEventSynchronize(t[it]);
/* Compute timers: interval t[k]..t[k+1] corresponds to the stage
* recorded between those two events above (order must match). */
float ttmp;
it=1;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.transfer_to_gpu += ttmp;
s->time.total2 += ttmp;
it++;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.overlap += ttmp;
s->time.total2 += ttmp;
it++;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.bit_to_float += ttmp;
s->time.total2 += ttmp;
it++;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.fft += ttmp;
s->time.total2 += ttmp;
it++;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.xmult += ttmp;
s->time.total2 += ttmp;
it++;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.fft += ttmp;
s->time.total2 += ttmp;
it++;
#if DETECT_AND_TRANSFER
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.detect += ttmp;
s->time.total2 += ttmp;
it++;
cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
s->time.transfer_to_host += ttmp;
s->time.total2 += ttmp;
it++;
#endif
cudaEventElapsedTime(&ttmp, t[0], t[it+1]);
s->time.total += ttmp;
/* Only (fft_len - overlap) samples per FFT are valid output */
int nvalid = s->nfft_per_block*(s->fft_len-s->overlap);
s->time.nsamp_tot += nvalid;
for (it=0; it<NT; it++) cudaEventDestroy(t[it]);
}
}
/* Actually just unpack the data */
/* Unpack-only path: transfer one block to the GPU, expand the FFT overlap,
 * and convert bytes to floats -- no FFT / chirp / detection. Timing mirrors
 * dedisperse().
 * Fix: the timer section was copy/pasted from dedisperse() and kept reading
 * events t[5]..t[7] that this function never records, feeding garbage into
 * s->time.fft, s->time.xmult and s->time.total; those reads are removed and
 * the total now spans t[0]..t[5] (the last recorded event). The unused
 * fft_rv local is also gone.
 * NOTE(review): ichan and out are unused here, as in the original. */
extern "C"
void unpack(dedispersion_setup *s, int ichan,
    const unsigned char *in, float *out) {
    /* Various sizes */
    const size_t bytes_per_sample = 4; // 8-bit complex 2-pol
    const size_t bytes_in = bytes_per_sample * s->npts_per_block;
    const size_t npts_tot = s->fft_len*s->nfft_per_block;
    /* Benchmarking events (NT matches dedisperse(); only t[0..5] are used) */
#define NT 12
    cudaEvent_t t[NT];
    int it;
    for (it=0; it<NT; it++) cudaEventCreate(&t[it]);
    it=0;
    /* copy input data to transfer buffer */
    memcpy(s->tbuf_host, in, bytes_in);
    /* t[0]/t[1] recorded back-to-back so pairwise intervals start at t[1] */
    cudaEventRecord(t[it], 0); it++;
    cudaEventRecord(t[it], 0); it++;
    /* Copy data to GPU */
    cudaMemcpy(s->tbuf_gpu, s->tbuf_host, bytes_in, cudaMemcpyHostToDevice);
    cudaEventRecord(t[it], 0); it++;
    /* Expand overlap */
    expand_overlap(s);
    cudaEventRecord(t[it], 0); it++;
    /* Convert to floating point */
    byte_to_float_2pol_complex<<<16,128>>>((unsigned short *)s->overlap_gpu, s->databuf0_gpu, s->databuf1_gpu, npts_tot);
    cudaEventRecord(t[it], 0); it++;
    /* Final event; synchronizing on it flushes the pipeline */
    cudaEventRecord(t[it], 0);
    cudaEventSynchronize(t[it]);
    /* Compute timers from recorded events t[1]..t[5] only */
    float ttmp;
    it=1;
    cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
    s->time.transfer_to_gpu += ttmp;
    s->time.total2 += ttmp;
    it++;
    cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
    s->time.overlap += ttmp;
    s->time.total2 += ttmp;
    it++;
    cudaEventElapsedTime(&ttmp, t[it], t[it+1]);
    s->time.bit_to_float += ttmp;
    s->time.total2 += ttmp;
    it++;
    cudaEventElapsedTime(&ttmp, t[0], t[it+1]);
    s->time.total += ttmp;
    int nvalid = s->nfft_per_block*(s->fft_len-s->overlap);
    s->time.nsamp_tot += nvalid;
    for (it=0; it<NT; it++) cudaEventDestroy(t[it]);
}
/* Free any resources associated with dedispersion.
 * Synchronizes first so no in-flight kernel still uses the buffers.
 * Note: cudaThreadSynchronize()/cudaThreadExit() are deprecated aliases;
 * the cudaDevice* equivalents are used instead. */
extern "C"
void free_dedispersion(dedispersion_setup *s) {

    cudaDeviceSynchronize(); // Need?

    cudaFreeHost(s->tbuf_host);
    cudaFree(s->tbuf_gpu);
    cudaFree(s->overlap_gpu);
    cudaFree(s->databuf0_gpu);
    cudaFree(s->chirp_gpu[0]);

    cudaDeviceReset();
}
/* Print one timer as a percentage of total wall time */
#define print_percent(var) \
    printf("  %7.2f%% %s\n", 100.0*s->time.var/s->time.total, #var)
#define print_percent_short(var) \
    fprintf(f, "%.3f ", s->time.var/s->time.total)

/* Print the accumulated timing breakdown to stdout, and append a short
 * machine-readable line to dedisp_timing.dat. */
void print_timing_report(dedispersion_setup *s) {

    /* Print to screen */
    printf("\n");
    printf("Total time = %6.1f s (%.4f ns/samp)\n",
            s->time.total/1e3, 1e6*s->time.total/(double)s->time.nsamp_tot);
    printf("Total2 time = %6.1f s (%.4f ns/samp)\n",
            s->time.total2/1e3, 1e6*s->time.total2/(double)s->time.nsamp_tot);
    //printf("  %f ns/sample\n", 1e6*s->time.total/(double)s->time.nsamp_tot);
    print_percent(transfer_to_gpu);
    print_percent(overlap);
    print_percent(bit_to_float);
    print_percent(fft);
    print_percent(xmult);
#if DETECT_AND_TRANSFER
    print_percent(detect);
#endif
    print_percent(fold_mem);
    print_percent(fold_blocks);
    print_percent(fold_combine);
    print_percent(downsample);
    print_percent(transfer_to_host);

    /* print short version to file */
    FILE *f = fopen("dedisp_timing.dat", "a");
    if (f == NULL) {
        /* Bug fix: original dereferenced a NULL FILE* if the
         * file could not be opened for append. */
        fprintf(stderr, "print_timing_report: could not open dedisp_timing.dat\n");
        return;
    }
    fprintf(f, "%7d %6d %.4e %.4e ", s->fft_len, s->overlap,
            s->time.total/(double)s->time.nsamp_tot,
            s->gp->drop_frac_tot);
    print_percent_short(transfer_to_gpu);
    print_percent_short(overlap);
    print_percent_short(bit_to_float);
    print_percent_short(fft);
    print_percent_short(xmult);
    print_percent_short(fold_mem);
    print_percent_short(fold_blocks);
    print_percent_short(fold_combine);
    print_percent_short(downsample);
    print_percent_short(transfer_to_host);
    fprintf(f, "\n");
    fclose(f);
}
|
ac177c8d3048b6c54f10b1ec08e690c254cf7cef.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
/*inline void CUDA_ERROR_CHECK(const hipError_t &err){
if(err != hipSuccess){
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}*/
// Iterate z <- z^2 + c starting from z = c; return the number of
// iterations completed before |z|^2 exceeds 4, capped at maxIteration.
__device__ int mandel(float c_re, float c_im, int maxIteration)
{
    float z_re = c_re;
    float z_im = c_im;
    int iter = 0;
    while (iter < maxIteration && z_re * z_re + z_im * z_im <= 4.f)
    {
        float sq_re = z_re * z_re - z_im * z_im;
        float sq_im = 2.f * z_re * z_im;
        z_re = c_re + sq_re;
        z_im = c_im + sq_im;
        ++iter;
    }
    return iter;
}
// One thread per output pixel: map (block, thread) indices to a pixel,
// evaluate the Mandelbrot escape count there, and store it row-major.
// Launched with 16x16 blocks over a grid covering resX x resY (see hostFE).
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int *d_res, int resX, int resY, int maxIterations){
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int now_x, now_y;
    now_x = blockIdx.x * blockDim.x + threadIdx.x;
    now_y = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds guard: the grid is rounded up, so edge threads may fall outside
    if(now_x >= resX || now_y >= resY) return;
    float x, y;
    int idx;
    x = lowerX + now_x * stepX;
    y = lowerY + now_y * stepY;
    idx = now_y*resX+now_x;  // row-major pixel index
    d_res[idx] = mandel(x, y, maxIterations);
}
// Host front-end function that allocates the memory and launches the GPU kernel.
// Fills img (resX*resY ints, row-major) with per-pixel escape counts.
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    // 16x16 thread blocks; round the grid up so every pixel is covered
    int blocksX = (int) ceil(resX/16.0);
    int blocksY = (int) ceil(resY/16.0);
    dim3 block(16, 16);
    dim3 grid(blocksX, blocksY);

    int *d_res;
    // Bug fix: use size_t — resX*resY*sizeof(int) can overflow a 32-bit
    // int for large images.
    size_t size = (size_t)resX * (size_t)resY * sizeof(int);
    hipMalloc((void**)&d_res, size);

    hipLaunchKernelGGL(( mandelKernel) , dim3(grid), dim3(block) , 0, 0, lowerX, lowerY, stepX, stepY, d_res, resX, resY, maxIterations);

    // hipMemcpy is synchronous, so no explicit device sync is needed here
    hipMemcpy(img, d_res, size, hipMemcpyDeviceToHost);

    hipFree(d_res);
}
| ac177c8d3048b6c54f10b1ec08e690c254cf7cef.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
/*inline void CUDA_ERROR_CHECK(const cudaError_t &err){
if(err != cudaSuccess){
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}*/
// Iterate z <- z^2 + c starting from z = c; return the number of
// iterations completed before |z|^2 exceeds 4, capped at maxIteration.
__device__ int mandel(float c_re, float c_im, int maxIteration)
{
    float z_re = c_re;
    float z_im = c_im;
    int iter = 0;
    while (iter < maxIteration && z_re * z_re + z_im * z_im <= 4.f)
    {
        float sq_re = z_re * z_re - z_im * z_im;
        float sq_im = 2.f * z_re * z_im;
        z_re = c_re + sq_re;
        z_im = c_im + sq_im;
        ++iter;
    }
    return iter;
}
// One thread per output pixel: map (block, thread) indices to a pixel,
// evaluate the Mandelbrot escape count there, and store it row-major.
// Launched with 16x16 blocks over a grid covering resX x resY (see hostFE).
__global__ void mandelKernel(float lowerX, float lowerY, float stepX, float stepY, int *d_res, int resX, int resY, int maxIterations){
    // To avoid error caused by the floating number, use the following pseudo code
    //
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int now_x, now_y;
    now_x = blockIdx.x * blockDim.x + threadIdx.x;
    now_y = blockIdx.y * blockDim.y + threadIdx.y;
    // Bounds guard: the grid is rounded up, so edge threads may fall outside
    if(now_x >= resX || now_y >= resY) return;
    float x, y;
    int idx;
    x = lowerX + now_x * stepX;
    y = lowerY + now_y * stepY;
    idx = now_y*resX+now_x;  // row-major pixel index
    d_res[idx] = mandel(x, y, maxIterations);
}
// Host front-end function that allocates the memory and launches the GPU kernel.
// Fills img (resX*resY ints, row-major) with per-pixel escape counts.
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    // 16x16 thread blocks; round the grid up so every pixel is covered
    int blocksX = (int) ceil(resX/16.0);
    int blocksY = (int) ceil(resY/16.0);
    dim3 block(16, 16);
    dim3 grid(blocksX, blocksY);

    int *d_res;
    // Bug fix: use size_t — resX*resY*sizeof(int) can overflow a 32-bit
    // int for large images.
    size_t size = (size_t)resX * (size_t)resY * sizeof(int);
    cudaMalloc((void**)&d_res, size);

    mandelKernel <<< grid, block >>> (lowerX, lowerY, stepX, stepY, d_res, resX, resY, maxIterations);

    // cudaMemcpy is synchronous, so no explicit device sync is needed here
    cudaMemcpy(img, d_res, size, cudaMemcpyDeviceToHost);

    cudaFree(d_res);
}
|
46a5679e2fc481f2cd2166cc80954def4ae31147.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
// Print the column header for the benchmark result table (PRINT is the
// rank-0-only print macro from common.h).
void print_header() {
  PRINT("# %10s %12s %8s out-of-place in-place \n", "", "", "");
  PRINT("# %10s %12s %8s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type",
      "time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
  PRINT("# %10s %12s %8s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "",
      "(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
// Print the per-line prefix (size/count/type); opName and root are unused
// for AllGather, which has no reduction op or root rank.
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
  PRINT("%12li %12li %8s", size, count, typeName);
}
// For AllGather, each rank contributes count/nranks elements and receives
// that contribution from every rank.  The in-place variant reads each
// rank's send data from its own slot inside the receive buffer.
void AllGatherGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
  const size_t per_rank = count / nranks;   // elements contributed per rank
  *sendcount = per_rank;
  *recvcount = per_rank * nranks;           // total gathered (rounded down)
  *sendInplaceOffset = per_rank;
  *recvInplaceOffset = 0;
  *paramcount = per_rank;
}
// Fill each local GPU's send buffer with deterministic per-rank test data
// and build the expected allgather result (every rank's contribution,
// concatenated by rank).  op and root are unused for AllGather.
testResult_t AllGatherInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
  size_t sendcount = args->sendBytes / wordSize(type);
  size_t recvcount = args->expectedBytes / wordSize(type);
  int nranks = args->nProcs*args->nThreads*args->nGpus;

  for (int i=0; i<args->nGpus; i++) {
    // Device index owned by this (process, thread, gpu) tuple
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(hipSetDevice(gpuid));
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
    // In-place: send data lives at this rank's slot of the recv buffer
    void* data = in_place ? ((char*)args->recvbuffs[i])+rank*args->sendBytes : args->sendbuffs[i];
    TESTCHECK(InitData(data, sendcount, type, rep, rank));
    // Expected output is rank j's InitData pattern at offset j*sendBytes
    for (int j=0; j<nranks; j++) {
      TESTCHECK(InitData(((char*)args->expected[i])+args->sendBytes*j, sendcount, type, rep, j));
    }
    CUDACHECK(hipDeviceSynchronize());
  }
  return testSuccess;
}
// Convert an elapsed time into algorithm bandwidth (total gathered bytes
// over time, GB/s) and bus bandwidth (scaled by the allgather correction
// factor (n-1)/n).
void AllGatherGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
  const double totalBytes = (double)(count * typesize * nranks);
  const double base = totalBytes / 1.0E9 / sec;
  *algBw = base;
  const double correction = (double)(nranks - 1) / (double)nranks;
  *busBw = base * correction;
}
// Issue one AllGather on the given stream; op and root are ignored since
// AllGather has neither a reduction op nor a root rank.
testResult_t AllGatherRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
  NCCLCHECK(ncclAllGather(sendbuff, recvbuff, count, type, comm, stream));
  return testSuccess;
}
// Collective description consumed by the shared test harness (common.h).
struct testColl allGatherTest = {
  "AllGather",
  AllGatherGetCollByteCount,
  AllGatherInitData,
  AllGatherGetBw,
  AllGatherRunColl
};
// Report only the send/recv buffer element counts; the remaining outputs
// of AllGatherGetCollByteCount are discarded.
void AllGatherGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
  size_t paramcount, sendInplaceOffset, recvInplaceOffset;
  AllGatherGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
// Run the timed AllGather test, either for one explicit datatype or —
// when type is -1 — for every type in the harness-provided test_types
// table.  op/opName are ignored (no reduction in AllGather).
testResult_t AllGatherRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
  args->collTest = &allGatherTest;
  ncclDataType_t *run_types;
  const char **run_typenames;
  int type_count;

  if ((int)type != -1) {
    // Single caller-specified datatype
    type_count = 1;
    run_types = &type;
    run_typenames = &typeName;
  } else {
    // Sweep all datatypes known to the harness
    type_count = test_typenum;
    run_types = test_types;
    run_typenames = test_typenames;
  }

  for (int i=0; i<type_count; i++) {
    TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", -1));
  }
  return testSuccess;
}
// Engine entry points; the weak-symbol alias lets the common harness
// resolve ncclTestEngine to this collective's implementation.
struct testEngine allGatherEngine = {
  AllGatherGetBuffSize,
  AllGatherRunTest
};

#pragma weak ncclTestEngine=allGatherEngine
| 46a5679e2fc481f2cd2166cc80954def4ae31147.cu | /*************************************************************************
* Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
// Print the column header for the benchmark result table (PRINT is the
// rank-0-only print macro from common.h).
void print_header() {
  PRINT("# %10s %12s %8s out-of-place in-place \n", "", "", "");
  PRINT("# %10s %12s %8s %7s %6s %6s %5s %7s %6s %6s %5s\n", "size", "count", "type",
      "time", "algbw", "busbw", "error", "time", "algbw", "busbw", "error");
  PRINT("# %10s %12s %8s %7s %6s %6s %5s %7s %6s %6s %5s\n", "(B)", "(elements)", "",
      "(us)", "(GB/s)", "(GB/s)", "", "(us)", "(GB/s)", "(GB/s)", "");
}
// Print the per-line prefix (size/count/type); opName and root are unused
// for AllGather, which has no reduction op or root rank.
void print_line_header (size_t size, size_t count, const char *typeName, const char *opName, int root) {
  PRINT("%12li %12li %8s", size, count, typeName);
}
// For AllGather, each rank contributes count/nranks elements and receives
// that contribution from every rank.  The in-place variant reads each
// rank's send data from its own slot inside the receive buffer.
void AllGatherGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
  const size_t per_rank = count / nranks;   // elements contributed per rank
  *sendcount = per_rank;
  *recvcount = per_rank * nranks;           // total gathered (rounded down)
  *sendInplaceOffset = per_rank;
  *recvInplaceOffset = 0;
  *paramcount = per_rank;
}
// Fill each local GPU's send buffer with deterministic per-rank test data
// and build the expected allgather result (every rank's contribution,
// concatenated by rank).  op and root are unused for AllGather.
testResult_t AllGatherInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
  size_t sendcount = args->sendBytes / wordSize(type);
  size_t recvcount = args->expectedBytes / wordSize(type);
  int nranks = args->nProcs*args->nThreads*args->nGpus;

  for (int i=0; i<args->nGpus; i++) {
    // Device index owned by this (process, thread, gpu) tuple
    int gpuid = args->localRank*args->nThreads*args->nGpus + args->thread*args->nGpus + i;
    CUDACHECK(cudaSetDevice(gpuid));
    int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
    CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
    // In-place: send data lives at this rank's slot of the recv buffer
    void* data = in_place ? ((char*)args->recvbuffs[i])+rank*args->sendBytes : args->sendbuffs[i];
    TESTCHECK(InitData(data, sendcount, type, rep, rank));
    // Expected output is rank j's InitData pattern at offset j*sendBytes
    for (int j=0; j<nranks; j++) {
      TESTCHECK(InitData(((char*)args->expected[i])+args->sendBytes*j, sendcount, type, rep, j));
    }
    CUDACHECK(cudaDeviceSynchronize());
  }
  return testSuccess;
}
// Convert an elapsed time into algorithm bandwidth (total gathered bytes
// over time, GB/s) and bus bandwidth (scaled by the allgather correction
// factor (n-1)/n).
void AllGatherGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
  const double totalBytes = (double)(count * typesize * nranks);
  const double base = totalBytes / 1.0E9 / sec;
  *algBw = base;
  const double correction = (double)(nranks - 1) / (double)nranks;
  *busBw = base * correction;
}
// Issue one AllGather on the given stream; op and root are ignored since
// AllGather has neither a reduction op nor a root rank.
testResult_t AllGatherRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
  NCCLCHECK(ncclAllGather(sendbuff, recvbuff, count, type, comm, stream));
  return testSuccess;
}
// Collective description consumed by the shared test harness (common.h).
struct testColl allGatherTest = {
  "AllGather",
  AllGatherGetCollByteCount,
  AllGatherInitData,
  AllGatherGetBw,
  AllGatherRunColl
};
// Report only the send/recv buffer element counts; the remaining outputs
// of AllGatherGetCollByteCount are discarded.
void AllGatherGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
  size_t paramcount, sendInplaceOffset, recvInplaceOffset;
  AllGatherGetCollByteCount(sendcount, recvcount, &paramcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
// Run the timed AllGather test, either for one explicit datatype or —
// when type is -1 — for every type in the harness-provided test_types
// table.  op/opName are ignored (no reduction in AllGather).
testResult_t AllGatherRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
  args->collTest = &allGatherTest;
  ncclDataType_t *run_types;
  const char **run_typenames;
  int type_count;

  if ((int)type != -1) {
    // Single caller-specified datatype
    type_count = 1;
    run_types = &type;
    run_typenames = &typeName;
  } else {
    // Sweep all datatypes known to the harness
    type_count = test_typenum;
    run_types = test_types;
    run_typenames = test_typenames;
  }

  for (int i=0; i<type_count; i++) {
    TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], (ncclRedOp_t)0, "", -1));
  }
  return testSuccess;
}
// Engine entry points; the weak-symbol alias lets the common harness
// resolve ncclTestEngine to this collective's implementation.
struct testEngine allGatherEngine = {
  AllGatherGetBuffSize,
  AllGatherRunTest
};

#pragma weak ncclTestEngine=allGatherEngine
|
7ddb3600b8d76ba1fccb91f419dbde1b7a599d7f.hip | // !!! This is a file automatically generated by hipify!!!
/// 16 threads per block
#include "rgbd_sensor.h"
#include "geometry/geometry_helper.h"
#include "visualization/color_util.h"
#include <helper_cuda.h>
#include <helper_math.h>
#include <glog/logging.h>
#include <hip/driver_types.h>
#include <extern/cuda/helper_cuda.h>
#include "sensor/preprocess.h"
/// Member functions: (CPU code)
// Allocate all per-frame GPU buffers and CUDA arrays for one RGB-D frame
// of params_.width x params_.height pixels, then bind the texture objects.
Sensor::Sensor(SensorParams &sensor_params) {
  const uint image_size = sensor_params.height * sensor_params.width;
  params_ = sensor_params; // Is it copy constructing?

  // Raw sensor staging buffers and per-pixel working buffers
  checkCudaErrors(hipMalloc(&data_.depth_buffer, sizeof(short) * image_size));
  checkCudaErrors(hipMalloc(&data_.color_buffer, sizeof(uchar4) * image_size));

  checkCudaErrors(hipMalloc(&data_.depth_data, sizeof(float) * image_size));
  checkCudaErrors(hipMalloc(&data_.inlier_ratio, sizeof(float) * image_size));
  checkCudaErrors(hipMalloc(&data_.filtered_depth_data, sizeof(float) * image_size));
  checkCudaErrors(hipMalloc(&data_.color_data, sizeof(float4) * image_size));
  checkCudaErrors(hipMalloc(&data_.normal_data, sizeof(float4) * image_size));

  // 2D CUDA arrays backing the depth/color/normal texture objects
  data_.depth_channel_desc = hipCreateChannelDesc<float>();
  checkCudaErrors(hipMallocArray(&data_.depth_array,
                                  &data_.depth_channel_desc,
                                  params_.width, params_.height));
  data_.color_channel_desc = hipCreateChannelDesc<float4>();
  checkCudaErrors(hipMallocArray(&data_.color_array,
                                  &data_.color_channel_desc,
                                  params_.width, params_.height));
  data_.normal_channel_desc = hipCreateChannelDesc<float4>();
  checkCudaErrors(hipMallocArray(&data_.normal_array,
                                  &data_.normal_channel_desc,
                                  params_.width, params_.height));
  // 0 marks "not yet created" so BindCUDATexture won't destroy them first
  data_.depth_texture = 0;
  data_.color_texture = 0;
  data_.normal_texture = 0;
  BindCUDATexture();
  is_allocated_on_gpu_ = true;  // lets the destructor know what to free
}
// Release every buffer and CUDA array allocated by the constructor.
// Texture objects are not destroyed here; BindCUDATexture recreates them
// on each rebind — NOTE(review): the last set leaks at shutdown, confirm.
Sensor::~Sensor() {
  if (is_allocated_on_gpu_) {
    checkCudaErrors(hipFree(data_.depth_buffer));
    checkCudaErrors(hipFree(data_.color_buffer));

    checkCudaErrors(hipFree(data_.depth_data));
    checkCudaErrors(hipFree(data_.inlier_ratio));
    checkCudaErrors(hipFree(data_.filtered_depth_data));
    checkCudaErrors(hipFree(data_.color_data));
    checkCudaErrors(hipFree(data_.normal_data));

    checkCudaErrors(hipFreeArray(data_.depth_array));
    checkCudaErrors(hipFreeArray(data_.color_array));
    checkCudaErrors(hipFreeArray(data_.normal_array));
  }
}
// (Re)create the depth/color/normal texture objects over their backing
// CUDA arrays.  An existing object (non-zero handle) is destroyed first,
// so this is safe to call once per frame after the arrays are refreshed.
void Sensor::BindCUDATexture() {
  // Depth texture over depth_array
  hipResourceDesc depth_resource;
  memset(&depth_resource, 0, sizeof(depth_resource));
  depth_resource.resType = hipResourceTypeArray;
  depth_resource.res.array.array = data_.depth_array;
  hipTextureDesc depth_tex_desc;
  memset(&depth_tex_desc, 0, sizeof(depth_tex_desc));
  depth_tex_desc.readMode = hipReadModeElementType;
  if (data_.depth_texture != 0)
    checkCudaErrors(hipDestroyTextureObject(data_.depth_texture));
  checkCudaErrors(hipCreateTextureObject(&data_.depth_texture,
                                          &depth_resource,
                                          &depth_tex_desc,
                                          NULL));

  // Color texture over color_array
  hipResourceDesc color_resource;
  memset(&color_resource, 0, sizeof(color_resource));
  color_resource.resType = hipResourceTypeArray;
  color_resource.res.array.array = data_.color_array;
  hipTextureDesc color_tex_desc;
  memset(&color_tex_desc, 0, sizeof(color_tex_desc));
  color_tex_desc.readMode = hipReadModeElementType;
  if (data_.color_texture != 0)
    checkCudaErrors(hipDestroyTextureObject(data_.color_texture));
  checkCudaErrors(hipCreateTextureObject(&data_.color_texture,
                                          &color_resource,
                                          &color_tex_desc,
                                          NULL));

  // Normal texture over normal_array
  hipResourceDesc normal_resource;
  memset(&normal_resource, 0, sizeof(normal_resource));
  normal_resource.resType = hipResourceTypeArray;
  normal_resource.res.array.array = data_.normal_array;
  hipTextureDesc normal_tex_desc;
  memset(&normal_tex_desc, 0, sizeof(normal_tex_desc));
  normal_tex_desc.readMode = hipReadModeElementType;
  if (data_.normal_texture != 0)
    checkCudaErrors(hipDestroyTextureObject(data_.normal_texture));
  checkCudaErrors(hipCreateTextureObject(&data_.normal_texture,
                                          &normal_resource,
                                          &normal_tex_desc,
                                          NULL));
}
// Upload one depth/color frame to the GPU, refresh the texture-backing
// arrays, and rebind the texture objects.  Returns 0 on success.
int Sensor::Process(cv::Mat &depth, cv::Mat &color) {
  // TODO(wei): deal with distortion
  /// Disable all filters at current
  ConvertDepthFormat(depth, data_.depth_buffer, data_.depth_data, params_);
  ConvertColorFormat(color, data_.color_buffer, data_.color_data, params_);
  ResetInlierRatio(data_.inlier_ratio, params_);

  /// Array used as texture in mapper
  // NOTE(review): hipMemcpyToArray is deprecated in recent toolkits;
  // consider the 2D copy variant — confirm against the toolkit version.
  checkCudaErrors(hipMemcpyToArray(data_.depth_array, 0, 0,
                                    data_.depth_data,
                                    sizeof(float)*params_.height*params_.width,
                                    hipMemcpyDeviceToDevice));
  checkCudaErrors(hipMemcpyToArray(data_.color_array, 0, 0,
                                    data_.color_data,
                                    sizeof(float4)*params_.height*params_.width,
                                    hipMemcpyDeviceToDevice));
  BindCUDATexture();
  return 0;
}
| 7ddb3600b8d76ba1fccb91f419dbde1b7a599d7f.cu | /// 16 threads per block
#include "rgbd_sensor.h"
#include "geometry/geometry_helper.h"
#include "visualization/color_util.h"
#include <helper_cuda.h>
#include <helper_math.h>
#include <glog/logging.h>
#include <driver_types.h>
#include <extern/cuda/helper_cuda.h>
#include "sensor/preprocess.h"
/// Member functions: (CPU code)
// Allocate all per-frame GPU buffers and CUDA arrays for one RGB-D frame
// of params_.width x params_.height pixels, then bind the texture objects.
Sensor::Sensor(SensorParams &sensor_params) {
  const uint image_size = sensor_params.height * sensor_params.width;
  params_ = sensor_params; // Is it copy constructing?

  // Raw sensor staging buffers and per-pixel working buffers
  checkCudaErrors(cudaMalloc(&data_.depth_buffer, sizeof(short) * image_size));
  checkCudaErrors(cudaMalloc(&data_.color_buffer, sizeof(uchar4) * image_size));

  checkCudaErrors(cudaMalloc(&data_.depth_data, sizeof(float) * image_size));
  checkCudaErrors(cudaMalloc(&data_.inlier_ratio, sizeof(float) * image_size));
  checkCudaErrors(cudaMalloc(&data_.filtered_depth_data, sizeof(float) * image_size));
  checkCudaErrors(cudaMalloc(&data_.color_data, sizeof(float4) * image_size));
  checkCudaErrors(cudaMalloc(&data_.normal_data, sizeof(float4) * image_size));

  // 2D CUDA arrays backing the depth/color/normal texture objects
  data_.depth_channel_desc = cudaCreateChannelDesc<float>();
  checkCudaErrors(cudaMallocArray(&data_.depth_array,
                                  &data_.depth_channel_desc,
                                  params_.width, params_.height));
  data_.color_channel_desc = cudaCreateChannelDesc<float4>();
  checkCudaErrors(cudaMallocArray(&data_.color_array,
                                  &data_.color_channel_desc,
                                  params_.width, params_.height));
  data_.normal_channel_desc = cudaCreateChannelDesc<float4>();
  checkCudaErrors(cudaMallocArray(&data_.normal_array,
                                  &data_.normal_channel_desc,
                                  params_.width, params_.height));
  // 0 marks "not yet created" so BindCUDATexture won't destroy them first
  data_.depth_texture = 0;
  data_.color_texture = 0;
  data_.normal_texture = 0;
  BindCUDATexture();
  is_allocated_on_gpu_ = true;  // lets the destructor know what to free
}
// Release every buffer and CUDA array allocated by the constructor.
// Texture objects are not destroyed here; BindCUDATexture recreates them
// on each rebind — NOTE(review): the last set leaks at shutdown, confirm.
Sensor::~Sensor() {
  if (is_allocated_on_gpu_) {
    checkCudaErrors(cudaFree(data_.depth_buffer));
    checkCudaErrors(cudaFree(data_.color_buffer));

    checkCudaErrors(cudaFree(data_.depth_data));
    checkCudaErrors(cudaFree(data_.inlier_ratio));
    checkCudaErrors(cudaFree(data_.filtered_depth_data));
    checkCudaErrors(cudaFree(data_.color_data));
    checkCudaErrors(cudaFree(data_.normal_data));

    checkCudaErrors(cudaFreeArray(data_.depth_array));
    checkCudaErrors(cudaFreeArray(data_.color_array));
    checkCudaErrors(cudaFreeArray(data_.normal_array));
  }
}
// (Re)create the depth/color/normal texture objects over their backing
// CUDA arrays.  An existing object (non-zero handle) is destroyed first,
// so this is safe to call once per frame after the arrays are refreshed.
void Sensor::BindCUDATexture() {
  // Depth texture over depth_array
  cudaResourceDesc depth_resource;
  memset(&depth_resource, 0, sizeof(depth_resource));
  depth_resource.resType = cudaResourceTypeArray;
  depth_resource.res.array.array = data_.depth_array;
  cudaTextureDesc depth_tex_desc;
  memset(&depth_tex_desc, 0, sizeof(depth_tex_desc));
  depth_tex_desc.readMode = cudaReadModeElementType;
  if (data_.depth_texture != 0)
    checkCudaErrors(cudaDestroyTextureObject(data_.depth_texture));
  checkCudaErrors(cudaCreateTextureObject(&data_.depth_texture,
                                          &depth_resource,
                                          &depth_tex_desc,
                                          NULL));

  // Color texture over color_array
  cudaResourceDesc color_resource;
  memset(&color_resource, 0, sizeof(color_resource));
  color_resource.resType = cudaResourceTypeArray;
  color_resource.res.array.array = data_.color_array;
  cudaTextureDesc color_tex_desc;
  memset(&color_tex_desc, 0, sizeof(color_tex_desc));
  color_tex_desc.readMode = cudaReadModeElementType;
  if (data_.color_texture != 0)
    checkCudaErrors(cudaDestroyTextureObject(data_.color_texture));
  checkCudaErrors(cudaCreateTextureObject(&data_.color_texture,
                                          &color_resource,
                                          &color_tex_desc,
                                          NULL));

  // Normal texture over normal_array
  cudaResourceDesc normal_resource;
  memset(&normal_resource, 0, sizeof(normal_resource));
  normal_resource.resType = cudaResourceTypeArray;
  normal_resource.res.array.array = data_.normal_array;
  cudaTextureDesc normal_tex_desc;
  memset(&normal_tex_desc, 0, sizeof(normal_tex_desc));
  normal_tex_desc.readMode = cudaReadModeElementType;
  if (data_.normal_texture != 0)
    checkCudaErrors(cudaDestroyTextureObject(data_.normal_texture));
  checkCudaErrors(cudaCreateTextureObject(&data_.normal_texture,
                                          &normal_resource,
                                          &normal_tex_desc,
                                          NULL));
}
// Upload one depth/color frame to the GPU, refresh the texture-backing
// arrays, and rebind the texture objects.  Returns 0 on success.
int Sensor::Process(cv::Mat &depth, cv::Mat &color) {
  // TODO(wei): deal with distortion
  /// Disable all filters at current
  ConvertDepthFormat(depth, data_.depth_buffer, data_.depth_data, params_);
  ConvertColorFormat(color, data_.color_buffer, data_.color_data, params_);
  ResetInlierRatio(data_.inlier_ratio, params_);

  /// Array used as texture in mapper
  // NOTE(review): cudaMemcpyToArray is deprecated in recent toolkits;
  // consider cudaMemcpy2DToArray — confirm against the toolkit version.
  checkCudaErrors(cudaMemcpyToArray(data_.depth_array, 0, 0,
                                    data_.depth_data,
                                    sizeof(float)*params_.height*params_.width,
                                    cudaMemcpyDeviceToDevice));
  checkCudaErrors(cudaMemcpyToArray(data_.color_array, 0, 0,
                                    data_.color_data,
                                    sizeof(float4)*params_.height*params_.width,
                                    cudaMemcpyDeviceToDevice));
  BindCUDATexture();
  return 0;
}
|
7e56e4ff7a56d9ddc6074c4d91c75581dfdb2cc3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdint>
// Clamp an integer to the displayable 8-bit range [0, 255].
__device__ unsigned char rgb_bound(int v)
{
	return v>255?255:(v<0?0:v);
}
// Device-side maximum of two values (avoids relying on host <algorithm>).
template<typename T>
__device__ T max(T v1, T v2)
{
	return v1>v2?v1:v2;
}
// Device-side minimum of two values (avoids relying on host <algorithm>).
template<typename T>
__device__ T min(T v1, T v2)
{
	return v1<v2?v1:v2;
}
// Convert one pixel from 8-bit BGR (components in [0,255]) to HSV.
// Output: hsv[0] = hue in [0,360), hsv[1] = saturation in [0,1],
// hsv[2] = value in [0,1].
__device__ void bgr2hsv(float bb, float gg, float rr, float *hsv)
{
	float r=rr/255.0, g=gg/255.0, b=bb/255.0;
	float rgbMax = max(max(r,g), b);
	float rgbMin = min(min(r,g), b);
	float delta = rgbMax-rgbMin;
	float hue, sat, val;
	// Value is the largest channel
	val = rgbMax;
	// Saturation is chroma relative to value (0 for black)
	if(rgbMax == 0) sat = 0;
	else sat = delta/rgbMax;
	// Hue depends on which channel dominates; 0 for achromatic pixels
	if(delta == 0) hue = 0;
	else
	{
		if(rgbMax == r)
		{
			if(g>=b) hue = 60*(g-b)/delta;
			else hue = 60*(g-b)/delta+360;
		}
		else if(rgbMax == g)
		{
			hue = 60*(b-r)/delta+120;
		}
		else
		{
			hue = 60*(r-g)/delta+240;
		}
	}
	hsv[0] = hue;
	hsv[1] = sat;
	hsv[2] = val;
}
// Expand packed YUYV (4:2:2, bytes Y0 U Y1 V per pixel pair) into planar-
// order packed YUV (3 bytes per pixel).  One thread per pixel: blockIdx.x
// is the column, threadIdx.x the row.
// Improvement: the original computed the +/-1 chroma offset with
// powf(-1, x&1) — a transcendental call for a parity sign; replaced with
// an equivalent integer ternary.
__global__ void yuyv2yuv_kernal(unsigned char *in, unsigned char *out, int w, int h)
{
	int x=blockIdx.x;
	int y=threadIdx.x;
	int tmp = y*w;
	int src_offset = x*2;
	int dst_offset = x*3;
	// Even pixels read their own U at +1; odd pixels share the previous
	// pair's chroma at -1 (same values powf(-1, x&1) produced).
	int sign = (x & 1) ? -1 : 1;
	out[tmp*3+dst_offset+0] = in[tmp*2+src_offset+0];
	out[tmp*3+dst_offset+1] = in[tmp*2+src_offset+sign];
	out[tmp*3+dst_offset+2] = in[tmp*2+src_offset+2+sign];
}
// Convert packed YUYV (4:2:2, bytes Y0 U(Cb) Y1 V(Cr)) to packed BGR using
// the BT.601 conversion.  One thread per pixel: blockIdx.x is the column,
// threadIdx.x the row.
// Bug fix: the original applied 2.018 (the Cb/B coefficient) to V when
// computing R and 1.596 (the Cr/R coefficient) to U when computing B —
// inconsistent with BT.601 and with this file's own encoder
// bgr2yuv422_kernal, which stores Cb at the even offset.  Also replaced
// powf(-1, x&1) with an integer parity sign.
__global__ void yuyv2bgr_kernal(unsigned char *in, unsigned char *out, int w, int h)
{
	int x=blockIdx.x;
	int y=threadIdx.x;
	int tmp = y*w;
	int src_offset = x*2;
	int dst_offset = x*3;
	int sign = (x & 1) ? -1 : 1;
	unsigned char Y = in[tmp*2+src_offset+0];
	unsigned char U = in[tmp*2+src_offset+sign];      // Cb
	unsigned char V = in[tmp*2+src_offset+2+sign];    // Cr
	float r,g,b;
	// BT.601: R = 1.164(Y-16) + 1.596(Cr-128)
	//         G = 1.164(Y-16) - 0.391(Cb-128) - 0.813(Cr-128)
	//         B = 1.164(Y-16) + 2.018(Cb-128)
	r = (1.164 * (Y - 16)) + (1.596 * (V - 128));
	g = (1.164 * (Y - 16)) - (0.391 * (U - 128)) - (0.813 * (V - 128));
	b = (1.164 * (Y - 16)) + (2.018 * (U - 128));
	out[tmp*3+dst_offset+2] = rgb_bound(r);
	out[tmp*3+dst_offset+1] = rgb_bound(g);
	out[tmp*3+dst_offset+0] = rgb_bound(b);
}
// Convert packed 8-bit BGR into planar float RGB normalized to [0,1]
// (R plane, then G, then B, each w*h floats).  One thread per pixel:
// blockIdx.x is the column, threadIdx.x the row.
__global__ void bgr2rgbfp_kernal(unsigned char *in, float *rgbfp, int w, int h)
{
	int x=blockIdx.x;
	int y=threadIdx.x;
	int offset = y*w*3+x*3;
	float rf, gf, bf;
	rf = in[offset+2]/255.0f;
	gf = in[offset+1]/255.0f;
	bf = in[offset+0]/255.0f;
	int planesize = w*h;
	int tmp = y*w+x;
	// Planar layout: R plane, G plane, B plane
	rgbfp[tmp] = rf;
	rgbfp[planesize+tmp] = gf;
	rgbfp[planesize*2+tmp] = bf;
}
// Convert packed BGR into packed YUV 4:2:2 (YUYV: Y0 Cb Y1 Cr).  Each
// thread handles a horizontal pixel pair; Cb is taken from the first
// pixel and Cr from the second.  blockIdx.x indexes the pair, threadIdx.x
// the row.
// NOTE(review): the Cr coefficients 0.498/-0.0813 differ slightly from
// the usual BT.601 0.500/-0.081 — confirm whether intentional.
__global__ void bgr2yuv422_kernal(unsigned char *in, unsigned char *out, int w, int h)
{
	int x = blockIdx.x;
	int y = threadIdx.x;
	int in_tmp = y*w*3;
	int out_tmp = y*w*2;
	int src_offset = x*6;   // two 3-byte input pixels
	int dst_offset = x*4;   // two 2-byte output samples
	float b1 = in[in_tmp+src_offset];
	float g1 = in[in_tmp+src_offset+1];
	float r1 = in[in_tmp+src_offset+2];
	float b2 = in[in_tmp+src_offset+3];
	float g2 = in[in_tmp+src_offset+4];
	float r2 = in[in_tmp+src_offset+5];
	out[out_tmp+dst_offset] = (unsigned char)(int)(0.299*r1+0.587*g1+0.114*b1);
	out[out_tmp+dst_offset+1] = (unsigned char)(int)(-0.169*r1-0.331*g1+0.499*b1+128);
	out[out_tmp+dst_offset+2] = (unsigned char)(int)(0.299*r2+0.587*g2+0.114*b2);
	out[out_tmp+dst_offset+3] = (unsigned char)(int)(0.498*r2-0.419*g2-0.0813*b2+128);
}
// Convert packed 8-bit BGR to packed 8-bit HSV (H scaled from [0,360) to
// [0,255], S and V from [0,1] to [0,255]).  One thread per pixel:
// blockIdx.x is the column, threadIdx.x the row.
// Bug fix: the original wrote rgb_bound(hsv[0..2]*...) — reading pixel 0
// of the *output image* — instead of the freshly computed hsv_t values,
// so every pixel got garbage derived from uninitialized output memory.
__global__ void bgr2hsv_kernal(unsigned char *bgr, unsigned char *hsv, int w, int h)
{
	int x = blockIdx.x;
	int y = threadIdx.x;
	int offset = y*w*3+x*3;
	float r,g,b;
	r = bgr[offset+2];
	g = bgr[offset+1];
	b = bgr[offset+0];
	float hsv_t[3];
	bgr2hsv(b, g, r, hsv_t);
	hsv[offset+0] = rgb_bound(hsv_t[0]*255/360);
	hsv[offset+1] = rgb_bound(hsv_t[1]*255);
	hsv[offset+2] = rgb_bound(hsv_t[2]*255);
}
// Demosaic a Bayer GR pattern into packed BGR with per-channel gain and a
// saturation adjustment (ds in [-1,1]), writing the image rotated 180
// degrees.  One thread per pixel: blockIdx.x is the column, threadIdx.x
// the row.
// Bug fix: the original used outy = h-y and outx = w-x, which reach h and
// w at y==0 / x==0 and write past the end of the output buffer; the
// correct 180-degree mapping is (h-1-y, w-1-x).  Also rn/gn/bn were
// uninitialized when the switch hit its default branch; they now default
// to the achromatic value.
__global__ void baygr2bgr_kernal(unsigned char *bayergr, unsigned char *bgr, int w, int h,
				float ds, float rgain, float ggain, float bgain)
{
	int x = blockIdx.x;
	int y = threadIdx.x;
	int outy = h-1-y;
	int outx = w-1-x;
	float r,g,b;
	float hue, sat, val;
	float rn, gn, bn;
	// Nearest-neighbor Bayer GR sampling with white-balance gains
	b = bayergr[(y+((y+1)&1))*w+x-(x&1)]*bgain;
	g = bayergr[y*w+x-(x&1)+(y&1)]*ggain;
	r = bayergr[(y-(y&1))*w+x+((x+1)&1)]*rgain;
	float hsv[3];
	bgr2hsv(b,g,r, hsv);
	hue = hsv[0];
	sat = hsv[1];
	val = hsv[2];
	// Saturation adjust: ds>0 moves toward full saturation, ds<0 toward gray
	if(ds>=0) sat = sat+(1-sat)*ds;
	else sat = sat+sat*ds;
	// HSV -> RGB (standard sector-based reconstruction)
	int hi = hue/60.0;
	float f = hue/60.0-hi;
	float p = val*(1-sat);
	float q = val*(1-f*sat);
	float t = val*(1-(1-f)*sat);
	rn = gn = bn = val;   // safe default (achromatic) if hi falls outside 0..5
	switch(hi)
	{
	case 0:
		rn = val; gn = t; bn = p; break;
	case 1:
		rn = q; gn = val; bn = p; break;
	case 2:
		rn = p; gn = val; bn = t; break;
	case 3:
		rn = p; gn = q; bn = val; break;
	case 4:
		rn = t; gn = p; bn = val; break;
	case 5:
		rn = val; gn = p; bn = q; break;
	default:
		break;
	}
	bgr[outy*w*3+outx*3+0] = rgb_bound(bn);
	bgr[outy*w*3+outx*3+1] = rgb_bound(gn);
	bgr[outy*w*3+outx*3+2] = rgb_bound(rn);
}
// Bilinear resize of a packed 3-channel image from (iw, ih) to (ow, oh).
// One thread per output pixel: blockIdx.x is the output column,
// threadIdx.x the output row (see the host wrappers, which launch
// <<<ow, oh>>>).  At the bottom/right border the nearest source pixel is
// copied instead of interpolating.
template<typename T>
__global__ void resize_packed_kernal(T *in, int iw, int ih, T *out, int ow, int oh)
{
	int x = blockIdx.x;
	int y = threadIdx.x;
	int offset_out = y*ow*3+x*3;
	// Map the output pixel back into source coordinates
	float h_scale_rate = (float)ih/oh;
	float w_scale_rate = (float)iw/ow;
	float y_scale = h_scale_rate * y;
	float x_scale = w_scale_rate * x;
	int j = y_scale, i = x_scale;
	// (u, v) are the fractional interpolation weights
	float u = y_scale-j, v = x_scale-i;
	int offset_in1 = j*iw*3;
	int offset_in2 = (j+1)*iw*3;
	if(j+1>=ih || i+1>=iw)
	{
		// Border: no 2x2 neighborhood available, copy nearest pixel
		out[offset_out+0] = in[offset_in1+i*3];
		out[offset_out+1] = in[offset_in1+i*3+1];
		out[offset_out+2] = in[offset_in1+i*3+2];
	}
	else
	{
		// Bilinear blend of the 2x2 neighborhood, per channel
		unsigned char x1,x2,x3,x4;
		x1 = in[offset_in1+i*3];
		x2 = in[offset_in1+(i+1)*3];
		x3 = in[offset_in2+i*3];
		x4 = in[offset_in2+(i+1)*3];
		out[offset_out+0] = ((1-u)*(1-v)*x1+(1-u)*v*x2+u*(1-v)*x3+u*v*x4);
		x1 = in[offset_in1+i*3+1];
		x2 = in[offset_in1+(i+1)*3+1];
		x3 = in[offset_in2+i*3+1];
		x4 = in[offset_in2+(i+1)*3+1];
		out[offset_out+1] = ((1-u)*(1-v)*x1+(1-u)*v*x2+u*(1-v)*x3+u*v*x4);
		x1 = in[offset_in1+i*3+2];
		x2 = in[offset_in1+(i+1)*3+2];
		x3 = in[offset_in2+i*3+2];
		x4 = in[offset_in2+(i+1)*3+2];
		out[offset_out+2] = ((1-u)*(1-v)*x1+(1-u)*v*x2+u*(1-v)*x3+u*v*x4);
	}
}
// Build the undistortion remap tables: for each output pixel, back-project
// through the inverse of the new camera matrix, apply the rational radial
// (k1..k6) plus tangential (p1, p2) distortion model (k3..k6 and the
// thin-prism terms s1..s4 are fixed to 0 here), then re-project with the
// original intrinsics.  Launched over a 2D grid covering outImgW x outImgH.
// pCamK: 3x3 row-major intrinsics; pDistort: [k1 k2 p1 p2];
// pInvNewCamK: 3x3 row-major inverse of the new camera matrix.
__global__ void build_map_kernal(float *pCamK, float *pDistort, float *pInvNewCamK, float *pMapx, float *pMapy, int outImgW, int outImgH)
{
	const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
	const int tidy = blockDim.y*blockIdx.y + threadIdx.y;
	if (tidx < outImgW && tidy < outImgH)
	{
		float k1 = pDistort[0];
		float k2 = pDistort[1];
		float p1 = pDistort[2];
		float p2 = pDistort[3];
		float k3, k4, k5, k6, s1, s2, s3, s4;
		k3 = k4 = k5 = k6 = s1 = s2 = s3 = s4 = 0;
		float fx = pCamK[0];
		float fy = pCamK[4];
		float u0 = pCamK[2];
		float v0 = pCamK[5];
		// Homogeneous back-projection into normalized camera coordinates
		float _x = tidx*pInvNewCamK[0] + tidy*pInvNewCamK[1] + pInvNewCamK[2];
		float _y = tidx*pInvNewCamK[3] + tidy*pInvNewCamK[4] + pInvNewCamK[5];
		float _w = tidx*pInvNewCamK[6] + tidy*pInvNewCamK[7] + pInvNewCamK[8];
		float w = 1. / _w;
		float x = _x * w;
		float y = _y * w;
		float x2 = x*x;
		float y2 = y*y;
		float r2 = x2 + y2;
		float _2xy = 2 * x*y;
		// Rational radial factor + tangential terms
		float kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2) / (1 + ((k6*r2 + k5)*r2 + k4)*r2);
		float xd = (x*kr + p1*_2xy + p2*(r2 + 2 * x2) + s1*r2 + s2*r2*r2);
		float yd = (y*kr + p1*(r2 + 2 * y2) + p2*_2xy + s3*r2 + s4*r2*r2);
		float invProj = 1.;
		float u = fx*invProj*xd + u0;
		float v = fy*invProj*yd + v0;
		int mapIdx = tidy*outImgW + tidx;
		pMapx[mapIdx] = (float)u;
		pMapy[mapIdx] = (float)v;
	}
}
// Remap the source image through the (pMapx, pMapy) coordinate tables with
// bilinear interpolation.  Output pixels whose source sample falls outside
// the input image are left untouched.  Launched over a 2D grid covering
// outWidth x outHeight.
// NOTE(review): the index strides are hard-coded to 3 bytes per pixel
// (`* 3`) even though a `channels` parameter is accepted — only
// channels <= 3 on 3-channel buffers behaves sensibly; confirm callers.
__global__ void remap_kernal(unsigned char* pSrcImg, unsigned char* pDstImg, float* pMapx, float* pMapy, int inWidth, int inHeight,
			int outWidth, int outHeight, int channels)
{
	const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
	const int tidy = blockDim.y*blockIdx.y + threadIdx.y;
	if (tidx < outWidth && tidy < outHeight)
	{
		int mapIdx = tidy*outWidth + tidx;
		float u = pMapx[mapIdx];
		float v = pMapy[mapIdx];
		// 2x2 source neighborhood around the sampling point
		int u1 = floor(u);
		int v1 = floor(v);
		int u2 = u1 + 1;
		int v2 = v1 + 1;
		if (u1 >= 0 && v1 >= 0 && u2 < inWidth && v2 < inHeight)
		{
			// Bilinear weights
			float dx = u - u1;
			float dy = v - v1;
			float weight1 = (1 - dx)*(1 - dy);
			float weight2 = dx*(1 - dy);
			float weight3 = (1 - dx)*dy;
			float weight4 = dx*dy;
			int resultIdx = mapIdx * 3;
			for (int chan = 0; chan < channels; chan++)
			{
				pDstImg[resultIdx + chan] = (unsigned char)(weight1*pSrcImg[(v1*inWidth + u1) * 3 + chan]
					+ weight2*pSrcImg[(v1*inWidth + u2) * 3 + chan]
					+ weight3*pSrcImg[(v2*inWidth + u1) * 3 + chan]
					+ weight4*pSrcImg[(v2*inWidth + u2) * 3 + chan]);
			}
		}
	}
}
// Host-side wrappers that launch the image-processing kernels above (HIP).
// NOTE(review): the colour-conversion kernels are launched with
// gridDim.x == w and blockDim.x == h, so images taller than the per-block
// thread limit (1024) will fail to launch -- confirm expected input sizes.
namespace imgproc
{
// Packed YUYV (YUY2) -> per-pixel YUV triplets.
void cudaYUYV2YUV(unsigned char *in, unsigned char *out, int w, int h)
{
hipLaunchKernelGGL(( yuyv2yuv_kernal), dim3(w), dim3(h), 0, 0, in,out,w,h);
}
// Packed YUYV -> interleaved 8-bit BGR.
void cudaYUYV2BGR(unsigned char *in, unsigned char *out, int w, int h)
{
hipLaunchKernelGGL(( yuyv2bgr_kernal), dim3(w), dim3(h), 0, 0, in,out,w,h);
}
// Bayer mosaic -> BGR with per-channel gains and saturation adjustment.
void cudaBayer2BGR(unsigned char *bayer, unsigned char *bgr, int w, int h,
float sat, float rgain, float ggain, float bgain)
{
hipLaunchKernelGGL(( baygr2bgr_kernal), dim3(w),dim3(h), 0, 0, bayer, bgr, w, h, sat, rgain, ggain, bgain);
}
// Interleaved 8-bit BGR -> planar float RGB in [0,1].
void cudaBGR2RGBfp(unsigned char *bgr, float *rgbfp, int w, int h)
{
hipLaunchKernelGGL(( bgr2rgbfp_kernal), dim3(w),dim3(h), 0, 0, bgr, rgbfp, w, h);
}
// Interleaved BGR -> packed YUV 4:2:2; one block per pixel *pair*, hence w/2.
void cudaBGR2YUV422(unsigned char *bgr, unsigned char *yuv422, int w, int h)
{
hipLaunchKernelGGL(( bgr2yuv422_kernal), dim3(w/2), dim3(h), 0, 0, bgr, yuv422, w, h);
}
// Interleaved BGR -> interleaved 8-bit HSV.
void cudaBGR2HSV(unsigned char *bgr, unsigned char *hsv, int w, int h)
{
hipLaunchKernelGGL(( bgr2hsv_kernal), dim3(w), dim3(h), 0, 0, bgr, hsv, w, h);
}
// Bilinear resize, float and unsigned char overloads of the same template.
void cudaResizePacked(float *in, int iw, int ih, float *sized, int ow, int oh)
{
hipLaunchKernelGGL(( resize_packed_kernal), dim3(ow), dim3(oh), 0, 0, in, iw, ih, sized, ow, oh);
}
void cudaResizePacked(unsigned char *in, int iw, int ih, unsigned char *sized, int ow, int oh)
{
hipLaunchKernelGGL(( resize_packed_kernal), dim3(ow), dim3(oh), 0, 0, in, iw, ih, sized, ow, oh);
}
// Undistort 'in' into 'out': build per-pixel maps, then bilinearly remap.
void cudaUndistored(unsigned char *in, unsigned char *out, float *pCamK, float *pDistort, float *pInvNewCamK,
float* pMapx, float* pMapy, int w, int h, int c)
{
// 16x16 tiles over the output image.
dim3 block(16, 16);
dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
hipLaunchKernelGGL(( build_map_kernal) , dim3(grid), dim3(block) , 0, 0, pCamK, pDistort, pInvNewCamK, pMapx, pMapy, w, h);
hipDeviceSynchronize();
hipLaunchKernelGGL(( remap_kernal) , dim3(grid), dim3(block) , 0, 0, in, out, pMapx, pMapy, w, h, w, h, c);
hipDeviceSynchronize();
}
};
| 7e56e4ff7a56d9ddc6074c4d91c75581dfdb2cc3.cu | #include <cuda_runtime.h>
#include <cstdint>
// Clamp an integer intensity to the displayable byte range [0, 255].
__device__ unsigned char rgb_bound(int v)
{
    if (v < 0) {
        return 0;
    }
    if (v > 255) {
        return 255;
    }
    return (unsigned char)v;
}
// Device-side replacement for std::max: return the larger of two values.
template<typename T>
__device__ T max(T v1, T v2)
{
    if (v1 > v2) {
        return v1;
    }
    return v2;
}
// Device-side replacement for std::min: return the smaller of two values.
template<typename T>
__device__ T min(T v1, T v2)
{
    if (v1 < v2) {
        return v1;
    }
    return v2;
}
// Convert one pixel from 8-bit BGR (inputs in 0..255) to HSV.
// Results are written to hsv[0..2]: hue in [0,360), sat and val in [0,1].
__device__ void bgr2hsv(float bb, float gg, float rr, float *hsv)
{
float r=rr/255.0, g=gg/255.0, b=bb/255.0;
float rgbMax = max(max(r,g), b);
float rgbMin = min(min(r,g), b);
float delta = rgbMax-rgbMin;
float hue, sat, val;
val = rgbMax;
if(rgbMax == 0) sat = 0; // black: saturation undefined, report 0
else sat = delta/rgbMax;
if(delta == 0) hue = 0; // achromatic (grey): hue undefined, report 0
else
{
// Hue sector depends on which channel is the maximum.
if(rgbMax == r)
{
if(g>=b) hue = 60*(g-b)/delta;
else hue = 60*(g-b)/delta+360; // wrap negative hues into [0,360)
}
else if(rgbMax == g)
{
hue = 60*(b-r)/delta+120;
}
else
{
hue = 60*(r-g)/delta+240;
}
}
hsv[0] = hue;
hsv[1] = sat;
hsv[2] = val;
}
// Convert packed YUYV (YUY2: Y0 U Y1 V per pixel pair) into per-pixel YUV
// triplets. Launch: gridDim.x == w (column), blockDim.x == h (row).
__global__ void yuyv2yuv_kernal(unsigned char *in, unsigned char *out, int w, int h)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    int tmp = y*w;
    int src_offset = x*2;
    int dst_offset = x*3;
    // Even pixels sit on Y0 (U at +1, V at +3); odd pixels sit on Y1
    // (U at -1, V at +1). The original computed this +/-1 with
    // (int)powf(-1, x&1); an integer select yields the identical value
    // without a transcendental call per pixel.
    int sign = (x & 1) ? -1 : 1;
    out[tmp*3+dst_offset+0] = in[tmp*2+src_offset+0];
    out[tmp*3+dst_offset+1] = in[tmp*2+src_offset+sign];
    out[tmp*3+dst_offset+2] = in[tmp*2+src_offset+2+sign];
}
// Convert packed YUYV (Y0 U Y1 V) to interleaved 8-bit BGR using BT.601
// video-range conversion. Launch: gridDim.x == w, blockDim.x == h.
// NOTE(review): r is derived from V with the 2.018 coefficient and b from U
// with 1.596, the opposite pairing of the usual BT.601 formula -- confirm
// against the camera's byte order before changing.
__global__ void yuyv2bgr_kernal(unsigned char *in, unsigned char *out, int w, int h)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    int tmp = y*w;
    int src_offset = x*2;
    int dst_offset = x*3;
    // Even pixels sit on Y0 (U at +1, V at +3); odd pixels sit on Y1
    // (U at -1, V at +1). Replaces the original (int)powf(-1, x&1) with an
    // exact integer select, avoiding a transcendental call per pixel.
    int sign = (x & 1) ? -1 : 1;
    unsigned char Y = in[tmp*2+src_offset+0];
    unsigned char U = in[tmp*2+src_offset+sign];
    unsigned char V = in[tmp*2+src_offset+2+sign];
    float r,g,b;
    r = (1.164 * (Y - 16)) + (2.018 * (V - 128));
    g = (1.164 * (Y - 16)) - (0.813 * (U - 128)) - (0.391 * (V - 128));
    b = (1.164 * (Y - 16)) + (1.596 * (U - 128));
    out[tmp*3+dst_offset+2] = rgb_bound(r);
    out[tmp*3+dst_offset+1] = rgb_bound(g);
    out[tmp*3+dst_offset+0] = rgb_bound(b);
}
// Convert interleaved 8-bit BGR into planar float RGB normalised to [0,1].
// Launch: gridDim.x == w (pixel column), blockDim.x == h (pixel row).
__global__ void bgr2rgbfp_kernal(unsigned char *in, float *rgbfp, int w, int h)
{
    int col = blockIdx.x;
    int row = threadIdx.x;
    int pix = row*w + col;        // linear pixel index
    int src = pix*3;              // interleaved BGR triplet start
    int plane = w*h;              // elements per output plane
    rgbfp[pix]           = in[src+2]/255.0f;  // R plane
    rgbfp[plane   + pix] = in[src+1]/255.0f;  // G plane
    rgbfp[plane*2 + pix] = in[src+0]/255.0f;  // B plane
}
// Convert two adjacent interleaved BGR pixels into one packed YUYV
// (Y0 U Y1 V) group. Launch: gridDim.x == w/2 (one block per pixel pair),
// blockDim.x == h.
// NOTE(review): U is computed only from pixel 1 and V only from pixel 2,
// and the V-row coefficients (0.498/-0.419/-0.0813) differ slightly from
// the exact BT.601 values -- confirm these asymmetries are intentional.
__global__ void bgr2yuv422_kernal(unsigned char *in, unsigned char *out, int w, int h)
{
int x = blockIdx.x;
int y = threadIdx.x;
int in_tmp = y*w*3;
int out_tmp = y*w*2;
int src_offset = x*6; // two 3-byte BGR pixels in
int dst_offset = x*4; // one 4-byte YUYV group out
float b1 = in[in_tmp+src_offset];
float g1 = in[in_tmp+src_offset+1];
float r1 = in[in_tmp+src_offset+2];
float b2 = in[in_tmp+src_offset+3];
float g2 = in[in_tmp+src_offset+4];
float r2 = in[in_tmp+src_offset+5];
out[out_tmp+dst_offset] = (unsigned char)(int)(0.299*r1+0.587*g1+0.114*b1);
out[out_tmp+dst_offset+1] = (unsigned char)(int)(-0.169*r1-0.331*g1+0.499*b1+128);
out[out_tmp+dst_offset+2] = (unsigned char)(int)(0.299*r2+0.587*g2+0.114*b2);
out[out_tmp+dst_offset+3] = (unsigned char)(int)(0.498*r2-0.419*g2-0.0813*b2+128);
}
// Convert interleaved 8-bit BGR to interleaved 8-bit HSV, rescaling
// H from [0,360) and S,V from [0,1] into [0,255].
// Launch: gridDim.x == w, blockDim.x == h.
__global__ void bgr2hsv_kernal(unsigned char *bgr, unsigned char *hsv, int w, int h)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    int offset = y*w*3+x*3;
    float r,g,b;
    r = bgr[offset+2];
    g = bgr[offset+1];
    b = bgr[offset+0];
    float hsv_t[3];
    bgr2hsv(b, g, r, hsv_t);
    // BUGFIX: the results were previously read back from the *output* image
    // ('hsv[0..2]', i.e. the first pixel's bytes) instead of the computed
    // hsv_t[] values, so every pixel was derived from garbage.
    hsv[offset+0] = rgb_bound(hsv_t[0]*255/360);
    hsv[offset+1] = rgb_bound(hsv_t[1]*255);
    hsv[offset+2] = rgb_bound(hsv_t[2]*255);
}
// Demosaic a Bayer mosaic (per the indexing: G on even rows/cols, R to its
// right, B below -- GRBG order) into interleaved BGR, applying per-channel
// gains and a saturation adjustment in HSV space. The output is written
// mirrored in both axes (180-degree rotation).
// Launch: gridDim.x == w, blockDim.x == h.
// NOTE(review): the neighbour reads assume even image dimensions (e.g. row
// y+1 is read for even y); for odd w/h the last row/column reads run one
// line past the buffer -- confirm input sizes.
__global__ void baygr2bgr_kernal(unsigned char *bayergr, unsigned char *bgr, int w, int h,
float ds, float rgain, float ggain, float bgain)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    // BUGFIX: the mirrored coordinates were computed as h-y / w-x, which is
    // h (resp. w) for y==0 / x==0 -- an out-of-bounds write one row/column
    // past the image, while row/column 0 was never written.
    int outy = h-1-y;
    int outx = w-1-x;
    float r,g,b;
    float hue, sat, val;
    float rn, gn, bn;
    // Gather the three colour samples from the 2x2 Bayer neighbourhood.
    b = bayergr[(y+((y+1)&1))*w+x-(x&1)]*bgain;
    g = bayergr[y*w+x-(x&1)+(y&1)]*ggain;
    r = bayergr[(y-(y&1))*w+x+((x+1)&1)]*rgain;
    float hsv[3];
    bgr2hsv(b,g,r, hsv);
    hue = hsv[0];
    sat = hsv[1];
    val = hsv[2];
    // Saturation boost (ds >= 0) or cut (ds < 0), keeping sat in [0,1].
    if(ds>=0) sat = sat+(1-sat)*ds;
    else sat = sat+sat*ds;
    // HSV -> RGB via the standard 60-degree sector decomposition.
    int hi = hue/60.0;
    float f = hue/60.0-hi;
    float p = val*(1-sat);
    float q = val*(1-f*sat);
    float t = val*(1-(1-f)*sat);
    // BUGFIX: rn/gn/bn were left uninitialized when hi fell outside 0..5
    // (hue can round up to exactly 360); default to the achromatic value.
    rn = gn = bn = val;
    switch(hi)
    {
    case 0:
        rn = val; gn = t; bn = p; break;
    case 1:
        rn = q; gn = val; bn = p; break;
    case 2:
        rn = p; gn = val; bn = t; break;
    case 3:
        rn = p; gn = q; bn = val; break;
    case 4:
        rn = t; gn = p; bn = val; break;
    case 5:
        rn = val; gn = p; bn = q; break;
    default:
        break;
    }
    bgr[outy*w*3+outx*3+0] = rgb_bound(bn);
    bgr[outy*w*3+outx*3+1] = rgb_bound(gn);
    bgr[outy*w*3+outx*3+2] = rgb_bound(rn);
}
// Bilinear resize of a packed 3-channel image (instantiated for T = float
// and T = unsigned char by the wrappers below).
// Launch: gridDim.x == ow (output column), blockDim.x == oh (output row).
// u is the vertical interpolation fraction, v the horizontal one.
template<typename T>
__global__ void resize_packed_kernal(T *in, int iw, int ih, T *out, int ow, int oh)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    int offset_out = y*ow*3+x*3;
    float h_scale_rate = (float)ih/oh;
    float w_scale_rate = (float)iw/ow;
    float y_scale = h_scale_rate * y;
    float x_scale = w_scale_rate * x;
    int j = y_scale, i = x_scale;         // top-left source pixel
    float u = y_scale-j, v = x_scale-i;   // fractional offsets
    int offset_in1 = j*iw*3;              // source row j
    int offset_in2 = (j+1)*iw*3;          // source row j+1
    if(j+1>=ih || i+1>=iw)
    {
        // Border: no full 2x2 neighbourhood -- copy the nearest pixel.
        out[offset_out+0] = in[offset_in1+i*3];
        out[offset_out+1] = in[offset_in1+i*3+1];
        out[offset_out+2] = in[offset_in1+i*3+2];
    }
    else
    {
        // BUGFIX: the four bilinear taps were held in 'unsigned char'
        // regardless of T, truncating float-valued images (the float
        // instantiation is used by cudaResizePacked(float*, ...)).
        // Using T keeps full precision and is identical for uchar inputs.
        T x1,x2,x3,x4;
        x1 = in[offset_in1+i*3];
        x2 = in[offset_in1+(i+1)*3];
        x3 = in[offset_in2+i*3];
        x4 = in[offset_in2+(i+1)*3];
        out[offset_out+0] = ((1-u)*(1-v)*x1+(1-u)*v*x2+u*(1-v)*x3+u*v*x4);
        x1 = in[offset_in1+i*3+1];
        x2 = in[offset_in1+(i+1)*3+1];
        x3 = in[offset_in2+i*3+1];
        x4 = in[offset_in2+(i+1)*3+1];
        out[offset_out+1] = ((1-u)*(1-v)*x1+(1-u)*v*x2+u*(1-v)*x3+u*v*x4);
        x1 = in[offset_in1+i*3+2];
        x2 = in[offset_in1+(i+1)*3+2];
        x3 = in[offset_in2+i*3+2];
        x4 = in[offset_in2+(i+1)*3+2];
        out[offset_out+2] = ((1-u)*(1-v)*x1+(1-u)*v*x2+u*(1-v)*x3+u*v*x4);
    }
}
// Build per-pixel undistortion lookup tables (pMapx/pMapy) for a remap pass.
// One thread per output pixel; 2-D launch with a bounds guard below.
// pCamK      : 3x3 source intrinsics, row-major (fx=K[0], fy=K[4], u0=K[2], v0=K[5]).
// pDistort   : [k1, k2, p1, p2]; the rational (k3..k6) and thin-prism
//              (s1..s4) coefficients are fixed to 0 below.
// pInvNewCamK: inverse of the "new" 3x3 camera matrix, row-major.
// outImgW/H  : output (undistorted) image size; also the map dimensions.
__global__ void build_map_kernal(float *pCamK, float *pDistort, float *pInvNewCamK, float *pMapx, float *pMapy, int outImgW, int outImgH)
{
const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
const int tidy = blockDim.y*blockIdx.y + threadIdx.y;
if (tidx < outImgW && tidy < outImgH)
{
float k1 = pDistort[0];
float k2 = pDistort[1];
float p1 = pDistort[2];
float p2 = pDistort[3];
// Higher-order rational/thin-prism terms are unused by this model.
float k3, k4, k5, k6, s1, s2, s3, s4;
k3 = k4 = k5 = k6 = s1 = s2 = s3 = s4 = 0;
float fx = pCamK[0];
float fy = pCamK[4];
float u0 = pCamK[2];
float v0 = pCamK[5];
// Back-project the output pixel through the inverse new camera matrix
// (homogeneous coordinates), then dehomogenise.
float _x = tidx*pInvNewCamK[0] + tidy*pInvNewCamK[1] + pInvNewCamK[2];
float _y = tidx*pInvNewCamK[3] + tidy*pInvNewCamK[4] + pInvNewCamK[5];
float _w = tidx*pInvNewCamK[6] + tidy*pInvNewCamK[7] + pInvNewCamK[8];
float w = 1. / _w;
float x = _x * w;
float y = _y * w;
float x2 = x*x;
float y2 = y*y;
float r2 = x2 + y2;
float _2xy = 2 * x*y;
// Rational radial factor, then tangential and thin-prism terms.
float kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2) / (1 + ((k6*r2 + k5)*r2 + k4)*r2);
float xd = (x*kr + p1*_2xy + p2*(r2 + 2 * x2) + s1*r2 + s2*r2*r2);
float yd = (y*kr + p1*(r2 + 2 * y2) + p2*_2xy + s3*r2 + s4*r2*r2);
float invProj = 1.;
// Project the distorted normalised point with the source intrinsics.
float u = fx*invProj*xd + u0;
float v = fy*invProj*yd + v0;
int mapIdx = tidy*outImgW + tidx;
pMapx[mapIdx] = (float)u;
pMapy[mapIdx] = (float)v;
}
}
// Bilinear remap of pSrcImg into pDstImg using the lookup tables produced by
// build_map_kernal. One thread per output pixel (2-D launch with guard).
// Output pixels whose 2x2 interpolation footprint leaves the source image
// are skipped (pDstImg keeps its previous contents there).
__global__ void remap_kernal(unsigned char* pSrcImg, unsigned char* pDstImg, float* pMapx, float* pMapy, int inWidth, int inHeight,
int outWidth, int outHeight, int channels)
{
    const int tidx = blockDim.x*blockIdx.x + threadIdx.x;
    const int tidy = blockDim.y*blockIdx.y + threadIdx.y;
    if (tidx < outWidth && tidy < outHeight)
    {
        int mapIdx = tidy*outWidth + tidx;
        // Source coordinate for this output pixel.
        float u = pMapx[mapIdx];
        float v = pMapy[mapIdx];
        int u1 = floor(u);
        int v1 = floor(v);
        int u2 = u1 + 1;
        int v2 = v1 + 1;
        if (u1 >= 0 && v1 >= 0 && u2 < inWidth && v2 < inHeight)
        {
            float dx = u - u1;
            float dy = v - v1;
            float weight1 = (1 - dx)*(1 - dy);
            float weight2 = dx*(1 - dy);
            float weight3 = (1 - dx)*dy;
            float weight4 = dx*dy;
            // BUGFIX: the pixel stride was hard-coded to 3 even though the
            // kernel takes a 'channels' parameter, mis-indexing any image
            // with channels != 3. Behaviour is unchanged for 3-channel data.
            int resultIdx = mapIdx * channels;
            for (int chan = 0; chan < channels; chan++)
            {
                pDstImg[resultIdx + chan] = (unsigned char)(weight1*pSrcImg[(v1*inWidth + u1) * channels + chan]
                + weight2*pSrcImg[(v1*inWidth + u2) * channels + chan]
                + weight3*pSrcImg[(v2*inWidth + u1) * channels + chan]
                + weight4*pSrcImg[(v2*inWidth + u2) * channels + chan]);
            }
        }
    }
}
// Host-side wrappers that launch the image-processing kernels above.
// NOTE(review): the colour-conversion kernels are launched with
// gridDim.x == w and blockDim.x == h, so images taller than the per-block
// thread limit (1024) will fail to launch -- confirm expected input sizes.
namespace imgproc
{
    // Packed YUYV (YUY2) -> per-pixel YUV triplets.
    void cudaYUYV2YUV(unsigned char *in, unsigned char *out, int w, int h)
    {
        yuyv2yuv_kernal<<<w, h>>>(in,out,w,h);
    }
    // Packed YUYV -> interleaved 8-bit BGR.
    void cudaYUYV2BGR(unsigned char *in, unsigned char *out, int w, int h)
    {
        yuyv2bgr_kernal<<<w, h>>>(in,out,w,h);
    }
    // Bayer mosaic -> BGR with per-channel gains and saturation adjustment.
    void cudaBayer2BGR(unsigned char *bayer, unsigned char *bgr, int w, int h,
    float sat, float rgain, float ggain, float bgain)
    {
        baygr2bgr_kernal<<<w,h>>>(bayer, bgr, w, h, sat, rgain, ggain, bgain);
    }
    // Interleaved 8-bit BGR -> planar float RGB in [0,1].
    void cudaBGR2RGBfp(unsigned char *bgr, float *rgbfp, int w, int h)
    {
        bgr2rgbfp_kernal<<<w,h>>>(bgr, rgbfp, w, h);
    }
    // Interleaved BGR -> packed YUV 4:2:2; one block per pixel *pair*, hence w/2.
    void cudaBGR2YUV422(unsigned char *bgr, unsigned char *yuv422, int w, int h)
    {
        bgr2yuv422_kernal<<<w/2, h>>>(bgr, yuv422, w, h);
    }
    // Interleaved BGR -> interleaved 8-bit HSV.
    void cudaBGR2HSV(unsigned char *bgr, unsigned char *hsv, int w, int h)
    {
        bgr2hsv_kernal<<<w, h>>>(bgr, hsv, w, h);
    }
    // Bilinear resize, float and unsigned char overloads of the same template.
    void cudaResizePacked(float *in, int iw, int ih, float *sized, int ow, int oh)
    {
        resize_packed_kernal<<<ow, oh>>>(in, iw, ih, sized, ow, oh);
    }
    void cudaResizePacked(unsigned char *in, int iw, int ih, unsigned char *sized, int ow, int oh)
    {
        resize_packed_kernal<<<ow, oh>>>(in, iw, ih, sized, ow, oh);
    }
    // Undistort 'in' into 'out': build per-pixel maps, then bilinearly remap.
    void cudaUndistored(unsigned char *in, unsigned char *out, float *pCamK, float *pDistort, float *pInvNewCamK,
    float* pMapx, float* pMapy, int w, int h, int c)
    {
        // 16x16 tiles over the output image.
        dim3 block(16, 16);
        dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
        build_map_kernal <<<grid, block >>> (pCamK, pDistort, pInvNewCamK, pMapx, pMapy, w, h);
        // cudaThreadSynchronize() has been deprecated since CUDA 5.0;
        // cudaDeviceSynchronize() is the documented, identical replacement.
        cudaDeviceSynchronize();
        remap_kernal <<<grid, block >>> (in, out, pMapx, pMapy, w, h, w, h, c);
        cudaDeviceSynchronize();
    }
};
|
b9200d64062c4e157dbda0abb63c5ec56e17aad3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
// Aggregates the kernel's dynamic shared-memory needs: a correctly aligned
// staging buffer sized for one SMEM tile, plus the TMA mbarrier word.
template <class ElementType, class SmemLayout>
struct SharedStorage
{
cute::array_aligned<ElementType, cute::cosize_v<SmemLayout>> smem;
// Single 64-bit mbarrier used to signal TMA-load completion.
cute::uint64_t tma_load_mbar[1];
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
// Device test kernel: stage by stage, thread 0 issues a TMA load of one CTA
// tile of mA into shared memory and arms the mbarrier; all threads wait on
// the barrier, then cooperatively copy the tile out to gB so the host can
// verify a gmem -> smem -> gmem round trip.
template <class T, class TiledCopy, class CTA_Tiler, class GmemLayout, class SmemLayout>
__global__ void
tma_test_device_cute(T const* g_in, T* g_out,
CUTE_GRID_CONSTANT TiledCopy const tma, CTA_Tiler cta_tiler,
GmemLayout gmem_layout, SmemLayout smem_layout)
{
CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout)));
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<T, SmemLayout>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
// Construct SMEM tensor
Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.data()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...)
// Shared memory barriers use 64bits in SMEM for synchronization
uint64_t* tma_load_mbar = shared_storage.tma_load_mbar;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA = tma.get_tma_tensor(shape(gmem_layout));
Tensor mB = make_tensor(make_gmem_ptr(g_out), gmem_layout);
constexpr int R = rank_v<CTA_Tiler>;
Tensor gA = local_tile(mA, cta_tiler, repeat<R>(_)); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
Tensor gB = local_tile(mB, cta_tiler, repeat<R>(_)); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
//
// Prepare the TMA_LOAD
//
auto cta_tma = tma.get_slice(Int<0>{}); // CTA slice
Tensor tAgA_x = cta_tma.partition_S(gA); // (TMA,TMA_M,TMA_N,REST_M,REST_N)
Tensor tAsA_x = cta_tma.partition_D(sA); // (TMA,TMA_M,TMA_N)
#if 0
if (thread0()) {
print(tma);
print("TILE : "); print(cta_tiler); print("\n");
print(" mA : "); print( mA.data()); print(" o "); print( mA.layout()); print("\n");
print(" gA : "); print( gA.data()); print(" o "); print( gA.layout()); print("\n");
print("tAgA_x: "); print(tAgA_x.data()); print(" o "); print(tAgA_x.layout()); print("\n");
print(" sA : "); print( sA.data()); print(" o "); print( sA.layout()); print("\n");
print("tAsA_x: "); print(tAsA_x.data()); print(" o "); print(tAsA_x.layout()); print("\n");
}
#endif
//
// Perform the TMA_LOAD
//
// INPUT: Group the REST_X modes and the TMA_X modes to easily iterate through the tiles
Tensor tAgA = group_modes<1,rank(tAgA_x)>(tAgA_x); // (TMA,REST)
Tensor tAsA = group_modes<1,rank(tAsA_x)>(tAsA_x); // (TMA,REST)
static_assert(size<1>(tAsA) == 1);
// OUTPUT: Group the CTA_TILE_X modes and REST_X modes for output
Tensor tBgB = group_modes<0,R>(group_modes<R,rank(gB)>(gB)); // (CTA_TILE, REST)
#if 0
if (thread0()) {
print("tAgA : "); print(tAgA.data()); print(" o "); print(tAgA.layout()); print("\n");
print("tAsA : "); print(tAsA.data()); print(" o "); print(tAsA.layout()); print("\n");
print("tBgB : "); print(tBgB.data()); print(" o "); print(tBgB.layout()); print("\n");
}
#endif
// Loop over the TMA stages, using smem as our buffer
for (int stage = 0; stage < size<1>(tAgA); ++stage)
{
// Set the bytes transferred in this TMA transaction (may involve multiple issues)
constexpr int kTmaTransactionBytes = size(sA) * sizeof_bits_v<T> / 8;
if (threadIdx.x == 0)
{
/// Initialize shared memory barrier
tma_load_mbar[0] = 0;
cute::initialize_barrier(tma_load_mbar[0], 1 /*numThreads*/);
cute::set_barrier_transaction_bytes(tma_load_mbar[0], kTmaTransactionBytes);
copy(tma.with(tma_load_mbar[0]), tAgA(_,stage), tAsA(_,0));
}
__syncthreads();
/// Wait on the shared memory barrier until the phase bit flips from kPhaseBit value
// The barrier is re-initialized by thread 0 every stage (after the trailing
// __syncthreads() of the previous stage), so the expected phase is always 0.
constexpr int kPhaseBit = 0;
cute::wait_barrier(tma_load_mbar[0], kPhaseBit);
//
// Write out trivially smem -> gmem
//
//if (thread0()) {
// print_tensor(sA);
//}
for (int i = threadIdx.x; i < size(sA); i += blockDim.x) {
tBgB(i,stage) = sA(i);
}
__syncthreads();
}
}
// Host driver: fill a gmem tensor with a deterministic pattern (i % 13),
// round-trip it through the TMA-load kernel (1 CTA, 128 threads), copy the
// result back and compare element-wise against the input.
template <class T, class GMEM_Layout, class SMEM_Layout, class CTA_Tile>
void
test_tma_load(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tile const& cta_tile)
{
thrust::host_vector<T> h_in(cosize(gmem_layout));
for (int i = 0; i < h_in.size(); ++i) { h_in[i] = T(i % 13); }
thrust::device_vector<T> d_in = h_in;
thrust::device_vector<T> d_out(h_in.size(), T(-1)); // sentinel for unwritten elements
Tensor gA = make_tensor(d_in.data().get(), gmem_layout);
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gA, smem_layout, cta_tile, Int<1>{});
//print(tma);
// Dynamic shared memory must cover the staging tile plus the mbarrier.
int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>));
hipLaunchKernelGGL(( tma_test_device_cute), dim3(1), dim3(128), smem_size, 0,
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tma, cta_tile,
gmem_layout,
smem_layout);
thrust::host_vector<T> h_out = d_out;
Tensor hA_in = make_tensor(h_in.data(), gmem_layout);
Tensor hA_out = make_tensor(h_out.data(), gmem_layout);
for (int i = 0; i < size(gmem_layout); ++i) {
EXPECT_EQ(hA_in(i), hA_out(i));
}
}
// Convenience overload: the CTA tile defaults to the full SMEM tile shape.
template <class T, class GMEM_Layout, class SMEM_Layout>
void
test_tma_load(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout)
{
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// 1-D TMA loads through a 256-element smem layout, for 1/2/4/8-byte element
// types, with a matching gmem layout and with a smaller (128-element) one.
TEST(SM90_CuTe_Hopper, Tma_Load_1D)
{
Layout smem_layout = Layout<_256, _1>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(128, GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
// 32x32 column-major smem tile against static, dynamic column-major, and
// padded (leading stride 1024) gmem layouts, for 1/2/4/8-byte types.
TEST(SM90_CuTe_Hopper, Tma_Load_32x32_Col)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(Int<1>{}, 1024));
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
// Row-major counterpart of Tma_Load_32x32_Col: static, dynamic row-major,
// and padded (row stride 1024) gmem layouts, for 1/2/4/8-byte types.
TEST(SM90_CuTe_Hopper, Tma_Load_32x32_Row)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_32,_1>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenRowMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(1024, Int<1>{}));
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
// TMA-load exactly one GMMA swizzle atom, with MN-major (col-major) gmem.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_atom_mn()
{
auto smem_layout = SWIZZLE_ATOM<T>{};
Layout gmem_layout = make_layout(shape(smem_layout), GenColMajor{});
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// TMA-load exactly one GMMA swizzle atom, with K-major (row-major) gmem.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_atom_k()
{
auto smem_layout = SWIZZLE_ATOM<T>{};
Layout gmem_layout = make_layout(shape(smem_layout), GenRowMajor{});
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// One TMA load per GMMA swizzle atom (SW128/SW64/SW32/INTER, both MN- and
// K-major) across 1/2/4/8-byte element types.
TEST(SM90_CuTe_Hopper, Tma_Load_Swizzle_Atoms)
{
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_INTER_Atom>();
}
// Tile a GMMA swizzle atom up to a 128x128 smem layout and TMA-load it from
// a column-major (MN-major) gmem tensor of the same extent.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_tile_mn()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(int(size<0>(smem_layout)), int(size<1>(smem_layout))), GenColMajor{});
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// Tile a GMMA swizzle atom up to a 128x128 smem layout and TMA-load it from
// a row-major (K-major) gmem tensor of the same extent.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_tile_k()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(int(size<0>(smem_layout)), int(size<1>(smem_layout))), GenRowMajor{});
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// Swizzle atoms tiled up to 128x128, MN- and K-major; restricted to 1- and
// 2-byte element types (larger types exceed the shared-memory budget).
TEST(SM90_CuTe_Hopper, Tma_Load_Swizzle_Tiles)
{
// Other T-types use too much smem
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_INTER_Atom>();
}
// Tensor by-mode
// TMA loads over tensors with 3, 4, and 5 gmem modes, tiled by-mode.
TEST(SM90_CuTe_Hopper, Tma_Load_Tensor)
{
// 3-mode TMA
{
Layout gmem_layout = make_layout(make_shape(128, 64, 5));
auto cta_tile = Shape<_64, _32>{}; // GMEM Tiling:
// Take 64-elem from m
// Take 32-elem from k
auto smem_layout = make_layout(Shape<_64,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 4-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(80,40),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_8>,Shape<_32,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 8-elem from m1,
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 5-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,32,32),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_4,_2>,Shape<_16,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 4-elem from m1, 2-elem from m2
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
// Tensor Multimode -- TMA with more than 5 modes in GMEM (packs residual modes into last TMA mode)
// Gmem shapes with more than 5 modes: the residual (untiled) modes are
// packed by TMA into its last mode.
TEST(SM90_CuTe_Hopper, Tma_Load_Tensor_Multimode)
{
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32,_3>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0, 3-elem from m1
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_96,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,3,2),make_shape(32,4,2,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_16,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
#endif
| b9200d64062c4e157dbda0abb63c5ec56e17aad3.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include "cutlass_unit_test.h"
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cute/tensor.hpp>
using namespace cute;
// Aggregates the kernel's dynamic shared-memory needs: a correctly aligned
// staging buffer sized for one SMEM tile, plus the TMA mbarrier word.
template <class ElementType, class SmemLayout>
struct SharedStorage
{
cute::array_aligned<ElementType, cute::cosize_v<SmemLayout>> smem;
// Single 64-bit mbarrier used to signal TMA-load completion.
cute::uint64_t tma_load_mbar[1];
};
#if CUDA_12_0_SM90_FEATURES_SUPPORTED
// Device-side TMA round-trip test (requires SM90):
//   1. TMA-load each CTA tile of the (TMA-described) input tensor into SMEM,
//      synchronizing through an mbarrier in shared memory;
//   2. write the SMEM tile back out trivially to g_out.
// Launched with a single CTA; `tma` is passed as CUTE_GRID_CONSTANT so the
// descriptor lives in grid-constant memory.  g_in is only reachable through
// the TMA descriptor (hence unused directly).
template <class T, class TiledCopy, class CTA_Tiler, class GmemLayout, class SmemLayout>
__global__ void
tma_test_device_cute(T const* g_in, T* g_out,
CUTE_GRID_CONSTANT TiledCopy const tma, CTA_Tiler cta_tiler,
GmemLayout gmem_layout, SmemLayout smem_layout)
{
CUTE_STATIC_ASSERT_V(product_each(shape(cta_tiler)) == product_each(shape(smem_layout)));
// Use Shared Storage structure to allocate and distribute aligned SMEM addresses
extern __shared__ char shared_memory[];
using SharedStorage = SharedStorage<T, SmemLayout>;
SharedStorage& shared_storage = *reinterpret_cast<SharedStorage*>(shared_memory);
// Construct SMEM tensor
Tensor sA = make_tensor(make_smem_ptr(shared_storage.smem.data()), smem_layout); // (CTA_TILE_M,CTA_TILE_N,...)
// Shared memory barriers use 64bits in SMEM for synchronization
uint64_t* tma_load_mbar = shared_storage.tma_load_mbar;
// TMA requires special handling of strides to deal with coord codomain mapping
// Represent the full tensors -- get these from TMA
Tensor mA = tma.get_tma_tensor(shape(gmem_layout));
Tensor mB = make_tensor(make_gmem_ptr(g_out), gmem_layout);
constexpr int R = rank_v<CTA_Tiler>;
Tensor gA = local_tile(mA, cta_tiler, repeat<R>(_)); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
Tensor gB = local_tile(mB, cta_tiler, repeat<R>(_)); // (CTA_TILE_M,CTA_TILE_N,...REST_M,REST_N,...)
//
// Prepare the TMA_LOAD
//
auto cta_tma = tma.get_slice(Int<0>{}); // CTA slice
Tensor tAgA_x = cta_tma.partition_S(gA); // (TMA,TMA_M,TMA_N,REST_M,REST_N)
Tensor tAsA_x = cta_tma.partition_D(sA); // (TMA,TMA_M,TMA_N)
#if 0
if (thread0()) {
print(tma);
print("TILE : "); print(cta_tiler); print("\n");
print(" mA : "); print( mA.data()); print(" o "); print( mA.layout()); print("\n");
print(" gA : "); print( gA.data()); print(" o "); print( gA.layout()); print("\n");
print("tAgA_x: "); print(tAgA_x.data()); print(" o "); print(tAgA_x.layout()); print("\n");
print(" sA : "); print( sA.data()); print(" o "); print( sA.layout()); print("\n");
print("tAsA_x: "); print(tAsA_x.data()); print(" o "); print(tAsA_x.layout()); print("\n");
}
#endif
//
// Perform the TMA_LOAD
//
// INPUT: Group the REST_X modes and the TMA_X modes to easily iterate through the tiles
Tensor tAgA = group_modes<1,rank(tAgA_x)>(tAgA_x); // (TMA,REST)
Tensor tAsA = group_modes<1,rank(tAsA_x)>(tAsA_x); // (TMA,REST)
// SMEM holds exactly one tile, so there is only one destination stage.
static_assert(size<1>(tAsA) == 1);
// OUTPUT: Group the CTA_TILE_X modes and REST_X modes for output
Tensor tBgB = group_modes<0,R>(group_modes<R,rank(gB)>(gB)); // (CTA_TILE, REST)
#if 0
if (thread0()) {
print("tAgA : "); print(tAgA.data()); print(" o "); print(tAgA.layout()); print("\n");
print("tAsA : "); print(tAsA.data()); print(" o "); print(tAsA.layout()); print("\n");
print("tBgB : "); print(tBgB.data()); print(" o "); print(tBgB.layout()); print("\n");
}
#endif
// Loop over the TMA stages, using smem as our buffer
for (int stage = 0; stage < size<1>(tAgA); ++stage)
{
// Set the bytes transferred in this TMA transaction (may involve multiple issues)
constexpr int kTmaTransactionBytes = size(sA) * sizeof_bits_v<T> / 8;
// Only thread 0 initializes the barrier and issues the TMA copy; the
// __syncthreads() below makes the initialized barrier visible to all threads.
if (threadIdx.x == 0)
{
/// Initialize shared memory barrier
tma_load_mbar[0] = 0;
cute::initialize_barrier(tma_load_mbar[0], 1 /*numThreads*/);
cute::set_barrier_transaction_bytes(tma_load_mbar[0], kTmaTransactionBytes);
copy(tma.with(tma_load_mbar[0]), tAgA(_,stage), tAsA(_,0));
}
__syncthreads();
/// Wait on the shared memory barrier until the phase bit flips from kPhaseBit value
// The barrier is re-initialized every stage, so the expected phase is always 0.
constexpr int kPhaseBit = 0;
cute::wait_barrier(tma_load_mbar[0], kPhaseBit);
//
// Write out trivially smem -> gmem
//
//if (thread0()) {
// print_tensor(sA);
//}
for (int i = threadIdx.x; i < size(sA); i += blockDim.x) {
tBgB(i,stage) = sA(i);
}
// Keep slower threads from racing ahead into the next stage's barrier re-init.
__syncthreads();
}
}
// Host harness: fill GMEM with a deterministic pattern (i % 13), build a TMA
// load descriptor for the given SMEM layout / CTA tile, run the round-trip
// kernel, and check the output matches the input element-by-element.
// NOTE(review): smem_size is not registered via cudaFuncSetAttribute, so
// dynamic SMEM above the 48 KB default would fail to launch -- presumably all
// layouts used here fit; confirm for new test cases.
template <class T, class GMEM_Layout, class SMEM_Layout, class CTA_Tile>
void
test_tma_load(GMEM_Layout const& gmem_layout,
SMEM_Layout const& smem_layout,
CTA_Tile const& cta_tile)
{
thrust::host_vector<T> h_in(cosize(gmem_layout));
for (int i = 0; i < h_in.size(); ++i) { h_in[i] = T(i % 13); }
thrust::device_vector<T> d_in = h_in;
// Sentinel fill so untouched outputs are detectable.
thrust::device_vector<T> d_out(h_in.size(), T(-1));
Tensor gA = make_tensor(d_in.data().get(), gmem_layout);
auto tma = make_tma_copy(SM90_TMA_LOAD{}, gA, smem_layout, cta_tile, Int<1>{});
//print(tma);
int smem_size = int(sizeof(SharedStorage<T, decltype(smem_layout)>));
tma_test_device_cute<<<1, 128, smem_size>>>(
thrust::raw_pointer_cast(d_in.data()),
thrust::raw_pointer_cast(d_out.data()),
tma, cta_tile,
gmem_layout,
smem_layout);
// thrust's D2H copy synchronizes with the kernel on the default stream.
thrust::host_vector<T> h_out = d_out;
Tensor hA_in = make_tensor(h_in.data(), gmem_layout);
Tensor hA_out = make_tensor(h_out.data(), gmem_layout);
for (int i = 0; i < size(gmem_layout); ++i) {
EXPECT_EQ(hA_in(i), hA_out(i));
}
}
// Convenience overload: the CTA tile defaults to the full extent of the SMEM
// layout (one tile covers the whole SMEM tensor).
template <class T, class GMEM_Layout, class SMEM_Layout>
void
test_tma_load(GMEM_Layout const& gmem_layout,
              SMEM_Layout const& smem_layout)
{
  auto default_cta_tile = product_each(shape(smem_layout));
  test_tma_load<T>(gmem_layout, smem_layout, default_cta_tile);
}
// 1-D TMA load: 256-element SMEM tile over several element widths.
// NOTE(review): the second case pairs a 128-element GMEM with the 256-element
// SMEM layout -- presumably exercises out-of-bounds predication in
// make_tma_copy; confirm this is intentional.
TEST(SM90_CuTe_Hopper, Tma_Load_1D)
{
Layout smem_layout = Layout<_256, _1>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(128, GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
// 32x32 column-major SMEM tile loaded from: identical static layout, dynamic
// column-major GMEM, and GMEM with a padded column stride (1024).
TEST(SM90_CuTe_Hopper, Tma_Load_32x32_Col)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_1,_32>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenColMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(Int<1>{}, 1024));
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
// Row-major mirror of Tma_Load_32x32_Col: same three GMEM variants with the
// strides transposed (padded row stride 1024 in the last case).
TEST(SM90_CuTe_Hopper, Tma_Load_32x32_Row)
{
Layout smem_layout = Layout<Shape<_32,_32>, Stride<_32,_1>>{};
{
Layout gmem_layout = smem_layout;
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), GenRowMajor{});
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
{
Layout gmem_layout = make_layout(make_shape(32,32), make_stride(1024, Int<1>{}));
test_tma_load<int8_t>(gmem_layout, smem_layout);
test_tma_load<half_t>(gmem_layout, smem_layout);
test_tma_load< float>(gmem_layout, smem_layout);
test_tma_load<double>(gmem_layout, smem_layout);
}
}
// Load a single MN-major GMMA swizzle atom; GMEM mirrors the atom's shape in
// column-major order and the CTA tile spans the full atom.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_atom_mn()
{
  SWIZZLE_ATOM<T> smem_layout{};
  auto atom_shape = product_each(shape(smem_layout));
  Layout gmem_layout = make_layout(shape(smem_layout), GenColMajor{});
  test_tma_load<T>(gmem_layout, smem_layout, atom_shape);
}
// Load a single K-major GMMA swizzle atom; GMEM mirrors the atom's shape in
// row-major order and the CTA tile spans the full atom.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_atom_k()
{
  SWIZZLE_ATOM<T> smem_layout{};
  auto atom_shape = product_each(shape(smem_layout));
  Layout gmem_layout = make_layout(shape(smem_layout), GenRowMajor{});
  test_tma_load<T>(gmem_layout, smem_layout, atom_shape);
}
// Sweep every GMMA swizzle atom (SW128/SW64/SW32/INTER, MN- and K-major)
// across the four element widths.
TEST(SM90_CuTe_Hopper, Tma_Load_Swizzle_Atoms)
{
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_atom_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn< float, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_mn<double, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_atom_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k<half_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k< float, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_atom_k<double, GMMA::Layout_K_INTER_Atom>();
}
// Tile the MN-major swizzle atom out to a 128x128 SMEM layout and load it
// from a dynamic column-major GMEM of the same extents.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_tile_mn()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(int(size<0>(smem_layout)), int(size<1>(smem_layout))), GenColMajor{});
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// Tile the K-major swizzle atom out to a 128x128 SMEM layout and load it
// from a dynamic row-major GMEM of the same extents.
template <class T, template <typename> typename SWIZZLE_ATOM>
void
test_tma_load_swizzle_tile_k()
{
auto smem_layout = tile_to_shape(SWIZZLE_ATOM<T>{}, Shape<_128,_128>{});
Layout gmem_layout = make_layout(make_shape(int(size<0>(smem_layout)), int(size<1>(smem_layout))), GenRowMajor{});
return test_tma_load<T>(gmem_layout, smem_layout, product_each(shape(smem_layout)));
}
// Sweep tiled (128x128) swizzle layouts for the small element types only.
TEST(SM90_CuTe_Hopper, Tma_Load_Swizzle_Tiles)
{
// Other T-types use too much smem
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW128_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW64_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_SW32_Atom>();
test_tma_load_swizzle_tile_mn<int8_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_tile_mn<half_t, GMMA::Layout_MN_INTER_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW128_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW64_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_SW32_Atom>();
test_tma_load_swizzle_tile_k<int8_t, GMMA::Layout_K_INTER_Atom>();
test_tma_load_swizzle_tile_k<half_t, GMMA::Layout_K_INTER_Atom>();
}
// Tensor by-mode
// Hierarchical (by-mode) CTA tiling with 3-, 4-, and 5-mode GMEM tensors.
TEST(SM90_CuTe_Hopper, Tma_Load_Tensor)
{
// 3-mode TMA
{
Layout gmem_layout = make_layout(make_shape(128, 64, 5));
auto cta_tile = Shape<_64, _32>{}; // GMEM Tiling:
// Take 64-elem from m
// Take 32-elem from k
auto smem_layout = make_layout(Shape<_64,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 4-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(80,40),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_8>,Shape<_32,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 8-elem from m1,
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
// 5-mode TMA
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,32,32),make_shape(32,12)));
auto cta_tile = Shape<Shape<_16,_4,_2>,Shape<_16,_2>>{}; // GMEM Tiling:
// Take 16-elem from m0, 4-elem from m1, 2-elem from m2
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_128,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
// Tensor Multimode -- TMA with more than 5 modes in GMEM (packs residual modes into last TMA mode)
// GMEM tensors with more than 5 modes: TMA packs the residual modes into the
// last TMA mode (see comment above in the original file).
TEST(SM90_CuTe_Hopper, Tma_Load_Tensor_Multimode)
{
{
Layout gmem_layout = make_layout(make_shape(make_shape(32,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,2),make_shape(32,4,2)));
auto cta_tile = Shape<Shape<_32,_3>, Shape<_32,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0, 3-elem from m1
// Take 32-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_96,_64>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
{
Layout gmem_layout = make_layout(make_shape(make_shape(64,3,2,3,2),make_shape(32,4,2,2)));
auto cta_tile = Shape<Shape<_32>, Shape<_16,_2>>{}; // GMEM Tiling:
// Take 32-elem from m0
// Take 16-elem from k0, 2-elem from k1
auto smem_layout = make_layout(Shape<_32,_32>{});
test_tma_load<half_t>(gmem_layout, smem_layout, cta_tile);
}
}
#endif
|
959c563344b4887bb83c3b36ad72aaae1f2d3d16.hip | // !!! This is a file automatically generated by hipify!!!
/*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/update_persistent_particles.h"
#include "common.h"
#include "cuda_utils.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
// Association normalizer: pers_occ_mass / accumulated occupancy mass of the
// cell; 0 when the cell accumulated no occupancy mass.
__device__ float calc_norm_assoc(float occ_accum, float rho_p)
{
    if (occ_accum > 0.0)
    {
        return rho_p / occ_accum;
    }
    return 0.0;
}
// Unassociated normalizer: fraction of the cell's occupancy mass that is
// persistent; 0 when the cell carries no occupancy mass.
__device__ float calc_norm_unassoc(const GridCell& grid_cell)
{
return grid_cell.occ_mass > 0.0 ? grid_cell.pers_occ_mass / grid_cell.occ_mass : 0.0;
}
// Store both normalization factors on the grid cell for use in kernel 3.
__device__ void set_normalization_components(GridCell* grid_cell_array, int i, float mu_A, float mu_UA)
{
grid_cell_array[i].mu_A = mu_A;
grid_cell_array[i].mu_UA = mu_UA;
}
// Unnormalized persistent weight: measurement likelihood of the particle's
// cell times the particle's current weight.
__device__ float update_unnorm(Particle* particle_array, int i, MeasurementCell* meas_cell_array)
{
Particle& particle = particle_array[i];
return meas_cell_array[particle.grid_cell_idx].likelihood * particle.weight;
}
// Normalized weight: p_A * mu_A * weight + (1 - p_A) * mu_UA * particle.weight.
// `weight` is the likelihood-scaled value produced by update_unnorm, while the
// unassociated term deliberately(?) uses the prior particle.weight -- this
// matches the associated/unassociated split; confirm against the filter spec.
__device__ float normalize(Particle& particle, GridCell* grid_cell_array, MeasurementCell* meas_cell_array, float weight)
{
GridCell& cell = grid_cell_array[particle.grid_cell_idx];
MeasurementCell& meas_cell = meas_cell_array[particle.grid_cell_idx];
return meas_cell.p_A * cell.mu_A * weight + (1.0 - meas_cell.p_A) * cell.mu_UA * particle.weight;
}
// Pass 1 (grid-stride, one iteration per particle): compute each particle's
// unnormalized likelihood-scaled weight into weight_array.
__global__ void updatePersistentParticlesKernel1(Particle* particle_array, MeasurementCell* meas_cell_array, float* weight_array,
int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
weight_array[i] = update_unnorm(particle_array, i, meas_cell_array);
}
}
// Pass 2 (grid-stride, one iteration per cell): from the prefix-summed weight
// array, accumulate each cell's occupancy mass over its particle index range
// [start_idx, end_idx] and derive the two normalization factors.
__global__ void updatePersistentParticlesKernel2(GridCell* grid_cell_array, float* weight_array_accum, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
int start_idx = grid_cell_array[i].start_idx;
int end_idx = grid_cell_array[i].end_idx;
float m_occ_accum = subtract(weight_array_accum, start_idx, end_idx);
float rho_p = grid_cell_array[i].pers_occ_mass;
float mu_A = calc_norm_assoc(m_occ_accum, rho_p);
float mu_UA = calc_norm_unassoc(grid_cell_array[i]);
set_normalization_components(grid_cell_array, i, mu_A, mu_UA);
//printf("mu_A: %f, mu_UA: %f\n", mu_A, mu_UA);
}
}
// Pass 3 (grid-stride, one iteration per particle): replace each unnormalized
// weight with its normalized value using the per-cell factors from pass 2.
__global__ void updatePersistentParticlesKernel3(Particle* particle_array, MeasurementCell* meas_cell_array, GridCell* grid_cell_array,
float* weight_array, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
weight_array[i] = normalize(particle_array[i], grid_cell_array, meas_cell_array, weight_array[i]);
}
}
| 959c563344b4887bb83c3b36ad72aaae1f2d3d16.cu | /*
MIT License
Copyright (c) 2019 Michael Kösel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "kernel/update_persistent_particles.h"
#include "common.h"
#include "cuda_utils.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Association normalizer: pers_occ_mass / accumulated occupancy mass of the
// cell; 0 when the cell accumulated no occupancy mass.
__device__ float calc_norm_assoc(float occ_accum, float rho_p)
{
    if (occ_accum > 0.0)
    {
        return rho_p / occ_accum;
    }
    return 0.0;
}
// Unassociated normalizer: persistent fraction of the cell's occupancy mass.
__device__ float calc_norm_unassoc(const GridCell& grid_cell)
{
return grid_cell.occ_mass > 0.0 ? grid_cell.pers_occ_mass / grid_cell.occ_mass : 0.0;
}
// Store both normalization factors on the grid cell for use in kernel 3.
__device__ void set_normalization_components(GridCell* grid_cell_array, int i, float mu_A, float mu_UA)
{
grid_cell_array[i].mu_A = mu_A;
grid_cell_array[i].mu_UA = mu_UA;
}
// Unnormalized persistent weight: cell likelihood times particle weight.
__device__ float update_unnorm(Particle* particle_array, int i, MeasurementCell* meas_cell_array)
{
Particle& particle = particle_array[i];
return meas_cell_array[particle.grid_cell_idx].likelihood * particle.weight;
}
// Normalized weight: p_A * mu_A * weight + (1 - p_A) * mu_UA * particle.weight,
// where `weight` is the likelihood-scaled value from update_unnorm and the
// unassociated term uses the prior particle.weight.
__device__ float normalize(Particle& particle, GridCell* grid_cell_array, MeasurementCell* meas_cell_array, float weight)
{
GridCell& cell = grid_cell_array[particle.grid_cell_idx];
MeasurementCell& meas_cell = meas_cell_array[particle.grid_cell_idx];
return meas_cell.p_A * cell.mu_A * weight + (1.0 - meas_cell.p_A) * cell.mu_UA * particle.weight;
}
// Pass 1 (grid-stride per particle): unnormalized weights into weight_array.
__global__ void updatePersistentParticlesKernel1(Particle* particle_array, MeasurementCell* meas_cell_array, float* weight_array,
int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
weight_array[i] = update_unnorm(particle_array, i, meas_cell_array);
}
}
// Pass 2 (grid-stride per cell): accumulate the cell's occupancy mass from
// the prefix-summed weights and derive the normalization factors.
__global__ void updatePersistentParticlesKernel2(GridCell* grid_cell_array, float* weight_array_accum, int cell_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < cell_count; i += blockDim.x * gridDim.x)
{
int start_idx = grid_cell_array[i].start_idx;
int end_idx = grid_cell_array[i].end_idx;
float m_occ_accum = subtract(weight_array_accum, start_idx, end_idx);
float rho_p = grid_cell_array[i].pers_occ_mass;
float mu_A = calc_norm_assoc(m_occ_accum, rho_p);
float mu_UA = calc_norm_unassoc(grid_cell_array[i]);
set_normalization_components(grid_cell_array, i, mu_A, mu_UA);
//printf("mu_A: %f, mu_UA: %f\n", mu_A, mu_UA);
}
}
// Pass 3 (grid-stride per particle): normalize the weights in place.
__global__ void updatePersistentParticlesKernel3(Particle* particle_array, MeasurementCell* meas_cell_array, GridCell* grid_cell_array,
float* weight_array, int particle_count)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < particle_count; i += blockDim.x * gridDim.x)
{
weight_array[i] = normalize(particle_array[i], grid_cell_array, meas_cell_array, weight_array[i]);
}
}
|
49e1a98a865aff7d29454696c2eed3a4652fa7fe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*----------
* Streams - simple multi-stream example
* GPU Pro Tip: CUDA 7 Streams simplify concurrency
* NVIDIA Developer Blog
* Autor: Mark Harris
* ----------
* Universidad del Valle
* Programación de Microprocesadores
* Mod.: K.Barrera, J.Celada
* Semestre 2 2020
* ----------
*/
#include <stdio.h>
#include <math.h>
//(const int N = 1 << 20;
const int N = 100;
// Grid-stride kernel: x[i] = sqrt(pi^i) for i in [0, n).
// sqrt/pow here are the double-precision overloads even though x is float --
// slow on consumer GPUs, but kept so results stay bit-identical.
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
// Multi-stream demo: one worker kernel + async D2H copy per stream, plus a
// dummy kernel on the default stream between the per-stream launches.
int main()
{
    const int num_streams = 2;
    hipStream_t streams[num_streams];
    float *h_data[num_streams], *d_data[num_streams];
    for (int i = 0; i < num_streams; i++)
    {
        hipStreamCreate(&streams[i]);
        h_data[i] = (float *)malloc(N*sizeof(float));
        hipMalloc(&d_data[i], N * sizeof(float));
        // launch one worker kernel per stream
        hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, streams[i], d_data[i], N);
        hipMemcpyAsync(h_data[i], d_data[i], N*sizeof(float),hipMemcpyDeviceToHost, streams[i]);
        // launch a dummy kernel on the default stream (n == 0, so it is a no-op)
        hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, 0, 0);
    }
    // BUGFIX: the copies above are asynchronous; wait for all streams before
    // the host reads h_data, otherwise the printout races the DMA transfers.
    hipDeviceSynchronize();
    for(int i = 0; i < N; i++)
        printf("Value %d is: %f\n", i, h_data[0][i]);
    // Release per-stream resources (previously leaked) before the reset.
    for (int i = 0; i < num_streams; i++)
    {
        hipStreamDestroy(streams[i]);
        hipFree(d_data[i]);
        free(h_data[i]);
    }
    hipDeviceReset();
    return 0;
} | 49e1a98a865aff7d29454696c2eed3a4652fa7fe.cu | /*----------
* Streams - simple multi-stream example
* GPU Pro Tip: CUDA 7 Streams simplify concurrency
* NVIDIA Developer Blog
* Autor: Mark Harris
* ----------
* Universidad del Valle
* Programación de Microprocesadores
* Mod.: K.Barrera, J.Celada
* Semestre 2 2020
* ----------
*/
#include <stdio.h>
#include <math.h>
//(const int N = 1 << 20;
const int N = 100;
// Grid-stride kernel: x[i] = sqrt(pi^i) for i in [0, n).
// sqrt/pow are the double-precision overloads even though x is float --
// slow on consumer GPUs, but kept so results stay bit-identical.
__global__ void kernel(float *x, int n)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
x[i] = sqrt(pow(3.14159,i));
}
}
// Multi-stream demo: one worker kernel + async D2H copy per stream, plus a
// dummy kernel on the default stream between the per-stream launches.
int main()
{
    const int num_streams = 2;
    cudaStream_t streams[num_streams];
    float *h_data[num_streams], *d_data[num_streams];
    for (int i = 0; i < num_streams; i++)
    {
        cudaStreamCreate(&streams[i]);
        h_data[i] = (float *)malloc(N*sizeof(float));
        cudaMalloc(&d_data[i], N * sizeof(float));
        // launch one worker kernel per stream
        kernel<<<1, 64, 0, streams[i]>>>(d_data[i], N);
        cudaMemcpyAsync(h_data[i], d_data[i], N*sizeof(float),cudaMemcpyDeviceToHost, streams[i]);
        // launch a dummy kernel on the default stream (n == 0, so it is a no-op)
        kernel<<<1, 1>>>(0, 0);
    }
    // BUGFIX: the copies above are asynchronous; wait for all streams before
    // the host reads h_data, otherwise the printout races the DMA transfers.
    cudaDeviceSynchronize();
    for(int i = 0; i < N; i++)
        printf("Value %d is: %f\n", i, h_data[0][i]);
    // Release per-stream resources (previously leaked) before the reset.
    for (int i = 0; i < num_streams; i++)
    {
        cudaStreamDestroy(streams[i]);
        cudaFree(d_data[i]);
        free(h_data[i]);
    }
    cudaDeviceReset();
    return 0;
} |
45d0a0d8684036cff87c37fb240df754755ba81c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// One thread per element: sample a component index by inverse-CDF over the
// cumulative frac*xs mass -- pick the first i whose running sum reaches
// cutoff = total * ran, falling back to the last component.
// NOTE(review): there is no bounds guard on idx, so the launch configuration
// must exactly cover the arrays.
__global__ void select(const double* ran, const double* total, const int* size,
                       double** frac, double** xs, int* selection) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    double cutoff = total[idx] * ran[idx];
    double accum = 0;
    const double* el_frac = frac[idx];
    const double* el_xs = xs[idx];
    int el_size = size[idx];
    for (int i = 0; i < el_size - 1; ++i) {
        accum += el_frac[i] * el_xs[i];
        // BUGFIX: stop at the first hit.  The original kept looping and then
        // unconditionally wrote el_size - 1 below, discarding the sampled
        // index every time.
        if (accum >= cutoff) {
            selection[idx] = i;
            return;
        }
    }
    selection[idx] = el_size - 1;
}
| 45d0a0d8684036cff87c37fb240df754755ba81c.cu | __global__ void select(const double* ran, const double* total, const int* size,
// Inverse-CDF sampling of a component index per thread:
// cutoff = total * ran; walk the cumulative frac*xs mass.
double** frac, double** xs, int* selection) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
double cutoff = total[idx] * ran[idx];
double accum = 0;
const double* el_frac = frac[idx];
const double* el_xs = xs[idx];
int el_size = size[idx];
for (int i = 0; i < el_size - 1; ++i) {
accum += el_frac[i] * el_xs[i];
// NOTE(review): no break/return here, and the write below is unconditional,
// so any index chosen in this loop is always clobbered with el_size - 1 --
// this looks like a bug (the loop body likely needs an early return).
if (accum >= cutoff) selection[idx] = i;
}
selection[idx] = el_size - 1;
}
|
66fae664cbb3ffbc988342e41236b1af263c645c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include "kernel.h"
__constant__ double SENSOR2;
__constant__ int NAHO;
__device__ int selectedCounts[MACRO_NMAX];
__device__ double tmpPhero_d[MACRO_MAX][MACRO_MAX];
__device__ hiprandState_t rnd_state[MACRO_NMAX];
//Misc
__device__ bool isGotFood(Food& food);
__device__ double atomicAddDouble(double* address, double val);
__device__ enum Direction genDirRand(int id);
__device__ double genProbRand(int id);
__device__ int genAntNumRand(int id);
__device__ double degToRad(double a);
__device__ double dist(Cell a,Cell b);
__device__ double distCandP(Cell a,double x,double y);
__device__ bool isOppositeDir(enum Direction nestDir,enum Direction dir);
__device__ bool isOppositeDir(Cell& cell, enum Direction dir);
__device__ enum Direction selectNextDir(Cell& cell, enum Direction dir);
__device__ double hilFunc(double x,double alpha);
//Initializer
__host__ void getDevicePtrs();
__global__ void randInit();
__global__ void antsInit();
__global__ void cellsInit();
__global__ void setNest();
__global__ void setDistFromNest();
__global__ void setNestDirs();
__global__ void setNearestDirFromNest();
__global__ void setFoodsDir();
//Calculation functions
__global__ void selectAnts();
__global__ void naturalFoodDecrease();
__global__ void evapolation();
__global__ void chemotaxis();
__global__ void diffusion();
__global__ void pheroUpdate();
// One simulation step: food regrowth, pheromone evaporation, per-step ant
// selection, chemotaxis (movement + deposition), then the diffusion pass and
// its double-buffer commit.  All launches go to the default stream, so the
// kernels execute in this order.
__host__ void calculation(){
hipLaunchKernelGGL(( naturalFoodDecrease), dim3(1),dim3(MACRO_NUM_FOODS), 0, 0, );
hipLaunchKernelGGL(( evapolation), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
//sortKeyInit<<<1,MACRO_NMAX>>>();
//thrust::sort_by_key(sort_key_d_ptr, sort_key_d_ptr + MACRO_NMAX, ants_d_ptr);
hipLaunchKernelGGL(( selectAnts), dim3(1),dim3(MACRO_NMAX), 0, 0, );
hipLaunchKernelGGL(( chemotaxis), dim3(1),dim3(MACRO_NMAX), 0, 0, );
//hipMemcpyFromSymbol(cells,cells_d,MACRO_MAX*MACRO_MAX*sizeof(Cell),0);
//chemotaxis();
//hipMemcpyToSymbol(cells_d,cells,MACRO_MAX*MACRO_MAX*sizeof(Cell),0);
hipLaunchKernelGGL(( diffusion), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( pheroUpdate), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
}
//Initialize
// Resolve the addresses of the __device__ symbol arrays and wrap them as
// thrust::device_ptr so thrust algorithms (e.g. sort_by_key) can use them.
__host__ void getDevicePtrs(){
hipGetSymbolAddress((void**)&sort_key_d_ptr_raw, sort_key_d);
sort_key_d_ptr = thrust::device_ptr<unsigned int>(sort_key_d_ptr_raw);
hipGetSymbolAddress((void**)&seeds_d_ptr_raw, seeds_d);
seeds_d_ptr = thrust::device_ptr<unsigned long long int>(seeds_d_ptr_raw);
hipGetSymbolAddress((void**)&ants_d_ptr_raw, ants_d);
ants_d_ptr = thrust::device_ptr<Ant>(ants_d_ptr_raw);
hipGetSymbolAddress((void**)&cells_d_ptr_raw, cells_d);
cells_d_ptr = thrust::device_ptr<Cell>(cells_d_ptr_raw);
hipGetSymbolAddress((void**)&foods_d_ptr_raw, foods_d);
foods_d_ptr = thrust::device_ptr<Food>(foods_d_ptr_raw);
}
// Seed each thread's private RNG state from the per-ant seed array.
__global__ void randInit(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(seeds_d[id],0,0,&rnd_state[id]);
}
// Reset every ant to its initial state: foraging at the nest cell, with a
// random heading and cleared homing memory.  The first NAHO ants (NAHO is a
// __constant__ set from the host) are marked with the FOOL_CH characteristic.
__global__ void antsReset(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
ants_d[id].status = FORAGE;
ants_d[id].i = MACRO_NEST_Y;
ants_d[id].j = MACRO_NEST_X;
ants_d[id].searchTime = 0;
ants_d[id].dir = genDirRand(id);
for (int i=0; i<MACRO_NUM_FOODS; i++){
ants_d[id].homing[i] = 0;
}
if(id<NAHO){
ants_d[id].ch = FOOL_CH;
}
else {
ants_d[id].ch = NORMAL_CH;
}
}
// Clear the pheromone field.  Launched as <<<MACRO_MAX, MACRO_MAX>>>: each
// thread owns one cell (row = threadIdx.x, column = blockIdx.x).
__global__ void cellsReset(){
    const int row = threadIdx.x;
    const int col = blockIdx.x;
    cells_d[row][col].phero = 0.0;
}
// One-time per-cell initialization: clear food/status/edge/nest fields and
// compute the cell's Cartesian position on the hex lattice (columns are
// sqrt(3)/2 apart; odd columns are shifted half a cell vertically).
__global__ void cellsInit(){
const int i = threadIdx.x;
const int j = blockIdx.x;
cells_d[i][j].foodNo = -1;
cells_d[i][j].status = NORMAL_CELL;
//Cell number initialize
cells_d[i][j].i = i;
cells_d[i][j].j = j;
//Cartesian initialize
cells_d[i][j].cart.x = (j-MACRO_CART_X_ZERO)*(sqrt(3.0)/2.0);
cells_d[i][j].cart.y = (abs(j-MACRO_CART_X_ZERO)%2)/2.0+(i-MACRO_CART_Y_ZERO);
//Edge initialize
cells_d[i][j].edge = NONE;
//Nest Dir initialize
cells_d[i][j].nestDir = NONE;
cells_d[i][j].distFromNest = 0.0;
}
// Flag which neighbour directions fall off the grid for border cells.  The
// diagonal flags depend on column parity because of the hex-lattice offset.
__global__ void setEdges(){
const int i = threadIdx.x;
const int j = blockIdx.x;
if(i==MACRO_MAX-1){ //For upper edge
cells_d[i][j].edge |= UP;
if(abs((j-MACRO_CART_X_ZERO)%2)==1){
cells_d[i][j].edge |= (UPLEFT | UPRIGHT);
}
}
else if(i==0){//For lower edge
cells_d[i][j].edge |= LOW;
if(abs((j-MACRO_CART_X_ZERO)%2)==0){
cells_d[i][j].edge |= LOWLEFT | LOWRIGHT;
}
}
if(j==0){//For left edge
cells_d[i][j].edge |= LEFT;
}
else if(j==MACRO_MAX-1){//For right edge
cells_d[i][j].edge |= RIGHT;
}
}
// Mark the nest cell and its six neighbours.  Only the one thread whose (i,j)
// matches the nest coordinates does any work; the rest exit immediately.
__global__ void setNest(){
const int i = threadIdx.x;
const int j = blockIdx.x;
Cell* c;
if(i==MACRO_NEST_Y && j==MACRO_NEST_X){
cells_d[MACRO_NEST_Y][MACRO_NEST_X].status |= NEST_CELL;
for(enum Direction d = UP; d<=UPLEFT; (d<<=1) ){
c = getCell(cells_d,MACRO_NEST_Y,MACRO_NEST_X,d);
c->status |= NEST_NEIGHBOUR_CELL;
}
}
}
// Cache every cell's Euclidean distance to the nest cell (one thread/cell).
__global__ void setDistFromNest(){
    const int i = threadIdx.x;
    const int j = blockIdx.x;
    cells_d[i][j].distFromNest = dist(cells_d[i][j], cells_d[MACRO_NEST_Y][MACRO_NEST_X]);
}
// 2-D dot product of two Cartesian vectors.
__device__ double dot(Cartesian a, Cartesian b) {
    double xx = a.x * b.x;
    double yy = a.y * b.y;
    return xx + yy;
}
// 2-D cross product (z-component) of two Cartesian vectors.
__device__ double cross(Cartesian a, Cartesian b) {
    double xy = a.x * b.y;
    double yx = a.y * b.x;
    return xy - yx;
}
// For each cell (except those near the nest), flag the directions whose unit
// vector has a small dot product (<= 0.3) with the normalized cell-to-nest
// vector, i.e. directions that point sideways/away from the nest.
// NOTE(review): `d` is only assigned in the six named cases; the default
// branch leaves it uninitialized -- unreachable for valid Direction values,
// but worth an assert if the enum ever grows.
// NOTE(review): the diagonals use tan(M_PI/4.0) (45 deg); for a hex lattice
// 30/60 deg might be expected -- confirm this matches the lattice geometry.
__global__ void setCriticalAngle() {
const int i = threadIdx.x;
const int j = blockIdx.x;
cells_d[i][j].criticalAngle = NONE;
if( (cells_d[i][j].status&NEAR_NEST)!=NORMAL_CELL ){
return;
}
// c := unit vector from this cell toward the nest (cart is nest-relative).
Cartesian c = cells_d[i][j].cart;
c.x = -c.x/cells_d[i][j].distFromNest;
c.y = -c.y/cells_d[i][j].distFromNest;
for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ) {
Cartesian d;
switch (dir) {
case UP:
d.x = 0;
d.y = 1;
break;
case UPRIGHT:
d.x = 1;
d.y = tan(M_PI/4.0);
break;
case LOWRIGHT:
d.x = 1;
d.y = -tan(M_PI/4.0);
break;
case LOW:
d.x = 0;
d.y = -1;
break;
case LOWLEFT:
d.x = -1;
d.y = -tan(M_PI/4.0);
break;
case UPLEFT:
d.x = -1;
d.y = tan(M_PI/4.0);
break;
default:
break;
}
// Normalize the direction vector before comparing angles.
d.x = d.x/sqrt(dot(d,d));
d.y = d.y/sqrt(dot(d,d));
double dotVal = dot(c,d);
if (dotVal<=0.3){
cells_d[i][j].criticalAngle |= dir;
}
}
}
// For every cell, precompute -- for each direction flagged in criticalAngle --
// the neighbour direction that leads closest back toward the nest.  One
// thread per cell; slot itr of the list corresponds to the itr-th direction.
__global__ void setNearestDirFromNest(){
    const int i = threadIdx.x;
    const int j = blockIdx.x;
    Cell& c = cells_d[i][j];
    // Clear all six slots first.
    for (int itr=0; itr<6; itr++){
        c.nearestDirFromNestList[itr] = NONE;
    }
    // BUGFIX 1: the original test `c.criticalAngle&dir == NONE` parsed as
    // `c.criticalAngle & (dir == NONE)` because == binds tighter than &, so
    // the filter never fired; the mask test must be parenthesized.
    // BUGFIX 2: `dir <<= 1` sat at the end of the loop body, after the
    // `continue`, so a skipped direction would never advance and the loop
    // would spin forever; advance dir (and the slot index) in the
    // for-statement instead, keeping them in lockstep as before.
    enum Direction dir = UP;
    for(int itr=0; dir<=UPLEFT; (dir<<=1), itr++) {
        if ( (c.criticalAngle&dir) == NONE ){
            continue;
        }
        c.nearestDirFromNestList[itr] = selectNextDir(c, dir);
    }
}
// For each cell, record which neighbour direction(s) minimize the distance to
// the nest.  Ties within MACRO_EPS are OR-ed together; a strictly smaller
// distance replaces the accumulated set.
__global__ void setNestDirs(){
const int i = threadIdx.x;
const int j = blockIdx.x;
Cell *c;
double d = cells_d[i][j].distFromNest;
double tmp;
for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
c = getCell(cells_d,i,j,dir);
tmp=c->distFromNest;
if( fabs(tmp-d)<MACRO_EPS ){
cells_d[i][j].nestDir |= dir;
}
else if(tmp<d) {
cells_d[i][j].nestDir = dir;
d = tmp;
}
}
}
// Refill every food source to its initial volume (one thread per food).
__global__ void foodsReset(){
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    foods_d[idx].vol = MACRO_FOODSOURCE;
}
// Place food source i on the grid: foods are spaced MACRO_FOOD_ANGLE degrees
// apart on a circle of radius MACRO_FOOD_DIST; find the cell nearest that
// Cartesian point, assign the food to it, and flag its six neighbours.
// NOTE(review): the inner `break` only exits the k-loop, so later rows can
// still overwrite nearCell with another in-range cell -- the final refinement
// loop mostly compensates, but confirm the first-hit behaviour is intended.
// NOTE(review): the empty `if(nearCell==NULL){}` branch silently drops foods
// that land outside the grid.
__global__ void setFoodsDir(){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const double dtheta = degToRad(MACRO_FOOD_ANGLE);
Cell *nearCell=NULL;
double x,y;
x=MACRO_FOOD_DIST * cos(i*dtheta);
y=MACRO_FOOD_DIST * sin(i*dtheta);
for(int j=0; j<MACRO_MAX; j++){
for(int k=0; k<MACRO_MAX; k++){
if(distCandP(cells_d[j][k],x,y)<=sqrt(3.0)/3.0+MACRO_EPS){
nearCell = &cells_d[j][k];
break;
}
}
}
if(nearCell==NULL){
}
else{
// Refine: among the candidate's six neighbours, pick the truly closest cell.
Cell *c=NULL;
double d = distCandP(*nearCell,x,y);
int j = nearCell->i;
int k = nearCell->j;
for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
c = getCell(cells_d,j,k,dir);
if( distCandP(*c,x,y)<d ){
nearCell = c;
d = distCandP(*nearCell,x,y);
}
}
foods_d[i].i = nearCell->i;
foods_d[i].j = nearCell->j;
nearCell->foodNo = i;
nearCell->status |= FOOD_CELL;
for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
c = getCell(cells_d,foods_d[i].i,foods_d[i].j,dir);
c->foodNo = i;
c->status |= FOOD_NEIGHBOUR_CELL;
}
}
}
//Calculation
// Each thread draws a random ant index and bumps that ant's activation count
// for this step (chemotaxis later runs each ant selectedCounts[id] times).
// NOTE(review): selectedCounts is not reset anywhere visible in this chunk --
// confirm it is zeroed between steps.
__global__ void selectAnts(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
int rnd = genAntNumRand(id);
atomicAdd(&selectedCounts[rnd], 1);
}
__global__ void sortKeyInit(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
sort_key_d[id] = hiprand(&rnd_state[id]);
//printf("id:%d,%u\n",id,sort_key_d[id]);
}
__global__ void diffusion(){
const int i = blockIdx.x;
const int j = threadIdx.x;
double tmp = 0.0;
for (enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
tmp += getCell(cells_d,i,j,dir)->phero;
}
tmpPhero_d[i][j] = cells_d[i][j].phero+MACRO_DIFFE*(tmp/6.0-cells_d[i][j].phero);
}
__global__ void pheroUpdate(){
const int i = blockIdx.x;
const int j = threadIdx.x;
cells_d[i][j].phero = tmpPhero_d[i][j];
}
__global__ void naturalFoodDecrease(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
foods_d[id].vol=foods_d[id].vol+MACRO_REC-foods_d[id].vol*(MACRO_REC/100.0);
}
__global__ void evapolation(){
const int i = blockIdx.x;
const int j = threadIdx.x;
cells_d[i][j].phero *= (1.0-MACRO_EVAPOLATION_CONST);
}
__global__ void chemotaxis(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
Ant *ant = &(ants_d[id]);
for(int dummy=0; dummy<selectedCounts[id]; dummy++){
ant->searchTime++;
int i = ant->i;
int j = ant->j;
enum Direction dir = ant->dir;
enum Direction nestDir = cells_d[i][j].nestDir;
double leftPhero, frontPhero, rightPhero;
Cell *leftCell = getCell(cells_d,i,j,left(dir));
Cell *frontCell = getCell(cells_d,i,j,dir);
Cell *rightCell = getCell(cells_d,i,j,right(dir));
if(
ant->searchTime>=MACRO_MAX_SEARCH_TIME
&& ant->status!=EMERGENCY
){
ant->status = EMERGENCY;
}
if(ant->status==GOHOME){
atomicAddDouble(&(cells_d[i][j].phero),MACRO_EMI*MACRO_ENEST);
}
__threadfence();
if(ant->status==RANDOM_SEARCH){
leftPhero = 1.0;
frontPhero = 1.0;
rightPhero = 1.0;
}
else {
leftPhero = leftCell->phero;
frontPhero = frontCell->phero;
rightPhero = rightCell->phero;
}
if( (ant->status==GOHOME || ant->status==EMERGENCY) && isOppositeDir(cells_d[i][j], dir)){
enum Direction nextDir = cells_d[i][j].nearestDirFromNestList[dirToNum(dir)];
if( nextDir == left(dir) ){
ant->dir = left(dir);
frontCell = leftCell;
}
else if( nextDir == right(dir) ){
ant->dir = right(dir);
frontCell = rightCell;
}
else{
if(genProbRand(id)<=0.5){
ant->dir = right(dir);
frontCell = rightCell;
}
else{
ant->dir = left(dir);
frontCell = leftCell;
}
}
ant->i = frontCell->i;
ant->j = frontCell->j;
}
else{
double s1,s2,s3,s12,t,tot,rand;
if(ant->ch == NORMAL_CH){
t = MACRO_HIL_CONST;
}
else{
t = SENSOR2*MACRO_HIL_CONST;
}
s1=hilFunc(leftPhero,t);
s2=hilFunc(frontPhero,t);
s3=hilFunc(rightPhero,t);
/*
if(s1<MACRO_EPS && s2<MACRO_EPS && s3<MACRO_EPS){
s1=1.0;
s2=1.0;
s3=1.0;
}
*/
tot = s1+s2+s3;
s1/=tot;
s2/=tot;
s12=s1+s2;
rand=genProbRand(id);
if(rand<=s1){
ant->dir = left(dir);
ant->i = leftCell->i;
ant->j = leftCell->j;
}
else if(rand<=s12){
ant->i = frontCell->i;
ant->j = frontCell->j;
}
else{
ant->dir = right(dir);
ant->i = rightCell->i;
ant->j = rightCell->j;
}
}
if( (cells_d[ant->i][ant->j].status&NEAR_FOOD)!=NORMAL_CELL
&& foods_d[ cells_d[ant->i][ant->j].foodNo ].vol>=0.1
&& (ant->status != GOHOME && ant->status != EMERGENCY) ){
//atomicAddDouble(&(foods_d[ cells_d[ant->i][ant->j].foodNo ].vol),-MACRO_UNIT);
//ant->status = GOHOME;
//ant->searchTime = 0;
int fNo = cells_d[ant->i][ant->j].foodNo;
if(isGotFood(foods_d[fNo])){
ant->status = GOHOME;
ant->searchTime = 0;
ant->_foodNo = fNo;
ant->dir = left(left(left(dir)));
}
}
__threadfence();
if( (cells_d[ant->i][ant->j].status&NEAR_NEST)!=NORMAL_CELL
&& (ant->status == GOHOME || ant->status == EMERGENCY)){
if(ant->status == GOHOME){
ant->homing[ant->_foodNo]++;
//atomicAddDouble(&(cells_d[i][j].phero),MACRO_EMI*MACRO_ENEST);
}
ant->status = FORAGE;
ant->searchTime = 0;
ant->dir = genDirRand(id);
ant->i = MACRO_NEST_Y;
ant->j = MACRO_NEST_X;
}
}
selectedCounts[id] = 0;
}
//DataHandler
__device__ __host__ enum Direction operator<<(enum Direction d, int i){
return static_cast<enum Direction>(static_cast<int>(d)<<i);
}
__device__ __host__ enum Direction operator>>(enum Direction d, int i){
return static_cast<enum Direction>(static_cast<int>(d)>>i);
}
__device__ __host__ enum Direction operator|(enum Direction d1, enum Direction d2){
return static_cast<enum Direction>(static_cast<int>(d1)|static_cast<int>(d2));
}
__device__ __host__ enum Direction operator&(enum Direction d1, enum Direction d2){
return static_cast<enum Direction>(static_cast<int>(d1)&static_cast<int>(d2));
}
__device__ __host__ enum Direction& operator|=(enum Direction& d1, enum Direction d2){
d1 = (d1 | d2);
return d1;
}
__device__ __host__ enum Direction& operator&=(enum Direction& d1, enum Direction d2){
d1 = (d1 & d2);
return d1;
}
__device__ __host__ enum Direction& operator<<=(enum Direction& d1, int i){
d1 = (d1 << i);
return d1;
}
__device__ __host__ enum Direction& operator>>=(enum Direction& d1, int i){
d1 = (d1 >> i);
return d1;
}
__device__ __host__ bool operator<=(enum Direction d1, enum Direction d2){
return (static_cast<int>(d1) <= static_cast<int>(d2));
}
__device__ __host__ enum CELLStatus operator<<(enum CELLStatus d, int i){
return static_cast<enum CELLStatus>(static_cast<int>(d)<<i);
}
__device__ __host__ enum CELLStatus operator>>(enum CELLStatus d, int i){
return static_cast<enum CELLStatus>(static_cast<int>(d)>>i);
}
__device__ __host__ enum CELLStatus operator|(enum CELLStatus d1, enum CELLStatus d2){
return static_cast<enum CELLStatus>(static_cast<int>(d1)|static_cast<int>(d2));
}
__device__ __host__ enum CELLStatus operator&(enum CELLStatus d1, enum CELLStatus d2){
return static_cast<enum CELLStatus>(static_cast<int>(d1)&static_cast<int>(d2));
}
__device__ __host__ enum CELLStatus& operator|=(enum CELLStatus& d1, enum CELLStatus d2){
d1 = (d1 | d2);
return d1;
}
__device__ __host__ enum CELLStatus& operator&=(enum CELLStatus& d1, enum CELLStatus d2){
d1 = (d1 & d2);
return d1;
}
__device__ __host__ __forceinline__ enum Direction left(enum Direction dir){
if(dir == UP){
return UPLEFT;
}
else{
return (dir >> 1)&ALL_DIR;
}
}
__device__ __host__ __forceinline__ enum Direction right(enum Direction dir){
if(dir == UPLEFT){
return UP;
}
else{
return (dir << 1)&ALL_DIR;
}
}
__device__ __host__ __forceinline__ Cell* up(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
if( (cells[i][j].edge&UP)!=NONE ){
return &cells[0][j];
}
else{
return &cells[i+1][j];
}
}
__device__ __host__ __forceinline__ Cell* upright(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&UPRIGHT)!=NONE ){
jj = 0;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii = i+1;
if(ii==MACRO_MAX){
ii = 0;
}
}
}
else{
jj = j+1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii = i+1;
}
}
return &cells[ii][jj];
}
__device__ __host__ __forceinline__ Cell* lowright(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&LOWRIGHT)!=NONE ){
jj = 0;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
if(ii<0){
ii=MACRO_MAX-1;
}
}
else{
ii = i;
}
}
else{
jj = j+1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
}
else{
ii = i;
}
}
return &cells[ii][jj];
}
__device__ __host__ __forceinline__ Cell* low(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
if( (cells[i][j].edge&LOW)!=NONE ){
return &cells[MACRO_MAX-1][j];
}
else{
return &cells[i-1][j];
}
}
__device__ __host__ __forceinline__ Cell* lowleft(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&LOWLEFT)!=NONE ){
jj = MACRO_MAX-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
if(ii<0){
ii = MACRO_MAX-1;
}
}
else{
ii = i;
}
}
else{
jj = j-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
}
else{
ii=i;
}
}
return &cells[ii][jj];
}
__device__ __host__ __forceinline__ Cell* upleft(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&UPLEFT)!=NONE ){
jj = MACRO_MAX-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii= i+1;
if(ii==MACRO_MAX){
ii=0;
}
}
}
else{
jj = j-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii = i+1;
}
}
return &cells[ii][jj];
}
__device__ __host__ Cell* getCell(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j, enum Direction dir){
switch (dir){
case UP:
return up(cells,i,j);
case UPRIGHT:
return upright(cells,i,j);
case LOWRIGHT:
return lowright(cells,i,j);
case LOW:
return low(cells,i,j);
case LOWLEFT:
return lowleft(cells,i,j);
case UPLEFT:
return upleft(cells,i,j);
default:
return NULL;
}
}
__device__ __host__ int dirToNum(enum Direction dir){
switch (dir){
case UP:
return 0;
case UPRIGHT:
return 1;
case LOWRIGHT:
return 2;
case LOW:
return 3;
case LOWLEFT:
return 4;
case UPLEFT:
return 5;
default:
return -1;
}
}
//Misc
__device__ __forceinline__ bool isGotFood(Food& food){
unsigned long long int* address_as_ull =
(unsigned long long int*)(&(food.vol));
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
if(__longlong_as_double(assumed)<0.1){
return false;
}
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(-MACRO_UNIT + __longlong_as_double(assumed)));
} while (assumed != old);
return true;
}
__device__ __forceinline__ double atomicAddDouble(double* address, double val){
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ __forceinline__ enum Direction genDirRand(int id){
return static_cast<enum Direction>(1 << (hiprand(&rnd_state[id])%6));
}
__device__ __forceinline__ double genProbRand(int id){
return hiprand_uniform_double(&rnd_state[id]);
}
__device__ __forceinline__ int genAntNumRand(int id){
return hiprand(&rnd_state[id])%MACRO_NMAX;
}
__device__ __forceinline__ double degToRad(double a) {
return a * M_PI / 180.0;
}
__device__ __forceinline__ double dist(Cell a,Cell b){
return sqrt( (a.cart.x - b.cart.x)*(a.cart.x - b.cart.x)
+ (a.cart.y - b.cart.y)*(a.cart.y - b.cart.y) );
}
__device__ __forceinline__ double distCandP(Cell a,double x,double y){
return sqrt( (a.cart.x - x)*(a.cart.x - x)
+ (a.cart.y - y)*(a.cart.y - y) );
}
__device__ __forceinline__ bool isOppositeDir(enum Direction nestDir,enum Direction dir){
//If theta = 60 deg., this is OK.
if( (dir&nestDir) !=NONE
|| (left(dir)&nestDir) !=NONE
|| (right(dir)&nestDir) !=NONE){
return false;
}
else{
return true;
}
}
__device__ __forceinline__ bool isOppositeDir(Cell& cell, enum Direction dir){
if ( (cell.criticalAngle & dir)==dir ){
return true;
}
else{
return false;
}
}
__device__ __forceinline__ enum Direction selectNextDir(Cell& cell, enum Direction dir){
int rightCount = 0;
int leftCount = 0;
for (enum Direction currentDir=right(dir); currentDir!=dir; currentDir=right(currentDir)){
if( (cell.criticalAngle & currentDir)!=currentDir ){
break;
}
rightCount++;
}
for (enum Direction currentDir=left(dir); currentDir!=dir; currentDir=left(currentDir)){
if( (cell.criticalAngle & currentDir)!=currentDir ){
break;
}
leftCount++;
}
if ( rightCount < leftCount ){
return right(dir);
}
else if ( rightCount > leftCount ){
return left(dir);
}
else{
return NONE;
}
}
__device__ __forceinline__ double hilFunc(double x,double alpha){
return pow(alpha*x+0.05,10);
}
__host__ void initialize(){
getDevicePtrs();
//antsInit<<<MACRO_NMAX,1>>>();
hipLaunchKernelGGL(( cellsInit), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setEdges), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setNest), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setDistFromNest), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setCriticalAngle), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setNearestDirFromNest), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setNestDirs), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( setFoodsDir), dim3(MACRO_NUM_FOODS),dim3(1), 0, 0, );
}
__host__ void reset(double sensor,int naho,unsigned long long int step){
hipMemcpyToSymbol(SENSOR2,&sensor,sizeof(double),0);
hipMemcpyToSymbol(NAHO,&naho,sizeof(int),0);
//initialize();
//antsInit<<<MACRO_NMAX,1>>>();
//cellsInit<<<MACRO_MAX,MACRO_MAX>>>();
//setEdges<<<MACRO_MAX,MACRO_MAX>>>();
//setNest<<<MACRO_MAX,MACRO_MAX>>>();
//setDistFromNest<<<MACRO_MAX,MACRO_MAX>>>();
//setNestDirs<<<MACRO_MAX,MACRO_MAX>>>();
//setFoodsDir<<<MACRO_NUM_FOODS,1>>>();
srand(MACRO_RND_SEED+step);
thrust::host_vector<unsigned long long int> seeds_vec_h(MACRO_NMAX);
std::generate(seeds_vec_h.begin(), seeds_vec_h.end(), rand);
thrust::copy(seeds_vec_h.begin(), seeds_vec_h.end(), seeds_d_ptr);
hipLaunchKernelGGL(( randInit), dim3(MACRO_NMAX),dim3(1), 0, 0, );
hipLaunchKernelGGL(( antsReset), dim3(MACRO_NMAX),dim3(1), 0, 0, );
hipLaunchKernelGGL(( cellsReset), dim3(MACRO_MAX),dim3(MACRO_MAX), 0, 0, );
hipLaunchKernelGGL(( foodsReset), dim3(MACRO_NUM_FOODS),dim3(1), 0, 0, );
}
| 66fae664cbb3ffbc988342e41236b1af263c645c.cu | #define _USE_MATH_DEFINES
#include "kernel.h"
__constant__ double SENSOR2;
__constant__ int NAHO;
__device__ int selectedCounts[MACRO_NMAX];
__device__ double tmpPhero_d[MACRO_MAX][MACRO_MAX];
__device__ curandState rnd_state[MACRO_NMAX];
//Misc
__device__ bool isGotFood(Food& food);
__device__ double atomicAddDouble(double* address, double val);
__device__ enum Direction genDirRand(int id);
__device__ double genProbRand(int id);
__device__ int genAntNumRand(int id);
__device__ double degToRad(double a);
__device__ double dist(Cell a,Cell b);
__device__ double distCandP(Cell a,double x,double y);
__device__ bool isOppositeDir(enum Direction nestDir,enum Direction dir);
__device__ bool isOppositeDir(Cell& cell, enum Direction dir);
__device__ enum Direction selectNextDir(Cell& cell, enum Direction dir);
__device__ double hilFunc(double x,double alpha);
//Initializer
__host__ void getDevicePtrs();
__global__ void randInit();
__global__ void antsInit();
__global__ void cellsInit();
__global__ void setNest();
__global__ void setDistFromNest();
__global__ void setNestDirs();
__global__ void setNearestDirFromNest();
__global__ void setFoodsDir();
//Calculation functions
__global__ void selectAnts();
__global__ void naturalFoodDecrease();
__global__ void evapolation();
__global__ void chemotaxis();
__global__ void diffusion();
__global__ void pheroUpdate();
// Host driver for one simulation tick. Each phase is a separate kernel launch
// on the default stream, so launch order gives the required serialization:
// food regrowth -> pheromone evaporation -> ant selection -> ant movement ->
// pheromone diffusion -> buffer swap.
__host__ void calculation(){
	naturalFoodDecrease<<<1,MACRO_NUM_FOODS>>>();   // regrow/decay food volumes
	evapolation<<<MACRO_MAX,MACRO_MAX>>>();         // scale all cell pheromone down
	//sortKeyInit<<<1,MACRO_NMAX>>>();
	//thrust::sort_by_key(sort_key_d_ptr, sort_key_d_ptr + MACRO_NMAX, ants_d_ptr);
	selectAnts<<<1,MACRO_NMAX>>>();                 // draw how often each ant acts this tick
	chemotaxis<<<1,MACRO_NMAX>>>();                 // move ants, deposit pheromone, pick up food
	//cudaMemcpyFromSymbol(cells,cells_d,MACRO_MAX*MACRO_MAX*sizeof(Cell),0);
	//chemotaxis();
	//cudaMemcpyToSymbol(cells_d,cells,MACRO_MAX*MACRO_MAX*sizeof(Cell),0);
	diffusion<<<MACRO_MAX,MACRO_MAX>>>();           // write diffused field into tmpPhero_d
	pheroUpdate<<<MACRO_MAX,MACRO_MAX>>>();         // copy tmpPhero_d back into cells_d
}
//Initialize
__host__ void getDevicePtrs(){
cudaGetSymbolAddress((void**)&sort_key_d_ptr_raw, sort_key_d);
sort_key_d_ptr = thrust::device_ptr<unsigned int>(sort_key_d_ptr_raw);
cudaGetSymbolAddress((void**)&seeds_d_ptr_raw, seeds_d);
seeds_d_ptr = thrust::device_ptr<unsigned long long int>(seeds_d_ptr_raw);
cudaGetSymbolAddress((void**)&ants_d_ptr_raw, ants_d);
ants_d_ptr = thrust::device_ptr<Ant>(ants_d_ptr_raw);
cudaGetSymbolAddress((void**)&cells_d_ptr_raw, cells_d);
cells_d_ptr = thrust::device_ptr<Cell>(cells_d_ptr_raw);
cudaGetSymbolAddress((void**)&foods_d_ptr_raw, foods_d);
foods_d_ptr = thrust::device_ptr<Food>(foods_d_ptr_raw);
}
__global__ void randInit(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seeds_d[id],0,0,&rnd_state[id]);
}
__global__ void antsReset(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
ants_d[id].status = FORAGE;
ants_d[id].i = MACRO_NEST_Y;
ants_d[id].j = MACRO_NEST_X;
ants_d[id].searchTime = 0;
ants_d[id].dir = genDirRand(id);
for (int i=0; i<MACRO_NUM_FOODS; i++){
ants_d[id].homing[i] = 0;
}
if(id<NAHO){
ants_d[id].ch = FOOL_CH;
}
else {
ants_d[id].ch = NORMAL_CH;
}
}
__global__ void cellsReset(){
const int i = threadIdx.x;
const int j = blockIdx.x;
cells_d[i][j].phero = 0.0;
}
__global__ void cellsInit(){
const int i = threadIdx.x;
const int j = blockIdx.x;
cells_d[i][j].foodNo = -1;
cells_d[i][j].status = NORMAL_CELL;
//Cell number initialize
cells_d[i][j].i = i;
cells_d[i][j].j = j;
//Cartesian initialize
cells_d[i][j].cart.x = (j-MACRO_CART_X_ZERO)*(sqrt(3.0)/2.0);
cells_d[i][j].cart.y = (abs(j-MACRO_CART_X_ZERO)%2)/2.0+(i-MACRO_CART_Y_ZERO);
//Edge initialize
cells_d[i][j].edge = NONE;
//Nest Dir initialize
cells_d[i][j].nestDir = NONE;
cells_d[i][j].distFromNest = 0.0;
}
__global__ void setEdges(){
const int i = threadIdx.x;
const int j = blockIdx.x;
if(i==MACRO_MAX-1){ //For upper edge
cells_d[i][j].edge |= UP;
if(abs((j-MACRO_CART_X_ZERO)%2)==1){
cells_d[i][j].edge |= (UPLEFT | UPRIGHT);
}
}
else if(i==0){//For lower edge
cells_d[i][j].edge |= LOW;
if(abs((j-MACRO_CART_X_ZERO)%2)==0){
cells_d[i][j].edge |= LOWLEFT | LOWRIGHT;
}
}
if(j==0){//For left edge
cells_d[i][j].edge |= LEFT;
}
else if(j==MACRO_MAX-1){//For right edge
cells_d[i][j].edge |= RIGHT;
}
}
__global__ void setNest(){
const int i = threadIdx.x;
const int j = blockIdx.x;
Cell* c;
if(i==MACRO_NEST_Y && j==MACRO_NEST_X){
cells_d[MACRO_NEST_Y][MACRO_NEST_X].status |= NEST_CELL;
for(enum Direction d = UP; d<=UPLEFT; (d<<=1) ){
c = getCell(cells_d,MACRO_NEST_Y,MACRO_NEST_X,d);
c->status |= NEST_NEIGHBOUR_CELL;
}
}
}
__global__ void setDistFromNest(){
const int i = threadIdx.x;
const int j = blockIdx.x;
Cell *nest_c;
nest_c = &cells_d[MACRO_NEST_Y][MACRO_NEST_X];
double d = dist(cells_d[i][j],*nest_c);
cells_d[i][j].distFromNest = d;
}
// 2D dot product of two cartesian vectors.
__device__ double dot(Cartesian a, Cartesian b) {
	const double xx = a.x * b.x;
	const double yy = a.y * b.y;
	return xx + yy;
}
// 2D cross product (z-component) of two cartesian vectors.
__device__ double cross(Cartesian a, Cartesian b) {
	const double xy = a.x * b.y;
	const double yx = a.y * b.x;
	return xy - yx;
}
// Per-cell kernel (<<<MACRO_MAX,MACRO_MAX>>>): mark every travel direction
// whose unit vector points too far away from the nest as "critical".
// A direction is critical when cos(angle to the nest direction) <= 0.3.
__global__ void setCriticalAngle() {
	const int i = threadIdx.x;
	const int j = blockIdx.x;
	cells_d[i][j].criticalAngle = NONE;
	// The nest cell and its neighbours never need a critical-angle mask.
	if( (cells_d[i][j].status&NEAR_NEST)!=NORMAL_CELL ){
		return;
	}
	// c = unit vector from this cell toward the nest.
	// NOTE(review): dividing by distFromNest assumes the nest sits at the
	// cartesian origin -- confirm MACRO_NEST_X/Y maps to cart (0,0).
	Cartesian c = cells_d[i][j].cart;
	c.x = -c.x/cells_d[i][j].distFromNest;
	c.y = -c.y/cells_d[i][j].distFromNest;
	for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ) {
		// d = (un-normalized) direction vector of this hex direction.
		// NOTE(review): tan(M_PI/4.0) == 1 gives 45-degree diagonals; for a
		// hexagonal lattice 60-degree spokes might have been intended --
		// confirm against the grid geometry (vectors are normalized below,
		// so only the angle matters).
		Cartesian d;
		switch (dir) {
			case UP:
				d.x = 0;
				d.y = 1;
				break;
			case UPRIGHT:
				d.x = 1;
				d.y = tan(M_PI/4.0);
				break;
			case LOWRIGHT:
				d.x = 1;
				d.y = -tan(M_PI/4.0);
				break;
			case LOW:
				d.x = 0;
				d.y = -1;
				break;
			case LOWLEFT:
				d.x = -1;
				d.y = -tan(M_PI/4.0);
				break;
			case UPLEFT:
				d.x = -1;
				d.y = tan(M_PI/4.0);
				break;
			default:
				break;
		}
		// Bug fix: the original normalized d.x first and then recomputed
		// dot(d,d) for d.y, so d.y was divided by the norm of the already
		// half-normalized vector and d never became a unit vector (e.g. for
		// UPRIGHT the result had squared length ~1.17, skewing the 0.3
		// threshold). Compute the norm once, before touching either component.
		double norm = sqrt(dot(d,d));
		d.x = d.x/norm;
		d.y = d.y/norm;
		double dotVal = dot(c,d);
		if (dotVal<=0.3){
			cells_d[i][j].criticalAngle |= dir;
		}
	}
}
__global__ void setNearestDirFromNest(){
	// One thread per cell (launched <<<MACRO_MAX,MACRO_MAX>>>): for every
	// travel direction that lies inside the cell's critical angle, precompute
	// the detour direction an ant should take (via selectNextDir).
	const int i = threadIdx.x;
	const int j = blockIdx.x;
	Cell& c = cells_d[i][j];
	for (int itr=0; itr<6; itr++){
		c.nearestDirFromNestList[itr] = NONE;
	}
	// Walk the six one-hot direction bits UP..UPLEFT, keeping the list index
	// itr in lock-step with the direction bit.
	enum Direction dir = UP;
	for(int itr=0; dir<=UPLEFT; itr++, dir<<=1) {
		// Bug fix: '==' binds tighter than '&', so the original
		// 'c.criticalAngle&dir == NONE' parsed as
		// 'c.criticalAngle & (dir==NONE)', which is always NONE, so the
		// skip never fired. Also, 'continue' used to jump over the
		// 'dir<<=1' at the end of the body, which would leave dir stuck
		// and out of sync with itr; advancing dir in the for-increment
		// fixes both problems.
		if ( (c.criticalAngle&dir) == NONE ){
			continue;
		}
		c.nearestDirFromNestList[itr] = selectNextDir(c, dir);
	}
}
// Per-cell kernel: set nestDir to the neighbour direction(s) with the smallest
// distance to the nest. Directions whose neighbour distance ties (within
// MACRO_EPS) the current best are OR-ed in; a strictly smaller distance
// replaces the whole mask.
__global__ void setNestDirs(){
	const int i = threadIdx.x;
	const int j = blockIdx.x;
	Cell *c;
	// d tracks the best (smallest) distance seen so far, seeded with this
	// cell's own distance so only improving neighbours are recorded.
	double d = cells_d[i][j].distFromNest;
	double tmp;
	for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
		c = getCell(cells_d,i,j,dir);
		tmp=c->distFromNest;
		if( fabs(tmp-d)<MACRO_EPS ){
			// Tie with the current best: accumulate this direction too.
			cells_d[i][j].nestDir |= dir;
		}
		else if(tmp<d) {
			// Strictly better: restart the mask with just this direction.
			cells_d[i][j].nestDir = dir;
			d = tmp;
		}
	}
}
__global__ void foodsReset(){
const int i = threadIdx.x + blockIdx.x * blockDim.x;
foods_d[i].vol = MACRO_FOODSOURCE;
}
// One thread per food source (<<<MACRO_NUM_FOODS,1>>>): place food source i at
// polar coordinates (MACRO_FOOD_DIST, i*MACRO_FOOD_ANGLE), find the grid cell
// closest to that point, and tag it plus its six neighbours.
__global__ void setFoodsDir(){
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	const double dtheta = degToRad(MACRO_FOOD_ANGLE);
	Cell *nearCell=NULL;
	double x,y;
	x=MACRO_FOOD_DIST * cos(i*dtheta);
	y=MACRO_FOOD_DIST * sin(i*dtheta);
	// Scan the whole grid for a cell within one hex circumradius of (x,y).
	// NOTE(review): this 'break' only exits the inner k-loop; the outer
	// j-loop keeps scanning, so if several rows contain a matching cell,
	// nearCell ends up as the match from the LAST such row -- confirm this
	// is intended (the refinement step below partially compensates).
	for(int j=0; j<MACRO_MAX; j++){
		for(int k=0; k<MACRO_MAX; k++){
			if(distCandP(cells_d[j][k],x,y)<=sqrt(3.0)/3.0+MACRO_EPS){
				nearCell = &cells_d[j][k];
				break;
			}
		}
	}
	if(nearCell==NULL){
		// Point lies outside the grid: this food source is left unplaced.
	}
	else{
		// Refine: check the six neighbours of the candidate and keep
		// whichever cell is actually closest to (x,y).
		Cell *c=NULL;
		double d = distCandP(*nearCell,x,y);
		int j = nearCell->i;
		int k = nearCell->j;
		for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
			c = getCell(cells_d,j,k,dir);
			if( distCandP(*c,x,y)<d ){
				nearCell = c;
				d = distCandP(*nearCell,x,y);
			}
		}
		// Record the placement and tag the food cell and its neighbours.
		foods_d[i].i = nearCell->i;
		foods_d[i].j = nearCell->j;
		nearCell->foodNo = i;
		nearCell->status |= FOOD_CELL;
		for(enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
			c = getCell(cells_d,foods_d[i].i,foods_d[i].j,dir);
			c->foodNo = i;
			c->status |= FOOD_NEIGHBOUR_CELL;
		}
	}
}
//Calculation
__global__ void selectAnts(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
int rnd = genAntNumRand(id);
atomicAdd(&selectedCounts[rnd], 1);
}
__global__ void sortKeyInit(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
sort_key_d[id] = curand(&rnd_state[id]);
//printf("id:%d,%u\n",id,sort_key_d[id]);
}
__global__ void diffusion(){
const int i = blockIdx.x;
const int j = threadIdx.x;
double tmp = 0.0;
for (enum Direction dir = UP; dir<=UPLEFT; (dir<<=1) ){
tmp += getCell(cells_d,i,j,dir)->phero;
}
tmpPhero_d[i][j] = cells_d[i][j].phero+MACRO_DIFFE*(tmp/6.0-cells_d[i][j].phero);
}
__global__ void pheroUpdate(){
const int i = blockIdx.x;
const int j = threadIdx.x;
cells_d[i][j].phero = tmpPhero_d[i][j];
}
__global__ void naturalFoodDecrease(){
const int id = threadIdx.x + blockIdx.x * blockDim.x;
foods_d[id].vol=foods_d[id].vol+MACRO_REC-foods_d[id].vol*(MACRO_REC/100.0);
}
__global__ void evapolation(){
const int i = blockIdx.x;
const int j = threadIdx.x;
cells_d[i][j].phero *= (1.0-MACRO_EVAPOLATION_CONST);
}
// Per-ant movement kernel (<<<1,MACRO_NMAX>>>): thread id owns ant id and
// performs one movement step for every time this ant was drawn by
// selectAnts() this tick (selectedCounts[id]); the counter is cleared at the
// end. Each step: possibly escalate to EMERGENCY, deposit pheromone when
// homing, choose the next cell (deterministic detour when homing against the
// critical angle, otherwise stochastic left/front/right weighted by a Hill
// function of pheromone), then handle food pickup and nest arrival.
__global__ void chemotaxis(){
	const int id = threadIdx.x + blockIdx.x * blockDim.x;
	Ant *ant = &(ants_d[id]);
	for(int dummy=0; dummy<selectedCounts[id]; dummy++){
		ant->searchTime++;
		int i = ant->i;
		int j = ant->j;
		enum Direction dir = ant->dir;
		// NOTE(review): nestDir is computed but never read below.
		enum Direction nestDir = cells_d[i][j].nestDir;
		double leftPhero, frontPhero, rightPhero;
		// The three candidate destination cells: ahead-left, ahead, ahead-right.
		Cell *leftCell = getCell(cells_d,i,j,left(dir));
		Cell *frontCell = getCell(cells_d,i,j,dir);
		Cell *rightCell = getCell(cells_d,i,j,right(dir));
		// Searched too long without success -> switch to EMERGENCY (head home).
		if(
			ant->searchTime>=MACRO_MAX_SEARCH_TIME
			&& ant->status!=EMERGENCY
			){
			ant->status = EMERGENCY;
		}
		// Homing ants lay pheromone on their current cell (atomically,
		// since many ants may share a cell).
		if(ant->status==GOHOME){
			atomicAddDouble(&(cells_d[i][j].phero),MACRO_EMI*MACRO_ENEST);
		}
		__threadfence();
		// RANDOM_SEARCH ants ignore pheromone: equal weights for all three
		// candidate cells.
		if(ant->status==RANDOM_SEARCH){
			leftPhero = 1.0;
			frontPhero = 1.0;
			rightPhero = 1.0;
		}
		else {
			leftPhero = leftCell->phero;
			frontPhero = frontCell->phero;
			rightPhero = rightCell->phero;
		}
		// Homing/emergency ant heading into the cell's critical angle:
		// follow the precomputed detour (or coin-flip if none is listed).
		if( (ant->status==GOHOME || ant->status==EMERGENCY) && isOppositeDir(cells_d[i][j], dir)){
			enum Direction nextDir = cells_d[i][j].nearestDirFromNestList[dirToNum(dir)];
			if( nextDir == left(dir) ){
				ant->dir = left(dir);
				frontCell = leftCell;
			}
			else if( nextDir == right(dir) ){
				ant->dir = right(dir);
				frontCell = rightCell;
			}
			else{
				if(genProbRand(id)<=0.5){
					ant->dir = right(dir);
					frontCell = rightCell;
				}
				else{
					ant->dir = left(dir);
					frontCell = leftCell;
				}
			}
			ant->i = frontCell->i;
			ant->j = frontCell->j;
		}
		else{
			// Stochastic choice among left/front/right, weighted by the
			// Hill function of each cell's pheromone. FOOL_CH ants use a
			// scaled sensitivity (SENSOR2).
			// NOTE: local 'rand' shadows ::rand; device code, so harmless.
			double s1,s2,s3,s12,t,tot,rand;
			if(ant->ch == NORMAL_CH){
				t = MACRO_HIL_CONST;
			}
			else{
				t = SENSOR2*MACRO_HIL_CONST;
			}
			s1=hilFunc(leftPhero,t);
			s2=hilFunc(frontPhero,t);
			s3=hilFunc(rightPhero,t);
			/*
			if(s1<MACRO_EPS && s2<MACRO_EPS && s3<MACRO_EPS){
				s1=1.0;
				s2=1.0;
				s3=1.0;
			}
			*/
			// Normalize to probabilities and sample one of the three cells.
			tot = s1+s2+s3;
			s1/=tot;
			s2/=tot;
			s12=s1+s2;
			rand=genProbRand(id);
			if(rand<=s1){
				ant->dir = left(dir);
				ant->i = leftCell->i;
				ant->j = leftCell->j;
			}
			else if(rand<=s12){
				ant->i = frontCell->i;
				ant->j = frontCell->j;
			}
			else{
				ant->dir = right(dir);
				ant->i = rightCell->i;
				ant->j = rightCell->j;
			}
		}
		// Arrived near a non-depleted food source while foraging: try to
		// atomically take one unit (isGotFood CAS loop) and turn around.
		if( (cells_d[ant->i][ant->j].status&NEAR_FOOD)!=NORMAL_CELL
			&& foods_d[ cells_d[ant->i][ant->j].foodNo ].vol>=0.1
			&& (ant->status != GOHOME && ant->status != EMERGENCY) ){
			//atomicAddDouble(&(foods_d[ cells_d[ant->i][ant->j].foodNo ].vol),-MACRO_UNIT);
			//ant->status = GOHOME;
			//ant->searchTime = 0;
			int fNo = cells_d[ant->i][ant->j].foodNo;
			if(isGotFood(foods_d[fNo])){
				ant->status = GOHOME;
				ant->searchTime = 0;
				ant->_foodNo = fNo;
				// Reverse heading: three left-turns = 180 degrees on a hex grid.
				ant->dir = left(left(left(dir)));
			}
		}
		__threadfence();
		// Arrived near the nest while homing/emergency: record a successful
		// delivery (GOHOME only), reset, and restart foraging from the nest.
		if( (cells_d[ant->i][ant->j].status&NEAR_NEST)!=NORMAL_CELL
			&& (ant->status == GOHOME || ant->status == EMERGENCY)){
			if(ant->status == GOHOME){
				ant->homing[ant->_foodNo]++;
				//atomicAddDouble(&(cells_d[i][j].phero),MACRO_EMI*MACRO_ENEST);
			}
			ant->status = FORAGE;
			ant->searchTime = 0;
			ant->dir = genDirRand(id);
			ant->i = MACRO_NEST_Y;
			ant->j = MACRO_NEST_X;
		}
	}
	selectedCounts[id] = 0;
}
//DataHandler
__device__ __host__ enum Direction operator<<(enum Direction d, int i){
return static_cast<enum Direction>(static_cast<int>(d)<<i);
}
__device__ __host__ enum Direction operator>>(enum Direction d, int i){
return static_cast<enum Direction>(static_cast<int>(d)>>i);
}
__device__ __host__ enum Direction operator|(enum Direction d1, enum Direction d2){
return static_cast<enum Direction>(static_cast<int>(d1)|static_cast<int>(d2));
}
__device__ __host__ enum Direction operator&(enum Direction d1, enum Direction d2){
return static_cast<enum Direction>(static_cast<int>(d1)&static_cast<int>(d2));
}
__device__ __host__ enum Direction& operator|=(enum Direction& d1, enum Direction d2){
d1 = (d1 | d2);
return d1;
}
__device__ __host__ enum Direction& operator&=(enum Direction& d1, enum Direction d2){
d1 = (d1 & d2);
return d1;
}
__device__ __host__ enum Direction& operator<<=(enum Direction& d1, int i){
d1 = (d1 << i);
return d1;
}
__device__ __host__ enum Direction& operator>>=(enum Direction& d1, int i){
d1 = (d1 >> i);
return d1;
}
__device__ __host__ bool operator<=(enum Direction d1, enum Direction d2){
return (static_cast<int>(d1) <= static_cast<int>(d2));
}
__device__ __host__ enum CELLStatus operator<<(enum CELLStatus d, int i){
return static_cast<enum CELLStatus>(static_cast<int>(d)<<i);
}
__device__ __host__ enum CELLStatus operator>>(enum CELLStatus d, int i){
return static_cast<enum CELLStatus>(static_cast<int>(d)>>i);
}
__device__ __host__ enum CELLStatus operator|(enum CELLStatus d1, enum CELLStatus d2){
return static_cast<enum CELLStatus>(static_cast<int>(d1)|static_cast<int>(d2));
}
__device__ __host__ enum CELLStatus operator&(enum CELLStatus d1, enum CELLStatus d2){
return static_cast<enum CELLStatus>(static_cast<int>(d1)&static_cast<int>(d2));
}
__device__ __host__ enum CELLStatus& operator|=(enum CELLStatus& d1, enum CELLStatus d2){
d1 = (d1 | d2);
return d1;
}
__device__ __host__ enum CELLStatus& operator&=(enum CELLStatus& d1, enum CELLStatus d2){
d1 = (d1 & d2);
return d1;
}
// Rotate a one-hot direction one sextant counter-clockwise: UP wraps to
// UPLEFT; otherwise shift the bit down and mask to the valid direction bits.
__device__ __host__ __forceinline__ enum Direction left(enum Direction dir){
	return (dir == UP) ? UPLEFT : ((dir >> 1)&ALL_DIR);
}
// Rotate a one-hot direction one sextant clockwise: UPLEFT wraps to UP;
// otherwise shift the bit up and mask to the valid direction bits.
__device__ __host__ __forceinline__ enum Direction right(enum Direction dir){
	return (dir == UPLEFT) ? UP : ((dir << 1)&ALL_DIR);
}
__device__ __host__ __forceinline__ Cell* up(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
if( (cells[i][j].edge&UP)!=NONE ){
return &cells[0][j];
}
else{
return &cells[i+1][j];
}
}
__device__ __host__ __forceinline__ Cell* upright(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&UPRIGHT)!=NONE ){
jj = 0;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii = i+1;
if(ii==MACRO_MAX){
ii = 0;
}
}
}
else{
jj = j+1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii = i+1;
}
}
return &cells[ii][jj];
}
__device__ __host__ __forceinline__ Cell* lowright(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&LOWRIGHT)!=NONE ){
jj = 0;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
if(ii<0){
ii=MACRO_MAX-1;
}
}
else{
ii = i;
}
}
else{
jj = j+1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
}
else{
ii = i;
}
}
return &cells[ii][jj];
}
__device__ __host__ __forceinline__ Cell* low(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
if( (cells[i][j].edge&LOW)!=NONE ){
return &cells[MACRO_MAX-1][j];
}
else{
return &cells[i-1][j];
}
}
__device__ __host__ __forceinline__ Cell* lowleft(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&LOWLEFT)!=NONE ){
jj = MACRO_MAX-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
if(ii<0){
ii = MACRO_MAX-1;
}
}
else{
ii = i;
}
}
else{
jj = j-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i-1;
}
else{
ii=i;
}
}
return &cells[ii][jj];
}
__device__ __host__ __forceinline__ Cell* upleft(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j){
int ii,jj;
if( (cells[i][j].edge&UPLEFT)!=NONE ){
jj = MACRO_MAX-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii= i+1;
if(ii==MACRO_MAX){
ii=0;
}
}
}
else{
jj = j-1;
if(abs(j-MACRO_CART_X_ZERO)%2==0){
ii = i;
}
else{
ii = i+1;
}
}
return &cells[ii][jj];
}
// Return a pointer to the neighbour of cell (i,j) in direction dir, with
// toroidal wrap-around handled by the per-direction helpers (up/upright/...).
// Returns NULL for a dir that is not a single one-hot direction bit.
__device__ __host__ Cell* getCell(Cell cells[MACRO_MAX][MACRO_MAX],int i,int j, enum Direction dir){
	switch (dir){
		case UP:
			return up(cells,i,j);
		case UPRIGHT:
			return upright(cells,i,j);
		case LOWRIGHT:
			return lowright(cells,i,j);
		case LOW:
			return low(cells,i,j);
		case LOWLEFT:
			return lowleft(cells,i,j);
		case UPLEFT:
			return upleft(cells,i,j);
		default:
			return NULL;
	}
}
// Map a one-hot Direction to its list index 0..5 (UP..UPLEFT, clockwise
// order); any other value yields -1.
__device__ __host__ int dirToNum(enum Direction dir){
	if (dir == UP)       return 0;
	if (dir == UPRIGHT)  return 1;
	if (dir == LOWRIGHT) return 2;
	if (dir == LOW)      return 3;
	if (dir == LOWLEFT)  return 4;
	if (dir == UPLEFT)   return 5;
	return -1;
}
//Misc
// Atomically try to take one MACRO_UNIT of food. Uses the classic
// double-via-atomicCAS pattern (reinterpret the double as a 64-bit word and
// CAS-loop): returns false without modifying the volume if it has dropped
// below 0.1, true after successfully decrementing it by MACRO_UNIT.
__device__ __forceinline__ bool isGotFood(Food& food){
	unsigned long long int* address_as_ull =
		(unsigned long long int*)(&(food.vol));
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old;
		// Re-check depletion on every retry: another ant may have taken
		// the last unit between iterations.
		if(__longlong_as_double(assumed)<0.1){
			return false;
		}
		old = atomicCAS(address_as_ull, assumed,__double_as_longlong(-MACRO_UNIT + __longlong_as_double(assumed)));
	} while (assumed != old);
	return true;
}
// Atomic double add emulated via a 64-bit atomicCAS loop (the pattern from
// the CUDA Programming Guide for devices without native atomicAdd(double*)).
// Returns the previous value at *address, like the native atomicAdd.
__device__ __forceinline__ double atomicAddDouble(double* address, double val){
	unsigned long long int* address_as_ull =
		(unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old;
		// Retry until no other thread modified the word between our read
		// and the CAS.
		old = atomicCAS(address_as_ull, assumed,
				__double_as_longlong(val +
				__longlong_as_double(assumed)));
	} while (assumed != old);
	return __longlong_as_double(old);
}
// Uniformly random one-hot direction (one of the 6 hex directions), drawn
// from this thread's curand state.
__device__ __forceinline__ enum Direction genDirRand(int id){
	return static_cast<enum Direction>(1 << (curand(&rnd_state[id])%6));
}
// Uniform double in (0,1], drawn from this thread's curand state.
__device__ __forceinline__ double genProbRand(int id){
	return curand_uniform_double(&rnd_state[id]);
}
// Random ant index in [0, MACRO_NMAX), drawn from this thread's curand state.
// (Modulo introduces a slight bias unless MACRO_NMAX divides 2^32.)
__device__ __forceinline__ int genAntNumRand(int id){
	return curand(&rnd_state[id])%MACRO_NMAX;
}
// Convert degrees to radians.
__device__ __forceinline__ double degToRad(double a) {
	return a * M_PI / 180.0;
}
// Euclidean distance between the cartesian centres of two cells.
__device__ __forceinline__ double dist(Cell a,Cell b){
	return sqrt( (a.cart.x - b.cart.x)*(a.cart.x - b.cart.x)
			+ (a.cart.y - b.cart.y)*(a.cart.y - b.cart.y) );
}
// Euclidean distance between a cell's cartesian centre and the point (x,y).
__device__ __forceinline__ double distCandP(Cell a,double x,double y){
	return sqrt( (a.cart.x - x)*(a.cart.x - x)
			+ (a.cart.y - y)*(a.cart.y - y) );
}
// True when dir points away from the nest: neither dir itself nor either of
// its immediate neighbours overlaps the nestDir mask.
__device__ __forceinline__ bool isOppositeDir(enum Direction nestDir,enum Direction dir){
	//If theta = 60 deg., this is OK.
	const bool towardNest = ((dir&nestDir) !=NONE)
		|| ((left(dir)&nestDir) !=NONE)
		|| ((right(dir)&nestDir) !=NONE);
	return !towardNest;
}
// True when dir is one of the cell's critical (away-from-nest) directions.
__device__ __forceinline__ bool isOppositeDir(Cell& cell, enum Direction dir){
	return (cell.criticalAngle & dir) == dir;
}
// Given a blocked direction dir, pick the detour with the shorter escape out
// of the cell's critical-angle arc: count how many consecutive critical
// directions lie clockwise vs counter-clockwise of dir and turn toward the
// smaller run. Ties return NONE (caller falls back to a coin flip).
__device__ __forceinline__ enum Direction selectNextDir(Cell& cell, enum Direction dir){
	int rightCount = 0;
	int leftCount = 0;
	// Length of the contiguous critical run clockwise of dir.
	for (enum Direction currentDir=right(dir); currentDir!=dir; currentDir=right(currentDir)){
		if( (cell.criticalAngle & currentDir)!=currentDir ){
			break;
		}
		rightCount++;
	}
	// Length of the contiguous critical run counter-clockwise of dir.
	for (enum Direction currentDir=left(dir); currentDir!=dir; currentDir=left(currentDir)){
		if( (cell.criticalAngle & currentDir)!=currentDir ){
			break;
		}
		leftCount++;
	}
	if ( rightCount < leftCount ){
		return right(dir);
	}
	else if ( rightCount > leftCount ){
		return left(dir);
	}
	else{
		return NONE;
	}
}
// Pheromone sensitivity response: (alpha*x + 0.05)^10. The 0.05 offset keeps
// the weight nonzero when x == 0.
__device__ __forceinline__ double hilFunc(double x,double alpha){
	const double base = alpha*x+0.05;
	return pow(base,10);
}
// One-time host setup: resolve device symbol addresses, then build the grid
// (cells, edges, nest, distances, critical angles, detour lists, nest
// directions) and place the food sources. Launch order matters: each kernel
// reads fields written by the previous one.
__host__ void initialize(){
	getDevicePtrs();
	//antsInit<<<MACRO_NMAX,1>>>();
	cellsInit<<<MACRO_MAX,MACRO_MAX>>>();
	setEdges<<<MACRO_MAX,MACRO_MAX>>>();
	setNest<<<MACRO_MAX,MACRO_MAX>>>();
	setDistFromNest<<<MACRO_MAX,MACRO_MAX>>>();
	setCriticalAngle<<<MACRO_MAX,MACRO_MAX>>>();
	setNearestDirFromNest<<<MACRO_MAX,MACRO_MAX>>>();
	setNestDirs<<<MACRO_MAX,MACRO_MAX>>>();
	setFoodsDir<<<MACRO_NUM_FOODS,1>>>();
}
// Host-side per-run reset: upload the run parameters (sensor scaling SENSOR2,
// number of FOOL_CH ants NAHO) to constant memory, reseed the per-thread RNG
// states from a host PRNG seeded with MACRO_RND_SEED+step, and reset ants,
// cell pheromone and food volumes. The grid topology from initialize() is
// left untouched.
__host__ void reset(double sensor,int naho,unsigned long long int step){
	cudaMemcpyToSymbol(SENSOR2,&sensor,sizeof(double),0);
	cudaMemcpyToSymbol(NAHO,&naho,sizeof(int),0);
	//initialize();
	//antsInit<<<MACRO_NMAX,1>>>();
	//cellsInit<<<MACRO_MAX,MACRO_MAX>>>();
	//setEdges<<<MACRO_MAX,MACRO_MAX>>>();
	//setNest<<<MACRO_MAX,MACRO_MAX>>>();
	//setDistFromNest<<<MACRO_MAX,MACRO_MAX>>>();
	//setNestDirs<<<MACRO_MAX,MACRO_MAX>>>();
	//setFoodsDir<<<MACRO_NUM_FOODS,1>>>();
	srand(MACRO_RND_SEED+step);
	// Generate MACRO_NMAX seeds on the host and copy them to seeds_d, then
	// let randInit build one curand state per ant from them.
	thrust::host_vector<unsigned long long int> seeds_vec_h(MACRO_NMAX);
	std::generate(seeds_vec_h.begin(), seeds_vec_h.end(), rand);
	thrust::copy(seeds_vec_h.begin(), seeds_vec_h.end(), seeds_d_ptr);
	randInit<<<MACRO_NMAX,1>>>();
	antsReset<<<MACRO_NMAX,1>>>();
	cellsReset<<<MACRO_MAX,MACRO_MAX>>>();
	foodsReset<<<MACRO_NUM_FOODS,1>>>();
}
|
31bac8879c5772eee0ff430f0b77089ae83575b1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "test_float_1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
hipMalloc(&output, XSIZE*YSIZE);
float posX = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
test_float_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, output,posX);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
test_float_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, output,posX);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
test_float_1D), dim3(gridBlock),dim3(threadBlock), 0, 0, output,posX);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 31bac8879c5772eee0ff430f0b77089ae83575b1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "test_float_1D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *output = NULL;
cudaMalloc(&output, XSIZE*YSIZE);
float posX = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
test_float_1D<<<gridBlock,threadBlock>>>(output,posX);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
test_float_1D<<<gridBlock,threadBlock>>>(output,posX);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
test_float_1D<<<gridBlock,threadBlock>>>(output,posX);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
684b28caf26668427c986c23b8d22af99c3abfe7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_ROCM
template<typename Dtype>
__global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
#endif // USE_ROCM
template<typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel(
CL_KERNEL_SELECT("relu_forward"));
viennacl::ocl::enqueue(
oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx), negative_slope),
ctx.get_queue());
#endif // USE_GREENTEA
}
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
#ifdef USE_ROCM
template<typename Dtype>
__global__ void ReLUBackward(const int_tp n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);
}
}
#endif // USE_ROCM
template<typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel(
CL_KERNEL_SELECT("relu_backward"));
viennacl::ocl::enqueue(
oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
negative_slope),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
| 684b28caf26668427c986c23b8d22af99c3abfe7.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/relu_layer.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
#ifdef USE_CUDA
template<typename Dtype>
__global__ void ReLUForward(const int_tp n, const Dtype* in, Dtype* out,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope;
}
}
#endif // USE_CUDA
template<typename Dtype>
void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, bottom_data, top_data, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel(
CL_KERNEL_SELECT("relu_forward"));
viennacl::ocl::enqueue(
oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) top_data, &ctx), negative_slope),
ctx.get_queue());
#endif // USE_GREENTEA
}
// << " count: " << count << " bottom_data: "
// << (unsigned long)bottom_data
// << " top_data: " << (unsigned long)top_data
// << " blocks: " << CAFFE_GET_BLOCKS(count)
// << " threads: " << CAFFE_CUDA_NUM_THREADS;
}
#ifdef USE_CUDA
template<typename Dtype>
__global__ void ReLUBackward(const int_tp n, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff,
Dtype negative_slope) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index]
* ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope);
}
}
#endif // USE_CUDA
template<typename Dtype>
void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int_tp count = bottom[0]->count();
Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
// NOLINT_NEXT_LINE(whitespace/operators)
ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS)(
count, top_diff, bottom_data, bottom_diff, negative_slope);
CUDA_POST_KERNEL_CHECK;
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel(
CL_KERNEL_SELECT("relu_backward"));
viennacl::ocl::enqueue(
oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx),
WrapHandle((cl_mem) bottom_data, &ctx),
WrapHandle((cl_mem) bottom_diff, &ctx),
negative_slope),
ctx.get_queue());
#endif // USE_GREENTEA
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer);
} // namespace caffe
|
f7c9881830be907d0aef8eb9d1213fe360dcee83.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
__global__ void mult(int x, int y, int *res) {
*res = x * y;
}
int gpu(int x, int y) {
int *dev_res;
int res = 0;
hipMalloc((void**)&dev_res, sizof(int));
hipLaunchKernelGGL(( mult), dim3(1), dim3(1), 0, 0, x, y, dev_res);
hipMemcpy(&res, dev_res, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_res);
return res;
}
| f7c9881830be907d0aef8eb9d1213fe360dcee83.cu | #include <cuda.h>
#include <iostream>
#include <stdio.h>
__global__ void mult(int x, int y, int *res) {
*res = x * y;
}
int gpu(int x, int y) {
int *dev_res;
int res = 0;
cudaMalloc((void**)&dev_res, sizof(int));
mult<<<1, 1>>>(x, y, dev_res);
cudaMemcpy(&res, dev_res, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_res);
return res;
}
|
94acfd4b0b09c9f5f8316c7be585fb3c070eceb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "random.h"
// Setup seed for a given thread, pass the location of local seed storage,
// the global array of seeds and the absolute thread location
__device__ void gsrand(unsigned int *val, unsigned int *seeds, int thread)
{
*val = seeds[thread];
return;
}
// This will generate a random float between 0 and 1, must pass it the local
// location of the seed value for this thread.
__device__ float grand(unsigned int *seed)
{
*seed = ((*seed * 1664525) + 1013904223);
return (((float)*seed) / (4294967295.0f));
}
// This function allocates memory for seeds on the gpu, generates them,
// and copies them over.
unsigned int *gen_seeds(int num_threads)
{
unsigned int *s_d;
unsigned int *s_h;
int i;
srand(time(NULL));
s_h = (unsigned int *)malloc(num_threads * sizeof(unsigned int));
if (!s_h) {
return NULL;
}
hipMalloc((void **) &s_d, num_threads * sizeof(unsigned int));
if (!s_d) {
free(s_h);
return NULL;
}
for (i = 0; i < num_threads; i++) {
s_h[i] = rand();
}
printf("memcpy %d\n", hipMemcpy(s_d, s_h, num_threads * sizeof(unsigned int), hipMemcpyHostToDevice));
free(s_h);
return s_d;
}
| 94acfd4b0b09c9f5f8316c7be585fb3c070eceb5.cu | #include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "random.h"
// Setup seed for a given thread, pass the location of local seed storage,
// the global array of seeds and the absolute thread location
__device__ void gsrand(unsigned int *val, unsigned int *seeds, int thread)
{
*val = seeds[thread];
return;
}
// This will generate a random float between 0 and 1, must pass it the local
// location of the seed value for this thread.
__device__ float grand(unsigned int *seed)
{
*seed = ((*seed * 1664525) + 1013904223);
return (((float)*seed) / (4294967295.0f));
}
// This function allocates memory for seeds on the gpu, generates them,
// and copies them over.
unsigned int *gen_seeds(int num_threads)
{
unsigned int *s_d;
unsigned int *s_h;
int i;
srand(time(NULL));
s_h = (unsigned int *)malloc(num_threads * sizeof(unsigned int));
if (!s_h) {
return NULL;
}
cudaMalloc((void **) &s_d, num_threads * sizeof(unsigned int));
if (!s_d) {
free(s_h);
return NULL;
}
for (i = 0; i < num_threads; i++) {
s_h[i] = rand();
}
printf("memcpy %d\n", cudaMemcpy(s_d, s_h, num_threads * sizeof(unsigned int), cudaMemcpyHostToDevice));
free(s_h);
return s_d;
}
|
26e9506ae4523dfd5be9ca0dbbe617d2b30019ff.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
*/
/*
Explanation: This is a four-version combined source file. We have four simulate
functions: simulate1, simulate2, simulate3, simulate4 corresponding
to each version, respectively. To run a certain version, user needs
to modify "simulate" in main with "simulate1" in default. And I
defined blocksize as global variable which needs to be tuned.
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
int blocksize = 8;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
cerr << "ERROR: Bad call to gettimeofday" << endl;
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
// Allocate a 2D array
double *alloc2D(int m,int n){
double *E;
int nx=n, ny=m;
E = (double*)malloc(sizeof(double)*nx*ny);
assert(E);
return(E);
}
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(double *E, int m, int n, double *_mx){
double mx = -1;
double l2norm = 0;
int i, j;
for (j=1; j<=m; j++)
for (i=1; i<=n; i++) {
l2norm += E[j*(n+2)+i]*E[j*(n+2)+i];
if (E[j*(n+2)+i] > mx)
mx = E[j*(n+2)+i];
}
*_mx = mx;
l2norm /= (double) ((m)*(n));
l2norm = sqrt(l2norm);
return l2norm;
}
// External functions
extern "C" {
void splot(double *E, double T, int niter, int m, int n);
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
__global__ void setBoundaryLeft(double* E_prev, const int m, const int n)
{
int j = blockIdx.x*blockDim.x+threadIdx.x+1;
if (j<=m) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
}
__global__ void setBoundaryRight(double* E_prev, const int m, const int n)
{
int j = blockIdx.x*blockDim.x+threadIdx.x+1;
if (j<=m) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
}
__global__ void setBoundaryUp(double* E_prev, const int m, const int n)
{
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
if (i<=n) E_prev[i] = E_prev[2*(n+2)+i];
}
__global__ void setBoundaryDown(double* E_prev, const int m, const int n)
{
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
if (i<=n) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
}
__global__ void solvePDE(double *E, double *E_prev, const double alpha, const int m, const int n)
{
int j = blockIdx.y*blockDim.y+threadIdx.y+1;
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
if ((j<=m) && (i<=n))
{
E[j*(n+2)+i] = E_prev[j*(n+2)+i]+alpha*(E_prev[j*(n+2)+i+1]+E_prev[j*(n+2)+i-1]-4*E_prev[j*(n+2)+i]+E_prev[(j+1)*(n+2)+i]+E_prev[(j-1)*(n+2)+i]);
}
}
__global__ void solveODE_E(double *E, double *R, const double kk,
const double dt, const double a, const int m, const int n)
{
int j = blockIdx.y*blockDim.y+threadIdx.y+1;
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
if ((j<=m) && (i<=n))
{
E[j*(n+2)+i] = E[j*(n+2)+i] -dt*(kk* E[j*(n+2)+i]*(E[j*(n+2)+i] - a)*(E[j*(n+2)+i]-1)+ E[j*(n+2)+i] *R[j*(n+2)+i]);
}
}
__global__ void solveODE_R(double *E, double *R, const double kk,
const double dt, const double epsilon, const double M1,
const double M2, const double b, const int m, const int n)
{
int j = blockIdx.y*blockDim.y+threadIdx.y+1;
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
if ((j<=m) && (i<=n))
{
R[j*(n+2)+i] = R[j*(n+2)+i] + dt*(epsilon+M1* R[j*(n+2)+i]/( E[j*(n+2)+i]+M2))*(-R[j*(n+2)+i]-kk* E[j*(n+2)+i]*(E[j*(n+2)+i]-b-1));
}
}
void simulate1 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
/*
* Copy data from boundary of the computational box
* to the padding region, set up for differencing
* on the boundary of the computational box
* Using mirror boundaries
*/
hipLaunchKernelGGL(( setBoundaryLeft), dim3(ceil(m/64.0)),dim3(64), 0, 0, E_prev, m, n);
hipLaunchKernelGGL(( setBoundaryRight), dim3(ceil(m/64.0)),dim3(64), 0, 0, E_prev, m, n);
hipLaunchKernelGGL(( setBoundaryUp), dim3(ceil(n/64.0)),dim3(64), 0, 0, E_prev, m, n);
hipLaunchKernelGGL(( setBoundaryDown), dim3(ceil(n/64.0)),dim3(64), 0, 0, E_prev, m, n);
// Solve for the excitation, the PDE
dim3 DimGrid(ceil((double)n/blocksize), ceil((double)m/blocksize), 1);
dim3 DimBlock(blocksize, blocksize, 1);
hipLaunchKernelGGL(( solvePDE), dim3(DimGrid), dim3(DimBlock), 0, 0, E, E_prev, alpha, m, n);
/*
* Solve the ODE, advancing excitation and recovery to the
* next timtestep
*/
hipLaunchKernelGGL(( solveODE_E), dim3(DimGrid), dim3(DimBlock), 0, 0, E, R, kk, dt, a, m, n);
hipLaunchKernelGGL(( solveODE_R), dim3(DimGrid), dim3(DimBlock), 0, 0, E, R, kk, dt, epsilon, M1, M2, b, m, n);
}
__global__ void simulateKernel2 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
int j = blockIdx.y*blockDim.y+threadIdx.y+1;
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
/*
* Copy data from boundary of the computational box
* to the padding region, set up for differencing
* on the boundary of the computational box
* Using mirror boundaries
*/
if ((j<=m) && (i<=n))
{
if (i == 2) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
if (i == n - 1) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
if (j == 2) E_prev[i] = E_prev[2*(n+2)+i];
if (j == m - 1) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
}
__syncthreads();
if ((j<=m) && (i<=n))
{
// Solve for the excitation, the PDE
E[j*(n+2)+i] = E_prev[j*(n+2)+i]+alpha*(E_prev[j*(n+2)+i+1]+E_prev[j*(n+2)+i-1]-4*E_prev[j*(n+2)+i]+E_prev[(j+1)*(n+2)+i]+E_prev[(j-1)*(n+2)+i]);
/*
* Solve the ODE, advancing excitation and recovery to the
* next timtestep
*/
E[j*(n+2)+i] = E[j*(n+2)+i] -dt*(kk* E[j*(n+2)+i]*(E[j*(n+2)+i] - a)*(E[j*(n+2)+i]-1)+ E[j*(n+2)+i] *R[j*(n+2)+i]);
R[j*(n+2)+i] = R[j*(n+2)+i] + dt*(epsilon+M1* R[j*(n+2)+i]/( E[j*(n+2)+i]+M2))*(-R[j*(n+2)+i]-kk* E[j*(n+2)+i]*(E[j*(n+2)+i]-b-1));
}
}
void simulate2 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
dim3 DimGrid(ceil((double)n/blocksize), ceil((double)m/blocksize), 1);
dim3 DimBlock(blocksize, blocksize, 1);
hipLaunchKernelGGL(( simulateKernel2), dim3(DimGrid), dim3(DimBlock), 0, 0, E, E_prev, R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
}
__global__ void simulateKernel3 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
int j = blockIdx.y*blockDim.y+threadIdx.y+1;
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
double e, r;
/*
* Copy data from boundary of the computational box
* to the padding region, set up for differencing
* on the boundary of the computational box
* Using mirror boundaries
*/
if ((j<=m) && (i<=n))
{
if (i == 2) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
if (i == n - 1) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
if (j == 2) E_prev[i] = E_prev[2*(n+2)+i];
if (j == m - 1) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
}
__syncthreads();
if ((j<=m) && (i<=n))
{
// Solve for the excitation, the PDE
E[j*(n+2)+i] = E_prev[j*(n+2)+i]+alpha*(E_prev[j*(n+2)+i+1]+E_prev[j*(n+2)+i-1]-4*E_prev[j*(n+2)+i]+E_prev[(j+1)*(n+2)+i]+E_prev[(j-1)*(n+2)+i]);
/*
* Solve the ODE, advancing excitation and recovery to the
* next timtestep
*/
e = E[j*(n+2)+i];
r = R[j*(n+2)+i];
e = e - dt * (kk * e * (e - a) * ( e - 1 ) + e * r);
r = r + dt * (epsilon + M1 * r / (e + M2)) * (-r - kk * e * (e - b - 1));
E[j*(n+2)+i] = e;
R[j*(n+2)+i] = r;
}
}
void simulate3 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
dim3 DimGrid(ceil((double)n/blocksize), ceil((double)m/blocksize), 1);
dim3 DimBlock(blocksize, blocksize, 1);
hipLaunchKernelGGL(( simulateKernel3), dim3(DimGrid), dim3(DimBlock), 0, 0, E, E_prev, R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
}
__global__ void simulateKernel4 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
int j = blockIdx.y*blockDim.y+threadIdx.y+1;
int i = blockIdx.x*blockDim.x+threadIdx.x+1;
double e, r;
extern __shared__ double E_prev_tile[];
if ((j<=m) && (i<=n))
{
/*
* Copy data from boundary of the computational box
* to the padding region, set up for differencing
* on the boundary of the computational box
* Using mirror boundaries
*/
if (i == 2) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
if (i == n - 1) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
if (j == 2) E_prev[i] = E_prev[2*(n+2)+i];
if (j == m - 1) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
}
__syncthreads();
if ((j<=m) && (i<=n))
{
//Transfer data to on-chip memory
E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+1] = E_prev[j*(n+2)+i];
if (threadIdx.y == 0) E_prev_tile[threadIdx.x+1] = E_prev[(j-1)*(n+2)+i];
if (threadIdx.x == 0) E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)] = E_prev[j*(n+2)+i-1];
if ((threadIdx.y == blockDim.y-1) || (j == m)) E_prev_tile[(threadIdx.y+2)*(blockDim.x+2)+threadIdx.x+1] = E_prev[(j+1)*(n+2)+i];
if ((threadIdx.x == blockDim.x-1) || (i == n)) E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+2] = E_prev[j*(n+2)+i+1];
}
__syncthreads();
if ((j<=m) && (i<=n))
{
// Solve for the excitation, the PDE
E[j*(n+2)+i] = E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+1]
+alpha*(E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+2]
+E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x]
-4*E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+1]
+E_prev_tile[(threadIdx.y+2)*(blockDim.x+2)+threadIdx.x+1]
+E_prev_tile[threadIdx.y*(blockDim.x+2)+threadIdx.x+1]);
/*
* Solve the ODE, advancing excitation and recovery to the
* next timtestep
*/
e = E[j*(n+2)+i];
r = R[j*(n+2)+i];
e = e - dt * (kk * e * (e - a) * ( e - 1 ) + e * r);
r = r + dt * (epsilon + M1 * r / (e + M2)) * (-r - kk * e * (e - b - 1));
E[j*(n+2)+i] = e;
R[j*(n+2)+i] = r;
}
}
void simulate4 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
dim3 DimGrid(ceil((double)n/blocksize), ceil((double)m/blocksize), 1);
dim3 DimBlock(blocksize, blocksize, 1);
hipLaunchKernelGGL(( simulateKernel4), dim3(DimGrid), dim3(DimBlock), sizeof(double)*(blocksize+2)*(blocksize+2), 0, E, E_prev, R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
}
// Main program
int main (int argc, char** argv)
{
/*
* Solution arrays
* E is the "Excitation" variable, a voltage
* R is the "Recovery" variable
* E_prev is the Excitation variable for the previous timestep,
* and is used in time integration
*/
double *E, *R, *E_prev;
// Various constants - these definitions shouldn't change
const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
double T=1000.0;
int m=200,n=200;
int plot_freq = 0;
int px = 1, py = 1;
int no_comm = 0;
int num_threads=1;
cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
m = n;
// Allocate contiguous memory for solution arrays
// The computational box is defined on [1:m+1,1:n+1]
// We pad the arrays in order to facilitate differencing on the
// boundaries of the computation box
E = alloc2D(m+2,n+2);
E_prev = alloc2D(m+2,n+2);
R = alloc2D(m+2,n+2);
int i,j;
// Initialization
for (j=1; j<=m; j++)
for (i=1; i<=n; i++)
E_prev[j*(n+2)+i] = R[j*(n+2)+i] = 0;
for (j=1; j<=m; j++)
for (i=n/2+1; i<=n; i++)
E_prev[j*(n+2)+i] = 1.0;
for (j=m/2+1; j<=m; j++)
for (i=1; i<=n; i++)
R[j*(n+2)+i] = 1.0;
double dx = 1.0/n;
// For time integration, these values shouldn't change
double rp= kk*(b+1)*(b+1)/4;
double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
double dtr=1/(epsilon+((M1/M2)*rp));
double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
double alpha = d*dt/(dx*dx);
cout << "Grid Size : " << n << endl;
cout << "Duration of Sim : " << T << endl;
cout << "Time step dt : " << dt << endl;
cout << "Process geometry: " << px << " x " << py << endl;
if (no_comm)
cout << "Communication : DISABLED" << endl;
cout << endl;
//cuda: transfer data to GPU
double *E_cuda, *R_cuda, *E_prev_cuda;
int size = (m+2)*(n+2)*sizeof(double);
hipMalloc((void **) &E_cuda, size);
hipMemcpy(E_cuda, E, size, hipMemcpyHostToDevice);
hipMalloc((void **) &R_cuda, size);
hipMemcpy(R_cuda, R, size, hipMemcpyHostToDevice);
hipMalloc((void **) &E_prev_cuda, size);
hipMemcpy(E_prev_cuda, E_prev, size, hipMemcpyHostToDevice);
// Start the timer
double t0 = getTime();
// Simulated time is different from the integer timestep number
// Simulated time
double t = 0.0;
// Integer timestep number
int niter=0;
while (t<T) {
t += dt;
niter++;
simulate1(E_cuda, E_prev_cuda, R_cuda, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
//swap current E with previous E
double *tmp = E_cuda; E_cuda = E_prev_cuda; E_prev_cuda = tmp;
if (plot_freq){
int k = (int)(t/plot_freq);
if ((t - k * plot_freq) < dt){
hipMemcpy(E, E_cuda, size, hipMemcpyDeviceToHost);
splot(E,t,niter,m+2,n+2);
}
}
}//end of while loop
double time_elapsed = getTime() - t0;
double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
cout << "Number of Iterations : " << niter << endl;
cout << "Elapsed Time (sec) : " << time_elapsed << endl;
cout << "Sustained Gflops Rate : " << Gflops << endl;
cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
//cuda: transfer data from GPU
hipMemcpy(E_prev, E_prev_cuda, size, hipMemcpyDeviceToHost);
hipMemcpy(E, E_cuda, size, hipMemcpyDeviceToHost);
hipMemcpy(R, R_cuda, size, hipMemcpyDeviceToHost);
hipFree(E_cuda); hipFree(R_cuda); hipFree(E_prev_cuda);
double mx;
double l2norm = stats(E_prev,m,n,&mx);
cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
if (plot_freq){
cout << "\n\nEnter any input to close the program and the plot..." << endl;
getchar();
}
free (E);
free (E_prev);
free (R);
return 0;
}
| 26e9506ae4523dfd5be9ca0dbbe617d2b30019ff.cu | /*
* Solves the Panfilov model using an explicit numerical scheme.
* Based on code orginally provided by Xing Cai, Simula Research Laboratory
* and reimplementation by Scott B. Baden, UCSD
*
* Modified and restructured by Didem Unat, Koc University
*
*/
/*
Explanation: This is a four-version combined source file. We have four simulate
functions: simulate1, simulate2, simulate3, simulate4 corresponding
to each version, respectively. To run a certain version, user needs
to modify "simulate" in main with "simulate1" in default. And I
defined blocksize as global variable which needs to be tuned.
*/
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>
using namespace std;
// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;
int blocksize = 8;
double getTime()
{
struct timeval TV;
struct timezone TZ;
const int RC = gettimeofday(&TV, &TZ);
if(RC == -1) {
cerr << "ERROR: Bad call to gettimeofday" << endl;
return(-1);
}
return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) );
} // end getTime()
// Allocate a 2D array
double *alloc2D(int m,int n){
double *E;
int nx=n, ny=m;
E = (double*)malloc(sizeof(double)*nx*ny);
assert(E);
return(E);
}
// Reports statistics about the computation
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(double *E, int m, int n, double *_mx){
double mx = -1;
double l2norm = 0;
int i, j;
for (j=1; j<=m; j++)
for (i=1; i<=n; i++) {
l2norm += E[j*(n+2)+i]*E[j*(n+2)+i];
if (E[j*(n+2)+i] > mx)
mx = E[j*(n+2)+i];
}
*_mx = mx;
l2norm /= (double) ((m)*(n));
l2norm = sqrt(l2norm);
return l2norm;
}
// External functions
extern "C" {
void splot(double *E, double T, int niter, int m, int n);
}
void cmdLine(int argc, char *argv[], double& T, int& n, int& px, int& py, int& plot_freq, int& no_comm, int&num_threads);
// Mirror boundary: copies interior column 2 into ghost column 0.
// Launch: 1D grid, one thread per interior row (1..m); padded width is n+2.
__global__ void setBoundaryLeft(double* E_prev, const int m, const int n)
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (row > m) return;
    double* rowPtr = E_prev + row * (n + 2);
    rowPtr[0] = rowPtr[2];
}
// Mirror boundary: copies interior column n-1 into ghost column n+1.
// Launch: 1D grid, one thread per interior row (1..m); padded width is n+2.
__global__ void setBoundaryRight(double* E_prev, const int m, const int n)
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (row > m) return;
    double* rowPtr = E_prev + row * (n + 2);
    rowPtr[n + 1] = rowPtr[n - 1];
}
// Mirror boundary: copies interior row 2 into ghost row 0.
// Launch: 1D grid, one thread per interior column (1..n).
__global__ void setBoundaryUp(double* E_prev, const int m, const int n)
{
    const int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (col > n) return;
    E_prev[col] = E_prev[2*(n+2) + col];
}
// Mirror boundary: copies interior row m-1 into ghost row m+1.
// Launch: 1D grid, one thread per interior column (1..n).
__global__ void setBoundaryDown(double* E_prev, const int m, const int n)
{
    const int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (col > n) return;
    E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
}
// One explicit finite-difference step of the diffusion part of the PDE:
// E = E_prev + alpha * (5-point Laplacian of E_prev).
// Launch: 2D grid, one thread per interior cell (i,j) in [1..n]x[1..m];
// arrays are padded to (m+2)x(n+2) so the stencil never reads out of bounds
// (the ghost cells must already hold the mirrored boundary values).
__global__ void solvePDE(double *E, double *E_prev, const double alpha, const int m, const int n)
{
    const int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (row > m || col > n) return;
    const int stride = n + 2;
    const int idx = row * stride + col;
    const double center = E_prev[idx];
    E[idx] = center + alpha*(E_prev[idx+1] + E_prev[idx-1] - 4*center
                             + E_prev[idx+stride] + E_prev[idx-stride]);
}
// Explicit Euler step of the excitation ODE, updating E in place:
// E -= dt * (kk*E*(E-a)*(E-1) + E*R).
// Launch: same 2D one-thread-per-interior-cell grid as solvePDE.
__global__ void solveODE_E(double *E, double *R, const double kk,
const double dt, const double a, const int m, const int n)
{
    const int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (row > m || col > n) return;
    const int idx = row * (n + 2) + col;
    const double e = E[idx];
    E[idx] = e - dt*(kk*e*(e - a)*(e - 1) + e*R[idx]);
}
// Explicit Euler step of the recovery ODE, updating R in place.
// Reads the excitation E already advanced by solveODE_E for this timestep.
// Launch: same 2D one-thread-per-interior-cell grid as solvePDE.
__global__ void solveODE_R(double *E, double *R, const double kk,
const double dt, const double epsilon, const double M1,
const double M2, const double b, const int m, const int n)
{
    const int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
    const int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    if (row > m || col > n) return;
    const int idx = row * (n + 2) + col;
    const double e = E[idx];
    const double r = R[idx];
    R[idx] = r + dt*(epsilon + M1*r/(e + M2))*(-r - kk*e*(e - b - 1));
}
// Version 1: one device kernel per sub-task, all on the default stream so
// the launches execute in order.
// First mirrors the physical boundary into the padding (ghost) cells, then
// advances the diffusion PDE and the two reaction ODEs.
void simulate1 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    // Boundary kernels: one thread per boundary row/column, 64-thread blocks.
    const int tpb = 64;
    const int rowBlocks = (m + tpb - 1) / tpb;
    const int colBlocks = (n + tpb - 1) / tpb;
    setBoundaryLeft<<<rowBlocks, tpb>>>(E_prev, m, n);
    setBoundaryRight<<<rowBlocks, tpb>>>(E_prev, m, n);
    setBoundaryUp<<<colBlocks, tpb>>>(E_prev, m, n);
    setBoundaryDown<<<colBlocks, tpb>>>(E_prev, m, n);
    // Interior kernels: 2D grid, one thread per interior cell.
    dim3 threads(blocksize, blocksize, 1);
    dim3 blocks((n + blocksize - 1) / blocksize, (m + blocksize - 1) / blocksize, 1);
    solvePDE<<<blocks, threads>>>(E, E_prev, alpha, m, n);
    solveODE_E<<<blocks, threads>>>(E, R, kk, dt, a, m, n);
    solveODE_R<<<blocks, threads>>>(E, R, kk, dt, epsilon, M1, M2, b, m, n);
}
// Version 2: one fused kernel per timestep — boundary mirroring, PDE stencil
// and both ODE updates — on a 2D grid with one thread per interior cell (i,j).
// Arrays are padded to (m+2) x (n+2); i,j are 1-based interior coordinates.
// NOTE(review): __syncthreads() only orders threads WITHIN a block. A ghost
// value written below (e.g. column n+1, written by the thread at i == n-1)
// can be read by the stencil of a thread in a DIFFERENT block (the one that
// owns i == n) with no grid-wide synchronization in between — confirm whether
// this cross-block race is tolerated, or prefer simulate1's per-task kernels.
__global__ void simulateKernel2 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    int j = blockIdx.y*blockDim.y+threadIdx.y+1;
    int i = blockIdx.x*blockDim.x+threadIdx.x+1;
    /*
     * Copy data from boundary of the computational box
     * to the padding region, set up for differencing
     * on the boundary of the computational box
     * Using mirror boundaries
     */
    if ((j<=m) && (i<=n))
    {
        // Each row's i==2 / i==n-1 thread and each column's j==2 / j==m-1 thread
        // mirrors exactly one ghost cell, so every ghost cell is written once.
        if (i == 2) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
        if (i == n - 1) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
        if (j == 2) E_prev[i] = E_prev[2*(n+2)+i];
        if (j == m - 1) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
    }
    __syncthreads();  // block-local barrier only; see NOTE above
    if ((j<=m) && (i<=n))
    {
        // Solve for the excitation, the PDE (explicit 5-point stencil)
        E[j*(n+2)+i] = E_prev[j*(n+2)+i]+alpha*(E_prev[j*(n+2)+i+1]+E_prev[j*(n+2)+i-1]-4*E_prev[j*(n+2)+i]+E_prev[(j+1)*(n+2)+i]+E_prev[(j-1)*(n+2)+i]);
        /*
         * Solve the ODE, advancing excitation and recovery to the
         * next timestep (R reads the E already updated above)
         */
        E[j*(n+2)+i] = E[j*(n+2)+i] -dt*(kk* E[j*(n+2)+i]*(E[j*(n+2)+i] - a)*(E[j*(n+2)+i]-1)+ E[j*(n+2)+i] *R[j*(n+2)+i]);
        R[j*(n+2)+i] = R[j*(n+2)+i] + dt*(epsilon+M1* R[j*(n+2)+i]/( E[j*(n+2)+i]+M2))*(-R[j*(n+2)+i]-kk* E[j*(n+2)+i]*(E[j*(n+2)+i]-b-1));
    }
}
// Version 2 host wrapper: a single fused kernel handles boundaries, PDE and
// ODEs per timestep. One thread per interior cell, blocksize x blocksize blocks.
void simulate2 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    dim3 threads(blocksize, blocksize, 1);
    dim3 blocks((n + blocksize - 1) / blocksize, (m + blocksize - 1) / blocksize, 1);
    simulateKernel2<<<blocks, threads>>>(E, E_prev, R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
}
// Version 3: like simulateKernel2, but the ODE stage stages E and R through
// registers (e, r) so each value is read from global memory only once.
// NOTE(review): same cross-block caveat as version 2 — __syncthreads() is a
// block-local barrier, yet ghost cells written here may be read by stencil
// threads in neighbouring blocks; confirm this race is acceptable.
__global__ void simulateKernel3 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    int j = blockIdx.y*blockDim.y+threadIdx.y+1;
    int i = blockIdx.x*blockDim.x+threadIdx.x+1;
    double e, r;  // register copies of E and R for the ODE updates
    /*
     * Copy data from boundary of the computational box
     * to the padding region, set up for differencing
     * on the boundary of the computational box
     * Using mirror boundaries
     */
    if ((j<=m) && (i<=n))
    {
        if (i == 2) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
        if (i == n - 1) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
        if (j == 2) E_prev[i] = E_prev[2*(n+2)+i];
        if (j == m - 1) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
    }
    __syncthreads();  // block-local barrier only (see NOTE above)
    if ((j<=m) && (i<=n))
    {
        // Solve for the excitation, the PDE (explicit 5-point stencil)
        E[j*(n+2)+i] = E_prev[j*(n+2)+i]+alpha*(E_prev[j*(n+2)+i+1]+E_prev[j*(n+2)+i-1]-4*E_prev[j*(n+2)+i]+E_prev[(j+1)*(n+2)+i]+E_prev[(j-1)*(n+2)+i]);
        /*
         * Solve the ODE, advancing excitation and recovery to the
         * next timestep; r uses the already-updated e, matching the
         * kernel ordering of version 1.
         */
        e = E[j*(n+2)+i];
        r = R[j*(n+2)+i];
        e = e - dt * (kk * e * (e - a) * ( e - 1 ) + e * r);
        r = r + dt * (epsilon + M1 * r / (e + M2)) * (-r - kk * e * (e - b - 1));
        E[j*(n+2)+i] = e;
        R[j*(n+2)+i] = r;
    }
}
// Version 3 host wrapper: fused kernel with register-staged ODE updates.
// One thread per interior cell, blocksize x blocksize blocks.
void simulate3 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    dim3 threads(blocksize, blocksize, 1);
    dim3 blocks((n + blocksize - 1) / blocksize, (m + blocksize - 1) / blocksize, 1);
    simulateKernel3<<<blocks, threads>>>(E, E_prev, R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
}
// Version 4: fused kernel that stages E_prev in a shared-memory tile of
// (blockDim.x+2) x (blockDim.y+2) doubles (interior plus a one-cell halo) so
// the 5-point stencil reads on-chip memory. The dynamic shared-memory size is
// supplied at launch by simulate4(). Corner halo cells are never loaded —
// the 5-point stencil does not read them.
// NOTE(review): same cross-block caveat as versions 2/3 — ghost-cell writes
// are only ordered with in-block readers by __syncthreads(); confirm the
// cross-block race is acceptable.
__global__ void simulateKernel4 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    int j = blockIdx.y*blockDim.y+threadIdx.y+1;
    int i = blockIdx.x*blockDim.x+threadIdx.x+1;
    double e, r;  // register copies of E and R for the ODE updates
    extern __shared__ double E_prev_tile[];  // (blockDim.x+2)*(blockDim.y+2) doubles
    if ((j<=m) && (i<=n))
    {
        /*
         * Copy data from boundary of the computational box
         * to the padding region, set up for differencing
         * on the boundary of the computational box
         * Using mirror boundaries
         */
        if (i == 2) E_prev[j*(n+2)+0] = E_prev[j*(n+2)+2];
        if (i == n - 1) E_prev[j*(n+2)+n+1] = E_prev[j*(n+2)+n-1];
        if (j == 2) E_prev[i] = E_prev[2*(n+2)+i];
        if (j == m - 1) E_prev[(m+1)*(n+2)+i] = E_prev[(m-1)*(n+2)+i];
    }
    __syncthreads();  // make ghost cells visible to in-block tile loaders
    if ((j<=m) && (i<=n))
    {
        //Transfer data to on-chip memory: each thread loads its own cell;
        //edge threads additionally load the adjacent halo cell. The (j == m)
        //and (i == n) clauses cover partially-filled edge blocks, where the
        //last active thread is not the last thread of the block.
        E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+1] = E_prev[j*(n+2)+i];
        if (threadIdx.y == 0) E_prev_tile[threadIdx.x+1] = E_prev[(j-1)*(n+2)+i];
        if (threadIdx.x == 0) E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)] = E_prev[j*(n+2)+i-1];
        if ((threadIdx.y == blockDim.y-1) || (j == m)) E_prev_tile[(threadIdx.y+2)*(blockDim.x+2)+threadIdx.x+1] = E_prev[(j+1)*(n+2)+i];
        if ((threadIdx.x == blockDim.x-1) || (i == n)) E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+2] = E_prev[j*(n+2)+i+1];
    }
    __syncthreads();  // tile fully populated before any stencil read
    if ((j<=m) && (i<=n))
    {
        // Solve for the excitation, the PDE — 5-point stencil read entirely
        // from the shared-memory tile.
        E[j*(n+2)+i] = E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+1]
        +alpha*(E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+2]
        +E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x]
        -4*E_prev_tile[(threadIdx.y+1)*(blockDim.x+2)+threadIdx.x+1]
        +E_prev_tile[(threadIdx.y+2)*(blockDim.x+2)+threadIdx.x+1]
        +E_prev_tile[threadIdx.y*(blockDim.x+2)+threadIdx.x+1]);
        /*
         * Solve the ODE, advancing excitation and recovery to the
         * next timestep (register-staged, as in version 3)
         */
        e = E[j*(n+2)+i];
        r = R[j*(n+2)+i];
        e = e - dt * (kk * e * (e - a) * ( e - 1 ) + e * r);
        r = r + dt * (epsilon + M1 * r / (e + M2)) * (-r - kk * e * (e - b - 1));
        E[j*(n+2)+i] = e;
        R[j*(n+2)+i] = r;
    }
}
// Version 4 host wrapper: fused kernel with a shared-memory stencil tile.
// Passes the dynamic shared-memory size for one (blocksize+2)^2 tile of
// doubles (interior plus one-cell halo) as the third launch parameter.
void simulate4 (double* E, double* E_prev,double* R,
const double alpha, const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
    dim3 threads(blocksize, blocksize, 1);
    dim3 blocks((n + blocksize - 1) / blocksize, (m + blocksize - 1) / blocksize, 1);
    const size_t tileBytes = sizeof(double) * (blocksize + 2) * (blocksize + 2);
    simulateKernel4<<<blocks, threads, tileBytes>>>(E, E_prev, R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
}
// Main program
// Driver: parses options, initializes the Aliev-Panfilov fields on the host,
// copies them to the device, time-steps the simulation (simulate1 by default;
// replace the call below to run versions 2-4), then copies back and reports
// statistics. NOTE(review): none of the cudaMalloc/cudaMemcpy calls below are
// error-checked — a failed allocation would go unnoticed until the results
// are wrong; consider wrapping them in a CUDA_CHECK-style macro.
int main (int argc, char** argv)
{
    /*
     * Solution arrays
     * E is the "Excitation" variable, a voltage
     * R is the "Recovery" variable
     * E_prev is the Excitation variable for the previous timestep,
     * and is used in time integration
     */
    double *E, *R, *E_prev;
    // Various constants - these definitions shouldn't change
    const double a=0.1, b=0.1, kk=8.0, M1= 0.07, M2=0.3, epsilon=0.01, d=5e-5;
    double T=1000.0;
    int m=200,n=200;
    int plot_freq = 0;
    int px = 1, py = 1;
    int no_comm = 0;
    int num_threads=1;
    cmdLine( argc, argv, T, n,px, py, plot_freq, no_comm, num_threads);
    m = n;  // the computational box is square
    // Allocate contiguous memory for solution arrays
    // The computational box is defined on [1:m+1,1:n+1]
    // We pad the arrays in order to facilitate differencing on the
    // boundaries of the computation box
    E = alloc2D(m+2,n+2);
    E_prev = alloc2D(m+2,n+2);
    R = alloc2D(m+2,n+2);
    int i,j;
    // Initialization: zero everywhere, then E=1 on the right half and
    // R=1 on the bottom half of the interior.
    for (j=1; j<=m; j++)
    for (i=1; i<=n; i++)
    E_prev[j*(n+2)+i] = R[j*(n+2)+i] = 0;
    for (j=1; j<=m; j++)
    for (i=n/2+1; i<=n; i++)
    E_prev[j*(n+2)+i] = 1.0;
    for (j=m/2+1; j<=m; j++)
    for (i=1; i<=n; i++)
    R[j*(n+2)+i] = 1.0;
    double dx = 1.0/n;
    // For time integration, these values shouldn't change
    // (dt is the stability-limited step: 95% of the tighter of the PDE
    // and ODE bounds)
    double rp= kk*(b+1)*(b+1)/4;
    double dte=(dx*dx)/(d*4+((dx*dx))*(rp+kk));
    double dtr=1/(epsilon+((M1/M2)*rp));
    double dt = (dte<dtr) ? 0.95*dte : 0.95*dtr;
    double alpha = d*dt/(dx*dx);
    cout << "Grid Size : " << n << endl;
    cout << "Duration of Sim : " << T << endl;
    cout << "Time step dt : " << dt << endl;
    cout << "Process geometry: " << px << " x " << py << endl;
    if (no_comm)
    cout << "Communication : DISABLED" << endl;
    cout << endl;
    //cuda: transfer data to GPU
    double *E_cuda, *R_cuda, *E_prev_cuda;
    // NOTE(review): int overflows for n around 16k and larger on 4-byte int;
    // size_t would be safer here.
    int size = (m+2)*(n+2)*sizeof(double);
    cudaMalloc((void **) &E_cuda, size);
    cudaMemcpy(E_cuda, E, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &R_cuda, size);
    cudaMemcpy(R_cuda, R, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &E_prev_cuda, size);
    cudaMemcpy(E_prev_cuda, E_prev, size, cudaMemcpyHostToDevice);
    // Start the timer
    double t0 = getTime();
    // Simulated time is different from the integer timestep number
    // Simulated time
    double t = 0.0;
    // Integer timestep number
    int niter=0;
    while (t<T) {
        t += dt;
        niter++;
        simulate1(E_cuda, E_prev_cuda, R_cuda, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);
        //swap current E with previous E: the freshly computed field becomes
        //the input of the next step, so after the LAST iteration the newest
        //solution lives in E_prev_cuda (hence stats() on E_prev below).
        double *tmp = E_cuda; E_cuda = E_prev_cuda; E_prev_cuda = tmp;
        if (plot_freq){
            int k = (int)(t/plot_freq);
            if ((t - k * plot_freq) < dt){
                cudaMemcpy(E, E_cuda, size, cudaMemcpyDeviceToHost);
                splot(E,t,niter,m+2,n+2);
            }
        }
    }//end of while loop
    // NOTE(review): kernel launches are asynchronous; without a
    // cudaDeviceSynchronize() here the timing below may undercount GPU work
    // still in flight (the first cudaMemcpy after it will block, though).
    double time_elapsed = getTime() - t0;
    double Gflops = (double)(niter * (1E-9 * n * n ) * 28.0) / time_elapsed ;
    double BW = (double)(niter * 1E-9 * (n * n * sizeof(double) * 4.0 ))/time_elapsed;
    cout << "Number of Iterations : " << niter << endl;
    cout << "Elapsed Time (sec) : " << time_elapsed << endl;
    cout << "Sustained Gflops Rate : " << Gflops << endl;
    cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;
    //cuda: transfer data from GPU
    cudaMemcpy(E_prev, E_prev_cuda, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(E, E_cuda, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(R, R_cuda, size, cudaMemcpyDeviceToHost);
    cudaFree(E_cuda); cudaFree(R_cuda); cudaFree(E_prev_cuda);
    double mx;
    double l2norm = stats(E_prev,m,n,&mx);
    cout << "Max: " << mx << " L2norm: "<< l2norm << endl;
    if (plot_freq){
        cout << "\n\nEnter any input to close the program and the plot..." << endl;
        getchar();
    }
    free (E);
    free (E_prev);
    free (R);
    return 0;
}
|
468c1831101a41985847e2c5d0492401090c7ab2.hip | // !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <cstdint>
#include <climits>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include <cfloat>
#include <arrayUtils.cuh>
#include <imageUtils.cuh>
#include <benchmark.h>
#define TPB_1D 8 // ThreadsPerBlock in one dimension
#define TPB_2D TPB_1D*TPB_1D // ThreadsPerBlock = TPB_1D*TPB_1D (2D block)
#define TPB_REDUCTION 512 // ThreadsPerBlock (1D block)
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
using DT = uint32_t;
// 8-byte-aligned (fitness, index) pair used by the template-matching kernels,
// both as the per-block result stored in global memory and as the element
// type of the shared-memory reduction buffers.
struct alignas(8) ResultType {
    float fitness;  // match quality at pixel 'idx'; lower is better, FLT_MAX = no match
    uint32_t idx;   // linear pixel index (y * width + x) in the reference image
    __host__ __device__ ResultType& operator=(ResultType&&) = default; //Forcing a move assignment operator to be generated by the compiler
    // Member-wise assignment through volatile references: needed because the
    // warp-level reduction phase accesses shared memory via
    // 'volatile ResultType*', and the compiler-generated assignment operators
    // have no volatile-qualified overloads.
    __host__ __device__ volatile ResultType& operator=(volatile const ResultType& other) volatile
    {
        fitness = other.fitness;
        idx = other.idx;
        return *this;
    }
};
#pragma region KERNELS
/// <summary>
/// Simple minimum-reduction over 'data' using a single 1D thread block.
/// Each thread first folds every TPB_REDUCTION-strided element into its
/// shared-memory slot, then a tree reduction (plus a volatile warp tail)
/// selects the element with the smallest fitness.
/// </summary>
/// <param name="data">Data to be reduced.</param>
/// <param name="length">Data length.</param>
/// <returns>void ... the result is stored in data[0].</returns>
__global__ void getBest(ResultType* data, const uint32_t length)
{
    __shared__ ResultType sData[TPB_REDUCTION];
    uint32_t tid = threadIdx.x;
    const uint32_t offset = blockDim.x;
    sData[tid] = { FLT_MAX , tid }; //Initial fill of the shared memory
    // FIX: guard the load instead of returning early. The original
    // "if (tid >= length) return;" let threads exit before the
    // __syncthreads() below, which is barrier divergence (undefined
    // behaviour) whenever length < TPB_REDUCTION.
    if (tid < length)
    {
        sData[tid] = data[tid];
        uint32_t nextId = tid + offset;
        while (nextId < length) //Fold the rest of the global data, stride TPB_REDUCTION
        {
            if (data[nextId].fitness < sData[tid].fitness)
            {
                sData[tid] = data[nextId];
            }
            nextId += offset;
        }
    }
    __syncthreads(); //Start reduction from now
    if constexpr (TPB_REDUCTION >= 128)
    {
#pragma unroll
        for (uint32_t s = (TPB_REDUCTION >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
        {
            if (tid < s)
            {
                if (sData[tid + s].fitness < sData[tid].fitness)
                {
                    sData[tid] = sData[tid + s];
                }
            }
            __syncthreads();
        }
    }
    if (tid < 32) //Warp-synchronous tail via volatile accesses (pre-Volta idiom).
    {
        volatile ResultType* vsData = sData;
        if constexpr (TPB_REDUCTION >= 64) vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
        if constexpr (TPB_REDUCTION >= 32) vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
        if constexpr (TPB_REDUCTION >= 16) vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
        if constexpr (TPB_REDUCTION >= 8) vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
        if constexpr (TPB_REDUCTION >= 4) vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
        if constexpr (TPB_REDUCTION >= 2) vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
    }
    if (tid == 0) //The zero thread saves the result into Global mem
    {
        data[0] = sData[0];
    }
}
/// <summary>
/// Every THREAD of a 2D block [TPB_1DxTPB_1D] computes one fitness value for a single pixel
/// of the reference image: the query image is "virtually" anchored with its top-left corner
/// at that pixel and compared element-wise (equality check: fitness 0.0 on a perfect match,
/// FLT_MAX otherwise; an L2-distance variant would cumulate (r-q)^2 instead). A block-level
/// reduction then keeps the best candidate per block.
/// </summary>
/// <param name="ref">Reference data.</param>
/// <param name="rWidth">Width of the reference data.</param>
/// <param name="rHeight">Height of the reference data.</param>
/// <param name="query">Query data.</param>
/// <param name="qWidth">Width of the query data.</param>
/// <param name="qHeight">Height of the query data.</param>
/// <param name="blockResults">One ResultType per block; reduced afterwards by getBest().</param>
/// <returns></returns>
__global__ void find(const DT* __restrict__ ref, const uint32_t rWidth, const uint32_t rHeight,
    const DT* __restrict__ query, const uint32_t qWidth, const uint32_t qHeight,
    ResultType* __restrict__ blockResults)
{
    uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
    uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y;
    uint32_t qx, qy;
    const DT* r = nullptr;
    const DT* q = nullptr;
    __shared__ ResultType sData[TPB_2D];
    sData[tid] = { FLT_MAX, ry * rWidth + rx };
    //EQUALITY CHECK ONLY
    bool equality = true;
    // FIX: compare with addition. The original 'ry <= rHeight - qHeight' wraps
    // around (unsigned arithmetic) when the query is larger than the reference,
    // turning the guard into out-of-bounds reads.
    if ((ry + qHeight <= rHeight) && (rx + qWidth <= rWidth))
    {
        r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image.
        q = &query[0]; //Pointer to starting ROW position in the query image.
        for (qy = 0; equality && (qy < qHeight); qy++) //Each thread will process the whole query image
        {
            for (qx = 0; equality && (qx < qWidth); qx++) //Early-out on the first mismatch
            {
                equality = (r[qx] == q[qx]);
            }
            r += rWidth; //Move one row down in the reference image.
            q += qWidth; //Move one row down in the query image.
        }
        if (equality)
            sData[tid].fitness = 0.0f;
    }
    __syncthreads(); //The parallel reduction will start here, all WARPS have to finish previous instructions.
    if constexpr (TPB_2D >= 128)
    {
#pragma unroll
        for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
        {
            if (tid < s)
            {
                if (sData[tid + s].fitness < sData[tid].fitness)
                {
                    sData[tid] = sData[tid + s];
                }
            }
            __syncthreads();
        }
    }
    if (tid < 32) //Warp-synchronous tail via volatile accesses (pre-Volta idiom).
    {
        volatile ResultType* vsData = sData;
        if constexpr (TPB_2D >= 64) vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
        if constexpr (TPB_2D >= 32) vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
        if constexpr (TPB_2D >= 16) vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
        if constexpr (TPB_2D >= 8) vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
        if constexpr (TPB_2D >= 4) vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
        if constexpr (TPB_2D >= 2) vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
    }
    if (tid == 0) //0-th thread stores the final BEST result for a given block
    {
        blockResults[blockIdx.y * gridDim.x + blockIdx.x] = sData[0];
    }
}
/// <summary>
/// Texture-object variant of find(): every THREAD of a 2D block [TPB_1DxTPB_1D] anchors the
/// query image at one reference pixel and compares element-wise through tex2D lookups
/// (equality check: fitness 0.0 on a perfect match, FLT_MAX otherwise). A block-level
/// reduction then keeps the best candidate per block.
/// </summary>
/// <param name="ref">Reference data stored in a TextureObject.</param>
/// <param name="rWidth">Width of the reference data.</param>
/// <param name="rHeight">Height of the reference data.</param>
/// <param name="query">Query data stored in a TextureObject.</param>
/// <param name="qWidth">Width of the query data.</param>
/// <param name="qHeight">Height of the query data.</param>
/// <param name="blockResults">One ResultType per block; reduced afterwards by getBest().</param>
/// <returns></returns>
__global__ void findTex(const hipTextureObject_t ref, const uint32_t rWidth, const uint32_t rHeight,
    const hipTextureObject_t query, const uint32_t qWidth, const uint32_t qHeight,
    ResultType* __restrict__ blockResults)
{
    uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
    uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y;
    uint32_t qx, qy;
    uint32_t r, q;
    __shared__ ResultType sData[TPB_2D];
    sData[tid] = { FLT_MAX, ry * rWidth + rx }; //idx recorded BEFORE rx/ry are advanced below
    //EQUALITY CHECK ONLY
    bool equality = true;
    // FIX: compare with addition. The original 'ry <= rHeight - qHeight' wraps
    // around (unsigned arithmetic) when the query is larger than the reference,
    // turning the guard into a full (clamped) scan with bogus matches possible.
    if ((ry + qHeight <= rHeight) && (rx + qWidth <= rWidth))
    {
        for (qy = 0; equality && (qy < qHeight); qy++, ry++) //Each thread will process the whole query image
        {
            for (qx = 0; equality && (qx < qWidth); qx++, rx++) //Early-out on the first mismatch
            {
                r = tex2D<uint32_t>(ref, rx, ry);
                q = tex2D<uint32_t>(query, qx, qy);
                equality = (r == q);
            }
            rx = blockIdx.x * blockDim.x + threadIdx.x; //Rewind to the anchor column for the next row
        }
        if (equality)
            sData[tid].fitness = 0.0f;
    }
    __syncthreads(); //The parallel reduction will start here, all WARPS have to finish previous instructions.
    if constexpr (TPB_2D >= 128)
    {
#pragma unroll
        for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
        {
            if (tid < s)
            {
                if (sData[tid + s].fitness < sData[tid].fitness)
                {
                    sData[tid] = sData[tid + s];
                }
            }
            __syncthreads();
        }
    }
    if (tid < 32) //Warp-synchronous tail via volatile accesses (pre-Volta idiom).
    {
        volatile ResultType* vsData = sData;
        if constexpr (TPB_2D >= 64) vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
        if constexpr (TPB_2D >= 32) vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
        if constexpr (TPB_2D >= 16) vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
        if constexpr (TPB_2D >= 8) vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
        if constexpr (TPB_2D >= 4) vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
        if constexpr (TPB_2D >= 2) vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
    }
    if (tid == 0) //0-th thread stores the final BEST result for a given block
    {
        blockResults[blockIdx.y * gridDim.x + blockIdx.x] = sData[0];
    }
}
#pragma endregion KERNELS
#pragma region TEXTURES
// Builds a texture object (point-sampled, clamped, raw uint32_t reads) backed
// by a 2D array copied from the device image 'ii'.
// NOTE(review): assumes ii.dPtr is device memory whose rows are ii.pitch bytes
// apart — confirm against prepareData().
__host__ TextureInfo createTextureObjectFrom2DArray(const ImageInfo<DT>& ii)
{
    TextureInfo ti;
    // Size info
    ti.size = { ii.width, ii.height, 1 };
    //Texture Data settings
    ti.texChannelDesc = hipCreateChannelDesc<uint32_t>(); // hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned);
    checkCudaErrors(hipMallocArray(&ti.texArrayData, &ti.texChannelDesc, ii.width, ii.height));
    // FIX: the source image is pitched. The original (deprecated) hipMemcpyToArray
    // copied ii.pitch * ii.height bytes LINEARLY, smearing the inter-row padding
    // into the array whenever pitch != width * sizeof(DT). A 2D copy honours the pitch.
    checkCudaErrors(hipMemcpy2DToArray(ti.texArrayData, 0, 0, ii.dPtr, ii.pitch,
        ii.width * sizeof(DT), ii.height, hipMemcpyDeviceToDevice));
    // Specify texture resource
    ti.resDesc.resType = hipResourceTypeArray;
    ti.resDesc.res.array.array = ti.texArrayData;
    // Specify texture object parameters
    ti.texDesc.addressMode[0] = hipAddressModeClamp;
    ti.texDesc.addressMode[1] = hipAddressModeClamp;
    ti.texDesc.filterMode = hipFilterModePoint;   // nearest-neighbour lookup
    ti.texDesc.readMode = hipReadModeElementType; // raw uint32_t, no normalization
    ti.texDesc.normalizedCoords = false;          // integer pixel coordinates
    // Create texture object
    checkCudaErrors(hipCreateTextureObject(&ti.texObj, &ti.resDesc, &ti.texDesc, NULL));
    return ti;
}
#pragma endregion TEXTURES
#pragma region PROCESSING
// Runs the template search: launches find()/findTex() (one thread per candidate
// anchor pixel, one partial result per block), reduces the per-block results
// with getBest(), and prints the winning position.
// NOTE(review): the grid computation below underflows (unsigned) if the query
// is larger than the reference — callers must guarantee query <= reference.
template<bool USE_TEXTURES=false>
void processData(const ImageInfo<DT>& ref, const ImageInfo<DT>& query)
{
    float gpuTime = 0.0;
    //How many block of the size of [16x16] will process the reference image?
    //Too much to manage. That's we use a 1D grid of [16x16] blocks that will move down the image.
    //This we need (((ref.width - query.width + 1) + 16 - 1)/16) blocks!!!
    dim3 block{ TPB_1D , TPB_1D ,1 };
    dim3 grid{ ((ref.width - query.width + 1) + TPB_1D - 1) / TPB_1D,
    ((ref.height - query.height + 1) + TPB_1D - 1) / TPB_1D,
    1 };
    ResultType* dBlockResults = nullptr;
    auto dBlockResultsLength = grid.x * grid.y;  // one partial result per block
    checkCudaErrors(hipMalloc(&dBlockResults, dBlockResultsLength * sizeof(ResultType)));
    //1. Try to compute all possible matches.
    if constexpr (USE_TEXTURES == false) {
        gpuTime = GPUTIME(1, find << <grid, block >> > (ref.dPtr, ref.width, ref.height, query.dPtr, query.width, query.height, dBlockResults));
        printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", "find", gpuTime);
    } else {
        // Texture path: wrap both images in texture objects, then release them.
        TextureInfo tiRef = createTextureObjectFrom2DArray(ref);
        TextureInfo tiQuery = createTextureObjectFrom2DArray(query);
        gpuTime = GPUTIME(1,hipLaunchKernelGGL(( findTex) , dim3(grid), dim3(block) , 0, 0, tiRef.texObj, tiRef.size.width, tiRef.size.height, tiQuery.texObj, tiQuery.size.width, tiQuery.size.height, dBlockResults));
        printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", "findTex", gpuTime);
        if (tiRef.texObj) checkCudaErrors(hipDestroyTextureObject(tiRef.texObj));
        if (tiRef.texArrayData) checkCudaErrors(hipFreeArray(tiRef.texArrayData));
        if (tiQuery.texObj) checkCudaErrors(hipDestroyTextureObject(tiQuery.texObj));
        if (tiQuery.texArrayData) checkCudaErrors(hipFreeArray(tiQuery.texArrayData));
    }
    //2. Search for the best match: single-block reduction over the per-block results.
    block = { TPB_REDUCTION ,1,1 };
    grid = { 1, 1, 1 };
    gpuTime = GPUTIME(1, getBest << <grid, block >> > (dBlockResults, dBlockResultsLength));
    printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", "getBest", gpuTime);
    ResultType bestBlockResult;
    checkCudaErrors(hipMemcpy(&bestBlockResult, dBlockResults, sizeof(ResultType), hipMemcpyKind::hipMemcpyDeviceToHost));
    printf("Best fitness value: %f\n", bestBlockResult.fitness);
    printf("Winner index: %u\n", bestBlockResult.idx);
    printf("Winner's LEFT-TOP CORNER X: %u\n", bestBlockResult.idx % ref.width);
    // NOTE(review): the Y conversion below flips the row axis — presumably the
    // image rows are stored bottom-up (FreeImage convention); confirm.
    printf("Winner's LEFT-TOP CORNER Y: %u\n", ref.height - (bestBlockResult.idx / ref.width) - query.height);
    if (dBlockResults) hipFree(dBlockResults);
}
#pragma endregion PROCESSING
// Driver: loads the reference and query images onto the device, runs the
// template search (global-memory path; pass processData<true> for the
// texture path), and releases the device buffers.
// NOTE(review): image paths are relative to the working directory — confirm
// the expected launch location.
int main(int argc, char* argv[])
{
    initializeCUDA(deviceProp);
    FreeImage_Initialise();
    ImageInfo<DT> ref;
    ImageInfo<DT> query;
    // prepareData<false>: load image and upload to device memory (ii.dPtr)
    prepareData<false>("../../images/reference.tif", ref);
    prepareData<false>("../../images/query.tif", query);
    processData<false>(ref, query);
    FreeImage_DeInitialise();
    if (ref.dPtr) hipFree(ref.dPtr);
    if (query.dPtr) hipFree(query.dPtr);
}
| 468c1831101a41985847e2c5d0492401090c7ab2.cu | // includes, cuda
#include <cstdint>
#include <climits>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include <cfloat>
#include <arrayUtils.cuh>
#include <imageUtils.cuh>
#include <benchmark.h>
#define TPB_1D 8 // ThreadsPerBlock in one dimension
#define TPB_2D TPB_1D*TPB_1D // ThreadsPerBlock = TPB_1D*TPB_1D (2D block)
#define TPB_REDUCTION 512 // ThreadsPerBlock (1D block)
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
using DT = uint32_t;
// 8-byte-aligned (fitness, index) pair used by the template-matching kernels,
// both as the per-block result stored in global memory and as the element
// type of the shared-memory reduction buffers.
struct alignas(8) ResultType {
    float fitness;  // match quality at pixel 'idx'; lower is better, FLT_MAX = no match
    uint32_t idx;   // linear pixel index (y * width + x) in the reference image
    __host__ __device__ ResultType& operator=(ResultType&&) = default; //Forcing a move assignment operator to be generated by the compiler
    // Member-wise assignment through volatile references: needed because the
    // warp-level reduction phase accesses shared memory via
    // 'volatile ResultType*', and the compiler-generated assignment operators
    // have no volatile-qualified overloads.
    __host__ __device__ volatile ResultType& operator=(volatile const ResultType& other) volatile
    {
        fitness = other.fitness;
        idx = other.idx;
        return *this;
    }
};
#pragma region KERNELS
/// <summary>
/// Simple minimum-reduction over 'data' using a single 1D thread block.
/// Each thread first folds every TPB_REDUCTION-strided element into its
/// shared-memory slot, then a tree reduction (plus a volatile warp tail)
/// selects the element with the smallest fitness.
/// </summary>
/// <param name="data">Data to be reduced.</param>
/// <param name="length">Data length.</param>
/// <returns>void ... the result is stored in data[0].</returns>
__global__ void getBest(ResultType* data, const uint32_t length)
{
    __shared__ ResultType sData[TPB_REDUCTION];
    uint32_t tid = threadIdx.x;
    const uint32_t offset = blockDim.x;
    sData[tid] = { FLT_MAX , tid }; //Initial fill of the shared memory
    // FIX: guard the load instead of returning early. The original
    // "if (tid >= length) return;" let threads exit before the
    // __syncthreads() below, which is barrier divergence (undefined
    // behaviour) whenever length < TPB_REDUCTION.
    if (tid < length)
    {
        sData[tid] = data[tid];
        uint32_t nextId = tid + offset;
        while (nextId < length) //Fold the rest of the global data, stride TPB_REDUCTION
        {
            if (data[nextId].fitness < sData[tid].fitness)
            {
                sData[tid] = data[nextId];
            }
            nextId += offset;
        }
    }
    __syncthreads(); //Start reduction from now
    if constexpr (TPB_REDUCTION >= 128)
    {
#pragma unroll
        for (uint32_t s = (TPB_REDUCTION >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
        {
            if (tid < s)
            {
                if (sData[tid + s].fitness < sData[tid].fitness)
                {
                    sData[tid] = sData[tid + s];
                }
            }
            __syncthreads();
        }
    }
    if (tid < 32) //Warp-synchronous tail via volatile accesses (pre-Volta idiom).
    {
        volatile ResultType* vsData = sData;
        if constexpr (TPB_REDUCTION >= 64) vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
        if constexpr (TPB_REDUCTION >= 32) vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
        if constexpr (TPB_REDUCTION >= 16) vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
        if constexpr (TPB_REDUCTION >= 8) vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
        if constexpr (TPB_REDUCTION >= 4) vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
        if constexpr (TPB_REDUCTION >= 2) vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
    }
    if (tid == 0) //The zero thread saves the result into Global mem
    {
        data[0] = sData[0];
    }
}
/// <summary>
/// Every THREAD of 2D block [TPB_1DxTPB_1D] computes one final fitness value for a single pixel of the reference image. One corner of the query image is "virtually" attached to this pixel position.
/// A SINGLE THREAD compares the query image with the given region of the reference image.
/// </summary>
/// <param name="ref">Reference data.</param>
/// <param name="rWidth">Width of the reference data.</param>
/// <param name="rHeight">Height of the reference data.</param>
/// <param name="query">Query data.</param>
/// <param name="qWidth">Width of the query data.</param>
/// <param name="qHeight">Height of the query data.</param>
/// <param name="blockResults">Each block of the grid will store exactly one value into the global memory. Them, this data is reduced in another kernel to find the best value/solution.</param>
/// <returns></returns>
__global__ void find(const DT* __restrict__ ref, const uint32_t rWidth, const uint32_t rHeight,
    const DT* __restrict__ query, const uint32_t qWidth, const uint32_t qHeight,
    ResultType* __restrict__ blockResults)
{
    // Flat index of this thread inside its 2D block; also its slot in shared memory.
    uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
    // (rx, ry): reference-image pixel this thread anchors the query's corner at.
    uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y;
    uint32_t qx, qy;
    const DT* r = nullptr;
    const DT* q = nullptr;
    // One candidate result per thread; reduced to the block's best below.
    __shared__ ResultType sData[TPB_2D];
    // Initialize to "worst" fitness so threads outside the valid region lose the reduction.
    sData[tid] = { FLT_MAX, ry * rWidth + rx };
    //EUCLIDEAN DISTANCE
    /*
    if ((ry <= rHeight - qHeight) && (rx <= rWidth - qWidth))
    {
    r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image.
    q = &query[0]; //Pointer to starting ROW position in the query image.
    sData[tid].fitness = 0.0f;
    for (qy = 0; qy < qHeight; qy++) //Each thread will process the whole query image
    {
    for (qx = 0; qx < qWidth; qx++) //Each thread will process the whole query image
    {
    sData[tid].fitness += (r[qx] - q[qx]) * (r[qx] - q[qx]); //Cummulate the value
    }
    r += rWidth; //Move one row down in the reference image.
    q += qWidth; //Move one row down in the query image.
    }
    }
    */
    //EQUALITY CHECK ONLY
    bool equality = true;
    // NOTE(review): rHeight - qHeight and rWidth - qWidth are unsigned; if the query
    // is larger than the reference these wrap around and the guard passes — confirm
    // callers guarantee query <= reference in both dimensions.
    if ((ry <= rHeight - qHeight) && (rx <= rWidth - qWidth))
    {
        r = &ref[ry * rWidth + rx]; //Pointer to starting ROW position in the reference image.
        q = &query[0]; //Pointer to starting ROW position in the query image.
        // Early-out scan: stops at the first mismatching pixel.
        for (qy = 0; equality && (qy < qHeight); qy++) //Each thread will process the whole query image
        {
            for (qx = 0; equality && (qx < qWidth); qx++) //Each thread will process the whole query image
            {
                equality = (r[qx] == q[qx]);
            }
            r += rWidth; //Move one row down in the reference image.
            q += qWidth; //Move one row down in the query image.
        }
        if (equality)
            sData[tid].fitness = 0.0f;  // exact match: best possible fitness
    }
    __syncthreads(); //The parallel reduction will start here, all WARPS have to finish previous instructions.
    // Tree reduction in shared memory down to 64 candidates (min by fitness).
    if constexpr (TPB_2D >= 128)
    {
#pragma unroll
        for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
        {
            if (tid < s)
            {
                if (sData[tid + s].fitness < sData[tid].fitness)
                {
                    sData[tid] = sData[tid + s];
                }
            }
            __syncthreads();
        }
    }
    // Warp-synchronous tail: relies on lockstep execution within one warp.
    // NOTE(review): on Volta+ (independent thread scheduling) this pattern needs
    // __syncwarp() between steps to be safe — confirm the targeted architecture.
    if (tid < 32) //Only one warp is active here, no sync is needed.
    {
        volatile ResultType* vsData = sData;
        if constexpr (TPB_2D >= 64) vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
        if constexpr (TPB_2D >= 32) vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
        if constexpr (TPB_2D >= 16) vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
        if constexpr (TPB_2D >= 8) vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
        if constexpr (TPB_2D >= 4) vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
        if constexpr (TPB_2D >= 2) vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
    }
    if (tid == 0) //0-th thread stores the final BEST result for a given block
    {
        blockResults[blockIdx.y * gridDim.x + blockIdx.x] = sData[0];
        //printf("%d %f\n", blockIdx.y * gridDim.x + blockIdx.x, sData[0].fitness);
    }
}
/// <summary>
/// Every THREAD of 2D block [TPB_1DxTPB_1D] computes one final fitness value for a single pixel of the reference image. One corner of the query image is "virtually" attached to this pixel position.
/// A SINGLE THREAD compares the query image with the given region of the reference image.
/// </summary>
/// <param name="ref">Reference data stored in a TextureObject.</param>
/// <param name="rWidth">Width of the reference data.</param>
/// <param name="rHeight">Height of the reference data.</param>
/// <param name="query">Query data stored in a TextureObject.</param>
/// <param name="qWidth">Width of the query data.</param>
/// <param name="qHeight">Height of the query data.</param>
/// <param name="blockResults">Each block of the grid will store exactly one value into the global memory. Them, this data is reduced in another kernel to find the best value/solution.</param>
/// <returns></returns>
__global__ void findTex(const cudaTextureObject_t ref, const uint32_t rWidth, const uint32_t rHeight,
    const cudaTextureObject_t query, const uint32_t qWidth, const uint32_t qHeight,
    ResultType* __restrict__ blockResults)
{
    // Flat index of this thread inside its 2D block; also its slot in shared memory.
    uint32_t tid = threadIdx.x + threadIdx.y * blockDim.x;
    // (rx, ry): reference-image pixel this thread anchors the query's corner at.
    // Both are advanced during the scan and rx is reset per row below.
    uint32_t rx = blockIdx.x * blockDim.x + threadIdx.x;
    uint32_t ry = blockIdx.y * blockDim.y + threadIdx.y;
    uint32_t qx, qy;
    uint32_t r, q;  // texel values fetched from the two textures
    __shared__ ResultType sData[TPB_2D];
    // Initialize to "worst" fitness so threads outside the valid region lose the reduction.
    sData[tid] = { FLT_MAX, ry * rWidth + rx };
    //EUCLIDEAN DISTANCE
    /*
    if ((ry <= rHeight - qHeight) && (rx <= rWidth - qWidth))
    {
    sData[tid].fitness = 0.0f;
    for (qy = 0; qy < qHeight; qy++, ry++) //Each thread will process the whole query image
    {
    for (qx = 0; qx < qWidth; qx++, rx++) //Each thread will process the whole query image
    {
    r = tex2D<uint32_t>(ref, rx, ry);
    q = tex2D<uint32_t>(query, qx, qy);
    sData[tid].fitness += (r-q) * (r-q); //Cummulate the value
    }
    rx = blockIdx.x * blockDim.x + threadIdx.x;
    }
    }
    */
    //EQUALITY CHECK ONLY
    bool equality = true;
    // NOTE(review): unsigned subtraction wraps if query is larger than reference;
    // confirm callers guarantee query <= reference in both dimensions.
    if ((ry <= rHeight - qHeight) && (rx <= rWidth - qWidth))
    {
        // Early-out scan through texture fetches; stops at the first mismatch.
        for (qy = 0; equality && (qy < qHeight); qy++, ry++) //Each thread will process the whole query image
        {
            for (qx = 0; equality && (qx < qWidth); qx++, rx++) //Each thread will process the whole query image
            {
                r = tex2D<uint32_t>(ref, rx, ry);
                q = tex2D<uint32_t>(query, qx, qy);
                equality = (r == q);
            }
            rx = blockIdx.x * blockDim.x + threadIdx.x;  // rewind column for the next row
        }
        if (equality)
            sData[tid].fitness = 0.0f;  // exact match: best possible fitness
    }
    __syncthreads(); //The parallel reduction will start here, all WARPS have to finish previous instructions.
    // Tree reduction in shared memory down to 64 candidates (min by fitness).
    if constexpr (TPB_2D >= 128)
    {
#pragma unroll
        for (uint32_t s = (TPB_2D >> 1); s > 32; s >>= 1) //This can be UNROLLED when the TPB is fixed for the application
        {
            if (tid < s)
            {
                if (sData[tid + s].fitness < sData[tid].fitness)
                {
                    sData[tid] = sData[tid + s];
                }
            }
            __syncthreads();
        }
    }
    // Warp-synchronous tail: relies on lockstep execution within one warp.
    // NOTE(review): on Volta+ this pattern needs __syncwarp() between steps — confirm arch.
    if (tid < 32) //Only one warp is active here, no sync is needed.
    {
        volatile ResultType* vsData = sData;
        if constexpr (TPB_2D >= 64) vsData[tid] = (vsData[tid].fitness < vsData[tid + 32].fitness) ? vsData[tid] : vsData[tid + 32];
        if constexpr (TPB_2D >= 32) vsData[tid] = (vsData[tid].fitness < vsData[tid + 16].fitness) ? vsData[tid] : vsData[tid + 16];
        if constexpr (TPB_2D >= 16) vsData[tid] = (vsData[tid].fitness < vsData[tid + 8].fitness) ? vsData[tid] : vsData[tid + 8];
        if constexpr (TPB_2D >= 8) vsData[tid] = (vsData[tid].fitness < vsData[tid + 4].fitness) ? vsData[tid] : vsData[tid + 4];
        if constexpr (TPB_2D >= 4) vsData[tid] = (vsData[tid].fitness < vsData[tid + 2].fitness) ? vsData[tid] : vsData[tid + 2];
        if constexpr (TPB_2D >= 2) vsData[tid] = (vsData[tid].fitness < vsData[tid + 1].fitness) ? vsData[tid] : vsData[tid + 1];
    }
    if (tid == 0) //0-th thread stores the final BEST result for a given block
    {
        blockResults[blockIdx.y * gridDim.x + blockIdx.x] = sData[0];
        //printf("%d %f\n", blockIdx.y * gridDim.x + blockIdx.x, sData[0].fitness);
    }
}
#pragma endregion KERNELS
#pragma region TEXTURES
/// Creates a 2D texture object backed by a cudaArray and fills it from the
/// pitched device image in `ii`.
/// Uses point sampling, clamped addressing, element-type reads, and
/// unnormalized coordinates. Caller owns (and must destroy/free) the returned
/// texture object and array.
__host__ TextureInfo createTextureObjectFrom2DArray(const ImageInfo<DT>& ii)
{
    TextureInfo ti;
    // Size info
    ti.size = { ii.width, ii.height, 1 };
    // Texture data settings: one 32-bit unsigned channel per texel.
    ti.texChannelDesc = cudaCreateChannelDesc<uint32_t>(); // cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned);
    checkCudaErrors(cudaMallocArray(&ti.texArrayData, &ti.texChannelDesc, ii.width, ii.height));
    // Pitch-aware row copy. The previous cudaMemcpyToArray call (deprecated since
    // CUDA 10.1) copied ii.pitch * ii.height bytes flat, which interleaves the
    // pitch padding into the array rows whenever pitch != width * sizeof(DT).
    checkCudaErrors(cudaMemcpy2DToArray(ti.texArrayData, 0, 0, ii.dPtr, ii.pitch,
                                        ii.width * sizeof(DT), ii.height,
                                        cudaMemcpyDeviceToDevice));
    // Specify texture resource
    ti.resDesc.resType = cudaResourceTypeArray;
    ti.resDesc.res.array.array = ti.texArrayData;
    // Specify texture object parameters
    ti.texDesc.addressMode[0] = cudaAddressModeClamp;   // out-of-range x clamps to edge
    ti.texDesc.addressMode[1] = cudaAddressModeClamp;   // out-of-range y clamps to edge
    ti.texDesc.filterMode = cudaFilterModePoint;        // no interpolation: exact texels
    ti.texDesc.readMode = cudaReadModeElementType;      // fetch raw uint32_t values
    ti.texDesc.normalizedCoords = false;                // integer pixel coordinates
    // Create texture object
    checkCudaErrors(cudaCreateTextureObject(&ti.texObj, &ti.resDesc, &ti.texDesc, NULL));
    return ti;
}
#pragma endregion TEXTURES
#pragma region PROCESSING
/// Runs the full template-matching pipeline on the GPU:
/// 1) computes one best candidate per block with find/findTex,
/// 2) reduces all block candidates with getBest,
/// 3) prints the winning fitness and position.
/// USE_TEXTURES selects the texture-object path instead of plain global memory.
template<bool USE_TEXTURES=false>
void processData(const ImageInfo<DT>& ref, const ImageInfo<DT>& query)
{
    float gpuTime = 0.0;
    // There are (ref - query + 1) valid anchor positions per axis; cover them
    // with [TPB_1D x TPB_1D] blocks, rounding up to whole blocks.
    dim3 block{ TPB_1D , TPB_1D ,1 };
    dim3 grid{ ((ref.width - query.width + 1) + TPB_1D - 1) / TPB_1D,
               ((ref.height - query.height + 1) + TPB_1D - 1) / TPB_1D,
               1 };
    ResultType* dBlockResults = nullptr;
    auto dBlockResultsLength = grid.x * grid.y;  // one candidate result per block
    checkCudaErrors(cudaMalloc(&dBlockResults, dBlockResultsLength * sizeof(ResultType)));
    //1. Try to compute all possible matches.
    if constexpr (USE_TEXTURES == false) {
        gpuTime = GPUTIME(1, find << <grid, block >> > (ref.dPtr, ref.width, ref.height, query.dPtr, query.width, query.height, dBlockResults));
        printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", "find", gpuTime);
    } else {
        TextureInfo tiRef = createTextureObjectFrom2DArray(ref);
        TextureInfo tiQuery = createTextureObjectFrom2DArray(query);
        gpuTime = GPUTIME(1, findTex <<<grid, block >>> (tiRef.texObj, tiRef.size.width, tiRef.size.height, tiQuery.texObj, tiQuery.size.width, tiQuery.size.height, dBlockResults));
        printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", "findTex", gpuTime);
        // The texture objects and their backing arrays are only needed for the launch.
        if (tiRef.texObj) checkCudaErrors(cudaDestroyTextureObject(tiRef.texObj));
        if (tiRef.texArrayData) checkCudaErrors(cudaFreeArray(tiRef.texArrayData));
        if (tiQuery.texObj) checkCudaErrors(cudaDestroyTextureObject(tiQuery.texObj));
        if (tiQuery.texArrayData) checkCudaErrors(cudaFreeArray(tiQuery.texArrayData));
    }
    //2. Search for the best match with a single-block reduction.
    block = { TPB_REDUCTION ,1,1 };
    grid = { 1, 1, 1 };
    gpuTime = GPUTIME(1, getBest << <grid, block >> > (dBlockResults, dBlockResultsLength));
    printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", "getBest", gpuTime);
    ResultType bestBlockResult;
    checkCudaErrors(cudaMemcpy(&bestBlockResult, dBlockResults, sizeof(ResultType), cudaMemcpyKind::cudaMemcpyDeviceToHost));
    printf("Best fitness value: %f\n", bestBlockResult.fitness);
    printf("Winner index: %u\n", bestBlockResult.idx);
    printf("Winner's LEFT-TOP CORNER X: %u\n", bestBlockResult.idx % ref.width);
    // NOTE(review): Y is reported in a bottom-up (flipped-row) coordinate system;
    // verify this matches how prepareData loads the bitmap rows.
    printf("Winner's LEFT-TOP CORNER Y: %u\n", ref.height - (bestBlockResult.idx / ref.width) - query.height);
    // Consistent error checking: the rest of this function checks every CUDA call,
    // so check the free as well.
    if (dBlockResults) checkCudaErrors(cudaFree(dBlockResults));
}
#pragma endregion PROCESSING
/// Entry point: loads the reference and query images, runs GPU template
/// matching (global-memory path), and releases all resources.
int main(int argc, char* argv[])
{
    initializeCUDA(deviceProp);
    FreeImage_Initialise();
    ImageInfo<DT> ref;
    ImageInfo<DT> query;
    prepareData<false>("../../images/reference.tif", ref);
    prepareData<false>("../../images/query.tif", query);
    processData<false>(ref, query);
    FreeImage_DeInitialise();
    // Check the frees like every other CUDA call in this program.
    if (ref.dPtr) checkCudaErrors(cudaFree(ref.dPtr));
    if (query.dPtr) checkCudaErrors(cudaFree(query.dPtr));
    return 0;  // explicit success status
}
|
c97ef8f08160df2201f2b1bb58c596d0f54e131f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* File: MBSet.cu
*
* Created on June 24, 2012
*
* Purpose: This program displays Mandelbrot set using the GPU via CUDA and
* OpenGL immediate mode.
*
*/
#include <iostream>
#include <stack>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <math.h>
#include "Complex.cu"
#include <GL/freeglut.h>
#include <GL/glut.h>
#include <math.h>
#include <GL/glext.h>
#include <GL/gl.h>
#include <GL/glu.h>
// Size of window in pixels, both width and height
#define WINDOW_DIM 512
#define NUM_THREADS 32
using namespace std;
// Initial screen coordinates, both host and device.
// x goes from -2 to 1 and y oges from -1.2 to 1.8
Complex minC(-2.0, -1.2);
Complex maxC(1.0, 1.8);
Complex* dev_minC;
Complex* dev_maxC;
int* dev_MArr;
int MArr[512*512];
const int maxIt = 2000; // Msximum Iterations
// Map a pixel column x in [0, 511] onto the real axis of the viewed region.
double coordinateReal(double x)
{
    const double span = maxC.r - minC.r;  // width of the complex-plane window
    return minC.r + x * span / (511);     // 511 == last pixel index
}
// Map a pixel row y in [0, 511] onto the imaginary axis; row 0 is the top
// of the window (maxC.i), so the value decreases as y grows.
double coordinateImaginary(double y)
{
    const double span = maxC.i - minC.i;  // height of the complex-plane window
    return maxC.i - y * span / (511);     // 511 == last pixel index
}
// Define the RGB Class
// A plain RGB color triple (components expected in [0, 1]).
class RGB
{
public:
    // Default color is black.
    RGB() : r(0), g(0), b(0) {}

    // Build a color from explicit component values.
    RGB(double r0, double g0, double b0) : r(r0), g(g0), b(b0) {}

public:
    double r;  // red component
    double g;  // green component
    double b;  // blue component
};
RGB* colors = 0; // Array of color values
// Build the iteration-count -> color lookup table (maxIt + 1 entries).
// Indices below 5 (near-instant escapes) are white, index maxIt (points that
// never escaped, i.e. the set interior) is black, everything else is a random
// color from drand48().
void InitializeColors()
{
    delete[] colors;  // guard: avoid leaking a previous table if called twice
    colors = new RGB[maxIt + 1];
    for (int i = 0; i < maxIt; ++i)
    {
        if (i < 5)
        { // Try this.. just white for small it counts
            colors[i] = RGB(1, 1, 1);
        }
        else
        {
            colors[i] = RGB(drand48(), drand48(), drand48());
        }
    }
    colors[maxIt] = RGB(); // black
}
// One thread per pixel of the 512x512 window: iterate z = z^2 + c until
// |z|^2 > 4 (escape) or maxIt iterations, then store the iteration count in
// dev_MArr[pixel]. dev_minC/dev_maxC define the viewed complex-plane region.
__global__ void calculateMandlebrot(Complex* dev_minC, Complex* dev_maxC, int* dev_MArr){
    int id = threadIdx.x + blockIdx.x * blockDim.x; // flat pixel index
    if (id >= 512 * 512) return;                    // guard: grid may overshoot the image
    int x = id % 512;                               // pixel column
    int y = id / 512;                               // pixel row
    // Map the pixel to a complex-plane point c (511 == last pixel index).
    double coordImaginary = (dev_maxC->i) - y * ((dev_maxC->i) - (dev_minC->i)) / (511);
    double coordReal = (dev_minC->r) + x * ((dev_maxC->r) - (dev_minC->r)) / (511);
    Complex cCoord = Complex(coordReal, coordImaginary);
    Complex zCoord = Complex(coordReal, coordImaginary);
    int n;
    for (n = 0; n < maxIt; ++n) {
        if (zCoord.magnitude2() > 4.0)
            break;                                  // escaped: |z|^2 > 4 <=> |z| > 2
        zCoord = (zCoord * zCoord) + cCoord;
    }
    dev_MArr[id] = n;                               // iteration count selects the pixel color
}
void displayMandlebrot(){
//cout << "hello from displayMandlebrot" << endl;
glBegin(GL_POINTS);
for(int x =0; x< WINDOW_DIM; x++){
for(int y=0; y<WINDOW_DIM; y++){
//draw it based on iteration
int pix = y*512 + x;
int iterationCount = MArr[pix];
glColor3f(colors[iterationCount].r,colors[iterationCount].g,colors[iterationCount].b);
glVertex2f(x,y);
}
}
glEnd();
}
// GLUT display callback: clear color+depth buffers, redraw the set, swap.
void display(void){
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);  // both buffers in one call
    displayMandlebrot();
    glutSwapBuffers();
}
// One-time GL state setup: flat shading and a full-window viewport.
void init(){
    glShadeModel(GL_FLAT);
    glViewport(0, 0, WINDOW_DIM, WINDOW_DIM);
}
// Report a failed HIP runtime call with context (previously every call's
// return status was silently ignored).
static void checkHip(hipError_t err, const char* what){
    if (err != hipSuccess)
        fprintf(stderr, "HIP error (%s): %s\n", what, hipGetErrorString(err));
}
// Allocate device buffers, run the Mandelbrot kernel over the whole window,
// copy the per-pixel iteration counts back into the host array MArr, and free
// all device memory.
void getReadyForCalcMandlebrot(){
    // Allocate space for device copies.
    checkHip(hipMalloc((void**)&dev_MArr, WINDOW_DIM * WINDOW_DIM * sizeof(int)), "malloc MArr");
    checkHip(hipMalloc((void**)&dev_minC, sizeof(Complex)), "malloc minC");
    checkHip(hipMalloc((void**)&dev_maxC, sizeof(Complex)), "malloc maxC");
    // Copy inputs to device.
    checkHip(hipMemcpy(dev_minC, &minC, sizeof(Complex), hipMemcpyHostToDevice), "copy minC");
    checkHip(hipMemcpy(dev_maxC, &maxC, sizeof(Complex), hipMemcpyHostToDevice), "copy maxC");
    // Launch calculateMandlebrot() kernel: one thread per pixel.
    hipLaunchKernelGGL(( calculateMandlebrot), dim3(WINDOW_DIM * WINDOW_DIM / NUM_THREADS), dim3(NUM_THREADS) , 0, 0, dev_minC, dev_maxC, dev_MArr);
    checkHip(hipGetLastError(), "kernel launch");  // catches invalid launch configs
    // Blocking copy: also synchronizes with the kernel before reading results.
    checkHip(hipMemcpy(MArr, dev_MArr, WINDOW_DIM * WINDOW_DIM * sizeof(int), hipMemcpyDeviceToHost), "copy MArr");
    // Free device memory.
    checkHip(hipFree(dev_minC), "free minC");
    checkHip(hipFree(dev_maxC), "free maxC");
    checkHip(hipFree(dev_MArr), "free MArr");
}
// Program entry: compute the fractal on the GPU first, then hand control to GLUT.
int main(int argc, char** argv)
{
    getReadyForCalcMandlebrot();  // fills MArr with per-pixel iteration counts
    // --- OpenGL / GLUT window setup ---
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(WINDOW_DIM, WINDOW_DIM);
    glutInitWindowPosition(100, 100);
    glutCreateWindow("Mandlebrot");
    init();
    // Orthographic projection mapping pixel coordinates to the window.
    glViewport(0, 0, (GLsizei)512, (GLsizei)512);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, WINDOW_DIM, 0, WINDOW_DIM, -WINDOW_DIM, WINDOW_DIM);
    // Register callbacks (idle redraw keeps the window refreshed).
    glutDisplayFunc(display);
    glutIdleFunc(display);
    // Build the iteration-count -> color table before the first redraw.
    InitializeColors();
    glutMainLoop();  // runs the event loop; classic GLUT never returns
    return 0;
}
| c97ef8f08160df2201f2b1bb58c596d0f54e131f.cu | /*
* File: MBSet.cu
*
* Created on June 24, 2012
*
* Purpose: This program displays Mandelbrot set using the GPU via CUDA and
* OpenGL immediate mode.
*
*/
#include <iostream>
#include <stack>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <math.h>
#include "Complex.cu"
#include <GL/freeglut.h>
#include <GL/glut.h>
#include <math.h>
#include <GL/glext.h>
#include <GL/gl.h>
#include <GL/glu.h>
// Size of window in pixels, both width and height
#define WINDOW_DIM 512
#define NUM_THREADS 32
using namespace std;
// Initial screen coordinates, both host and device.
// x goes from -2 to 1 and y oges from -1.2 to 1.8
Complex minC(-2.0, -1.2);
Complex maxC(1.0, 1.8);
Complex* dev_minC;
Complex* dev_maxC;
int* dev_MArr;
int MArr[512*512];
const int maxIt = 2000; // Msximum Iterations
// Map a pixel column x in [0, 511] onto the real axis of the viewed region.
double coordinateReal(double x)
{
    const double span = maxC.r - minC.r;  // width of the complex-plane window
    return minC.r + x * span / (511);     // 511 == last pixel index
}
// Map a pixel row y in [0, 511] onto the imaginary axis; row 0 is the top
// of the window (maxC.i), so the value decreases as y grows.
double coordinateImaginary(double y)
{
    const double span = maxC.i - minC.i;  // height of the complex-plane window
    return maxC.i - y * span / (511);     // 511 == last pixel index
}
// Define the RGB Class
// A plain RGB color triple (components expected in [0, 1]).
class RGB
{
public:
    // Default color is black.
    RGB() : r(0), g(0), b(0) {}

    // Build a color from explicit component values.
    RGB(double r0, double g0, double b0) : r(r0), g(g0), b(b0) {}

public:
    double r;  // red component
    double g;  // green component
    double b;  // blue component
};
RGB* colors = 0; // Array of color values
// Build the iteration-count -> color lookup table (maxIt + 1 entries).
// Indices below 5 (near-instant escapes) are white, index maxIt (points that
// never escaped, i.e. the set interior) is black, everything else is a random
// color from drand48().
void InitializeColors()
{
    delete[] colors;  // guard: avoid leaking a previous table if called twice
    colors = new RGB[maxIt + 1];
    for (int i = 0; i < maxIt; ++i)
    {
        if (i < 5)
        { // Try this.. just white for small it counts
            colors[i] = RGB(1, 1, 1);
        }
        else
        {
            colors[i] = RGB(drand48(), drand48(), drand48());
        }
    }
    colors[maxIt] = RGB(); // black
}
// One thread per pixel of the 512x512 window: iterate z = z^2 + c until
// |z|^2 > 4 (escape) or maxIt iterations, then store the iteration count in
// dev_MArr[pixel]. dev_minC/dev_maxC define the viewed complex-plane region.
__global__ void calculateMandlebrot(Complex* dev_minC, Complex* dev_maxC, int* dev_MArr){
    int id = threadIdx.x + blockIdx.x * blockDim.x; // flat pixel index
    if (id >= 512 * 512) return;                    // guard: grid may overshoot the image
    int x = id % 512;                               // pixel column
    int y = id / 512;                               // pixel row
    // Map the pixel to a complex-plane point c (511 == last pixel index).
    double coordImaginary = (dev_maxC->i) - y * ((dev_maxC->i) - (dev_minC->i)) / (511);
    double coordReal = (dev_minC->r) + x * ((dev_maxC->r) - (dev_minC->r)) / (511);
    Complex cCoord = Complex(coordReal, coordImaginary);
    Complex zCoord = Complex(coordReal, coordImaginary);
    int n;
    for (n = 0; n < maxIt; ++n) {
        if (zCoord.magnitude2() > 4.0)
            break;                                  // escaped: |z|^2 > 4 <=> |z| > 2
        zCoord = (zCoord * zCoord) + cCoord;
    }
    dev_MArr[id] = n;                               // iteration count selects the pixel color
}
void displayMandlebrot(){
//cout << "hello from displayMandlebrot" << endl;
glBegin(GL_POINTS);
for(int x =0; x< WINDOW_DIM; x++){
for(int y=0; y<WINDOW_DIM; y++){
//draw it based on iteration
int pix = y*512 + x;
int iterationCount = MArr[pix];
glColor3f(colors[iterationCount].r,colors[iterationCount].g,colors[iterationCount].b);
glVertex2f(x,y);
}
}
glEnd();
}
// GLUT display callback: clear color+depth buffers, redraw the set, swap.
void display(void){
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);  // both buffers in one call
    displayMandlebrot();
    glutSwapBuffers();
}
// One-time GL state setup: flat shading and a full-window viewport.
void init(){
    glShadeModel(GL_FLAT);
    glViewport(0, 0, WINDOW_DIM, WINDOW_DIM);
}
// Report a failed CUDA runtime call with context (previously every call's
// return status was silently ignored).
static void checkCuda(cudaError_t err, const char* what){
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
}
// Allocate device buffers, run the Mandelbrot kernel over the whole window,
// copy the per-pixel iteration counts back into the host array MArr, and free
// all device memory.
void getReadyForCalcMandlebrot(){
    // Allocate space for device copies.
    checkCuda(cudaMalloc((void**)&dev_MArr, WINDOW_DIM * WINDOW_DIM * sizeof(int)), "malloc MArr");
    checkCuda(cudaMalloc((void**)&dev_minC, sizeof(Complex)), "malloc minC");
    checkCuda(cudaMalloc((void**)&dev_maxC, sizeof(Complex)), "malloc maxC");
    // Copy inputs to device.
    checkCuda(cudaMemcpy(dev_minC, &minC, sizeof(Complex), cudaMemcpyHostToDevice), "copy minC");
    checkCuda(cudaMemcpy(dev_maxC, &maxC, sizeof(Complex), cudaMemcpyHostToDevice), "copy maxC");
    // Launch calculateMandlebrot() kernel: one thread per pixel.
    calculateMandlebrot<<<WINDOW_DIM * WINDOW_DIM / NUM_THREADS, NUM_THREADS >>>(dev_minC, dev_maxC, dev_MArr);
    checkCuda(cudaGetLastError(), "kernel launch");  // catches invalid launch configs
    // Blocking copy: also synchronizes with the kernel before reading results.
    checkCuda(cudaMemcpy(MArr, dev_MArr, WINDOW_DIM * WINDOW_DIM * sizeof(int), cudaMemcpyDeviceToHost), "copy MArr");
    // Free device memory.
    checkCuda(cudaFree(dev_minC), "free minC");
    checkCuda(cudaFree(dev_maxC), "free maxC");
    checkCuda(cudaFree(dev_MArr), "free MArr");
}
// Program entry: compute the fractal on the GPU first, then hand control to GLUT.
int main(int argc, char** argv)
{
    getReadyForCalcMandlebrot();  // fills MArr with per-pixel iteration counts
    // --- OpenGL / GLUT window setup ---
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(WINDOW_DIM, WINDOW_DIM);
    glutInitWindowPosition(100, 100);
    glutCreateWindow("Mandlebrot");
    init();
    // Orthographic projection mapping pixel coordinates to the window.
    glViewport(0, 0, (GLsizei)512, (GLsizei)512);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0, WINDOW_DIM, 0, WINDOW_DIM, -WINDOW_DIM, WINDOW_DIM);
    // Register callbacks (idle redraw keeps the window refreshed).
    glutDisplayFunc(display);
    glutIdleFunc(display);
    // Build the iteration-count -> color table before the first redraw.
    InitializeColors();
    glutMainLoop();  // runs the event loop; classic GLUT never returns
    return 0;
}
|
adc5dacfc83e9a2c0e35890aab2c089808c3f2f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
// Elementwise lower-triangular mask over a batch of contiguous
// (num_rows x num_cols) matrices: y[k] = x[k] on/below the `diagonal`-shifted
// main diagonal, `fill` strictly above it.
template<typename T>
__global__ void TrilGpu(const int64_t elem_cnt, const int64_t num_rows, const int64_t num_cols,
                        const int64_t diagonal, const T* x, const T fill, T* y) {
  const int64_t matrix_size = num_rows * num_cols;
  // CUDA_1D_KERNEL_LOOP_T presumably expands to a grid-stride loop over
  // [0, elem_cnt) — confirm against the project's kernel utility header.
  CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
    const int64_t offset_in_matrix = k % matrix_size;   // position inside this batch matrix
    const int64_t i = offset_in_matrix / num_cols;      // row index
    const int64_t j = offset_in_matrix - num_cols * i;  // column index
    y[k] = j > i + diagonal ? fill : x[k];              // strictly above diagonal -> fill
  }
}
// Warp-per-row variant of TrilGpu: each warp walks rows of the batched
// matrices (total_rows = batch * num_rows), and the 32 lanes stride across
// the columns of a row so adjacent lanes touch adjacent elements (coalesced).
template<typename T>
__global__ void TrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
                                      const int64_t num_cols, const int64_t diagonal, const T* x,
                                      const T fill, T* y) {
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;  // global warp index
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;                               // lane within the warp
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;                  // warps in the whole grid
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;  // row index inside the current matrix
    for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * num_cols + col;
      y[idx] = col > row + diagonal ? fill : x[idx];  // above diagonal -> fill
    }
  }
}
// half specialization: processes columns two at a time through half2 vector
// loads/stores. Requires num_cols to be even — presumably guaranteed by the
// caller, which dispatches here only when num_cols % (kCudaWarpSize * 2) == 0;
// confirm at the call site.
template<>
__global__ void TrilWarpProcessRowGpu<half>(const int64_t total_rows, const int64_t num_rows,
                                            const int64_t num_cols, const int64_t diagonal,
                                            const half* x, const half fill, half* y) {
  const int64_t h2_num_cols = num_cols / 2;  // number of half2 pairs per row
  const auto* x_h2 = reinterpret_cast<const half2*>(x);
  auto* y_h2 = reinterpret_cast<half2*>(y);
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * h2_num_cols + col;
      const half2 x_val = x_h2[idx];
      half2 y_val;
      // Actual element columns are 2*col and 2*col+1; mask each lane of the pair.
      y_val.x = (2 * col) > row + diagonal ? fill : x_val.x;
      y_val.y = (2 * col + 1) > row + diagonal ? fill : x_val.y;
      y_h2[idx] = y_val;
    }
  }
}
// Like TrilGpu, but fuses a scalar multiply: kept elements are scale * x[k],
// elements strictly above the shifted diagonal become `fill`.
template<typename T>
__global__ void FusedScaleTrilGpu(const int64_t elem_cnt, const int64_t num_rows,
                                  const int64_t num_cols, const int64_t diagonal, const T scale,
                                  const T* x, const T fill, T* y) {
  const int64_t matrix_size = num_rows * num_cols;
  // CUDA_1D_KERNEL_LOOP_T presumably expands to a grid-stride loop over
  // [0, elem_cnt) — confirm against the project's kernel utility header.
  CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
    const int64_t offset_in_matrix = k % matrix_size;   // position inside this batch matrix
    const int64_t i = offset_in_matrix / num_cols;      // row index
    const int64_t j = offset_in_matrix - num_cols * i;  // column index
    y[k] = j > i + diagonal ? fill : (scale * x[k]);    // keep -> scaled value
  }
}
// Warp-per-row variant of FusedScaleTrilGpu: each warp owns one row at a time,
// lanes stride the columns (coalesced); kept elements are scaled by `scale`.
template<typename T>
__global__ void FusedScaleTrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
                                                const int64_t num_cols, const int64_t diagonal,
                                                const T scale, const T* x, const T fill, T* y) {
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;  // global warp index
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;                               // lane within the warp
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;                  // warps in the grid
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;  // row index inside the current matrix
    for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * num_cols + col;
      y[idx] = col > row + diagonal ? fill : (scale * x[idx]);
    }
  }
}
// half specialization: broadcasts `scale` into a half2 and processes columns
// two at a time via half2 vector math (__hmul2). Requires num_cols to be even —
// presumably guaranteed by the caller's num_cols % (kCudaWarpSize * 2) == 0
// dispatch; confirm at the call site.
template<>
__global__ void FusedScaleTrilWarpProcessRowGpu<half>(const int64_t total_rows,
                                                      const int64_t num_rows,
                                                      const int64_t num_cols,
                                                      const int64_t diagonal, const half scale,
                                                      const half* x, const half fill, half* y) {
  const int64_t h2_num_cols = num_cols / 2;  // number of half2 pairs per row
  const auto* x_h2 = reinterpret_cast<const half2*>(x);
  auto* y_h2 = reinterpret_cast<half2*>(y);
  const half2 h2_scale = __half2half2(scale);  // (scale, scale) for vectorized multiply
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * h2_num_cols + col;
      const half2 scaled_x = __hmul2(h2_scale, x_h2[idx]);
      half2 y_val;
      // Actual element columns are 2*col and 2*col+1; mask each lane of the pair.
      y_val.x = (2 * col) > row + diagonal ? fill : scaled_x.x;
      y_val.y = (2 * col + 1) > row + diagonal ? fill : scaled_x.y;
      y_h2[idx] = y_val;
    }
  }
}
// Select the fill/scale attribute value: the floating attribute when
// is_floating_val is set, otherwise the integer attribute, converted to the
// kernel's element type T.
template<typename T>
T GetAttrVal(bool is_floating_val, double floating_value, int64_t integer_value) {
  if (is_floating_val) { return static_cast<T>(floating_value); }
  return static_cast<T>(integer_value);
}
// half specialization: converts through float before packing to half.
// NOTE(review): the integer path goes int64 -> float -> half, so values above
// half/float precision are rounded — presumably acceptable for fill/scale
// attributes; confirm if exact large integer fills are ever needed.
template<>
half GetAttrVal<half>(bool is_floating_val, double floating_value, int64_t integer_value) {
  return is_floating_val ? __float2half(floating_value) : __float2half(integer_value);
}
} // namespace
// GPU user-op kernel for "tril": masks the input's last two axes to its lower
// triangle (shifted by the "diagonal" attr), writing "fill" above the diagonal.
// Dispatches to the warp-per-row kernel when a full row is a multiple of two
// warps' worth of elements, otherwise to the plain elementwise kernel.
template<typename T>
class GpuTrilKernel final : public user_op::OpKernel {
 public:
  GpuTrilKernel() = default;
  ~GpuTrilKernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
    const auto shape = x->shape();
    const auto diagonal = ctx->Attr<int64_t>("diagonal");
    // The triangular mask applies to the two innermost axes.
    const int64_t num_rows = shape.At(shape.NumAxes() - 2);
    const int64_t num_cols = shape.At(shape.NumAxes() - 1);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
    // NOTE(review): elem_cnt is narrowed to int32_t here — tensors with more
    // than INT32_MAX elements would overflow; confirm upstream size limits.
    const int32_t elem_cnt = shape.elem_cnt();
    // Fill value comes from either the floating or integer attr.
    const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
                                 ctx->Attr<double>("floating_fill_value"),
                                 ctx->Attr<int64_t>("integer_fill_value"));
    if (num_cols % (kCudaWarpSize * 2) == 0) {
      // Rows are warp-friendly: one warp per row, lanes stride the columns.
      const int64_t total_rows = elem_cnt / num_cols;
      hipLaunchKernelGGL(( TrilWarpProcessRowGpu), dim3(BlocksNum4ThreadsNum(total_rows * kCudaWarpSize)),
          dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(),
          total_rows, num_rows, num_cols, diagonal, x->dptr<T>(), fill, y->mut_dptr<T>());
    } else {
      // Fallback: plain elementwise kernel over all elements.
      hipLaunchKernelGGL(( TrilGpu), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
          ctx->device_ctx()->cuda_stream(), elem_cnt, num_rows, num_cols, diagonal,
          x->dptr<T>(), fill, y->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the "tril" kernel for GPU for one element type and proposes
// reusing the input buffer in place as the output.
#define REGISTER_GPU_TRIL_KERNEL(dtype)                                                  \
  REGISTER_USER_KERNEL("tril")                                                           \
      .SetCreateFn<GpuTrilKernel<dtype>>()                                               \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                                \
                       & (user_op::HobDataType("out", 0) == GetDataType<dtype>::value))  \
      .SetInplaceProposalFn([](const user_op::InferContext&,                             \
                               user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
        OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));                \
        return Maybe<void>::Ok();                                                        \
      });
// Instantiate the registration for every supported element type.
REGISTER_GPU_TRIL_KERNEL(float)
REGISTER_GPU_TRIL_KERNEL(double)
REGISTER_GPU_TRIL_KERNEL(int8_t)
REGISTER_GPU_TRIL_KERNEL(int32_t)
REGISTER_GPU_TRIL_KERNEL(int64_t)
REGISTER_GPU_TRIL_KERNEL(half)
// GPU user-op kernel for "fused_scale_tril": like "tril" but kept (lower-
// triangle) elements are additionally multiplied by a scalar "scale" attr,
// fusing the scale into the masking pass. Same warp-per-row dispatch as tril.
template<typename T>
class GpuFusedScaleTrilKernel final : public user_op::OpKernel {
 public:
  GpuFusedScaleTrilKernel() = default;
  ~GpuFusedScaleTrilKernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
    const auto shape = x->shape();
    const auto diagonal = ctx->Attr<int64_t>("diagonal");
    // NOTE(review): here num_rows/num_cols are int32_t (the tril kernel above
    // uses int64_t) — confirm inner-axis sizes never exceed INT32_MAX.
    const int32_t num_rows = shape.At(shape.NumAxes() - 2);
    const int32_t num_cols = shape.At(shape.NumAxes() - 1);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
    const int32_t elem_cnt = shape.elem_cnt();
    // Fill and scale each come from either a floating or an integer attr.
    const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
                                 ctx->Attr<double>("floating_fill_value"),
                                 ctx->Attr<int64_t>("integer_fill_value"));
    const T scale = GetAttrVal<T>(ctx->Attr<bool>("is_floating_scale_value"),
                                  ctx->Attr<double>("floating_scale_value"),
                                  ctx->Attr<int64_t>("integer_scale_value"));
    if (num_cols % (kCudaWarpSize * 2) == 0) {
      // Rows are warp-friendly: one warp per row, lanes stride the columns.
      const int64_t total_rows = elem_cnt / num_cols;
      hipLaunchKernelGGL(( FusedScaleTrilWarpProcessRowGpu), dim3(BlocksNum4ThreadsNum(total_rows * kCudaWarpSize)),
          dim3(kCudaThreadsNumPerBlock), 0,
          ctx->device_ctx()->cuda_stream(),
          total_rows, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
    } else {
      // Fallback: plain elementwise kernel over all elements.
      hipLaunchKernelGGL(( FusedScaleTrilGpu), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
          ctx->device_ctx()->cuda_stream(),
          elem_cnt, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the "fused_scale_tril" kernel for GPU for one element type and
// proposes reusing the input buffer in place as the output.
#define REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(dtype)                                      \
  REGISTER_USER_KERNEL("fused_scale_tril")                                               \
      .SetCreateFn<GpuFusedScaleTrilKernel<dtype>>()                                     \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                                \
                       & (user_op::HobDataType("out", 0) == GetDataType<dtype>::value))  \
      .SetInplaceProposalFn([](const user_op::InferContext&,                             \
                               user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
        OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));                \
        return Maybe<void>::Ok();                                                        \
      });
// Instantiate the registration for every supported element type.
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(float)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(double)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int8_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int32_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int64_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(half)
} // namespace oneflow
| adc5dacfc83e9a2c0e35890aab2c089808c3f2f7.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/common/data_type.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/util/cuda_half_util.h"
namespace oneflow {
namespace {
// Elementwise lower-triangular mask over a batch of contiguous
// (num_rows x num_cols) matrices: y[k] = x[k] on/below the `diagonal`-shifted
// main diagonal, `fill` strictly above it.
template<typename T>
__global__ void TrilGpu(const int64_t elem_cnt, const int64_t num_rows, const int64_t num_cols,
                        const int64_t diagonal, const T* x, const T fill, T* y) {
  const int64_t matrix_size = num_rows * num_cols;
  // CUDA_1D_KERNEL_LOOP_T presumably expands to a grid-stride loop over
  // [0, elem_cnt) — confirm against the project's kernel utility header.
  CUDA_1D_KERNEL_LOOP_T(int64_t, k, elem_cnt) {
    const int64_t offset_in_matrix = k % matrix_size;   // position inside this batch matrix
    const int64_t i = offset_in_matrix / num_cols;      // row index
    const int64_t j = offset_in_matrix - num_cols * i;  // column index
    y[k] = j > i + diagonal ? fill : x[k];              // strictly above diagonal -> fill
  }
}
// Warp-per-row variant of TrilGpu: each warp walks rows of the batched
// matrices (total_rows = batch * num_rows), and the 32 lanes stride across
// the columns of a row so adjacent lanes touch adjacent elements (coalesced).
template<typename T>
__global__ void TrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
                                      const int64_t num_cols, const int64_t diagonal, const T* x,
                                      const T fill, T* y) {
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;  // global warp index
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;                               // lane within the warp
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;                  // warps in the whole grid
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;  // row index inside the current matrix
    for (int64_t col = lan_id; col < num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * num_cols + col;
      y[idx] = col > row + diagonal ? fill : x[idx];  // above diagonal -> fill
    }
  }
}
template<>
__global__ void TrilWarpProcessRowGpu<half>(const int64_t total_rows, const int64_t num_rows,
                                            const int64_t num_cols, const int64_t diagonal,
                                            const half* x, const half fill, half* y) {
  // half specialization: each lane handles two adjacent columns at once via
  // half2 vector loads/stores. Assumes num_cols is even; the host dispatch
  // only takes this path when num_cols % (kCudaWarpSize * 2) == 0, which
  // guarantees that. NOTE(review): the half2 reinterpret also assumes x/y are
  // 4-byte aligned — presumably guaranteed by the allocator; confirm.
  const int64_t h2_num_cols = num_cols / 2;
  const auto* x_h2 = reinterpret_cast<const half2*>(x);
  auto* y_h2 = reinterpret_cast<half2*>(y);
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * h2_num_cols + col;
      const half2 x_val = x_h2[idx];
      half2 y_val;
      // col indexes half2 pairs, so the scalar columns are 2*col and 2*col+1;
      // each half of the pair is masked against the triangular condition.
      y_val.x = (2 * col) > row + diagonal ? fill : x_val.x;
      y_val.y = (2 * col + 1) > row + diagonal ? fill : x_val.y;
      y_h2[idx] = y_val;
    }
  }
}
template<typename T>
__global__ void FusedScaleTrilGpu(const int64_t elem_cnt, const int64_t num_rows,
                                  const int64_t num_cols, const int64_t diagonal, const T scale,
                                  const T* x, const T fill, T* y) {
  // Same masking as TrilGpu, but kept elements are multiplied by `scale`
  // (fused to avoid a second pass over the data).
  const int64_t matrix_size = num_rows * num_cols;
  CUDA_1D_KERNEL_LOOP_T(int64_t, idx, elem_cnt) {
    const int64_t pos = idx % matrix_size;  // offset inside the current matrix
    const int64_t row = pos / num_cols;
    const int64_t col = pos % num_cols;
    y[idx] = (col > row + diagonal) ? fill : (scale * x[idx]);
  }
}
template<typename T>
__global__ void FusedScaleTrilWarpProcessRowGpu(const int64_t total_rows, const int64_t num_rows,
                                                const int64_t num_cols, const int64_t diagonal,
                                                const T scale, const T* x, const T fill, T* y) {
  // Warp-per-row variant of FusedScaleTrilGpu: lanes stride across the columns
  // of one row, scaling the kept entries and filling the masked ones.
  const int64_t warp = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lane = threadIdx.x % kCudaWarpSize;
  const int64_t warps_in_grid = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t r = warp; r < total_rows; r += warps_in_grid) {
    const int64_t row_in_matrix = r % num_rows;  // rows repeat per batch matrix
    for (int64_t c = lane; c < num_cols; c += kCudaWarpSize) {
      const int64_t offset = r * num_cols + c;
      y[offset] = (c > row_in_matrix + diagonal) ? fill : (scale * x[offset]);
    }
  }
}
template<>
__global__ void FusedScaleTrilWarpProcessRowGpu<half>(const int64_t total_rows,
                                                      const int64_t num_rows,
                                                      const int64_t num_cols,
                                                      const int64_t diagonal, const half scale,
                                                      const half* x, const half fill, half* y) {
  // half specialization: each lane processes two adjacent columns via half2,
  // multiplying by a broadcast half2 scale with __hmul2 before masking.
  // Assumes num_cols is even; host dispatch only takes this path when
  // num_cols % (kCudaWarpSize * 2) == 0. NOTE(review): the half2 reinterpret
  // also assumes x/y are 4-byte aligned — presumably guaranteed by the
  // allocator; confirm.
  const int64_t h2_num_cols = num_cols / 2;
  const auto* x_h2 = reinterpret_cast<const half2*>(x);
  auto* y_h2 = reinterpret_cast<half2*>(y);
  const half2 h2_scale = __half2half2(scale);
  const int64_t warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / kCudaWarpSize;
  const int64_t lan_id = threadIdx.x % kCudaWarpSize;
  const int64_t num_warp = blockDim.x * gridDim.x / kCudaWarpSize;
  for (int64_t i = warp_id; i < total_rows; i += num_warp) {
    const int64_t row = i % num_rows;
    for (int64_t col = lan_id; col < h2_num_cols; col += kCudaWarpSize) {
      const int64_t idx = i * h2_num_cols + col;
      // Scale both halves first; masked positions discard the scaled value.
      const half2 scaled_x = __hmul2(h2_scale, x_h2[idx]);
      half2 y_val;
      // col indexes half2 pairs, so the scalar columns are 2*col and 2*col+1.
      y_val.x = (2 * col) > row + diagonal ? fill : scaled_x.x;
      y_val.y = (2 * col + 1) > row + diagonal ? fill : scaled_x.y;
      y_h2[idx] = y_val;
    }
  }
}
template<typename T>
T GetAttrVal(bool is_floating_val, double floating_value, int64_t integer_value) {
  // Picks the floating- or integer-valued attribute and converts it to T.
  if (is_floating_val) { return static_cast<T>(floating_value); }
  return static_cast<T>(integer_value);
}
template<>
half GetAttrVal<half>(bool is_floating_val, double floating_value, int64_t integer_value) {
  // half specialization: __float2half takes a float, so both the double and
  // the int64_t source narrow to float first. Make that narrowing explicit
  // (the implicit conversions in the original compiled with warnings and hid
  // the intentional precision loss).
  return is_floating_val ? __float2half(static_cast<float>(floating_value))
                         : __float2half(static_cast<float>(integer_value));
}
} // namespace
template<typename T>
class GpuTrilKernel final : public user_op::OpKernel {
 public:
  GpuTrilKernel() = default;
  ~GpuTrilKernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  // Applies tril() to the trailing two axes of "in" and writes the result to
  // "out": entries above the `diagonal`-th diagonal become the fill value
  // (taken from the floating or integer attribute, whichever is flagged).
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
    const auto shape = x->shape();
    const auto diagonal = ctx->Attr<int64_t>("diagonal");
    const int64_t num_rows = shape.At(shape.NumAxes() - 2);
    const int64_t num_cols = shape.At(shape.NumAxes() - 1);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
    // int64_t (was int32_t): shape.elem_cnt() is 64-bit and the kernels take
    // int64_t, so storing it in int32_t silently truncated tensors with more
    // than 2^31 - 1 elements.
    const int64_t elem_cnt = shape.elem_cnt();
    const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
                                 ctx->Attr<double>("floating_fill_value"),
                                 ctx->Attr<int64_t>("integer_fill_value"));
    if (num_cols % (kCudaWarpSize * 2) == 0) {
      // Wide, warp-divisible rows: warp-per-row kernel (coalesced access and a
      // vectorized half path).
      const int64_t total_rows = elem_cnt / num_cols;
      TrilWarpProcessRowGpu<<<BlocksNum4ThreadsNum(total_rows * kCudaWarpSize),
                              kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(
          total_rows, num_rows, num_cols, diagonal, x->dptr<T>(), fill, y->mut_dptr<T>());
    } else {
      // Fallback: flat elementwise kernel.
      TrilGpu<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
                ctx->device_ctx()->cuda_stream()>>>(elem_cnt, num_rows, num_cols, diagonal,
                                                    x->dptr<T>(), fill, y->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the "tril" op kernel for one dtype on GPU devices, and proposes
// in-place reuse of the input buffer ("out" may alias "in") since the kernels
// read x[idx] before writing y[idx] at the same offset.
#define REGISTER_GPU_TRIL_KERNEL(dtype)                                                  \
  REGISTER_USER_KERNEL("tril")                                                           \
      .SetCreateFn<GpuTrilKernel<dtype>>()                                               \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                                \
                       & (user_op::HobDataType("out", 0) == GetDataType<dtype>::value))  \
      .SetInplaceProposalFn([](const user_op::InferContext&,                             \
                               user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
        OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));                \
        return Maybe<void>::Ok();                                                        \
      });

// One registration per supported element type.
REGISTER_GPU_TRIL_KERNEL(float)
REGISTER_GPU_TRIL_KERNEL(double)
REGISTER_GPU_TRIL_KERNEL(int8_t)
REGISTER_GPU_TRIL_KERNEL(int32_t)
REGISTER_GPU_TRIL_KERNEL(int64_t)
REGISTER_GPU_TRIL_KERNEL(half)
template<typename T>
class GpuFusedScaleTrilKernel final : public user_op::OpKernel {
 public:
  GpuFusedScaleTrilKernel() = default;
  ~GpuFusedScaleTrilKernel() override = default;

 private:
  using user_op::OpKernel::Compute;
  // Computes scale * tril(in) in one pass over the trailing two axes of "in":
  // kept entries are multiplied by the scale attribute, masked entries become
  // the fill attribute.
  void Compute(user_op::KernelComputeContext* ctx) const override {
    const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("in", 0);
    const auto shape = x->shape();
    const auto diagonal = ctx->Attr<int64_t>("diagonal");
    // int64_t (was int32_t): matches the kernel parameter types and the
    // sibling GpuTrilKernel, and avoids silent truncation of large shapes.
    const int64_t num_rows = shape.At(shape.NumAxes() - 2);
    const int64_t num_cols = shape.At(shape.NumAxes() - 1);
    user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("out", 0);
    const int64_t elem_cnt = shape.elem_cnt();
    const T fill = GetAttrVal<T>(ctx->Attr<bool>("is_floating_fill_value"),
                                 ctx->Attr<double>("floating_fill_value"),
                                 ctx->Attr<int64_t>("integer_fill_value"));
    const T scale = GetAttrVal<T>(ctx->Attr<bool>("is_floating_scale_value"),
                                  ctx->Attr<double>("floating_scale_value"),
                                  ctx->Attr<int64_t>("integer_scale_value"));
    if (num_cols % (kCudaWarpSize * 2) == 0) {
      // Wide, warp-divisible rows: warp-per-row kernel (coalesced access and a
      // vectorized half path).
      const int64_t total_rows = elem_cnt / num_cols;
      FusedScaleTrilWarpProcessRowGpu<<<BlocksNum4ThreadsNum(total_rows * kCudaWarpSize),
                                        kCudaThreadsNumPerBlock, 0,
                                        ctx->device_ctx()->cuda_stream()>>>(
          total_rows, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
    } else {
      // Fallback: flat elementwise kernel.
      FusedScaleTrilGpu<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0,
                          ctx->device_ctx()->cuda_stream()>>>(
          elem_cnt, num_rows, num_cols, diagonal, scale, x->dptr<T>(), fill, y->mut_dptr<T>());
    }
  }
  bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
// Registers the "fused_scale_tril" op kernel for one dtype on GPU devices,
// and proposes in-place reuse of the input buffer ("out" may alias "in")
// since the kernels read x[idx] before writing y[idx] at the same offset.
#define REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(dtype)                                      \
  REGISTER_USER_KERNEL("fused_scale_tril")                                               \
      .SetCreateFn<GpuFusedScaleTrilKernel<dtype>>()                                     \
      .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")                                \
                       & (user_op::HobDataType("out", 0) == GetDataType<dtype>::value))  \
      .SetInplaceProposalFn([](const user_op::InferContext&,                             \
                               user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
        OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));                \
        return Maybe<void>::Ok();                                                        \
      });

// One registration per supported element type.
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(float)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(double)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int8_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int32_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(int64_t)
REGISTER_GPU_FUSED_SCALE_TRIL_KERNEL(half)
} // namespace oneflow
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.