nmf_pgd.c
/* Generated by Cython 0.29.21 */

/* BEGIN: Cython Metadata
{
    "distutils": {
        "depends": [],
        "language": "c",
        "name": "gensim.models.nmf_pgd",
        "sources": [
            "gensim/models/nmf_pgd.pyx"
        ]
    },
    "module_name": "gensim.models.nmf_pgd"
}
END: Cython Metadata */

#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
    #error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
    #error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_21"
#define CYTHON_HEX_VERSION 0x001D15F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
  #ifndef __stdcall
    #define __stdcall
  #endif
  #ifndef __cdecl
    #define __cdecl
  #endif
  #ifndef __fastcall
    #define __fastcall
  #endif
#endif
#ifndef DL_IMPORT
  #define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
  #define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
  #if PY_VERSION_HEX >= 0x02070000
    #define HAVE_LONG_LONG
  #endif
#endif
#ifndef PY_LONG_LONG
  #define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
  #define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
  #define CYTHON_COMPILING_IN_PYPY 1
  #define CYTHON_COMPILING_IN_PYSTON 0
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 0
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #if PY_VERSION_HEX < 0x03050000
    #undef CYTHON_USE_ASYNC_SLOTS
    #define CYTHON_USE_ASYNC_SLOTS 0
  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
    #define CYTHON_USE_ASYNC_SLOTS 1
  #endif
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #undef CYTHON_AVOID_BORROWED_REFS
  #define CYTHON_AVOID_BORROWED_REFS 1
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #undef CYTHON_PEP489_MULTI_PHASE_INIT
  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
  #undef CYTHON_USE_TP_FINALIZE
  #define CYTHON_USE_TP_FINALIZE 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_PYSTON 1
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #ifndef CYTHON_USE_TYPE_SLOTS
    #define CYTHON_USE_TYPE_SLOTS 1
  #endif
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_ASYNC_SLOTS
  #define CYTHON_USE_ASYNC_SLOTS 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #ifndef CYTHON_USE_UNICODE_INTERNALS
    #define CYTHON_USE_UNICODE_INTERNALS 1
  #endif
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_ASSUME_SAFE_MACROS
    #define CYTHON_ASSUME_SAFE_MACROS 1
  #endif
  #ifndef CYTHON_UNPACK_METHODS
    #define CYTHON_UNPACK_METHODS 1
  #endif
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #undef CYTHON_PEP489_MULTI_PHASE_INIT
  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
  #undef CYTHON_USE_TP_FINALIZE
  #define CYTHON_USE_TP_FINALIZE 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
#else
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_PYSTON 0
  #define CYTHON_COMPILING_IN_CPYTHON 1
  #ifndef CYTHON_USE_TYPE_SLOTS
    #define CYTHON_USE_TYPE_SLOTS 1
  #endif
  #if PY_VERSION_HEX < 0x02070000
    #undef CYTHON_USE_PYTYPE_LOOKUP
    #define CYTHON_USE_PYTYPE_LOOKUP 0
  #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
    #define CYTHON_USE_PYTYPE_LOOKUP 1
  #endif
  #if PY_MAJOR_VERSION < 3
    #undef CYTHON_USE_ASYNC_SLOTS
    #define CYTHON_USE_ASYNC_SLOTS 0
  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
    #define CYTHON_USE_ASYNC_SLOTS 1
  #endif
  #if PY_VERSION_HEX < 0x02070000
    #undef CYTHON_USE_PYLONG_INTERNALS
    #define CYTHON_USE_PYLONG_INTERNALS 0
  #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
    #define CYTHON_USE_PYLONG_INTERNALS 1
  #endif
  #ifndef CYTHON_USE_PYLIST_INTERNALS
    #define CYTHON_USE_PYLIST_INTERNALS 1
  #endif
  #ifndef CYTHON_USE_UNICODE_INTERNALS
    #define CYTHON_USE_UNICODE_INTERNALS 1
  #endif
  #if PY_VERSION_HEX < 0x030300F0
    #undef CYTHON_USE_UNICODE_WRITER
    #define CYTHON_USE_UNICODE_WRITER 0
  #elif !defined(CYTHON_USE_UNICODE_WRITER)
    #define CYTHON_USE_UNICODE_WRITER 1
  #endif
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_ASSUME_SAFE_MACROS
    #define CYTHON_ASSUME_SAFE_MACROS 1
  #endif
  #ifndef CYTHON_UNPACK_METHODS
    #define CYTHON_UNPACK_METHODS 1
  #endif
  #ifndef CYTHON_FAST_THREAD_STATE
    #define CYTHON_FAST_THREAD_STATE 1
  #endif
  #ifndef CYTHON_FAST_PYCALL
    #define CYTHON_FAST_PYCALL 1
  #endif
  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
  #endif
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
  #endif
  #ifndef CYTHON_USE_DICT_VERSIONS
    #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
  #endif
  #ifndef CYTHON_USE_EXC_INFO_STACK
    #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
  #endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
  #include "longintrepr.h"
  #undef SHIFT
  #undef BASE
  #undef MASK
  #ifdef SIZEOF_VOID_P
    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
  #endif
#endif
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
  #define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
  #if defined(__GNUC__)
    #define CYTHON_RESTRICT __restrict__
  #elif defined(_MSC_VER) && _MSC_VER >= 1400
    #define CYTHON_RESTRICT __restrict
  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define CYTHON_RESTRICT restrict
  #else
    #define CYTHON_RESTRICT
  #endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
#     define CYTHON_UNUSED __attribute__ ((__unused__))
#   else
#     define CYTHON_UNUSED
#   endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
#   define CYTHON_UNUSED __attribute__ ((__unused__))
# else
#   define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
#  if defined(__cplusplus)
     template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
#  else
#    define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
#  endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
#  define CYTHON_NCP_UNUSED
# else
#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
    #ifndef _MSC_STDINT_H_
        #if _MSC_VER < 1300
            typedef unsigned char uint8_t;
            typedef unsigned int uint32_t;
        #else
            typedef unsigned __int8 uint8_t;
            typedef unsigned __int32 uint32_t;
        #endif
    #endif
#else
    #include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
  #if defined(__cplusplus) && __cplusplus >= 201103L
    #if __has_cpp_attribute(fallthrough)
      #define CYTHON_FALLTHROUGH [[fallthrough]]
    #elif __has_cpp_attribute(clang::fallthrough)
      #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
    #elif __has_cpp_attribute(gnu::fallthrough)
      #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
    #endif
  #endif
  #ifndef CYTHON_FALLTHROUGH
    #if __has_attribute(fallthrough)
      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
    #else
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
  #if defined(__clang__ ) && defined(__apple_build_version__)
    #if __apple_build_version__ < 7000000
      #undef CYTHON_FALLTHROUGH
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
#endif
#ifndef CYTHON_INLINE
  #if defined(__clang__)
    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
  #elif defined(__GNUC__)
    #define CYTHON_INLINE __inline__
  #elif defined(_MSC_VER)
    #define CYTHON_INLINE __inline
  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define CYTHON_INLINE inline
  #else
    #define CYTHON_INLINE
  #endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
  #define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
  #define __Pyx_DefaultClassType PyClass_Type
#else
  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
          PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
  #define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
  #define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
  #define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
  #define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
  #define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
  #ifndef METH_FASTCALL
    #define METH_FASTCALL 0x80
  #endif
  typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
  typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
                                                          Py_ssize_t nargs, PyObject *kwnames);
#else
  #define __Pyx_PyCFunctionFast _PyCFunctionFast
  #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
    ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
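/* Portability shims: the blocks below map CPython internals that may be
   missing on this interpreter or Python version (object allocation, frame
   fields, the current thread state, thread-specific storage, dict and
   unicode internals) onto equivalents that are always available. */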
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
  #define PyObject_Malloc(s)   PyMem_Malloc(s)
  #define PyObject_Free(p)     PyMem_Free(p)
  #define PyObject_Realloc(p)  PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
  #define PyMem_RawMalloc(n)     PyMem_Malloc(n)
  #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
  #define PyMem_RawFree(p)       PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
  #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
  #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
  #define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
  #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
  #define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
  #define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
    *key = PyThread_create_key();
    return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
    Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
    *key = Py_tss_NEEDS_INIT;
    return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
    PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
    return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
    PyThread_delete_key(*key);
    *key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
    return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
    return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
  #define __Pyx_PyNumber_Divide(x,y)        PyNumber_TrueDivide(x,y)
  #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
  #define __Pyx_PyNumber_Divide(x,y)        PyNumber_Divide(x,y)
  #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
  #define CYTHON_PEP393_ENABLED 1
  #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
                                        0 : _PyUnicode_Ready((PyObject *)(op)))
  #define __Pyx_PyUnicode_GET_LENGTH(u)      PyUnicode_GET_LENGTH(u)
  #define __Pyx_PyUnicode_READ_CHAR(u, i)    PyUnicode_READ_CHAR(u, i)
  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)  PyUnicode_MAX_CHAR_VALUE(u)
  #define __Pyx_PyUnicode_KIND(u)            PyUnicode_KIND(u)
  #define __Pyx_PyUnicode_DATA(u)            PyUnicode_DATA(u)
  #define __Pyx_PyUnicode_READ(k, d, i)      PyUnicode_READ(k, d, i)
  #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
  #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
  #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
  #else
  #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
  #endif
#else
  #define CYTHON_PEP393_ENABLED 0
  #define PyUnicode_1BYTE_KIND 1
  #define PyUnicode_2BYTE_KIND 2
  #define PyUnicode_4BYTE_KIND 4
  #define __Pyx_PyUnicode_READY(op) (0)
  #define __Pyx_PyUnicode_GET_LENGTH(u)      PyUnicode_GET_SIZE(u)
  #define __Pyx_PyUnicode_READ_CHAR(u, i)    ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)  ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
  #define __Pyx_PyUnicode_KIND(u)            (sizeof(Py_UNICODE))
  #define __Pyx_PyUnicode_DATA(u)            ((void*)PyUnicode_AS_UNICODE(u))
  #define __Pyx_PyUnicode_READ(k, d, i)      ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
  #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
  #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
  #define __Pyx_PyUnicode_Concat(a, b)     PyNumber_Add(a, b)
  #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
  #define __Pyx_PyUnicode_Concat(a, b)     PyUnicode_Concat(a, b)
  #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
  #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
  #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
  #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b)  ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
  #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
  #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
  #define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
  #define PyBaseString_Type      PyUnicode_Type
  #define PyStringObject         PyUnicodeObject
  #define PyString_Type          PyUnicode_Type
  #define PyString_Check         PyUnicode_Check
  #define PyString_CheckExact    PyUnicode_CheckExact
#ifndef PyObject_Unicode
  #define PyObject_Unicode       PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
  #define __Pyx_PyBaseString_Check(obj)      PyUnicode_Check(obj)
  #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
  #define __Pyx_PyBaseString_Check(obj)      (PyString_Check(obj) || PyUnicode_Check(obj))
  #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
  #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
  #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
  #define __Pyx_SET_SIZE(obj, size)     Py_SET_SIZE(obj, size)
#else
  #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
  #define __Pyx_SET_SIZE(obj, size)     Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
  #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
  #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
  #define PyIntObject                  PyLongObject
  #define PyInt_Type                   PyLong_Type
  #define PyInt_Check(op)              PyLong_Check(op)
  #define PyInt_CheckExact(op)         PyLong_CheckExact(op)
  #define PyInt_FromString             PyLong_FromString
  #define PyInt_FromUnicode            PyLong_FromUnicode
  #define PyInt_FromLong               PyLong_FromLong
  #define PyInt_FromSize_t             PyLong_FromSize_t
  #define PyInt_FromSsize_t            PyLong_FromSsize_t
  #define PyInt_AsLong                 PyLong_AsLong
  #define PyInt_AS_LONG                PyLong_AS_LONG
  #define PyInt_AsSsize_t              PyLong_AsSsize_t
  #define PyInt_AsUnsignedLongMask     PyLong_AsUnsignedLongMask
  #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
  #define PyNumber_Int                 PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
  #define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
  #ifndef PyUnicode_InternFromString
    #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
  #endif
#endif
#if PY_VERSION_HEX < 0x030200A4
  typedef long Py_hash_t;
  #define __Pyx_PyInt_FromHash_t PyInt_FromLong
  #define __Pyx_PyInt_AsHash_t   PyInt_AsLong
#else
  #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
  #define __Pyx_PyInt_AsHash_t   PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
  #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
  #if PY_VERSION_HEX >= 0x030500B1
    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
  #else
    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
  #endif
#else
  #define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
    typedef struct {
        unaryfunc am_await;
        unaryfunc am_aiter;
        unaryfunc am_anext;
    } __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
  #define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
  float value;
  memset(&value, 0xFF, sizeof(value));
  return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
    { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
  #ifdef __cplusplus
    #define __PYX_EXTERN_C extern "C"
  #else
    #define __PYX_EXTERN_C extern
  #endif
#endif
#define __PYX_HAVE__gensim__models__nmf_pgd
#define __PYX_HAVE_API__gensim__models__nmf_pgd
/* Early includes */
#include <math.h>
#include "pythread.h"
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
    (sizeof(type) < sizeof(Py_ssize_t)) ||\
    (sizeof(type) > sizeof(Py_ssize_t) &&\
          likely(v < (type)PY_SSIZE_T_MAX ||\
                 v == (type)PY_SSIZE_T_MAX) &&\
          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
                                v == (type)PY_SSIZE_T_MIN))) ||\
    (sizeof(type) == sizeof(Py_ssize_t) &&\
          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                               v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
    #include <cstdlib>
    #define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
    #define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString        PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
    #define __Pyx_PyStr_FromString        __Pyx_PyBytes_FromString
    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
    #define __Pyx_PyStr_FromString        __Pyx_PyUnicode_FromString
    #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s)  ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s)  ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s)  ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s)    __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s)     __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s)       __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s)   __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
    const Py_UNICODE *u_end = u;
    while (*u_end++) ;
    return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u)       PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
    (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    const char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    if (strcmp(default_encoding_c, "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        char ascii_chars[128];
        int c;
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (!ascii_chars_u) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
        Py_DECREF(ascii_chars_u);
        Py_DECREF(ascii_chars_b);
    }
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (!sys) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    Py_DECREF(sys);
    if (!default_encoding) goto bad;
    default_encoding_c = PyBytes_AsString(default_encoding);
    if (!default_encoding_c) goto bad;
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
    "gensim/models/nmf_pgd.pyx",
    "stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
             __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
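/* C-level layouts of the extension types behind Cython's memoryview
   support (array, Enum, memoryview, _memoryviewslice), each mirroring a
   cdef class from View.MemoryView, followed by their vtable structs. */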
/* "View.MemoryView":105
 *
 * @cname("__pyx_array")
 * cdef class array:             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
struct __pyx_array_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_array *__pyx_vtab;
  char *data;
  Py_ssize_t len;
  char *format;
  int ndim;
  Py_ssize_t *_shape;
  Py_ssize_t *_strides;
  Py_ssize_t itemsize;
  PyObject *mode;
  PyObject *_format;
  void (*callback_free_data)(void *);
  int free_data;
  int dtype_is_object;
};

/* "View.MemoryView":279
 *
 * @cname('__pyx_MemviewEnum')
 * cdef class Enum(object):             # <<<<<<<<<<<<<<
 *     cdef object name
 *     def __init__(self, name):
 */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;
};

/* "View.MemoryView":330
 *
 * @cname('__pyx_memoryview')
 * cdef class memoryview(object):             # <<<<<<<<<<<<<<
 *
 *     cdef object obj
 */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab;
  PyObject *obj;
  PyObject *_size;
  PyObject *_array_interface;
  PyThread_type_lock lock;
  __pyx_atomic_int acquisition_count[2];
  __pyx_atomic_int *acquisition_count_aligned_p;
  Py_buffer view;
  int flags;
  int dtype_is_object;
  __Pyx_TypeInfo *typeinfo;
};

/* "View.MemoryView":965
 *
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
 *     "Internal class for passing memoryview slices to Python"
 *
 */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;
  __Pyx_memviewslice from_slice;
  PyObject *from_object;
  PyObject *(*to_object_func)(char *);
  int (*to_dtype_func)(char *, PyObject *);
};

/* "View.MemoryView":105
 *
 * @cname("__pyx_array")
 * cdef class array:             # <<<<<<<<<<<<<<
 *
 *     cdef:
 */
struct __pyx_vtabstruct_array {
  PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;

/* "View.MemoryView":330
 *
 * @cname('__pyx_memoryview')
 * cdef class memoryview(object):             # <<<<<<<<<<<<<<
 *
 *     cdef object obj
 */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;

/* "View.MemoryView":965
 *
 * @cname('__pyx_memoryviewslice')
 * cdef class _memoryviewslice(memoryview):             # <<<<<<<<<<<<<<
 *     "Internal class for passing memoryview slices to Python"
 *
 */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;

/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
  #define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
  typedef struct {
    void (*INCREF)(void*, PyObject*, int);
    void (*DECREF)(void*, PyObject*, int);
    void (*GOTREF)(void*, PyObject*, int);
    void (*GIVEREF)(void*, PyObject*, int);
    void* (*SetupContext)(const char*, int, const char*);
    void (*FinishContext)(void**);
  } __Pyx_RefNannyAPIStruct;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          if (acquire_gil) {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
              PyGILState_Release(__pyx_gilstate_save);\
          } else {\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
          }
#else
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
  #define __Pyx_RefNannyFinishContext()\
          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
  #define __Pyx_XINCREF(r)  do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
  #define __Pyx_XDECREF(r)  do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
  #define __Pyx_XGOTREF(r)  do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
  #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
  #define __Pyx_RefNannyDeclarations
  #define __Pyx_RefNannySetupContext(name, acquire_gil)
  #define __Pyx_RefNannyFinishContext()
  #define __Pyx_INCREF(r) Py_INCREF(r)
  #define __Pyx_DECREF(r) Py_DECREF(r)
  #define __Pyx_GOTREF(r)
  #define __Pyx_GIVEREF(r)
  #define __Pyx_XINCREF(r) Py_XINCREF(r)
  #define __Pyx_XDECREF(r) Py_XDECREF(r)
  #define __Pyx_XGOTREF(r)
  #define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_XDECREF(tmp);\
    } while (0)
#define __Pyx_DECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_DECREF(tmp);\
    } while (0)
#define __Pyx_CLEAR(r)  do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
    Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
    PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
    const char* function_name);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT  1
#define __Pyx_MEMVIEW_PTR     2
#define __Pyx_MEMVIEW_FULL    4
#define __Pyx_MEMVIEW_CONTIG  8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW  32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
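/* Prototypes for the generic runtime helpers Cython emits into every
   module: argument type checks, object calls, exception save/restore,
   item access, and the code-object/traceback caches. Their definitions
   follow later in the generated file. */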
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
    ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
        __Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)   __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)   __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
    __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
    (sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
  static size_t __pyx_pyframe_localsplus_offset = 0;
  #include "frameobject.h"
  #define __Pxy_PyFrame_Initialize_Offsets()\
    ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
     (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
  #define __Pyx_PyFrame_GetLocalsplus(frame)\
    (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
        (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
    (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
               __Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
    (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                          int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
    __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
    (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                           int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                     int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
    int byteorder = 0;
    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
    int byteorder = -1;
    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
    int byteorder = 1;
    return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
    (version_var) = __PYX_GET_DICT_VERSION(dict);\
    (cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
        (VAR) = __pyx_dict_cached_value;\
    } else {\
        (VAR) = __pyx_dict_cached_value = (LOOKUP);\
        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
    }\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
        (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
        __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
    PY_UINT64_T __pyx_dict_version;\
    PyObject *__pyx_dict_cached_value;\
    (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb)  PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        __Pyx_SET_SIZE(list, len + 1);
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
    (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject* none = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!none))
        return -1;
    Py_DECREF(none);
    return 0;
#else
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        __Pyx_SET_SIZE(list, len + 1);
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
    PyCodeObject* code_object;
    int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
    int count;
    int max_count;
    __Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
    static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
    static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
    #define __Pyx_GetBuffer PyObject_GetBuffer
    #define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
  Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
  size_t refcount;
  Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
  __Pyx_Buffer *rcbuffer;
  char *data;
  __Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
                                __Pyx_memviewslice *slice2,
                                int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag);
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'libc.math' */
/* Module declarations from 'gensim.models.nmf_pgd' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double, double); /*proto*/
static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double, double); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "gensim.models.nmf_pgd" extern int __pyx_module_is_main_gensim__models__nmf_pgd; int __pyx_module_is_main_gensim__models__nmf_pgd = 0; /* Implementation of 'gensim.models.nmf_pgd' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_h[] = "h"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_WtW[] = "WtW"; static const char __pyx_k_Wtv[] = "Wtv"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_grad[] = "grad"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_kappa[] = "kappa"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_hessian[] = "hessian"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_solve_h[] = "solve_h"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_n_samples[] = "n_samples"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_violation[] = "violation"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_sample_idx[] = "sample_idx"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_permutation[] = "permutation"; static const char __pyx_k_n_components[] = "n_components"; static const char __pyx_k_pyx_checksum[] = 
"__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_projected_grad[] = "projected_grad"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_component_idx_1[] = "component_idx_1"; static const char __pyx_k_component_idx_2[] = "component_idx_2"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_gensim_models_nmf_pgd[] = "gensim.models.nmf_pgd"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_gensim_models_nmf_pgd_pyx[] = "gensim/models/nmf_pgd.pyx"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject 
*__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_WtW; static PyObject *__pyx_n_s_Wtv; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_component_idx_1; static PyObject *__pyx_n_s_component_idx_2; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_gensim_models_nmf_pgd; static PyObject *__pyx_kp_s_gensim_models_nmf_pgd_pyx; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_grad; static PyObject *__pyx_n_s_h; static PyObject *__pyx_n_s_hessian; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_kappa; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_n_components; static PyObject *__pyx_n_s_n_samples; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_permutation; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_projected_grad; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_sample_idx; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static 
PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_solve_h; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_violation; static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, 
PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject 
*__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__27; /* Late includes */ /* "gensim/models/nmf_pgd.pyx":12 * from cython.parallel import prange * * cdef double fmin(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x < y else y * */ static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double __pyx_v_x, double __pyx_v_y) { double __pyx_r; double __pyx_t_1; /* "gensim/models/nmf_pgd.pyx":13 * * cdef double fmin(double x, double y) nogil: * return x if x < y else y # <<<<<<<<<<<<<< * * cdef double fmax(double x, double y) nogil: */ if (((__pyx_v_x < __pyx_v_y) != 0)) { __pyx_t_1 = __pyx_v_x; } else { __pyx_t_1 = __pyx_v_y; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":12 * from cython.parallel import prange * * cdef double fmin(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x < y else y * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "gensim/models/nmf_pgd.pyx":15 * return x if x < y else y * * cdef double fmax(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x > y else y * */ static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double __pyx_v_x, double __pyx_v_y) { double __pyx_r; double __pyx_t_1; /* "gensim/models/nmf_pgd.pyx":16 * * cdef double fmax(double x, double y) nogil: * return x if x > y else y # <<<<<<<<<<<<<< * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): */ if (((__pyx_v_x > __pyx_v_y) != 0)) { __pyx_t_1 = __pyx_v_x; } else { __pyx_t_1 = __pyx_v_y; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":15 * return x if x < y else y * * cdef double fmax(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x > y else y * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * 
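(editor's note, not part of the quoted .pyx source: in this signature
 *  double[:, ::1] requests a C-contiguous 2-D double buffer, double[:, :]
 *  accepts arbitrary strides, and int[::1] a contiguous 1-D int buffer)
 *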
"""Find optimal dense vector representation for current W and r matrices. * */ /* Python wrapper */ static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6gensim_6models_7nmf_pgd_solve_h[] = "solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa)\nFind optimal dense vector representation for current W and r matrices.\n\n Parameters\n ----------\n h : matrix\n Dense representation of documents in current batch.\n Wtv : matrix\n WtW : matrix\n\n Returns\n -------\n float\n Cumulative difference between previous and current h vectors.\n\n "; static PyMethodDef __pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h = {"solve_h", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6gensim_6models_7nmf_pgd_solve_h}; static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_h = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Wtv = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_WtW = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_permutation = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_kappa; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("solve_h (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_h,&__pyx_n_s_Wtv,&__pyx_n_s_WtW,&__pyx_n_s_permutation,&__pyx_n_s_kappa,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_h)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Wtv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 1); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_WtW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 2); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_permutation)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 3); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kappa)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 4); __PYX_ERR(0, 18, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "solve_h") < 0)) __PYX_ERR(0, 18, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } 
else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_h = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_h.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_Wtv = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Wtv.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_WtW = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_WtW.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_permutation = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_permutation.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_kappa = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_kappa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 18, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 18, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_6gensim_6models_7nmf_pgd_solve_h(__pyx_self, __pyx_v_h, __pyx_v_Wtv, __pyx_v_WtW, __pyx_v_permutation, __pyx_v_kappa); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa) { Py_ssize_t __pyx_v_n_components; CYTHON_UNUSED Py_ssize_t __pyx_v_n_samples; double __pyx_v_violation; double __pyx_v_grad; double __pyx_v_projected_grad; double __pyx_v_hessian; Py_ssize_t __pyx_v_sample_idx; Py_ssize_t __pyx_v_component_idx_1; Py_ssize_t __pyx_v_component_idx_2; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; double __pyx_t_14; PyObject *__pyx_t_15 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("solve_h", 0); /* "gensim/models/nmf_pgd.pyx":35 * """ * * cdef Py_ssize_t n_components = h.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 */ __pyx_v_n_components = (__pyx_v_h.shape[0]); /* "gensim/models/nmf_pgd.pyx":36 * * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] # <<<<<<<<<<<<<< * cdef double violation = 0 * cdef double grad, projected_grad, hessian */ __pyx_v_n_samples = (__pyx_v_h.shape[1]); /* "gensim/models/nmf_pgd.pyx":37 * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 # <<<<<<<<<<<<<< * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 */ __pyx_v_violation = 0.0; /* "gensim/models/nmf_pgd.pyx":39 * cdef double violation = 0 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 # <<<<<<<<<<<<<< * 
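(editor's note, not part of the quoted source: the counters and accumulators
 *  are plain C types so the prange body below runs with the GIL released;
 *  under OpenMP, violation becomes a reduction(+) variable and the loop
 *  indices are made thread-private, as the pragmas emitted below show)
 *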
cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 */ __pyx_v_sample_idx = 0; /* "gensim/models/nmf_pgd.pyx":40 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t component_idx_2 = 0 * */ __pyx_v_component_idx_1 = 0; /* "gensim/models/nmf_pgd.pyx":41 * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 # <<<<<<<<<<<<<< * * for sample_idx in prange(n_samples, nogil=True): */ __pyx_v_component_idx_2 = 0; /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_n_samples; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_violation) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_component_idx_1) lastprivate(__pyx_v_component_idx_2) lastprivate(__pyx_v_grad) lastprivate(__pyx_v_hessian) lastprivate(__pyx_v_projected_grad) firstprivate(__pyx_v_sample_idx) lastprivate(__pyx_v_sample_idx) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_sample_idx = (Py_ssize_t)(0 + 1 * __pyx_t_2); /* Initialize private variables to invalid values */ __pyx_v_component_idx_1 = ((Py_ssize_t)0xbad0bad0); __pyx_v_component_idx_2 = ((Py_ssize_t)0xbad0bad0); __pyx_v_grad = ((double)__PYX_NAN()); __pyx_v_hessian = ((double)__PYX_NAN()); __pyx_v_projected_grad = ((double)__PYX_NAN()); /* "gensim/models/nmf_pgd.pyx":44 * * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): # <<<<<<<<<<<<<< * component_idx_1 = permutation[component_idx_1] * */ __pyx_t_4 = __pyx_v_n_components; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_component_idx_1 = __pyx_t_6; /* "gensim/models/nmf_pgd.pyx":45 * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] # <<<<<<<<<<<<<< * * grad = -Wtv[component_idx_1, sample_idx] */ __pyx_t_7 = __pyx_v_component_idx_1; __pyx_v_component_idx_1 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_permutation.data) + __pyx_t_7)) ))); /* "gensim/models/nmf_pgd.pyx":47 * component_idx_1 = permutation[component_idx_1] * * grad = -Wtv[component_idx_1, sample_idx] # <<<<<<<<<<<<<< * * for component_idx_2 in range(n_components): */ __pyx_t_7 = __pyx_v_component_idx_1; __pyx_t_8 = __pyx_v_sample_idx; __pyx_v_grad = (-(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Wtv.data + __pyx_t_7 * __pyx_v_Wtv.strides[0]) ) + __pyx_t_8 * __pyx_v_Wtv.strides[1]) )))); /* "gensim/models/nmf_pgd.pyx":49 * grad = -Wtv[component_idx_1, sample_idx] * * for component_idx_2 in range(n_components): # <<<<<<<<<<<<<< * grad += WtW[component_idx_1, component_idx_2] * 
h[component_idx_2, sample_idx] * */ __pyx_t_9 = __pyx_v_n_components; __pyx_t_10 = __pyx_t_9; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v_component_idx_2 = __pyx_t_11; /* "gensim/models/nmf_pgd.pyx":50 * * for component_idx_2 in range(n_components): * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] # <<<<<<<<<<<<<< * * hessian = WtW[component_idx_1, component_idx_1] */ __pyx_t_8 = __pyx_v_component_idx_1; __pyx_t_7 = __pyx_v_component_idx_2; __pyx_t_12 = __pyx_v_component_idx_2; __pyx_t_13 = __pyx_v_sample_idx; __pyx_v_grad = (__pyx_v_grad + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_8 * __pyx_v_WtW.strides[0]) )) + __pyx_t_7)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_12 * __pyx_v_h.strides[0]) )) + __pyx_t_13)) ))))); } /* "gensim/models/nmf_pgd.pyx":52 * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] * * hessian = WtW[component_idx_1, component_idx_1] # <<<<<<<<<<<<<< * * grad = grad * kappa / hessian */ __pyx_t_13 = __pyx_v_component_idx_1; __pyx_t_12 = __pyx_v_component_idx_1; __pyx_v_hessian = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_13 * __pyx_v_WtW.strides[0]) )) + __pyx_t_12)) ))); /* "gensim/models/nmf_pgd.pyx":54 * hessian = WtW[component_idx_1, component_idx_1] * * grad = grad * kappa / hessian # <<<<<<<<<<<<<< * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad */ __pyx_v_grad = ((__pyx_v_grad * __pyx_v_kappa) / __pyx_v_hessian); /* "gensim/models/nmf_pgd.pyx":56 * grad = grad * kappa / hessian * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad # <<<<<<<<<<<<<< * * violation += projected_grad * projected_grad */ __pyx_t_12 = __pyx_v_component_idx_1; __pyx_t_13 = __pyx_v_sample_idx; if ((((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_12 * __pyx_v_h.strides[0]) )) + __pyx_t_13)) ))) == 0.0) != 0)) { __pyx_t_14 = __pyx_f_6gensim_6models_7nmf_pgd_fmin(0.0, __pyx_v_grad); } else { __pyx_t_14 = __pyx_v_grad; } __pyx_v_projected_grad = __pyx_t_14; /* "gensim/models/nmf_pgd.pyx":58 * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad * * violation += projected_grad * projected_grad # <<<<<<<<<<<<<< * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) */ __pyx_v_violation = (__pyx_v_violation + (__pyx_v_projected_grad * __pyx_v_projected_grad)); /* "gensim/models/nmf_pgd.pyx":60 * violation += projected_grad * projected_grad * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) 
# <<<<<<<<<<<<<< * * return sqrt(violation) */ __pyx_t_13 = __pyx_v_component_idx_1; __pyx_t_12 = __pyx_v_sample_idx; __pyx_t_7 = __pyx_v_component_idx_1; __pyx_t_8 = __pyx_v_sample_idx; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_7 * __pyx_v_h.strides[0]) )) + __pyx_t_8)) )) = __pyx_f_6gensim_6models_7nmf_pgd_fmax(((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_13 * __pyx_v_h.strides[0]) )) + __pyx_t_12)) ))) - __pyx_v_grad), 0.); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "gensim/models/nmf_pgd.pyx":62 * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) * * return sqrt(violation) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_15 = PyFloat_FromDouble(sqrt(__pyx_v_violation)); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_15); __pyx_r = __pyx_t_15; __pyx_t_15 = 0; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
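*
 * (editor's note, not part of the quoted source: per coordinate this sweep
 *  computes grad = (WtW . h - Wtv)[i, s] * kappa / WtW[i, i], accumulates
 *  the squared projected gradient -- grad clipped to min(0, grad) wherever
 *  h[i, s] == 0 -- into violation, then applies the non-negativity-projected
 *  update h[i, s] = max(h[i, s] - grad, 0). The returned sqrt(violation) is
 *  the projected-gradient norm, a common stopping criterion for
 *  projected-gradient NMF solvers.)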
* */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_15); __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_h, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Wtv, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_WtW, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_permutation, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = 
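/* editor's note: __Pyx_PyIndex_AsSsize_t coerces the itemsize argument via
   __index__; (Py_ssize_t)-1 doubles as its error sentinel, hence the extra
   PyErr_Occurred() check immediately after the call */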
__Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # 
<<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
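/* editor's note: CYTHON_UNPACK_METHODS fast path -- when 'encode' was
   resolved to a bound method, its underlying function is called directly
   with (format, 'ASCII'); otherwise the plain one-argument call is used */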
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); 
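/* (Illustrative note; not part of Cython's generated output.) __getbuffer__
   implements the PEP 3118 buffer protocol: the consumer passes a `flags`
   bitmask describing the memory layout it can handle, and the array only
   exports itself when that request overlaps its own contiguity (C-order or
   Fortran-order, plus PyBUF_ANY_CONTIGUOUS). A typical consumer-side
   request looks like:

       Py_buffer view;
       if (PyObject_GetBuffer(obj, &view, PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) == 0) {
           ... read through view.buf, view.shape, view.strides ...
           PyBuffer_Release(&view);
       }

   When `flags` shares no bit with `bufmode`, the ValueError below fires. */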
if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; 
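/* (Illustrative note; not part of Cython's generated output.) The
   assignments around this point fill in the exporter's half of the PEP 3118
   contract: `buf` points at the raw storage, `len` is the total size in
   bytes, and `shape` and `strides` each hold `ndim` Py_ssize_t entries
   describing the logical layout. `suboffsets` stays NULL because
   cython.array is one contiguous allocation, so no pointer indirection is
   needed. No data is copied; the consumer reads through `info->buf` until
   it calls PyBuffer_Release(). */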
__pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif 
self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ 
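/* (Illustrative note; not part of Cython's generated output.) The exit
   scaffolding below is the idiom Cython emits for every function that can
   raise: failures jump to __pyx_L1_error, temporaries (the __pyx_t_N slots)
   are XDECREF'ed so no references leak, __Pyx_AddTraceback() appends a
   synthetic frame naming the original .pyx location, and the return value
   is set to the error sentinel (NULL for object-returning functions). The
   success path reaches __pyx_L0 directly and runs only the shared
   cleanup. */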
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { 
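/* (Illustrative note; not part of Cython's generated output.) len(arr)
   reports only the extent of the first axis (self._shape[0]), matching the
   semantics of numpy arrays and built-in memoryviews; the full extents are
   available through the wrapping memoryview's shape. */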
Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
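/* (Illustrative note; not part of Cython's generated output.)
   array.__reduce_cython__ raises on purpose: the instance owns raw C state
   (the malloc'ed data buffer and the _shape and _strides block) created in
   __cinit__, which pickle's default machinery cannot re-create, so Cython
   generates a stub that fails loudly rather than produce a broken copy.
   Classes whose state is plain Python objects, such as Enum further down,
   get a working __reduce_cython__ instead. */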
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ 
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = 
__pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ 
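/* (Illustrative note; not part of Cython's generated output.) Two reduce
   shapes are emitted: when the state must be applied after construction
   (use_setstate), the 3-tuple form below returns `state` as a separate
   item, which the pickle machinery replays through __setstate_cython__;
   otherwise the 2-tuple form packs the state straight into the
   reconstructor's arguments and no setstate call is needed. */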
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ 
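/* A minimal sketch, not part of Cython's output: how a pickler consumes the
   (callable, args[, state]) triple produced by Enum.__reduce_cython__
   above. The callable is __pyx_unpickle_Enum and its args carry
   (type(self), 0xb068931, state); 0xb068931 is a checksum of the class
   layout, used to reject pickles from an incompatible version of this
   module. The helper name below is hypothetical and nothing in this module
   calls it. */
static CYTHON_UNUSED PyObject *__pyx_demo_replay_reduce(PyObject *obj) {
    PyObject *reduced, *copy;
    /* Ask the object for its reduction, exactly as pickle would. */
    reduced = PyObject_CallMethod(obj, "__reduce_cython__", NULL);
    if (unlikely(!reduced)) return NULL;
    if (unlikely(!PyTuple_Check(reduced) || PyTuple_GET_SIZE(reduced) < 2)) {
        Py_DECREF(reduced);
        PyErr_SetString(PyExc_TypeError, "expected a (callable, args[, state]) tuple");
        return NULL;
    }
    /* Rebuild the object as callable(*args); with the 3-tuple form the
       caller would afterwards apply item 2 via __setstate_cython__. */
    copy = PyObject_CallObject(PyTuple_GET_ITEM(reduced, 0),
                               PyTuple_GET_ITEM(reduced, 1));
    Py_DECREF(reduced);
    return copy; /* new reference, or NULL with an exception set */
}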
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = 
((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, 
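/* The implementation this wrapper is delegating to (defined next) does
 * three things: it acquires the underlying buffer with __Pyx_GetBuffer
 * whenever the wrapped object can supply one, it hands the view a lock
 * taken from a small preallocated pool (falling back to
 * PyThread_allocate_lock once the pool is exhausted), and it settles
 * dtype_is_object. When PyBUF_FORMAT was requested, that last decision
 * reduces to "the format string is exactly 'O'", i.e. every item is a
 * PyObject pointer whose refcounts must be managed during copies.
 */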
__pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < 
THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & 
PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer 
*)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; 
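/* These two assignments are the classic O(1) removal from an unordered
 * array: the departing lock trades places with the last live entry, and
 * the decrement performed just above shrinks the live region past it.
 * Sketch with used == 3 and pool [A, B, C], releasing B: used becomes 2,
 * B and C swap, and the pool reads [A, C | B] with only the first two
 * slots live. A lock that never came from the pool misses every loop
 * iteration and is released with PyThread_free_lock in the for-else
 * branch below instead.
 */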
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) 
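/* (fast-path exhaustion check ends the loop here)
 * The enumerate(index) loop of the .pyx source compiles to a dual
 * protocol: exact lists and tuples are walked with the GET_SIZE and
 * GET_ITEM macros plus a manual counter, while any other iterable falls
 * back to tp_iternext with an explicit StopIteration check. Either way,
 * every index advances itemp by one dimension through pybuffer_index, so
 * for a two-dimensional view m the lookup m[i, j] resolves, for direct
 * dimensions (a simplifying assumption in this sketch), to
 *     buf + i * strides[0] + j * strides[1]
 */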
break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject 
*__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* 
"View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = 
_unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) 
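/* Dispatch summary for __setitem__ so far: read-only views raised
 * immediately; when _unellipsify reported slice objects and the value
 * was itself buffer-like, setitem_slice_assignment above copied it
 * element-wise; this branch handles the remaining sliced case by
 * broadcasting a single scalar across the destination, which is why
 * self[index] was just type-tested back into a memoryview. Plain
 * integer indexing bypasses all of it and lands in setitem_indexed
 * further down.
 */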
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & 
~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto 
__pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, 
dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * 
raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; 
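/* Temporary-item strategy used in this function: the packed scalar
 * lives in storage sized by self.view.itemsize, either the 128-int
 * stack array (typically 512 bytes) when it fits or a PyMem_Malloc
 * block otherwise. This exception plumbing exists so that the
 * PyMem_Free(tmp) of the finally clause runs on every exit path while
 * the in-flight exception is saved and restored around it; PyMem_Free
 * is a documented no-op for NULL, which is what tmp still holds when
 * the stack buffer sufficed.
 */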
__pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
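/* This label sequence is the stock epilogue Cython emits for any
 * function that owns temporaries: on the error path each live temp is
 * XDECREF'd, __Pyx_AddTraceback appends a synthetic frame from the line
 * bookkeeping in __pyx_lineno and friends, and the error sentinel is
 * returned; the __pyx_L0 label that follows is the shared normal exit.
 */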
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else 
#endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) 
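/* __Pyx_GetException normalizes the pending struct.error and moves it
   into the thread's exception-handling state, taking ownership of the
   (type, value, traceback) triple, so the handler body below can raise
   its own ValueError with the original error attached as context. */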
__Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): 
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); 
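/* PyTuple_SET_ITEM steals a reference, so each argument is handed off
   with __Pyx_GIVEREF immediately before being stored. Slot 0 of the
   argument tuple is reserved for the bound self object when struct.pack
   was unpacked into (function, self) above; the 0+__pyx_t_7 and
   1+__pyx_t_7 offsets account for that optional leading slot. */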
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * 
* if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable 
memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static 
PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
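/* Shared exit for shape.__get__: on success __pyx_r holds a fresh tuple
   built from self.view.shape[:self.view.ndim]; on error it is NULL and
   the exception raised above propagates to the caller. */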
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def 
nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def 
size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def 
__repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return 
__pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; 
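/* is_c_contig reports whether the viewed data is C-contiguous: a
   temporary __Pyx_memviewslice is obtained from the memoryview and its
   strides are checked against the 'C' (row-major) layout across all
   self.view.ndim dimensions. */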
__Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
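/* Same pattern as is_c_contig above, but with order 'F': the contiguity
   walk checks the strides in Fortran (column-major) order instead. */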
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * 
return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
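/* Note: this __reduce_cython__ wrapper and the __setstate_cython__ one below
 * exist only to make pickling fail loudly.  The memoryview type acquires a
 * C-level Py_buffer in its __cinit__, so no default __reduce__ can be
 * synthesized; both methods raise
 * TypeError("no default __reduce__ due to non-trivial __cinit__"),
 * as quoted in the "(tree fragment)" source comments. */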
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * 
@cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef 
assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if 
(unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) 
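/* Note on the item-fetch pattern here: with CYTHON_ASSUME_SAFE_MACROS set and
 * borrowed references permitted, list/tuple items are read via the unchecked
 * PyList_GET_ITEM / PyTuple_GET_ITEM macros, which return borrowed references
 * (hence the explicit __Pyx_INCREF) and cannot fail -- the `if (unlikely(0 < 0))`
 * guard is a deliberately unreachable placeholder that keeps the generated
 * error-handling shape uniform.  The #else branch falls back to
 * PySequence_ITEM, which returns a new reference and can raise. */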
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = 
(__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * 
have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* 
"View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: 
* */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: 
* if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); 
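/* A minimal standalone sketch (hypothetical helper, not emitted by Cython) of
 * the slice-extent arithmetic implemented just below, where
 * new_shape = (stop - start) // step runs under cython.cdivision(True) and so
 * compiles to truncating C division:
 *
 *     static Py_ssize_t slice_len(Py_ssize_t start, Py_ssize_t stop,
 *                                 Py_ssize_t step) {
 *         Py_ssize_t n = (stop - start) / step;   // C division truncates
 *         if ((stop - start) - step * n) n += 1;  // round partial steps up
 *         return n < 0 ? 0 : n;                   // clamp empty slices to 0
 *     }
 *
 * Worked example: slice_len(0, 5, 2) gives 5/2 = 2, the nonzero remainder
 * bumps it to 3, matching len(range(0, 5, 2)).  This is why the generated
 * code adds 1 when (stop - start) - step * new_shape is nonzero and then
 * clamps a negative new_shape to zero. */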
if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] 
+ suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except 
NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); 
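/* Note: this raise (together with the `index >= shape` check just below)
 * gives pybuffer_index Python indexing semantics: a negative index was
 * first wrapped by adding view.shape[dim], and anything still out of
 * range for the axis is reported as an IndexError naming that axis. */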
if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); 
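/* Illustrative sketch (disabled code, hypothetical name `buffer_item`):
 * the happy path of pybuffer_index above reduces to plain strided
 * pointer arithmetic, plus one extra dereference for a PEP 3118
 * indirect axis (suboffset >= 0). */
#if 0
static char *buffer_item(char *bufp, Py_ssize_t index,
                         Py_ssize_t stride, Py_ssize_t suboffset)
{
    char *p = bufp + index * stride;   /* strided step along this axis */
    if (suboffset >= 0)                /* indirect axis: follow the stored */
        p = *(char **) p + suboffset;  /* pointer, then apply the offset   */
    return p;
}
#endif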
__pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, 
((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; 
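/* Both _memoryviewslice converters follow the same dispatch: use the
 * dtype-specific function pointer when one was registered
 * (to_object_func for reads, to_dtype_func for writes, the latter
 * returning 0 on error), otherwise fall back to the generic base-class
 * memoryview implementation, which packs and unpacks items via the
 * buffer's struct-format string. */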
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ 
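/* __reduce_cython__ above and __setstate_cython__ below exist only to
 * raise: a _memoryviewslice wraps C-level slice state initialized by a
 * non-trivial __cinit__, so no default pickle protocol can be generated
 * for it. */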
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef 
_memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data 
* result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = 
memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ 
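/* Illustrative sketch (disabled code, hypothetical `slice_desc` type with
 * a fixed 8-dimension cap): slice_copy above snapshots a memoryview's
 * Py_buffer metadata into a flat C struct -- data pointer plus per-axis
 * shape/strides/suboffsets, with -1 marking a direct axis when the buffer
 * exports no suboffsets array. memoryview_copy then rebuilds a new
 * Python-level object from that snapshot. */
#if 0
typedef struct {
    char *data;
    Py_ssize_t shape[8], strides[8], suboffsets[8];
    int ndim;
} slice_desc;

static void snapshot_buffer(const Py_buffer *view, slice_desc *dst)
{
    int dim;
    dst->data = (char *) view->buf;
    dst->ndim = view->ndim;
    for (dim = 0; dim < view->ndim; dim++) {
        dst->shape[dim]   = view->shape[dim];
        dst->strides[dim] = view->strides[dim];
        dst->suboffsets[dim] =
            view->suboffsets ? view->suboffsets[dim] : -1;
    }
}
#endif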
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * 
memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given 
slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i 
in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t 
itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = 
stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* 
"View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; 
goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, 
__pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
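/*
 * Argument-unpacking wrapper for __pyx_unpickle_Enum, the pickle
 * reconstructor for the module-internal Enum helper type.  It accepts
 * exactly three arguments, positionally or by keyword: __pyx_type (the
 * class to instantiate via __new__), __pyx_checksum (a layout checksum
 * baked in at code-generation time; anything other than 0xb068931 raises
 * pickle.PickleError), and __pyx_state (a tuple, or None, applied through
 * __pyx_unpickle_Enum__set_state).  From Python the reduce protocol
 * invokes it roughly as follows, with `state` standing in for whatever
 * Enum.__reduce__ produced:
 *
 *     __pyx_unpickle_Enum(Enum, 0xb068931, state)
 */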
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if 
__pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 
0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { 
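/* Method table for the internal cython.array helper type. __getattr__ is
   registered with METH_O|METH_COEXIST so it can coexist with the
   tp_getattro slot defined above, which already falls back to it on
   AttributeError; the __reduce_cython__/__setstate_cython__ stubs refuse
   pickling because the type has a non-trivial __cinit__. */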
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && 
!_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif 
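/* Untrack from the cycle collector before tearing the memoryview down,
   then temporarily bump the refcount around the user-level __dealloc__
   call (with any pending exception state saved via PyErr_Fetch/Restore)
   so that references created while the destructor runs cannot re-enter
   deallocation. */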
PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", 
(PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, 
/*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryviewslice___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ 
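/* GC slots for _memoryviewslice: tp_traverse/tp_clear chain through the
   base memoryview implementation and additionally manage the from_object
   reference and the wrapped from_slice captured at construction time. */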
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_nmf_pgd(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_nmf_pgd}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "nmf_pgd", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, 
__pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_WtW, __pyx_k_WtW, sizeof(__pyx_k_WtW), 0, 0, 1, 1}, {&__pyx_n_s_Wtv, __pyx_k_Wtv, sizeof(__pyx_k_Wtv), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_1, __pyx_k_component_idx_1, sizeof(__pyx_k_component_idx_1), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_2, __pyx_k_component_idx_2, sizeof(__pyx_k_component_idx_2), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_gensim_models_nmf_pgd, __pyx_k_gensim_models_nmf_pgd, sizeof(__pyx_k_gensim_models_nmf_pgd), 0, 0, 1, 1}, {&__pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_k_gensim_models_nmf_pgd_pyx, sizeof(__pyx_k_gensim_models_nmf_pgd_pyx), 0, 0, 1, 0}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_grad, __pyx_k_grad, sizeof(__pyx_k_grad), 0, 0, 1, 1}, {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, {&__pyx_n_s_hessian, __pyx_k_hessian, sizeof(__pyx_k_hessian), 0, 0, 1, 1}, 
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_kappa, __pyx_k_kappa, sizeof(__pyx_k_kappa), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_n_components, __pyx_k_n_components, sizeof(__pyx_k_n_components), 0, 0, 1, 1}, {&__pyx_n_s_n_samples, __pyx_k_n_samples, sizeof(__pyx_k_n_samples), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_projected_grad, __pyx_k_projected_grad, sizeof(__pyx_k_projected_grad), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_sample_idx, __pyx_k_sample_idx, sizeof(__pyx_k_sample_idx), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_solve_h, __pyx_k_solve_h, sizeof(__pyx_k_solve_h), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, 
sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_violation, __pyx_k_violation, sizeof(__pyx_k_violation), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 44, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if 
(unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 
577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
* */ __pyx_tuple__19 = PyTuple_Pack(14, __pyx_n_s_h, __pyx_n_s_Wtv, __pyx_n_s_WtW, __pyx_n_s_permutation, __pyx_n_s_kappa, __pyx_n_s_n_components, __pyx_n_s_n_samples, __pyx_n_s_violation, __pyx_n_s_grad, __pyx_n_s_projected_grad, __pyx_n_s_hessian, __pyx_n_s_sample_idx, __pyx_n_s_component_idx_1, __pyx_n_s_component_idx_2); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(5, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_n_s_solve_h, 18, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 18, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) 
__PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if 
(__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ 
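/* nmf_pgd imports no external extension types, C-level variables, or
   C-level functions, so this hook and the two import hooks below are
   generated as empty shells that only maintain the refnanny context. */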
__Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #ifndef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #elif PY_MAJOR_VERSION < 3 #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" void #else #define __Pyx_PyMODINIT_FUNC void #endif #else #ifdef __cplusplus #define __Pyx_PyMODINIT_FUNC extern "C" PyObject * #else #define __Pyx_PyMODINIT_FUNC PyObject * #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initnmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initnmf_pgd(void) #else __Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_nmf_pgd(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static 
PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'nmf_pgd' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("nmf_pgd", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_gensim__models__nmf_pgd) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "gensim.models.nmf_pgd")) { if (unlikely(PyDict_SetItemString(modules, "gensim.models.nmf_pgd", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h, NULL, __pyx_n_s_gensim_models_nmf_pgd); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_solve_h, __pyx_t_1) < 0) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "gensim/models/nmf_pgd.pyx":1 * # Author: Timofey Yefimov # <<<<<<<<<<<<<< * * # cython: cdivision=True */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); 
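 /* NOTE (annotation, not generated by Cython): the Enum objects assigned in
    this run of statements (generic above; strided, indirect, contiguous and
    indirect_contiguous below) are sentinel singletons used by the memoryview
    machinery to tag how each buffer dimension is laid out and accessed; the
    runtime compares them by identity only, never by value. */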
__pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return 
self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init gensim.models.nmf_pgd", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init gensim.models.nmf_pgd"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (unlikely(memviewslice->memview || memviewslice->data)) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; 
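 /* NOTE (annotation, not generated by Cython): swap-then-release. The
    outgoing exception triple is parked in tmp_* above and Py_XDECREF'd only
    after the new triple is installed below, so destructors triggered by the
    decref never observe a half-updated thread state. */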
tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto 
bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if 
(likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = 
__Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } if (unlikely(stop <= start)) return __Pyx_NewRef(__pyx_empty_unicode); length = stop - start; cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { 
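/* Note: only a pending AttributeError is cleared here (see
   __Pyx_PyObject_GetAttrStr_ClearAttributeError above); any other
   exception raised during the attribute lookup stays set for the caller. */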
__Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, 
*ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" 
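/* __Pyx_AddTraceback below fakes a Python frame for a C-level call site:
   it builds a minimal PyCodeObject (cached via __pyx_find_code_object /
   __pyx_insert_code_object, keyed by the negated C line or the Python line),
   wraps it in a PyFrameObject, and hands it to PyTraceBack_Here() so that
   tracebacks report the original source location. */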
#include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if 
(*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably be the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ?
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number, ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ndim = ctx->head->field->type->ndim; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
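/* 'x' entries are pad bytes: the pending type chunk was flushed above and
   fmt_offset has been advanced by new_count without recording an element type. */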
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && 
buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy 
memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, 
(((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned 
long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) 
(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 
4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = 
neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) 
> 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, 
sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if 
(PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
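/* A standalone sketch (not part of the generated module) of the run-time
   endianness probe that the fallback conversion paths above perform before
   calling _PyLong_AsByteArray()/_PyLong_FromByteArray(): the first byte of
   an int holding 1 reads back as 1 exactly on little-endian machines. */
#include <stdio.h>

int main(void)
{
    int one = 1;
    int is_little = (int)*(unsigned char *)&one; /* 1 on little-endian */
    printf("byte order: %s-endian\n", is_little ? "little" : "big");
    return 0;
}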
delete.c
#include <omp.h>

int main()
{
    int x = 0;
    int i;

    /* Both x and i are declared before the parallel region, so they are
       shared by default; every thread writes the shared i below, which is
       an unsynchronized concurrent write (a data race). */
    #pragma omp parallel
    {
        if (x) {
            i = 1;
        } else {
            i = 0;
        }
    }
    return 0;
}
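/* A minimal race-free variant of the kernel above (an illustrative sketch;
   the original may be a deliberate race-detection test input): private(i)
   gives each thread its own copy of i, so the concurrent writes no longer
   touch the same variable. */
#include <omp.h>

int main(void)
{
    int x = 0;
    int i;

    #pragma omp parallel private(i)
    {
        if (x) {
            i = 1;
        } else {
            i = 0;
        }
    }
    return 0;
}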
Ave_var.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <math.h> #include "grb2.h" #include "wgrib2.h" #include "fnlist.h" /* * ave_var * * v 0.1 * * 12/2016: Public Domain: Wesley Ebisuzaki * based on Ave_test.c public domain (Wesley Ebisuzaki) * * ave_var, computes the mean and variance using the Welford method (one-pass) */ /* from http://jonisalonen.com/2013/deriving-welfords-method-for-computing-variance/ variance(samples): M := 0 S := 0 for k from 1 to N: x := samples[k] oldM := M M := M + (x-M)/k S := S + (x-M)*(x-oldM) return S/(N-1) */ // #define DEBUG extern int decode, file_append, nx, ny, save_translation; extern int flush_mode; extern unsigned int *translation; extern int use_scale, dec_scale, bin_scale, wanted_bits, max_bits; extern enum output_grib_type grib_type; struct ave_var_struct { double *M, *S; float *min, *max; unsigned int ndata; struct full_date time0, time1, time2; // time2 = verf time int has_val, n_fields, n_missing; int dt, dt_unit, nx, ny; unsigned char *first_sec[9]; unsigned char *next_sec[9]; int use_scale, dec_scale, bin_scale, wanted_bits, max_bits; enum output_grib_type grib_type; struct seq_file out; }; static int write_ave_var(struct ave_var_struct *save); static int free_ave_var_struct(struct ave_var_struct *save); static int init_ave_var_struct(struct ave_var_struct *save, unsigned int ndata); static int update_ave_var_struct(struct ave_var_struct *save, unsigned char **sec, float *data, unsigned int ndata,int missing); static int free_ave_var_struct(struct ave_var_struct *save) { if (save->has_val == 1) { free(save->M); free(save->S); free(save->min); free(save->max); free_sec(save->first_sec); free_sec(save->next_sec); } free(save); return 0; } static int init_ave_var_struct(struct ave_var_struct *save, unsigned int ndata) { unsigned int i; if (ndata == 0) fatal_error("ave_var_var_struct: ndata = 0",""); if (save->ndata != ndata) { if (save->ndata != 0) { free(save->M); free(save->S); free(save->min); free(save->max); } if ((save->M = (double *) malloc( ((size_t) ndata) * sizeof(double))) == NULL) fatal_error("ave_ave_var: memory allocation problem: val",""); if ((save->S = (double *) malloc( ((size_t) ndata) * sizeof(double))) == NULL) fatal_error("ave_ave_var: memory allocation problem: val",""); if ((save->min = (float *) malloc( ((size_t) ndata) * sizeof(float))) == NULL) fatal_error("ave_ave_var: memory allocation problem: val",""); if ((save->max = (float *) malloc( ((size_t) ndata) * sizeof(float))) == NULL) fatal_error("ave_ave_var: memory allocation problem: val",""); save->ndata = ndata; } for (i=0; i < ndata; i++) { save->M[i] = save->S[i] = 0.0; save->min[i] = save->max[i] = UNDEFINED; } save->has_val = 1; save->n_fields = 0; save->n_missing = 0; free_sec(save->first_sec); free_sec(save->next_sec); return 0; } static int update_ave_var_struct(struct ave_var_struct *save, unsigned char **sec, float *data, unsigned int ndata,int missing) { unsigned int i, ii; double oldM, x; if (save->ndata != ndata) fatal_error("ave_var: dimension mismatch",""); /* the data needs to be translated from we:sn to raw, need to do it now, translation[] may be different if called from finalized phase */ save->n_fields += 1; #pragma omp parallel for private(i,ii,x,oldM) for (i = 0; i < ndata; i++) { ii = translation == NULL ? 
i : translation[i]; if (DEFINED_VAL(data[i]) && DEFINED_VAL(save->M[ii])) { x = data[i]; oldM = save->M[ii]; save->M[ii] += (x-save->M[ii]) / (double) save->n_fields; save->S[ii] += (x-save->M[ii]) * (x-oldM); } else { save->M[ii] = UNDEFINED; save->S[ii] = UNDEFINED; } } if (save->n_fields == 1) { for (i = 0; i < ndata; i++) { ii = translation == NULL ? i : translation[i]; save->max[ii] = save->min[ii] = data[i]; } } else { #pragma omp parallel for private(i,ii) for (i = 0; i < ndata; i++) { ii = translation == NULL ? i : translation[i]; if (DEFINED_VAL(data[i]) && DEFINED_VAL(save->M[ii])) { save->max[ii] = data[i] > save->max[ii] ? data[i] : save->max[ii]; save->min[ii] = data[i] < save->min[ii] ? data[i] : save->min[ii]; } } } if (save->n_fields == 1) { save->nx = nx; save->ny = ny; save->use_scale = use_scale; save->dec_scale = dec_scale; save->bin_scale = bin_scale; save->wanted_bits = wanted_bits; save->max_bits = max_bits; save->grib_type = grib_type; } save->n_missing += missing; // update current reference time Get_time(sec[1]+12,&(save->time1)); // update current verf time if (Verf_time(sec, &(save->time2)) != 0) { fatal_error("update_ave_var_struct: could not find verification time",""); } return 0; } static int write_ave_var(struct ave_var_struct *save) { int j, n, pdt, i_ave; unsigned int i, ndata; float *data; unsigned char *p, *sec4; sec4 = NULL; if (save->has_val == 0) return 0; ndata = save->ndata; if ((data = (float *) malloc(sizeof(float) * ((size_t) ndata))) == NULL) fatal_error("ave_var: memory allocation",""); /* mean value */ #pragma omp parallel for private(i) for (i = 0; i < ndata; i++) { data[i] = (UNDEFINED_VAL(save->M[i])) ? UNDEFINED : save->M[i]; } pdt = GB2_ProdDefTemplateNo(save->first_sec); // average of an analysis or forecast i_ave= 0; if (pdt == 0) { sec4 = (unsigned char *) malloc(58 * sizeof(unsigned char)); if (sec4 == NULL) fatal_error("ave: memory allocation",""); for (i = 0; i < 34; i++) { sec4[i] = save->first_sec[4][i]; } uint_char((unsigned int) 58, sec4); // length sec4[8] = 8; // pdt // verification time Save_time(&(save->time2), sec4+34); sec4[41] = 1; uint_char(save->n_missing, sec4+42); sec4[i_ave = 46] = 0; // average sec4[47] = 1; // rt++ sec4[48] = save->dt_unit; // total length of stat processing uint_char(save->dt*(save->n_fields+save->n_missing-1), sec4+49); sec4[53] = save->dt_unit; // time step uint_char(save->dt, sec4+54); } // average of an ensemble forecast, use pdt 4.11 else if (pdt == 1) { sec4 = (unsigned char *) malloc(61 * sizeof(unsigned char)); if (sec4 == NULL) fatal_error("ave_var: memory allocation",""); for (i = 0; i < 37; i++) { sec4[i] = save->first_sec[4][i]; } uint_char((unsigned int) 61, sec4); // length sec4[8] = 11; // pdt // verification time Save_time(&(save->time2), sec4+37); sec4[44] = 1; // 1 time range uint_char(save->n_missing, sec4+45); sec4[i_ave = 49] = 0; // average sec4[50] = 1; // rt=constant sec4[51] = save->dt_unit; // total length of stat processing uint_char(save->dt*(save->n_fields+save->n_missing-1), sec4+52); sec4[56] = save->dt_unit; // time step uint_char(save->dt, sec4+57); } // average of derived fcst base on all ens members, use pdt 4.12 else if (pdt == 2) { sec4 = (unsigned char *) malloc(60 * sizeof(unsigned char)); if (sec4 == NULL) fatal_error("fcst_ave: memory allocation",""); for (i = 0; i < 36; i++) { sec4[i] = save->first_sec[4][i]; } uint_char((unsigned int) 60, sec4); // length sec4[8] = 12; // pdt // verification time Save_time(&(save->time2), sec4+36); sec4[43] = 1; // 1 
time range uint_char(save->n_missing, sec4+44); sec4[i_ave = 48] = 0; // average sec4[49] = 1; // rt=constant sec4[50] = save->dt_unit; // total length of stat processing uint_char(save->dt*(save->n_fields+save->n_missing-1), sec4+51); sec4[55] = save->dt_unit; // time step uint_char(save->dt, sec4+56); } // average of an average or accumulation else if (pdt == 8) { i = GB2_Sec4_size(save->first_sec); n = save->first_sec[4][41]; if (i != 46 + 12*n) fatal_error("ave: invalid sec4 size for pdt=8",""); // keep pdt == 8 but make it 12 bytes bigger sec4 = (unsigned char *) malloc( (i+12) * sizeof(unsigned char)); if (sec4 == NULL) fatal_error("ave: memory allocation",""); uint_char((unsigned int) i+12, sec4); // new length for (i = 4; i < 34; i++) { // keep base of pdt sec4[i] = save->first_sec[4][i]; } // new verification time Save_time(&(save->time2), sec4+34); // number of stat-proc loops is increased by 1 sec4[41] = n + 1; // copy old stat-proc loops // for (j = n*12-1; j >= 0; j--) sec4[58+j] = save->first_sec[4][46+j]; for (j = 0; j < n*12; j++) sec4[46+12+j] = save->first_sec[4][46+j]; uint_char(save->n_missing, sec4+42); sec4[i_ave = 46] = 0; // average sec4[47] = 1; // rt++ sec4[48] = save->dt_unit; // total length of stat processing uint_char(save->dt*(save->n_fields+save->n_missing-1), sec4+49); // processing number sec4[53] = save->dt_unit; // time step uint_char(save->dt, sec4+54); } // pdt 4.12 -> pdt 4.12 ave -> ave of ave else if (pdt == 12) { i = GB2_Sec4_size(save->first_sec); n = save->first_sec[4][43]; // number of time ranges if (i != 48 + 12*n) fatal_error("ave: invalid sec4 size for pdt=12",""); // keep pdt == 12 but make it 12 bytes bigger sec4 = (unsigned char *) malloc( (i+12) * sizeof(unsigned char)); if (sec4 == NULL) fatal_error("ave: memory allocation",""); uint_char((unsigned int) i+12, sec4); // new length for (i = 4; i < 36; i++) { // keep base of pdt sec4[i] = save->first_sec[4][i]; } // new verification time Save_time(&(save->time2), sec4+36); // number of stat-proc loops is increased by 1 sec4[43] = n + 1; // copy old stat-proc loops for (j = 0; j < n*12; j++) sec4[48+12+j] = save->first_sec[4][48+j]; uint_char(save->n_missing, sec4+44); sec4[i_ave = 48] = 0; // average sec4[49] = 1; // rt++ sec4[50] = save->dt_unit; // total length of stat processing uint_char(save->dt*(save->n_fields+save->n_missing-1), sec4+51); // processing number sec4[55] = save->dt_unit; // time step uint_char(save->dt, sec4+56); } else { fatal_error_i("ave_var with pdt %d is not supported",pdt); } // write grib file p = save->first_sec[4]; save->first_sec[4] = sec4; grib_wrt(save->first_sec, data, ndata, save->nx, save->ny, save->use_scale, save->dec_scale, save->bin_scale, save->wanted_bits, save->max_bits, save->grib_type, &(save->out)); if (flush_mode) fflush_file(&(save->out)); /* Sample Standard Deviation */ if (save->n_fields > 1) { #pragma omp parallel for private(i) for (i = 0; i < ndata; i++) { data[i] = (UNDEFINED_VAL(save->M[i])) ? 
UNDEFINED : sqrt(save->S[i]/(save->n_fields - 1)); } } else { for (i = 0; i < ndata; i++) { data[i] = UNDEFINED; } } if (i_ave == 0) fatal_error("ave_var: i_ave not defined",""); sec4[i_ave] = 6; // (sample) standard deviation /* note: the standard deviation can be written out with the same scaling as the mean */ grib_wrt(save->first_sec, data, ndata, save->nx, save->ny, save->use_scale, save->dec_scale, save->bin_scale, save->wanted_bits, save->max_bits, save->grib_type, &(save->out)); if (flush_mode) fflush_file(&(save->out)); sec4[i_ave] = 3; // min /* note: the min can be written out with the same scaling as the mean */ grib_wrt(save->first_sec, save->min, ndata, save->nx, save->ny, save->use_scale, save->dec_scale, save->bin_scale, save->wanted_bits, save->max_bits, save->grib_type, &(save->out)); if (flush_mode) fflush_file(&(save->out)); sec4[i_ave] = 2; // max /* note: the max can be written out with the same scaling as the mean */ grib_wrt(save->first_sec, save->max, ndata, save->nx, save->ny, save->use_scale, save->dec_scale, save->bin_scale, save->wanted_bits, save->max_bits, save->grib_type, &(save->out)); if (flush_mode) fflush_file(&(save->out)); save->first_sec[4] = p; free(data); free(sec4); return 0; } /* * HEADER:000:ave_var:output:2:average/std dev/min/max X=time step, Y=output */ int f_ave_var(ARG2) { struct ave_var_struct *save; int i, pdt, new_type; struct full_date time, ttime; int missing; char string[10]; // initialization if (mode == -1) { save_translation = decode = 1; // allocate static structure *local = save = (struct ave_var_struct *) malloc( sizeof(struct ave_var_struct)); if (save == NULL) fatal_error("memory allocation f_ave_var",""); i = sscanf(arg1, "%d%2s", &save->dt, string); if (i != 2) fatal_error("ave_var: delta-time: (int)(2 characters) %s", arg1); save->dt_unit = -1; if (strcmp(string,"hr") == 0) save->dt_unit = 1; else if (strcmp(string,"dy") == 0) save->dt_unit = 2; else if (strcmp(string,"mo") == 0) save->dt_unit = 3; else if (strcmp(string,"yr") == 0) save->dt_unit = 4; if (save->dt_unit == -1) fatal_error("ave_var: unsupported time unit %s", string); if (fopen_file(&(save->out), arg2, file_append ? "ab" : "wb") != 0) { free(save); fatal_error("ave_var: could not open %s", arg2); } save->has_val = 0; save->ndata = 0; save->n_fields = 0; save->M = save->S = NULL; init_sec(save->first_sec); init_sec(save->next_sec); return 0; } save = (struct ave_var_struct *) *local; if (mode == -2) { // cleanup if (save->has_val == 1) { write_ave_var(save); } fclose_file(&(save->out)); free_ave_var_struct(save); return 0; } if (mode < 0) fatal_error("ave_var: programming error",""); pdt = GB2_ProdDefTemplateNo(sec); // only support pdt == 0, 1, 2, 8, 12 if (pdt != 0 && pdt != 1 && pdt != 2 && pdt != 8 && pdt != 12) return 0; // first time through ..
save data and return if (save->has_val == 0) { // new data: write and save init_ave_var_struct(save, ndata); update_ave_var_struct(save, sec, data, ndata, 0); // copy sec copy_sec(sec, save->first_sec); copy_sec(sec, save->next_sec); // time0 = lowest reference time // time1 = current reference time // time2 = verification time Get_time(sec[1]+12,&(save->time0)); save->time1 = save->time0; if (Verf_time(sec, &(save->time2)) != 0) { fatal_error("ave_var: could not determine the verification time",""); } save->has_val = 1; return 0; } // check to see if new variable new_type = 0; // get the reference time of field Get_time(sec[1]+12, &time); // get the reference time of last field ttime = save->time1; Add_time(&ttime, save->dt, save->dt_unit); missing = 0; while ((i=Cmp_time(&time, &ttime)) > 0) { missing++; Add_time(&ttime, save->dt, save->dt_unit); } if (i != 0) new_type = 1; if (mode == 98) fprintf(stderr, "ave: compare ref time new_type = %d missing=%d\n", new_type, missing); if (new_type == 0) { if (same_sec0(sec,save->first_sec) == 0 || same_sec1_not_time(mode, sec,save->first_sec) == 0 || same_sec3(sec,save->first_sec) == 0 || same_sec4_not_time(mode, sec,save->first_sec) == 0) new_type = 1; if (mode == 98) fprintf(stderr, "ave: testsec %d %d %d %d\n", same_sec0(sec,save->first_sec), same_sec1_not_time(0, sec,save->first_sec), same_sec3(sec,save->first_sec), same_sec4_not_time(0, sec,save->first_sec)); if (mode == 98) fprintf(stderr, "ave_var: new_type %d\n", new_type); } // finished determining whether new_type if (new_type == 0) { // update sum if (mode == 98) fprintf(stderr, "ave_var: update sum\n"); update_ave_var_struct(save, sec, data, ndata, missing); return 0; } // new field, do grib output and save current data write_ave_var(save); init_ave_var_struct(save, ndata); update_ave_var_struct(save, sec, data, ndata, 0); copy_sec(sec, save->first_sec); copy_sec(sec, save->next_sec); return 0; }
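/* A self-contained sketch of the one-pass Welford recurrence quoted in the
   header comment of Ave_var.c and applied per grid point by
   update_ave_var_struct(); illustrative only, not wgrib2 code. */
#include <stdio.h>

static void welford(const double *x, int n, double *mean, double *var)
{
    double M = 0.0, S = 0.0;
    for (int k = 1; k <= n; k++) {
        double oldM = M;
        M += (x[k - 1] - M) / k;                /* running mean */
        S += (x[k - 1] - M) * (x[k - 1] - oldM);
    }
    *mean = M;
    *var = (n > 1) ? S / (n - 1) : 0.0;         /* sample variance */
}

int main(void)
{
    double x[] = {2, 4, 4, 4, 5, 5, 7, 9};
    double mean, var;
    welford(x, 8, &mean, &var);
    printf("mean=%g sample variance=%g\n", mean, var); /* 5 and 32/7 */
    return 0;
}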
auto_context.h
/* Copyright (c) 2016, TU Dresden All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the TU Dresden nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TU DRESDEN BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #include "types.h" /** Methods for calculating auto-context feature channels. */ /** * @brief Calculates the L2 mean (plain average) of a set of object coordinates. * * @param objCoords Object coordinates to average. * @return jp::coord3_t Mean. */ jp::coord3_t l2Mean(const std::vector<cv::Vec3f>& objCoords) { cv::Vec3f mean(0, 0, 0); for(unsigned i = 0; i < objCoords.size(); i++) mean += objCoords[i]; mean /= (float) objCoords.size(); return jp::coord3_t(mean[0], mean[1], mean[2]); } /** * @brief Calculates the L1 mean (geometric median) of a set of object coordinates. * * Uses the Weiszfeld implementation of Y. Vardi and C.-H. Zhang: * The multivariate l1-median and associated data depth. In Proceedings of the National * Academy of Sciences, 2000 * * @param objCoords Object coordinates to average. * @return jp::coord3_t Mean.
*/ jp::coord3_t l1Mean(const std::vector<cv::Vec3f>& objCoords) { cv::Vec3f mean = l2Mean(objCoords); int it = 0, maxIt = 10; // will stop after maxIt iterations float delta, minDelta = 0.001; // will stop when change after last iteration is smaller than minDelta do { cv::Vec3f T1(0, 0, 0); cv::Vec3f newMean; float weightSum = 0.f; bool setHit = false; float supportInSet = 0; for(unsigned i = 0; i < objCoords.size(); i++) { float dist = cv::norm(objCoords[i] - mean); // check whether the l1 estimate coincides with one of the set points if(dist < EPS) { setHit = true; supportInSet ++; continue; } T1 += objCoords[i] / dist; weightSum += 1 / dist; } T1 /= weightSum; if(!setHit) { newMean = T1; } else // point in the set was hit by the estimate, calculate estimate without this point { cv::Vec3f R(0, 0, 0); for(unsigned i = 0; i < objCoords.size(); i++) { float dist = cv::norm(objCoords[i] - mean); if(dist < EPS) continue; R += (objCoords[i] - mean) / dist; } float gamma = std::min(1.0, supportInSet / cv::norm(R)); newMean = (1-gamma) * T1 + gamma * mean; } delta = cv::norm(mean - newMean); it++; mean = newMean; } while((it < maxIt) && (delta > minDelta)); return jp::coord3_t(mean[0], mean[1], mean[2]); } /** * @brief Calculates the (robustly) smoothed/regularized object coordinate at a specific location. * * Smoothing combines trees and a small local neighbourhood. It calculates the geometric median of all these points. * * @param cX X position of where the smoothed coordinate should be calculated. * @param cY Y position of where the smoothed coordinate should be calculated. * @param objID Object ID of which the prediction should be smoothed. * @param objProb Segmentation. Pixels with label 0 will be ignored. * @param forest Random forest that made the object coordinate prediction * @param leafImgs Prediction of the forest. One leaf image per tree in the forest. Each pixel stores the leaf index where the corresponding patch arrived at. * @return jp::coord3_t Smoothed object coordinate. */ jp::coord3_t coordFilter( int cX, int cY, jp::id_t objID, const jp::img_label_t& objProb, const std::vector<jp::RegressionTree<jp::feature_t>>& forest, const std::vector<jp::img_leaf_t>& leafImgs) { int k = 3; // Size of the pixel neighbourhood. // create interval of the local neighbourhood (respecting image borders) int minX = std::max(0, cX - (k / 2)); int maxX = std::min(objProb.cols-1, cX + (k / 2)); int minY = std::max(0, cY - (k / 2)); int maxY = std::min(objProb.rows-1, cY + (k / 2)); std::vector<cv::Vec3f> objCoords; objCoords.reserve(k * k * forest.size()); // iterate over neighbourhood and collect object coordinates (used to calculate the geometric median) for(int x = minX; x <= maxX; x++) for(int y = minY; y <= maxY; y++) { if(objProb(y, x) == 0) continue; // ignore background pixels for(int t = 0; t < forest.size(); t++) { const std::vector<jp::mode_t>* modes = forest[t].getModes(leafImgs[t](y, x), objID); for(int m = 0; m < 1; m++) { jp::coord3_t curM = modes->at(m).mean; if(curM[0] == 0 && curM[1] == 0 && curM[2] == 0) continue; // ignore empty predictions objCoords.push_back(cv::Vec3f(curM[0], curM[1], curM[2])); } } } if(objCoords.empty()) return jp::coord3_t(0, 0, 0); return l1Mean(objCoords); // calculate the geometric median } /** * @brief Fills in the auto-context feature channels in input data using the prediction of the last forest. * * Auto-context feature channels are robustly smoothed object coordinate and object class predictions (given per object).
* * @param forest Random forest that made the object coordinate prediction * @param leafImgs Prediction of the forest. One leaf image per tree in the forest. Each pixel stores the leaf index where the corresponding patch arrived at. * @param rProbabilities Object probability maps. One per object. * @param inputData Output parameter. Current frame for which the auto-context feature channels should be calculated (associated members will be replaced). * @return void */ void computeAutoContextChannels( const std::vector<jp::RegressionTree<jp::feature_t>>& rForest, const std::vector<jp::img_leaf_t>& leafImgs, const std::vector<jp::img_stat_t>& rProbabilities, jp::img_data_t& inputData) { GlobalProperties* gp = GlobalProperties::getInstance(); int imgWidth = inputData.seg.cols; int imgHeight = inputData.seg.rows; int acSubSample = gp->fP.acSubsample; // auto-context feature channels can be stored sub-sampled to save memory int acImgWidth = imgWidth / acSubSample; int acImgHeight = imgHeight / acSubSample; // object class feature channel are median smoothed object probability maps #pragma omp parallel for for(unsigned o = 0; o < rProbabilities.size(); o++) { jp::img_label_t dProb(acImgHeight, acImgWidth); for(unsigned x=0; x < acImgWidth; x++) for(unsigned y=0; y < acImgHeight; y++) dProb(y, x) = rProbabilities[o](y*acSubSample, x*acSubSample) * 255; cv::medianBlur(dProb, inputData.labelData[o], 5); } // object coordinate feature channel are robustly (l1-)smoothed object coordinate predictions std::vector<jp::img_leaf_t> subLeafImgs; for(unsigned tIdx = 0; tIdx < leafImgs.size(); tIdx++) subLeafImgs.push_back(jp::img_leaf_t(acImgHeight, acImgWidth)); // sub sample the leaf images #pragma omp parallel for for(unsigned x=0; x < acImgWidth; x++) for(unsigned y=0; y < acImgHeight; y++) for(unsigned tIdx = 0; tIdx < leafImgs.size(); tIdx++) subLeafImgs[tIdx](y, x) = leafImgs[tIdx](y*acSubSample, x*acSubSample); std::vector<jp::img_coord_t> coordPredictions; for(unsigned o = 0; o < gp->fP.objectCount; o++) coordPredictions.push_back(jp::img_coord_t::zeros(acImgHeight, acImgWidth)); // create smoothed object coordinate predictions #pragma omp parallel for for(unsigned x=0; x < acImgWidth; x++) for(unsigned y=0; y < acImgHeight; y++) for(int objID = 1; objID < gp->fP.objectCount+1; objID++) coordPredictions[objID-1](y, x) = coordFilter(x, y, objID, inputData.labelData[objID-1], rForest, subLeafImgs); for(unsigned o = 0; o < coordPredictions.size(); o++) inputData.coordData[o] = coordPredictions[o]; }
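/* A self-contained C sketch of the plain Weiszfeld iteration that l1Mean()
   above builds on, started from the arithmetic mean as in the original.
   Simplified: it skips the Vardi-Zhang correction that l1Mean() applies when
   the estimate lands exactly on a sample point; names are illustrative. */
#include <math.h>
#include <stdio.h>

typedef struct { double x, y, z; } vec3;

static vec3 geometric_median(const vec3 *p, int n, int max_it, double min_delta)
{
    vec3 m = {0.0, 0.0, 0.0};
    for (int i = 0; i < n; i++) { m.x += p[i].x; m.y += p[i].y; m.z += p[i].z; }
    m.x /= n; m.y /= n; m.z /= n;                 /* start from the L2 mean */

    for (int it = 0; it < max_it; it++) {
        vec3 num = {0.0, 0.0, 0.0};
        double wsum = 0.0;
        for (int i = 0; i < n; i++) {
            double dx = p[i].x - m.x, dy = p[i].y - m.y, dz = p[i].z - m.z;
            double d = sqrt(dx * dx + dy * dy + dz * dz);
            if (d < 1e-9) continue;               /* skip coincident points */
            num.x += p[i].x / d; num.y += p[i].y / d; num.z += p[i].z / d;
            wsum += 1.0 / d;
        }
        if (wsum == 0.0) break;                   /* all points coincide with m */
        vec3 next = { num.x / wsum, num.y / wsum, num.z / wsum };
        double step = sqrt((next.x - m.x) * (next.x - m.x) +
                           (next.y - m.y) * (next.y - m.y) +
                           (next.z - m.z) * (next.z - m.z));
        m = next;
        if (step < min_delta) break;              /* converged */
    }
    return m;
}

int main(void)
{
    vec3 pts[] = { {0,0,0}, {1,0,0}, {0,1,0}, {10,10,10} }; /* one outlier */
    vec3 med = geometric_median(pts, 4, 10, 0.001);
    printf("median ~ (%.3f, %.3f, %.3f)\n", med.x, med.y, med.z);
    return 0;
}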
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// \brief This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// \brief This is a basic class for representing single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// \brief Kind of the directive. OpenMPDirectiveKind Kind; /// \brief Starting location of the directive (directive keyword). SourceLocation StartLoc; /// \brief Ending location of the directive. SourceLocation EndLoc; /// \brief Numbers of clauses. const unsigned NumClauses; /// \brief Number of child expressions/stmts. const unsigned NumChildren; /// \brief Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// \brief Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// \brief Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<OMPClause *>())) {} /// \brief Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// \brief Set the associated statement for the directive. /// /// /param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// \brief Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only those declarations that meet some run-time /// criteria. 
template <class FilterPredicate> class filtered_clause_iterator { ArrayRef<OMPClause *>::const_iterator Current; ArrayRef<OMPClause *>::const_iterator End; FilterPredicate Pred; void SkipToNextClause() { while (Current != End && !Pred(*Current)) ++Current; } public: typedef const OMPClause *value_type; filtered_clause_iterator() : Current(), End() {} filtered_clause_iterator(ArrayRef<OMPClause *> Arr, FilterPredicate Pred) : Current(Arr.begin()), End(Arr.end()), Pred(Pred) { SkipToNextClause(); } value_type operator*() const { return *Current; } value_type operator->() const { return *Current; } filtered_clause_iterator &operator++() { ++Current; SkipToNextClause(); return *this; } filtered_clause_iterator operator++(int) { filtered_clause_iterator tmp(*this); ++(*this); return tmp; } bool operator!() { return Current == End; } operator bool() { return Current != End; } }; /// \brief Gets a single clause of the specified kind \a K associated with the /// current directive iff there is only one clause of this kind (and assertion /// is fired if there is more than one clause is associated with the /// directive). Returns nullptr if no clause of kind \a K is associated with /// the directive. const OMPClause *getSingleClause(OpenMPClauseKind K) const; /// \brief Returns starting location of directive kind. SourceLocation getLocStart() const { return StartLoc; } /// \brief Returns ending location of directive. SourceLocation getLocEnd() const { return EndLoc; } /// \brief Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// \brief Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// \brief Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// \brief Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// \brief Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// \brief Returns statement associated with the directive. Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return const_cast<Stmt *>(*child_begin()); } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } static bool classof(const Stmt *S) { return S->getStmtClass() >= firstOMPExecutableDirectiveConstant && S->getStmtClass() <= lastOMPExecutableDirectiveConstant; } child_range children() { if (!hasAssociatedStmt()) return child_range(); Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end()); return child_range(ChildStorage, ChildStorage + NumChildren); } ArrayRef<OMPClause *> clauses() { return getClauses(); } ArrayRef<OMPClause *> clauses() const { return const_cast<OMPExecutableDirective *>(this)->getClauses(); } }; /// \brief This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. 
/// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// \brief This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// \brief Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, and /// the next 7 are specific to the worksharing ones. /// After the fixed children, three arrays of length CollapsedNum are /// allocated: loop counters, their updates and final values. /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, SeparatedCondOffset = 6, InitOffset = 7, IncOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals arrays). DefaultEnd = 9, // The following 7 exprs are used by worksharing loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, // Offset to the end (and start of the following counters/updates/finals // arrays) for worksharing loop directives. WorksharingEnd = 16, }; /// \brief Get the counters storage. MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// \brief Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// \brief Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts. /// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// \brief Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { return isOpenMPWorksharingDirective(Kind) ? WorksharingEnd : DefaultEnd; } /// \brief Children number. static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 3 * CollapsedNum; // Counters, Updates and Finals } void setIterationVariable(Expr *IV) { *std::next(child_begin(), IterationVariableOffset) = IV; } void setLastIteration(Expr *LI) { *std::next(child_begin(), LastIterationOffset) = LI; } void setCalcLastIteration(Expr *CLI) { *std::next(child_begin(), CalcLastIterationOffset) = CLI; } void setPreCond(Expr *PC) { *std::next(child_begin(), PreConditionOffset) = PC; } void setCond(Expr *Cond, Expr *SeparatedCond) { *std::next(child_begin(), CondOffset) = Cond; *std::next(child_begin(), SeparatedCondOffset) = SeparatedCond; } void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; } void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; } void setIsLastIterVariable(Expr *IL) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), IsLastIterVariableOffset) = IL; } void setLowerBoundVariable(Expr *LB) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), LowerBoundVariableOffset) = LB; } void setUpperBoundVariable(Expr *UB) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), UpperBoundVariableOffset) = UB; } void setStrideVariable(Expr *ST) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), StrideVariableOffset) = ST; } void setEnsureUpperBound(Expr *EUB) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), EnsureUpperBoundOffset) = EUB; } void setNextLowerBound(Expr *NLB) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), NextLowerBoundOffset) = NLB; } void setNextUpperBound(Expr *NUB) { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); *std::next(child_begin(), NextUpperBoundOffset) = NUB; } void setCounters(ArrayRef<Expr *> A); void 
setUpdates(ArrayRef<Expr *> A); void setFinals(ArrayRef<Expr *> A); public: /// \brief The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// \brief Loop iteration variable. Expr *IterationVarRef; /// \brief Loop last iteration number. Expr *LastIteration; /// \brief Calculation of last iteration. Expr *CalcLastIteration; /// \brief Loop pre-condition. Expr *PreCond; /// \brief Loop condition. Expr *Cond; /// \brief A condition with 1 iteration separated. Expr *SeparatedCond; /// \brief Loop iteration variable init. Expr *Init; /// \brief Loop increment. Expr *Inc; /// \brief IsLastIteration - local flag variable passed to runtime. Expr *IL; /// \brief LowerBound - local variable passed to runtime. Expr *LB; /// \brief UpperBound - local variable passed to runtime. Expr *UB; /// \brief Stride - local variable passed to runtime. Expr *ST; /// \brief EnsureUpperBound -- expression LB = min(LB, NumIterations). Expr *EUB; /// \brief Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// \brief Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// \brief Counters Loop counters. SmallVector<Expr *, 4> Counters; /// \brief Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// \brief Final loop counter values for CodeGen. SmallVector<Expr *, 4> Finals; /// \brief Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && PreCond != nullptr && Cond != nullptr && SeparatedCond != nullptr && Init != nullptr && Inc != nullptr; } /// \brief Initialize all the fields to null. /// \param Size Number of elements in the counters/finals/updates arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; SeparatedCond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; Counters.resize(Size); Updates.resize(Size); Finals.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; } } }; /// \brief Get number of collapsed loops. unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IterationVariableOffset))); } Expr *getLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LastIterationOffset))); } Expr *getCalcLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CalcLastIterationOffset))); } Expr *getPreCond() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PreConditionOffset))); } Expr *getCond(bool SeparateIter) const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), (SeparateIter ?
SeparatedCondOffset : CondOffset)))); } Expr *getInit() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset))); } Expr *getInc() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset))); } Expr *getIsLastIterVariable() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IsLastIterVariableOffset))); } Expr *getLowerBoundVariable() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LowerBoundVariableOffset))); } Expr *getUpperBoundVariable() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), UpperBoundVariableOffset))); } Expr *getStrideVariable() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), StrideVariableOffset))); } Expr *getEnsureUpperBound() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), EnsureUpperBoundOffset))); } Expr *getNextLowerBound() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextLowerBoundOffset))); } Expr *getNextUpperBound() const { assert(isOpenMPWorksharingDirective(getDirectiveKind()) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextUpperBoundOffset))); } const Stmt *getBody() const { // This relies on the loop form is already checked by Sema. Stmt *Body = getAssociatedStmt()->IgnoreContainers(true); Body = cast<ForStmt>(Body)->getBody(); for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) { Body = Body->IgnoreContainers(); Body = cast<ForStmt>(Body)->getBody(); } return Body; } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// \brief This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. 
/// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// \brief This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
/// \param Exprs Helper expressions for CodeGen. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// \brief This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForSimdDirectiveClass; } }; /// \brief This represents '#pragma omp sections' directive. /// /// \code /// #pragma omp sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp sections' has clauses 'private' with /// the variables 'a' and 'b' and 'reduction' with operator '+' and variables /// 'c' and 'd'. /// class OMPSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSectionsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionsDirectiveClass; } }; /// \brief This represents '#pragma omp section' directive. /// /// \code /// #pragma omp section /// \endcode /// class OMPSectionDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, StartLoc, EndLoc, 0, 1) {} /// \brief Build an empty directive. /// explicit OMPSectionDirective() : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section, SourceLocation(), SourceLocation(), 0, 1) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSectionDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSectionDirectiveClass; } }; /// \brief This represents '#pragma omp single' directive. /// /// \code /// #pragma omp single private(a,b) copyprivate(c,d) /// \endcode /// In this example directive '#pragma omp single' has clauses 'private' with /// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'. /// class OMPSingleDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. 
/// OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPSingleDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPSingleDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPSingleDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSingleDirectiveClass; } }; /// \brief This represents '#pragma omp master' directive. /// /// \code /// #pragma omp master /// \endcode /// class OMPMasterDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// \brief Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// \brief This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Name of the directive. DeclarationNameInfo DirName; /// \brief Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, 0, 1), DirName(Name) {} /// \brief Build an empty directive. 
/// explicit OMPCriticalDirective() : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), 0, 1), DirName() {} /// \brief Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// \brief Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, EmptyShell); /// \brief Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// \brief This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// \brief This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// \brief Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// \brief Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// \brief This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. 
/// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// \brief This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// \brief This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. 
/// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// \brief Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// \brief This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// \brief Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// \brief This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// \brief Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// \brief Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Creates an empty directive. /// /// \param C AST context. 
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};

/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments: variables
/// 'a' and 'b'.
/// The 'omp flush' directive does not have clauses, but it may have an
/// optional list of variables to flush. This list of variables is stored
/// within a fake clause, FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses, 0) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};

/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};

/// \brief This represents '#pragma omp atomic' directive.
/// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has clause 'capture'. /// class OMPAtomicDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic, StartLoc, EndLoc, NumClauses, 4) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPAtomicDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic, SourceLocation(), SourceLocation(), NumClauses, 4) {} /// \brief Set 'x' part of the associated expression/statement. void setX(Expr *X) { *std::next(child_begin()) = X; } /// \brief Set 'v' part of the associated expression/statement. void setV(Expr *V) { *std::next(child_begin(), 2) = V; } /// \brief Set 'expr' part of the associated expression/statement. void setExpr(Expr *E) { *std::next(child_begin(), 3) = E; } public: /// \brief Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// \brief Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); } const Expr *getX() const { return cast_or_null<Expr>(*std::next(child_begin())); } /// \brief Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } const Expr *getV() const { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } /// \brief Get 'expr' part of the associated expression/statement. Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } const Expr *getExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// \brief This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. 
/// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// \brief This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. /// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// \brief Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams, StartLoc, EndLoc, NumClauses, 1) {} /// \brief Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// \brief Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// \brief Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; } // end namespace clang #endif
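A minimal consumer sketch of the loop-directive API declared above. The helper below is hypothetical (not part of the header); it assumes the usual Clang/LLVM headers and shows how the classof-based RTTI and the accessors fit together.

// Sketch only: inspectOpenMPLoop is an illustrative helper, not part of the header above.
#include "clang/AST/StmtOpenMP.h"
#include "llvm/Support/Casting.h"

static void inspectOpenMPLoop(const clang::Stmt *S) {
  using namespace clang;
  // OMPLoopDirective::classof accepts simd, for, for simd, parallel for and
  // parallel for simd, so dyn_cast succeeds for any of those directives.
  if (const auto *Loop = llvm::dyn_cast<OMPLoopDirective>(S)) {
    unsigned Depth = Loop->getCollapsedNumber();
    // One counter expression per collapsed loop level.
    for (const Expr *Counter : Loop->counters())
      (void)Counter; // e.g. feed into CodeGen or an analysis
    // getBody() peels Depth nested ForStmt levels, as implemented above,
    // and returns the innermost loop body.
    const Stmt *Body = Loop->getBody();
    (void)Body;
    (void)Depth;
  }
}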
coro_solver.h
/**
 * @author : Zhao Chonyyao (cyzhao@zju.edu.cn)
 * @date : 2021-04-30
 * @description: co-rotation solver.
 * @version : 1.0
 */
#ifndef PhysIKA_CORO_SOLVER
#define PhysIKA_CORO_SOLVER
#include <Eigen/Eigenvalues>
#include "Solver/linear_solver/customized_pcg.h"
#include "Solver/linear_solver/coro_preconditioner.h"
#include "newton_method.h"
namespace PhysIKA {
/**
 * geometric multigrid based coro solver.
 *
 */
template <typename T>
class gmg_coro_solver : public newton_base<T, 3>
{
public:
    gmg_coro_solver(const std::shared_ptr<Problem<T, 3>>& pb, const size_t max_iter, const T tol, const bool line_search, const bool hes_is_constant, const std::shared_ptr<ldlt_type<T>>& fac_hes_rest, std::shared_ptr<elas_intf<T, 3>>& elas_intf_ptr, const SPM_R<T> hes_rest, const T cg_tol = 1e-3, std::shared_ptr<dat_str_core<T, 3>> dat_str = nullptr)
        : newton_base<T, 3>(pb, max_iter, tol, line_search, hes_is_constant, nullptr, dat_str), pd_fac_(make_shared<gmg_coro_precond_fac<T>>(fac_hes_rest, elas_intf_ptr)), cg_tol_(cg_tol), hes_rest_(hes_rest) {}

public:
    std::shared_ptr<gmg_coro_precond_fac<T>> pd_fac_;
    const T cg_tol_;
    // add this member for testing spectrum, remove this later
    const SPM_R<T> hes_rest_;

    /* When kinetic_ = nullptr and w_pos = 0, A = K.
       We would like to check || U^T K(x) U - \Lambda ||,
       where U^T K_tilde U = \Lambda_tilde, \Lambda holds the eigenvalues of K(x),
       and U is calculated as U = R U_bar, with U_bar the eigenvectors of K_bar (rest shape). */
    int test_spectrum(const SPM_R<T>& A, const size_t band, const size_t num_band, MAT<T>& eig_vec) const
    {
        printf("test spectrum.\n");
        const size_t num_eig = band * num_band;
        const size_t removed = band;
        MAT<T> eig_vec_rest;
        VEC<T> eig_val_rest;
        IF_ERR(return, get_spectrum(hes_rest_, band, num_band, eig_vec_rest, eig_val_rest));
        eig_vec_rest = eig_vec_rest.rightCols(num_eig - removed).eval();
        eig_val_rest = eig_val_rest.bottomRows(num_eig - removed).eval();
        const auto R_ = pd_fac_->get_R();
#if 0
        // build R as an Eigen sparse matrix, then compare A with R * K_bar * R^T
        SPM_C<T> R_eigen(R_->rows(), R_->cols());
        {
            vector<Triplet<T>> trips;
            for (size_t i = 0; i < R_->rowsb(); ++i) {
                const auto R_i = (*R_)(i);
                for (size_t m = 0; m < 3; ++m)
                    for (size_t n = 0; n < 3; ++n) {
                        trips.push_back(Triplet<T>(3 * i + m, 3 * i + n, R_i(m, n)));
                    }
            }
            R_eigen.reserve(trips.size());
            R_eigen.setFromTriplets(trips.begin(), trips.end());
        }
        SPM_R<T> A_tilde = R_eigen * hes_rest_ * R_eigen.transpose();
        printf("|| A - A_tilde || = %f \n", (A - A_tilde).norm());
#endif
        MAT<T> eig_vec_tilde(eig_vec_rest.rows(), eig_vec_rest.cols());
#pragma omp parallel for
        for (size_t i = 0; i < eig_vec_rest.cols(); ++i) {
            eig_vec_tilde.col(i) = (*R_) * eig_vec_rest.col(i);
        }
        VEC<T> eig_val;
        // eig(A, eig_vec, eig_val);
        IF_ERR(return, get_spectrum(A, band, num_band, eig_vec, eig_val));
        eig_val = eig_val.bottomRows(num_eig - removed).eval();
        cout << "eig val of A \n" << eig_val << endl;
        const MAT<T> eig_val_diag = eig_val.asDiagonal();
        const MAT<T> diff = eig_vec_tilde.transpose() * A * eig_vec_tilde - eig_val_diag;
        cout << "diff \n" << diff.diagonal() << endl;
        T norm = diff.norm();
        printf("|| U^TK(x)U - \\Lambda || = %f \n", norm);
        return 0;
    }

    int solve_linear_eq(const SPM_R<T>& A, const T* b, const SPM_R<T>& J, const T* c, const T* x0, T* solution) const override
    {
        pd_fac_->opt_R(x0);
        auto coro_pcg_solver = make_shared<PCG<T, SPM_R<T>>>(pd_fac_->build_preconditioner(), cg_tol_);
        return coro_pcg_solver->solve(A, b, solution);
    }
};

/**
 * algebraic multigrid based coro solver.
 *
 */
template <typename T>
class amg_coro_solver : public newton_base<T, 3>
{
public:
    amg_coro_solver(const std::shared_ptr<Problem<T, 3>>& pb, const size_t max_iter, const T tol, const bool line_search, const bool hes_is_constant, const SPM<T>& hes_rest, const std::shared_ptr<ldlt_type<T>>& fac_hes_rest, const T cg_tol = 1e-3, std::shared_ptr<dat_str_core<T, 3>> dat_str = nullptr)
        : newton_base<T, 3>(pb, max_iter, tol, line_search, hes_is_constant, nullptr, dat_str), pd_fac_(make_shared<amg_coro_precond_fac<T>>(fac_hes_rest, hes_rest)), cg_tol_(cg_tol) {}

private:
    std::shared_ptr<amg_coro_precond_fac<T>> pd_fac_;
    const T cg_tol_;

    int solve_linear_eq(const SPM_R<T>& A, const T* b, const SPM_R<T>& J, const T* c, const T* x0, T* solution) const override
    {
        pd_fac_->opt_R(A);
        auto coro_pcg_solver = make_shared<PCG<T, SPM_R<T>>>(pd_fac_->build_preconditioner(), cg_tol_);
        return coro_pcg_solver->solve(A, b, solution);
    }
};
}  // namespace PhysIKA
#endif
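Both solvers defer to a co-rotational preconditioner: K(x) is approximated by R * K_rest * R^T with a block-diagonal per-vertex rotation R, so one PCG preconditioner application only needs the prefactorized rest-shape Hessian. Below is a self-contained Eigen sketch of that application; all names are illustrative, not the PhysIKA API.

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>
#include <vector>

// z = R * K_rest^{-1} * R^T * r : one application of the co-rotational
// preconditioner, reusing the LDLT factorization of the rest-shape Hessian.
template <typename T>
Eigen::Matrix<T, Eigen::Dynamic, 1> apply_coro_preconditioner(
    const Eigen::SimplicialLDLT<Eigen::SparseMatrix<T>>& fac_hes_rest,
    const std::vector<Eigen::Matrix<T, 3, 3>>& R,  // one rotation per vertex
    const Eigen::Matrix<T, Eigen::Dynamic, 1>& r)  // residual, size 3 * #vertices
{
    Eigen::Matrix<T, Eigen::Dynamic, 1> tmp(r.size()), z(r.size());
    // tmp = R^T r: block-diagonal product, one 3x3 block per vertex
    for (std::size_t i = 0; i < R.size(); ++i)
        tmp.template segment<3>(3 * i) = R[i].transpose() * r.template segment<3>(3 * i);
    // y = K_rest^{-1} tmp: a cheap back-substitution, no refactorization
    const Eigen::Matrix<T, Eigen::Dynamic, 1> y = fac_hes_rest.solve(tmp);
    // z = R y: rotate back into the current configuration
    for (std::size_t i = 0; i < R.size(); ++i)
        z.template segment<3>(3 * i) = R[i] * y.template segment<3>(3 * i);
    return z;
}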
test.c
#include <stdio.h>
#include <omp.h>

omp_lock_t my_lock;

int main(int argc, char **argv)
{
    int i, thread_id;
    int global_nloops, private_nloops;

    global_nloops = 0;

    #pragma omp parallel private(private_nloops, thread_id)
    {
        private_nloops = 0;
        thread_id = omp_get_thread_num();

        #pragma omp for
        for (i=0; i<100000; ++i)
        {
            ++private_nloops;
        }

        #pragma omp critical
        {
            printf("Thread %d adding its iterations (%d) to the sum (%d)...\n",
                   thread_id, private_nloops, global_nloops);
            global_nloops += private_nloops;
            printf("CRITICAL ...total nloops now equals %d.\n", global_nloops);
        }
    }

    //==================================================================================

    global_nloops = 0;
    omp_init_lock(&my_lock);

    #pragma omp parallel private(private_nloops, thread_id)
    {
        private_nloops = 0;
        thread_id = omp_get_thread_num();

        #pragma omp for
        for (i=0; i<100000; ++i)
        {
            ++private_nloops;
        }

        omp_set_lock(&my_lock);
        {
            printf("Thread %d adding its iterations (%d) to the sum (%d)...\n",
                   thread_id, private_nloops, global_nloops);
            global_nloops += private_nloops;
            printf("LOCK ...total nloops now equals %d.\n", global_nloops);
        }
        omp_unset_lock(&my_lock);
    }
    omp_destroy_lock(&my_lock);

    return 0;
}
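The two variants above serialize the final accumulation with a critical section and with a lock. The idiomatic third option is OpenMP's reduction clause, which keeps a private partial sum per thread and lets the runtime combine them. A sketch of the same count written that way (not part of the original test):

#include <stdio.h>
#include <omp.h>

int main(void)
{
    int i;
    int global_nloops = 0;

    /* Each thread accumulates into a private copy of global_nloops;
       the runtime sums the copies at the end of the region, so no
       critical section or lock is needed. */
    #pragma omp parallel for reduction(+:global_nloops)
    for (i = 0; i < 100000; ++i)
    {
        ++global_nloops;
    }

    printf("REDUCTION ...total nloops equals %d.\n", global_nloops);
    return 0;
}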
GB_binop__minus_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_01__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__minus_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__minus_uint32) // A*D function (colscale): GB (_AxD__minus_uint32) // D*A function (rowscale): GB (_DxB__minus_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__minus_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__minus_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__minus_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__minus_uint32) // C=scalar+B GB (_bind1st__minus_uint32) // C=scalar+B' GB (_bind1st_tran__minus_uint32) // C=A+scalar GB (_bind2nd__minus_uint32) // C=A'+scalar GB (_bind2nd_tran__minus_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij - bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x - y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINUS || GxB_NO_UINT32 || GxB_NO_MINUS_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__minus_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__minus_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__minus_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__minus_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include 
"GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__minus_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__minus_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__minus_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__minus_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__minus_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x - bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__minus_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij - y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x - aij) ; \ } GrB_Info GB (_bind1st_tran__minus_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
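For reference, a user-level sketch of the public GraphBLAS call that exercises this kernel family: eWiseAdd with the built-in GrB_MINUS_UINT32 operator on two uint32 matrices. Whether this hard-coded kernel or the generic fallback runs is an internal dispatch detail (see GB_DISABLE); most error handling is elided here.

#include "GraphBLAS.h"

// C = A (-) B on the union of the patterns of A and B, using the built-in
// uint32 MINUS operator; this is the public entry point that dispatches to
// kernels like GB (_AaddB__minus_uint32) above.
GrB_Info subtract_uint32(GrB_Matrix *C, GrB_Matrix A, GrB_Matrix B,
                         GrB_Index nrows, GrB_Index ncols)
{
    GrB_Info info = GrB_Matrix_new(C, GrB_UINT32, nrows, ncols);
    if (info != GrB_SUCCESS) return (info);
    // no mask, no accumulator, default descriptor
    return (GrB_Matrix_eWiseAdd_BinaryOp(*C, NULL, NULL, GrB_MINUS_UINT32,
                                         A, B, NULL));
}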
edge_data_c2c.h
/*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on March 05, 2007).

Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:

Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/

//
// Project Name:      Kratos
// Last Modified by:  $Author: antonia $
// Date:              $Date: 2009-01-14 08:26:51 $
// Revision:          $Revision: 1.11 $
//
//

#if !defined(KRATOS_EDGE_DATA_C2C_H_INCLUDED)
#define KRATOS_EDGE_DATA_C2C_H_INCLUDED

//we suggest defining the following macro
#define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
//we suggest defining the following macro
// #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "free_surface_application.h"
#include "utilities/openmp_utils.h"

namespace Kratos
{
//  template<unsigned int TDim>
//  class EdgeConstructionScratch
//  {
//  public:
//      array_1d<double, TDim+1> N;
//      boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx;
//      double volume;
//      double weighting_factor = 1.0 / static_cast<double>(TDim+1);
//      boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent;
//      array_1d<double, TDim+1> mass_lumped;
//      array_1d<unsigned int, TDim+1> nodal_indices;
//      array_1d<double, TDim+1> heights;
//
//  }

//structure definition for fast access to edge data using CSR format
template<unsigned int TDim>
class EdgesStructureTypeC2C
{
public:
    //component ij of the consistent mass matrix (M = Ni * Nj * dOmega)
    double Mass;

    //components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega)
    //double Laplacian;
    boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ;

    //components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega)
    array_1d<double, TDim> Ni_DNj;

    //components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega)
    //TRANSPOSED GRADIENT
array_1d<double, TDim> DNi_Nj; //************************************************************************************* //************************************************************************************* //gradient integrated by parts //RHSi += DNi_Nj pj + Aboundary * pext ==> RHS += Ni_DNj p_j - DNi_Nj p_i //ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually" inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i; } inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i; } //************************************************************************************* //************************************************************************************* //gradient //RHSi += Ni_DNj[k]*v[k] inline void Add_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]); } inline void Sub_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]); } //************************************************************************************* //************************************************************************************* //gradient //RHSi += Ni_DNj pj inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] += Ni_DNj[comp] * (p_j - p_i); } inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination[comp] -= Ni_DNj[comp] * (p_j - p_i); } //************************************************************************************* //************************************************************************************* //gradient //RHSi += DNi_Nj[k]*v[k] inline void Add_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp]; } inline void Sub_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j) { for (unsigned int comp = 0; comp < TDim; comp++) destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp]; } //************************************************************************************* //************************************************************************************* //gets the trace of the laplacian matrix inline void CalculateScalarLaplacian(double& l_ij) { l_ij = LaplacianIJ(0, 0); for (unsigned int comp = 1; comp < TDim; comp++) l_ij += LaplacianIJ(comp, comp); } inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j) { // #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION // double temp = a_i[0] * Ni_DNj[0]; // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) // temp += a_i[k_comp] * Ni_DNj[k_comp]; // for (unsigned 
int l_comp = 0; l_comp < TDim; l_comp++) // destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]); // #else // double aux_i = a_i[0] * Ni_DNj[0]; // double aux_j = a_j[0] * Ni_DNj[0]; // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) // { // aux_i += a_i[k_comp] * Ni_DNj[k_comp]; // aux_j += a_j[k_comp] * Ni_DNj[k_comp]; // } // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp]; // #endif // for (unsigned int comp = 0; comp < TDim; comp++) // destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i; double second = a_i[0] * DNi_Nj[0]; double first = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { second += a_i[k_comp] * DNi_Nj[k_comp]; first += a_j[k_comp] * Ni_DNj[k_comp]; } for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] += first * U_j[l_comp] - second * U_i[l_comp]; } inline void Sub_ConvectiveContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j) { // #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION // double temp = a_i[0] * Ni_DNj[0]; // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) // temp += a_i[k_comp] * Ni_DNj[k_comp]; // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]); // #else // double aux_i = a_i[0] * Ni_DNj[0]; // double aux_j = a_j[0] * Ni_DNj[0]; // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) // { // aux_i += a_i[k_comp] * Ni_DNj[k_comp]; // aux_j += a_j[k_comp] * Ni_DNj[k_comp]; // } // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp]; // #endif double second = a_i[0] * DNi_Nj[0]; double first = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { second += a_i[k_comp] * DNi_Nj[k_comp]; first += a_j[k_comp] * Ni_DNj[k_comp]; } for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= first * U_j[l_comp] - second * U_i[l_comp]; } inline void Sub_ConvectiveContribution(double& destination, const array_1d<double, TDim>& a_i, const double& phi_i, const array_1d<double, TDim>& a_j, const double& phi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION double temp = a_i[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; destination -= temp * (phi_j - phi_i); #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } destination -= aux_j * phi_j - aux_i * phi_i; #endif // double second = a_i[0] * DNi_Nj[0]; // double first = a_j[0] * Ni_DNj[0]; // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) // { // second += a_i[k_comp] * DNi_Nj[k_comp]; // first += a_j[k_comp] * Ni_DNj[k_comp]; // } // destination -= first * phi_j - second * phi_i; } inline void Add_ConvectiveContribution(double& destination, const array_1d<double, TDim>& a_i, const double& phi_i, const array_1d<double, TDim>& a_j, const double& phi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION double temp = a_i[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; destination += temp * (phi_j - phi_i); #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] 
* Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } destination += aux_j * phi_j - aux_i * phi_i; #endif // double second = a_i[0] * DNi_Nj[0]; // double first = a_j[0] * Ni_DNj[0]; // for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) // { // second += a_i[k_comp] * DNi_Nj[k_comp]; // first += a_j[k_comp] * Ni_DNj[k_comp]; // } // destination += first * phi_j - second * phi_i; } //************************************************************************************* //************************************************************************************* inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j) { double conv_stab = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp); for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]); // double temp = 0.0; // double lij = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // { // lij += LaplacianIJ(k_comp,k_comp); // temp = a_i[k_comp] * a_i[k_comp]; // } // // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_low[l_comp] = temp * lij * (U_j[l_comp] - U_i[l_comp]); } // inline void CalculateConvectionStabilization_LOW( array_1d<double,TDim>& stab_low, // const array_1d<double,TDim>& a_i, const array_1d<double,TDim>& U_i, const double& p_i, // const array_1d<double,TDim>& a_j, const array_1d<double,TDim>& U_j, const double& p_j // ) // { // double conv_stab = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) // conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp,m_comp); // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]); // //// adding pressure // double press_diff = p_j-p_i; // for (unsigned int j_comp = 0; j_comp < TDim; j_comp++) // { // for (unsigned int i_comp = 0; i_comp < TDim; i_comp++) // stab_low[j_comp] -= a_i[i_comp] * LaplacianIJ(i_comp,j_comp) * press_diff ; // } // // // } inline void CalculateConvectionStabilization_LOW(double& stab_low, const array_1d<double, TDim>& a_i, const double& phi_i, const array_1d<double, TDim>& a_j, const double& phi_j) { double conv_stab = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) for (unsigned int m_comp = 0; m_comp < TDim; m_comp++) conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp); stab_low = conv_stab * (phi_j - phi_i); } //************************************************************************************* //************************************************************************************* inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high, const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i, const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION double temp = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct // 
double temp_i = 0.0; // double temp_j = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // { // temp_j += a_i[k_comp] * Ni_DNj[k_comp]; // temp_i += a_i[k_comp] * DNi_Nj[k_comp]; // } // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_high[l_comp] = +(temp_j*pi_j[l_comp] - temp_i*pi_i[l_comp]); //check if the minus sign is correct // double temp_i = 0.0; // double temp_j = 0.0; // for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) // { // temp_i += a_i[k_comp] * Ni_DNj[k_comp]; // temp_j += a_i[k_comp] * DNi_Nj[k_comp]; // } // for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) // stab_high[l_comp] = (temp_j*pi_j[l_comp] + temp_i*pi_i[l_comp]); //check if the minus sign is correct #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]); #endif } inline void CalculateConvectionStabilization_HIGH(double& stab_high, const array_1d<double, TDim>& a_i, const double& pi_i, const array_1d<double, TDim>& a_j, const double& pi_j) { #ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION double temp = 0.0; for (unsigned int k_comp = 0; k_comp < TDim; k_comp++) temp += a_i[k_comp] * Ni_DNj[k_comp]; stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct #else double aux_i = a_i[0] * Ni_DNj[0]; double aux_j = a_j[0] * Ni_DNj[0]; for (unsigned int k_comp = 1; k_comp < TDim; k_comp++) { aux_i += a_i[k_comp] * Ni_DNj[k_comp]; aux_j += a_j[k_comp] * Ni_DNj[k_comp]; } stab_high = -(aux_j * pi_j - aux_i * pi_i); #endif } //************************************************************************************* //************************************************************************************* inline void Add_StabContribution(array_1d<double, TDim>& destination, const double tau, const double beta, const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high) { for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]); } inline void Add_StabContribution(double& destination, const double tau, const double beta, const double& stab_low, const double& stab_high) { destination += tau * (stab_low - beta * stab_high); } inline void Sub_StabContribution(array_1d<double, TDim>& destination, const double tau, const double beta, const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high) { for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= tau * (stab_low[l_comp] - beta * stab_high[l_comp]); } inline void Sub_StabContribution(double& destination, const double tau, const double beta, const double& stab_low, const double& stab_high) { destination -= tau * (stab_low - beta * stab_high); } //************************************************************************************* //************************************************************************************* inline void Add_ViscousContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& U_i, const double& nu_i, const array_1d<double, TDim>& U_j, const double& nu_j) { //calculate scalar laplacian double L = 0.0; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) L += LaplacianIJ(l_comp, l_comp); //double nu_avg = 0.5*(nu_i+nu_j); for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) 
destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]); } inline void Sub_ViscousContribution(array_1d<double, TDim>& destination, const array_1d<double, TDim>& U_i, const double& nu_i, const array_1d<double, TDim>& U_j, const double& nu_j) { //calculate scalar laplacian double L = 0.0; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) L += LaplacianIJ(l_comp, l_comp); //double nu_avg = 0.5*(nu_i+nu_j); for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]); } }; //class definition of matrices using CSR format template<unsigned int TDim, class TSparseSpace> class MatrixContainerC2C { public: //name for the self defined structure typedef EdgesStructureTypeC2C<TDim> CSR_Tuple; typedef vector<CSR_Tuple> EdgesVectorType; //name for row start and column index vectors typedef vector<unsigned int> IndicesVectorType; //names for separately stored node based values typedef vector<double> ValuesVectorType; // typedef std::vector< array_1d<double,TDim> > CalcVectorType; typedef vector< array_1d<double, TDim> > CalcVectorType; //constructor and destructor MatrixContainerC2C() { }; ~MatrixContainerC2C() { }; //functions to return private values inline unsigned int GetNumberEdges() { return mNumberEdges; } inline EdgesVectorType& GetEdgeValues() { return mNonzeroEdgeValues; } inline IndicesVectorType& GetColumnIndex() { return mColumnIndex; } inline IndicesVectorType& GetRowStartIndex() { return mRowStartIndex; } inline ValuesVectorType& GetLumpedMass() { return mLumpedMassMatrix; } inline ValuesVectorType& GetInvertedMass() { return mInvertedMassMatrix; } inline CalcVectorType& GetDiagGradient() { return mDiagGradientMatrix; } inline ValuesVectorType& GetHmin() { return mHmin; } //******************************************************** //function to size and initialize the vector of CSR tuples void ConstructCSRVector(ModelPart& model_part) { KRATOS_TRY //SIZE OF CSR VECTOR //defining the number of nodes and edges int n_nodes = model_part.Nodes().size(); //remark: no colouring algorithm is used here (symmetry is neglected) // respectively edge ij is considered different from edge ji mNumberEdges = 0; //counter to assign and get global nodal index int i_node = 0; //counting the edges connecting the nodes for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++) { //counting neighbours of each node mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size(); //DIAGONAL TERMS //mNumberEdges++; //assigning global index to each node node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++); } //error message in case number of nodes does not coincide with number of indices if (i_node != n_nodes) KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!"); //allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation mNonzeroEdgeValues.resize(mNumberEdges); //SetToZero(mNonzeroEdgeValues); mColumnIndex.resize(mNumberEdges); //SetToZero(mColumnIndex); mRowStartIndex.resize(n_nodes + 1); //SetToZero(mRowStartIndex); mLumpedMassMatrix.resize(n_nodes); SetToZero(mLumpedMassMatrix); mInvertedMassMatrix.resize(n_nodes); SetToZero(mInvertedMassMatrix); mDiagGradientMatrix.resize(n_nodes); SetToZero(mDiagGradientMatrix); mHmin.resize(n_nodes); SetToZero(mHmin); //INITIALIZING OF THE CSR VECTOR //temporary variable as the row start index of a node depends on the number of neighbours of the 
previous one unsigned int row_start_temp = 0; int number_of_threads = OpenMPUtils::GetNumThreads(); std::vector<int> row_partition(number_of_threads); OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition); for (int k = 0; k < number_of_threads; k++) { #pragma omp parallel if (OpenMPUtils::ThisThread() == k) { for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++) { typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i; //main loop over all nodes // for (typename ModelPart::NodesContainerType::iterator node_it=model_part.NodesBegin(); node_it!=model_part.NodesEnd(); node_it++) // { //getting the global index of the node i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX)); //determining its neighbours GlobalPointersVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES); //number of neighbours of node i determines row start index for the following node unsigned int n_neighbours = neighb_nodes.size(); //DIAGONAL TERMS //n_neighbours++; //reserving memory for work array std::vector<unsigned int> work_array; work_array.reserve(n_neighbours); //DIAGONAL TERMS //work_array.push_back(i_node); //nested loop over the neighbouring nodes for (GlobalPointersVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++) { //getting global index of the neighbouring node work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX))); } //reordering neighbours following their global indices std::sort(work_array.begin(), work_array.end()); //setting current row start index mRowStartIndex[i_node] = row_start_temp; //nested loop over the by now ordered neighbours for (unsigned int counter = 0; counter < n_neighbours; counter++) { //getting global index of the neighbouring node unsigned int j_neighbour = work_array[counter]; //calculating CSR index unsigned int csr_index = mRowStartIndex[i_node] + counter; //saving column index j of the original matrix mColumnIndex[csr_index] = j_neighbour; //initializing the CSR vector entries with zero mNonzeroEdgeValues[csr_index].Mass = 0.0; //mNonzeroEdgeValues[csr_index].Laplacian = 0.0; noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim); noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim); //TRANSPOSED GRADIENT noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim); } //preparing row start index for next node row_start_temp += n_neighbours; } } } //adding last entry (necessary for abort criterion of loops) mRowStartIndex[n_nodes] = mNumberEdges; //INITIALIZING NODE BASED VALUES //lumped mass matrix (elements Mi) /* #pragma omp parallel for for (int i_node=0; i_node<n_nodes; i_node++) mLumpedMassMatrix[i_node] = 0.0;*/ #pragma omp parallel for //set the heights to a huge number for (int i_node = 0; i_node < n_nodes; i_node++) mHmin[i_node] = 1e10; //diagonal of gradient matrix (elements Gii) // #pragma omp parallel for // for (int i_node=0; i_node<n_nodes; i_node++) // noalias(mDiagGradientMatrix[i_node]) = ZeroVector(TDim); KRATOS_CATCH("") } //********************************* //function to precalculate CSR data void BuildCSRData(ModelPart& model_part) { KRATOS_TRY //PRECALCULATING CSR DATA //defining temporary local variables for elementwise addition //shape functions array_1d<double, TDim + 1 > N; //shape function derivatives 
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
        //volume
        double volume;
        //weighting factor
        double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
        //elemental matrices
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
        //boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
        array_1d<double, TDim + 1 > mass_lumped;
        //global indices of elemental nodes
        array_1d<unsigned int, TDim + 1 > nodal_indices;
        array_1d<double, TDim + 1 > heights;

        //loop over all elements
        for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
        {
            //LOCAL ELEMENTWISE CALCULATIONS
            //getting geometry data of the element
            GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);

            //calculate length of the heights of the element
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
                for (unsigned int comp = 1; comp < TDim; comp++)
                {
                    heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
                }
                heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
                // KRATOS_WATCH(heights);
            }

            //setting up elemental mass matrices
            CalculateMassMatrix(mass_consistent, volume);
            noalias(mass_lumped) = ZeroVector(TDim + 1);
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
                    mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
                }
                //mass_lumped[ie_node] = volume * N[ie_node];
            }

            /*OLD DATA STRUCTURE
            //calculating elemental laplacian matrix
            noalias(laplacian) = ZeroMatrix(TDim+1,TDim+1);
            for (unsigned int ie_node=0; ie_node<=TDim; ie_node++)
                for (unsigned int je_node=ie_node+1; je_node<=TDim; je_node++)
                    //componentwise multiplication
                    for (unsigned int component=0; component<TDim; component++)
                    {
                        //taking advantage of symmetry
                        double temp = dN_dx(ie_node,component) * dN_dx(je_node,component) * volume;
                        laplacian(ie_node,je_node) += temp;
                        laplacian(je_node,ie_node) += temp;
                    }
            //multiply gradient with volume referring to each gauss point
            dN_dx *= (volume / double(TDim+1));*/

            //(corresponding to Ni * dOmega respectively Nj * dOmega)
            double weighted_volume = volume * weighting_factor;

            //ASSEMBLING GLOBAL DATA STRUCTURE
            //loop over the nodes of the element to determine their global indices
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));

            //assembling global "edge matrices" by adding local contributions
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                //check the heights and change the value if minimal is found
                if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
                    mHmin[ nodal_indices[ie_node] ] = heights[ie_node];

                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //remark: there is no edge linking node i with itself!
                    //DIAGONAL TERMS
                    if (ie_node != je_node)
                    {
                        //calculating CSR index from global index
                        unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);

                        //assigning precalculated element data to the referring edges
                        //contribution to edge mass
                        mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);

                        //contribution to edge laplacian
                        /*OLD DATA STRUCTURE
                        mNonzeroEdgeValues[csr_index].Laplacian = laplacian(ie_node,je_node);*/
                        boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                                laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;

                        //contribution to edge gradient
                        array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            //gradient[l_comp] += dN_dx(je_node,l_comp);
                            gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;

                        //TRANSPOSED GRADIENT
                        //contribution to transposed edge gradient
                        array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            //transp_gradient[l_comp] += dN_dx(ie_node,l_comp);
                            transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
                    }
                }
            }

            //assembling node based vectors
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                //diagonal of the global lumped mass matrix
                mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                //diagonal of the global gradient matrix
                array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
                for (unsigned int component = 0; component < TDim; component++)
                    //gradient[component] += dN_dx(ie_node,component);
                    gradient[component] += dN_dx(ie_node, component) * weighted_volume;
            }
        }

        //copy mass matrix to inverted mass matrix
        for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
        }

        //perform MPI synchronization between the domains
        //calculating inverted mass matrix (this requires synchronization for MPI parallelism)
        for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
        }

        KRATOS_CATCH("")
    }

    //******************************************
    //function to calculate CSR index of edge ij
    unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        //index indicating data position of edge ij
        unsigned int csr_index;
        //searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        //returning CSR index of edge ij
        return csr_index;

        KRATOS_CATCH("")
    }

    //***********************************************
    //function to get pointer to CSR tuple of edge ij
    CSR_Tuple* GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        //index indicating data position of edge ij
        unsigned int csr_index;
        //searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        //returning pointer to CSR tuple of edge ij
        return &mNonzeroEdgeValues[csr_index];

        KRATOS_CATCH("")
    }

    //*******************************
    //function to
free dynamic memory void Clear() { KRATOS_TRY mNonzeroEdgeValues.clear(); mColumnIndex.clear(); mRowStartIndex.clear(); mInvertedMassMatrix.clear(); mLumpedMassMatrix.clear(); mDiagGradientMatrix.clear(); mHmin.clear(); KRATOS_CATCH("") } //**************************** //functions to access database //(note that this is already thought for parallel; // for a single processor this could be done in a faster way) void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //loop over all nodes int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); #pragma omp parallel for firstprivate(n_nodes, it_begin) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); unsigned int i_node = i; //save value in the destination vector for (unsigned int component = 0; component < TDim; component++) (rDestination[i_node])[component] = (*node_it)[component]; } KRATOS_CATCH(""); } //**************************** //functions to access database //(note that this is already thought for parallel; // for a single processor this could be done in a faster way) void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //loop over all nodes int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable); #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); unsigned int i_node = i; //get the requested value in vector form array_1d<double, 3 > & vector = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos); //save value in the destination vector for (unsigned int component = 0; component < TDim; component++) (rDestination[i_node])[component] = vector[component]; } KRATOS_CATCH(""); } void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //loop over all nodes int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable); #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); unsigned int i_node = i; //get the requested value in vector form array_1d<double, 3 > & vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos); //save value in the destination vector for (unsigned int component = 0; component < TDim; component++) (rDestination[i_node])[component] = vector[component]; } KRATOS_CATCH(""); } void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //loop over all nodes int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); 
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable); #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); unsigned int i_node = i; //get the requested scalar value double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos); //save value in the destination vector rDestination[i_node] = scalar; } KRATOS_CATCH(""); } void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable); #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); unsigned int i_node = i; //get the requested scalar value double& scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos); //save value in the destination vector rDestination[i_node] = scalar; } KRATOS_CATCH(""); } void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //loop over all nodes int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable); #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); unsigned int i_node = i; //get reference of destination array_1d<double, 3 > & vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos); //save vector in database for (unsigned int component = 0; component < TDim; component++) vector[component] = (rOrigin[i_node])[component]; } KRATOS_CATCH(""); } void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //loop over all nodes int n_nodes = rNodes.size(); ModelPart::NodesContainerType::iterator it_begin = rNodes.begin(); unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable); #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos) for (int i = 0; i < n_nodes; i++) { ModelPart::NodesContainerType::iterator node_it = it_begin + i; //get the global index of node i // // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX)); int i_node = i; //get reference of destination double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos); //save scalar in database scalar = rOrigin[i_node]; } KRATOS_CATCH(""); } //********************************************************************* //destination = origin1 + value * Minv*origin void Add_Minv_value( CalcVectorType& destination, const CalcVectorType& origin1, const double value, const ValuesVectorType& Minv_vec, const CalcVectorType& origin ) { KRATOS_TRY int loop_size = destination.size(); #pragma 
omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { array_1d<double, TDim>& dest = destination[i_node]; const double m_inv = Minv_vec[i_node]; const array_1d<double, TDim>& origin_vec1 = origin1[i_node]; const array_1d<double, TDim>& origin_value = origin[i_node]; double temp = value * m_inv; for (unsigned int comp = 0; comp < TDim; comp++) dest[comp] = origin_vec1[comp] + temp * origin_value[comp]; } KRATOS_CATCH("") } void Add_Minv_value( ValuesVectorType& destination, const ValuesVectorType& origin1, const double value, const ValuesVectorType& Minv_vec, const ValuesVectorType& origin ) { KRATOS_TRY int loop_size = destination.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { double& dest = destination[i_node]; const double m_inv = Minv_vec[i_node]; const double& origin_vec1 = origin1[i_node]; const double& origin_value = origin[i_node]; double temp = value * m_inv; dest = origin_vec1 + temp * origin_value; } KRATOS_CATCH("") } //********************************************************************** void AllocateAndSetToZero(CalcVectorType& data_vector, int size) { data_vector.resize(size); int loop_size = size; #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { array_1d<double, TDim>& aaa = data_vector[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) aaa[comp] = 0.0; } } void AllocateAndSetToZero(ValuesVectorType& data_vector, int size) { data_vector.resize(size); int loop_size = size; #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { data_vector[i_node] = 0.0; ; } } //********************************************************************** void SetToZero(CalcVectorType& data_vector) { int loop_size = data_vector.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { array_1d<double, TDim>& aaa = data_vector[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) aaa[comp] = 0.0; } } void SetToZero(ValuesVectorType& data_vector) { int loop_size = data_vector.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { data_vector[i_node] = 0.0; ; } } //********************************************************************** void AssignVectorToVector(const CalcVectorType& origin, CalcVectorType& destination ) { int loop_size = origin.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { const array_1d<double, TDim>& orig = origin[i_node]; array_1d<double, TDim>& dest = destination[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) dest[comp] = orig[comp]; } } void AssignVectorToVector(const ValuesVectorType& origin, ValuesVectorType& destination ) { int loop_size = origin.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { destination[i_node] = origin[i_node]; } } private: //number of edges unsigned int mNumberEdges; //CSR data vector for storage of the G, L and consistent M components of edge ij EdgesVectorType mNonzeroEdgeValues; //vector to store column indices of nonzero matrix elements for each row IndicesVectorType mColumnIndex; //index vector to access the start of matrix row i in the column vector IndicesVectorType mRowStartIndex; //inverse of the mass matrix ... 
for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours) ValuesVectorType mInvertedMassMatrix; //minimum height around one node ValuesVectorType mHmin; //lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix) ValuesVectorType mLumpedMassMatrix; //diagonal of the gradient matrix (separately stored due to special calculations) CalcVectorType mDiagGradientMatrix; //******************************************* //functions to set up elemental mass matrices void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume) { for (unsigned int i_node = 0; i_node <= TDim; i_node++) { //diagonal terms mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; //1/6 //non-diagonal terms double temp = 0.08333333333333333333 * volume; // 1/12 for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++) { //taking advantage of symmetry mass_consistent(i_node, j_neighbour) = temp; mass_consistent(j_neighbour, i_node) = temp; } } } void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume) { for (unsigned int i_node = 0; i_node <= TDim; i_node++) { //diagonal terms mass_consistent(i_node, i_node) = 0.1 * volume; //non-diagonal terms double temp = 0.05 * volume; for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++) { //taking advantage of symmetry mass_consistent(i_node, j_neighbour) = temp; mass_consistent(j_neighbour, i_node) = temp; } } } }; } //namespace Kratos #endif //KRATOS_EDGE_DATA_C2C_H_INCLUDED defined
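/*
 * A minimal stand-alone C sketch (not from the Kratos sources above) of the
 * CSR edge layout that MatrixContainerC2C uses: mRowStartIndex[i] to
 * mRowStartIndex[i+1] delimits the neighbours of node i inside mColumnIndex,
 * and GetCSRIndex() is a linear search over that row. The graph, array names
 * and mass values below are hypothetical, chosen only to show the traversal
 * and lookup pattern.
 */
#include <stdio.h>

/* hypothetical 3-node graph with directed edges 0-1, 0-2, 1-0, 1-2, 2-0, 2-1 */
static const unsigned row_start[4]    = {0, 2, 4, 6};
static const unsigned column_index[6] = {1, 2, 0, 2, 0, 1};
static const double   edge_mass[6]    = {0.1, 0.2, 0.1, 0.3, 0.2, 0.3};

/* the same linear search GetCSRIndex() performs over row i */
static unsigned get_csr_index(unsigned node_i, unsigned neighbour_j)
{
    unsigned csr_index;
    for (csr_index = row_start[node_i]; csr_index != row_start[node_i + 1]; csr_index++)
        if (column_index[csr_index] == neighbour_j)
            break;
    return csr_index;
}

int main(void)
{
    /* edge loop over all neighbours j of each node i, as the solver kernels do */
    for (unsigned i = 0; i < 3; i++)
        for (unsigned k = row_start[i]; k < row_start[i + 1]; k++)
            printf("edge (%u,%u): mass %g\n", i, column_index[k], edge_mass[k]);
    printf("CSR index of edge (1,2): %u\n", get_csr_index(1, 2));
    return 0;
}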
declare-variant-8.c
/* { dg-do compile { target c } } */ /* { dg-additional-options "-fdump-tree-gimple" } */ void f01 (void); #pragma omp declare variant (f01) match (user={condition(6 == 7)},implementation={vendor(gnu)}) void f02 (void); void f03 (void); #pragma omp declare variant (f03) match (user={condition(6 == 6)},implementation={atomic_default_mem_order(seq_cst)}) void f04 (void); void f05 (void); #pragma omp declare variant (f05) match (user={condition(1)},implementation={atomic_default_mem_order(relaxed)}) void f06 (void); #pragma omp requires atomic_default_mem_order(seq_cst) void f07 (void); #pragma omp declare variant (f07) match (construct={parallel,for},device={kind("any")}) void f08 (void); void f09 (void); #pragma omp declare variant (f09) match (construct={parallel,for},implementation={vendor("gnu")}) void f10 (void); void f11 (void); #pragma omp declare variant (f11) match (construct={parallel,for}) void f12 (void); void f13 (void); #pragma omp declare variant (f13) match (construct={parallel,for}) void f14 (void); #pragma omp declare target to (f13, f14) void f15 (void); #pragma omp declare variant (f15) match (implementation={vendor(llvm)}) void f16 (void); void f17 (void); #pragma omp declare variant (f17) match (construct={target,parallel}) void f18 (void); void f19 (void); #pragma omp declare variant (f19) match (construct={target,parallel}) void f20 (void); void f21 (void); #pragma omp declare variant (f21) match (construct={teams,parallel}) void f22 (void); void f23 (void); #pragma omp declare variant (f23) match (construct={teams,parallel,for}) void f24 (void); void f25 (void); #pragma omp declare variant (f25) match (construct={teams,parallel}) void f26 (void); void f27 (void); #pragma omp declare variant (f27) match (construct={teams,parallel,for}) void f28 (void); void f29 (void); #pragma omp declare variant (f29) match (implementation={vendor(gnu)}) void f30 (void); void f31 (void); #pragma omp declare variant (f31) match (construct={teams,parallel,for}) void f32 (void); void f33 (void); #pragma omp declare variant (f33) match (device={kind("any\0any")}) /* { dg-warning "unknown property '.any.000any.' of 'kind' selector" } */ void f34 (void); void f35 (void); #pragma omp declare variant (f35) match (implementation={vendor("gnu\0")}) /* { dg-warning "unknown property '.gnu.000.' 
of 'vendor' selector" } */ void f36 (void); void test1 (void) { int i; f02 (); /* { dg-final { scan-tree-dump-times "f02 \\\(\\\);" 1 "gimple" } } */ f04 (); /* { dg-final { scan-tree-dump-times "f03 \\\(\\\);" 1 "gimple" } } */ f06 (); /* { dg-final { scan-tree-dump-times "f06 \\\(\\\);" 1 "gimple" } } */ #pragma omp parallel #pragma omp for for (i = 0; i < 1; i++) f08 (); /* { dg-final { scan-tree-dump-times "f07 \\\(\\\);" 1 "gimple" } } */ #pragma omp parallel for for (i = 0; i < 1; i++) f10 (); /* { dg-final { scan-tree-dump-times "f09 \\\(\\\);" 1 "gimple" } } */ #pragma omp for for (i = 0; i < 1; i++) #pragma omp parallel f12 (); /* { dg-final { scan-tree-dump-times "f12 \\\(\\\);" 1 "gimple" } } */ #pragma omp parallel #pragma omp target #pragma omp for for (i = 0; i < 1; i++) f14 (); /* { dg-final { scan-tree-dump-times "f14 \\\(\\\);" 1 "gimple" } } */ f16 (); /* { dg-final { scan-tree-dump-times "f16 \\\(\\\);" 1 "gimple" } } */ f34 (); /* { dg-final { scan-tree-dump-times "f34 \\\(\\\);" 1 "gimple" } } */ f36 (); /* { dg-final { scan-tree-dump-times "f36 \\\(\\\);" 1 "gimple" } } */ } #pragma omp declare target void test2 (void) { #pragma omp parallel f18 (); /* { dg-final { scan-tree-dump-times "f17 \\\(\\\);" 1 "gimple" } } */ } #pragma omp end declare target void test3 (void); #pragma omp declare target to (test3) void test3 (void) { #pragma omp parallel f20 (); /* { dg-final { scan-tree-dump-times "f20 \\\(\\\);" 1 "gimple" } } */ } void f21 (void) { int i; #pragma omp for for (i = 0; i < 1; i++) f24 (); /* { dg-final { scan-tree-dump-times "f23 \\\(\\\);" 1 "gimple" } } */ } void f26 (void) { int i; #pragma omp for for (i = 0; i < 1; i++) f28 (); /* { dg-final { scan-tree-dump-times "f28 \\\(\\\);" 1 "gimple" } } */ } void f29 (void) { int i; #pragma omp for for (i = 0; i < 1; i++) f32 (); /* { dg-final { scan-tree-dump-times "f32 \\\(\\\);" 1 "gimple" } } */ }
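/*
 * A minimal stand-alone sketch of the OpenMP 5.0 "declare variant" mechanism
 * the testcase above exercises: a call to the base function is redirected to
 * the named variant whenever the context selector matches at the call site.
 * This reduces the f11/f12-style construct={parallel} case to one pair of
 * functions; the names report/in_parallel are made up for illustration.
 * Compile with an OpenMP 5.0 compiler, e.g. gcc -fopenmp (GCC 10 or newer).
 */
#include <stdio.h>

void in_parallel (void)
{
  printf ("variant: called inside a parallel construct\n");
}

#pragma omp declare variant (in_parallel) match (construct={parallel})
void report (void)
{
  printf ("base: sequential call\n");
}

int
main (void)
{
  report ();    /* context does not match: the base definition runs */
  #pragma omp parallel num_threads (1)
  report ();    /* construct={parallel} matches: in_parallel () runs */
  return 0;
}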
test_verify_rowcols.c
#include "config.h" /* getopt needs _POSIX_C_SOURCE 2 */ #define _POSIX_C_SOURCE 2 #include <ctype.h> #include <limits.h> #include <math.h> #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #if defined(_MSC_VER) #include "wingetopt/src/getopt.h" #else #include <unistd.h> #endif #include "parasail.h" #include "parasail/cpuid.h" #include "parasail/io.h" #include "parasail/memory.h" #include "parasail/matrix_lookup.h" #include "func_verify_rowcols.h" static int verbose = 0; typedef struct gap_score { int open; int extend; } gap_score_t; gap_score_t gap_scores[] = { {10,1}, {10,2}, {14,2}, {40,2}, {INT_MIN,INT_MIN} }; static inline unsigned long binomial_coefficient( unsigned long n, unsigned long k) { /* from http://blog.plover.com/math/choose.html */ unsigned long r = 1; unsigned long d; if (k > n) { return 0; } for (d = 1; d <= k; d++) { r *= n--; r /= d; } return r; } static inline void k_combination2( unsigned long pos, unsigned long *a, unsigned long *b) { double s; double i = floor(sqrt(2.0 * pos)) - 1.0; if (i <= 1.0) { i = 1.0; } s = i * (i - 1.0) / 2.0; while (pos - s >= i) { s += i; i += 1; } *a = (unsigned long)(pos - s); *b = (unsigned long)(i); } static inline int diff_array( unsigned long size, int *a, int *b) { unsigned long i = 0; for (i=0; i<size; ++i) { if (a[i] != b[i]) return 1; } return 0; } static void check_functions( parasail_function_group_t f, parasail_sequences_t *sequences, unsigned long pair_limit_, const parasail_matrix_t *matrix_, gap_score_t gap) { const parasail_function_info_t *functions = f.fs; unsigned long matrix_index = 0; unsigned long gap_index = 0; unsigned long function_index = 0; long long pair_index = 0; long long pair_limit = (long long)pair_limit_; parasail_function_t *reference_function = NULL; const parasail_matrix_t ** matrices = parasail_matrices; const parasail_matrix_t * single_matrix[] = { matrix_, NULL }; if (NULL != matrix_) { matrices = single_matrix; } printf("checking %s functions\n", f.name); for (matrix_index=0; NULL!=matrices[matrix_index]; ++matrix_index) { const parasail_matrix_t *matrix = matrices[matrix_index]; const char *matrixname = matrix->name; if (verbose) printf("\t%s\n", matrixname); for (gap_index=0; INT_MIN!=gap_scores[gap_index].open; ++gap_index) { int open = gap_scores[gap_index].open; int extend = gap_scores[gap_index].extend; if (gap.open != INT_MIN && gap.extend != INT_MIN) { open = gap.open; extend = gap.extend; } if (verbose) printf("\t\topen=%d extend=%d\n", open, extend); reference_function = functions[0].pointer; for (function_index=1; NULL!=functions[function_index].pointer; ++function_index) { unsigned long saturated = 0; if (verbose) printf("\t\t\t%s\n", functions[function_index].name); #pragma omp parallel for for (pair_index=0; pair_index<pair_limit; ++pair_index) { parasail_result_t *reference_result = NULL; parasail_result_t *result = NULL; unsigned long a = 0; unsigned long b = 1; int *ref_score_row = NULL; int *ref_score_col = NULL; int *score_row = NULL; int *score_col = NULL; size_t size_a = 0; size_t size_b = 0; k_combination2(pair_index, &a, &b); size_a = sequences->seqs[a].seq.l; size_b = sequences->seqs[b].seq.l; /*printf("\t\t\t\tpair=%lld (%lu,%lu)\n", pair_index, a, b);*/ reference_result = reference_function( sequences->seqs[a].seq.s, size_a, sequences->seqs[b].seq.s, size_b, open, extend, matrix); result = functions[function_index].pointer( sequences->seqs[a].seq.s, size_a, sequences->seqs[b].seq.s, size_b, open, extend, 
matrix); if (parasail_result_is_saturated(result)) { /* no point in comparing a result that saturated */ parasail_result_free(reference_result); parasail_result_free(result); #pragma omp atomic saturated += 1; continue; } ref_score_row = parasail_result_get_score_row(reference_result); ref_score_col = parasail_result_get_score_col(reference_result); score_row = parasail_result_get_score_row(result); score_col = parasail_result_get_score_col(result); if (reference_result->score != result->score) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) wrong score (%d!=%d)\n", functions[function_index].name, a, b, open, extend, matrixname, reference_result->score, result->score); } } if (diff_array(size_b, ref_score_row, score_row)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad score row\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (diff_array(size_a, ref_score_col, score_col)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad score col\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (parasail_result_is_stats(result)) { int *ref_matches_row = parasail_result_get_matches_row(reference_result); int *ref_matches_col = parasail_result_get_matches_col(reference_result); int *ref_similar_row = parasail_result_get_similar_row(reference_result); int *ref_similar_col = parasail_result_get_similar_col(reference_result); int *ref_length_row = parasail_result_get_length_row(reference_result); int *ref_length_col = parasail_result_get_length_col(reference_result); int *matches_row = parasail_result_get_matches_row(result); int *matches_col = parasail_result_get_matches_col(result); int *similar_row = parasail_result_get_similar_row(result); int *similar_col = parasail_result_get_similar_col(result); int *length_row = parasail_result_get_length_row(result); int *length_col = parasail_result_get_length_col(result); if (diff_array(size_b, ref_matches_row, matches_row)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad matches row\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (diff_array(size_a, ref_matches_col, matches_col)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad matches col\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (diff_array(size_b, ref_similar_row, similar_row)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad similar row\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (diff_array(size_a, ref_similar_col, similar_col)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad similar col\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (diff_array(size_b, ref_length_row, length_row)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad length row\n", functions[function_index].name, a, b, open, extend, matrixname); } } if (diff_array(size_a, ref_length_col, length_col)) { #pragma omp critical(printer) { printf("%s(%lu,%lu,%d,%d,%s) bad length col\n", functions[function_index].name, a, b, open, extend, matrixname); } } } parasail_result_free(reference_result); parasail_result_free(result); } if (verbose && saturated) { printf("%s %d %d %s saturated %lu times\n", functions[function_index].name, open, extend, matrixname, saturated); } } if (gap.open != INT_MIN && gap.extend != INT_MIN) { /* user-specified gap, don't loop */ break; } } } } int main(int argc, char **argv) { unsigned long seq_count = 0; 
unsigned long limit = 0; parasail_sequences_t *sequences = NULL; char *endptr = NULL; char *filename = NULL; int c = 0; int test_scores = 1; int test_stats = 0; char *matrixname = NULL; const parasail_matrix_t *matrix = NULL; gap_score_t gap = {INT_MIN,INT_MIN}; while ((c = getopt(argc, argv, "f:m:n:o:e:vsS")) != -1) { switch (c) { case 'f': filename = optarg; break; case 'm': matrixname = optarg; break; case 'n': errno = 0; seq_count = strtol(optarg, &endptr, 10); if (errno) { perror("strtol"); exit(1); } break; case 'o': errno = 0; gap.open = strtol(optarg, &endptr, 10); if (errno) { perror("strtol gap.open"); exit(1); } break; case 'e': errno = 0; gap.extend = strtol(optarg, &endptr, 10); if (errno) { perror("strtol gap.extend"); exit(1); } break; case 'v': verbose = 1; break; case 's': test_stats = 1; break; case 'S': test_scores = 0; break; case '?': if (optopt == 'f' || optopt == 'n') { fprintf(stderr, "Option -%c requires an argument.\n", optopt); } else if (isprint(optopt)) { fprintf(stderr, "Unknown option `-%c'.\n", optopt); } else { fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt); } exit(1); default: fprintf(stderr, "default case in getopt\n"); exit(1); } } if (filename) { sequences = parasail_sequences_from_file(filename); if (0 == seq_count) { seq_count = sequences->l; } } else { fprintf(stderr, "no filename specified\n"); exit(1); } /* select the matrix */ if (matrixname) { matrix = parasail_matrix_lookup(matrixname); if (NULL == matrix) { fprintf(stderr, "Specified substitution matrix not found.\n"); exit(1); } } limit = binomial_coefficient(seq_count, 2); printf("%lu choose 2 is %lu\n", seq_count, limit); #if HAVE_SSE2 if (parasail_can_use_sse2()) { if (test_scores) { check_functions(parasail_nw_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_db_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_de_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sw_rowcol_sse2, sequences, limit, matrix, gap); } if (test_stats) { check_functions(parasail_nw_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_db_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_de_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_stats_rowcol_sse2, sequences, limit, matrix, gap); check_functions(parasail_sw_stats_rowcol_sse2, sequences, limit, matrix, gap); } } #endif #if HAVE_SSE41 if (parasail_can_use_sse41()) { if (test_scores) 
{ check_functions(parasail_nw_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_db_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_de_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sw_rowcol_sse41, sequences, limit, matrix, gap); } if (test_stats) { check_functions(parasail_nw_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_db_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_de_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_stats_rowcol_sse41, sequences, limit, matrix, gap); check_functions(parasail_sw_stats_rowcol_sse41, sequences, limit, matrix, gap); } } #endif #if HAVE_AVX2 if (parasail_can_use_avx2()) { if (test_scores) { check_functions(parasail_nw_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_db_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_de_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sw_rowcol_avx2, sequences, limit, matrix, gap); } if (test_stats) { check_functions(parasail_nw_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_db_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_de_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_stats_rowcol_avx2, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_stats_rowcol_avx2, sequences, limit, matrix, gap); 
check_functions(parasail_sw_stats_rowcol_avx2, sequences, limit, matrix, gap); } } #endif #if HAVE_ALTIVEC if (parasail_can_use_altivec()) { if (test_scores) { check_functions(parasail_nw_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_db_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_de_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sw_rowcol_altivec, sequences, limit, matrix, gap); } if (test_stats) { check_functions(parasail_nw_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_db_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_de_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_stats_rowcol_altivec, sequences, limit, matrix, gap); check_functions(parasail_sw_stats_rowcol_altivec, sequences, limit, matrix, gap); } } #endif #if HAVE_NEON if (parasail_can_use_neon()) { if (test_scores) { check_functions(parasail_nw_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_db_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_de_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sw_rowcol_neon, sequences, limit, matrix, gap); } if (test_stats) { check_functions(parasail_nw_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_db_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_de_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_stats_rowcol_neon, sequences, limit, matrix, 
gap); check_functions(parasail_sg_qb_de_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_stats_rowcol_neon, sequences, limit, matrix, gap); check_functions(parasail_sw_stats_rowcol_neon, sequences, limit, matrix, gap); } } #endif if (test_scores) { check_functions(parasail_nw_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_db_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_de_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sw_rowcol_disp, sequences, limit, matrix, gap); } if (test_stats) { check_functions(parasail_nw_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qx_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_db_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_de_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_dx_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qb_de_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sg_qe_db_stats_rowcol_disp, sequences, limit, matrix, gap); check_functions(parasail_sw_stats_rowcol_disp, sequences, limit, matrix, gap); } parasail_sequences_free(sequences); return 0; }
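/*
 * Usage sketch for the checker above (the binary name is hypothetical; the
 * flags follow the getopt string "f:m:n:o:e:vsS" parsed in main):
 *
 *   ./test_rowcols -f sequences.fasta -m blosum62 -n 100 -o 10 -e 1 -s
 *
 * -f FILE   FASTA input (required)
 * -m NAME   substitution matrix resolved via parasail_matrix_lookup
 * -n N      cap the number of sequences (default: all sequences in FILE)
 * -o N/-e N gap open/extend penalties; when left at the INT_MIN sentinel,
 *           downstream code is assumed to fall back to its defaults
 * -v        verbose output
 * -s        also exercise the *_stats_* variants
 * -S        skip the plain score variants
 */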
GB_unaryop__minv_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp32_fp32 // op(A') function: GB_tran__minv_fp32_fp32 // C type: float // A type: float // cast: float cij = (float) aij // unaryop: cij = (1.0F)/aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = (1.0F)/x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp32_fp32 ( float *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
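/*
 * With the macros above expanded, the apply kernel GB_unop__minv_fp32_fp32
 * reduces to the loop below (an illustrative sketch with local names, kept
 * under #if 0 so it is not compiled into the library):
 */
#if 0
static void minv_fp32_sketch (float *Cx, const float *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;        // GB_GETA: aij = Ax [pA]
        float x = (float) aij ;     // GB_CASTING: float-to-float cast (no-op)
        Cx [p] = (1.0F) / x ;       // GB_OP: multiplicative inverse
    }
}
#endif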
fsm3d_bfsm_openmp_v1.c
#include "openst/eikonal/fsm.h" #define M_FSM3D_IMP_NAME "BFSMv1" const char OPENST_FSM3D_COMPUTEPARTIAL_IMP_NAME[] = M_FSM3D_IMP_NAME; const size_t OPENST_FSM3D_COMPUTEPARTIAL_IMP_NAME_LENGTH = sizeof(M_FSM3D_IMP_NAME); int OpenST_FSM3D_ComputePartial(OPENST_FLOAT *U, OPENST_FLOAT *V, size_t NI, size_t NJ, size_t NK, OPENST_FLOAT HI, OPENST_FLOAT HJ, OPENST_FLOAT HK, int start_iter, int max_iter, int *converged, size_t BSIZE_I, size_t BSIZE_J, size_t BSIZE_K, OPENST_FLOAT EPS){ int total_it, it, notconvergedl, notconvergedt; int REVI, REVJ, REVK; size_t NBI, NBJ, NBK; #if (_OPENMP > 200203) size_t levelr, K1, K2, kr, level, I1, I2, ir, jr; #else #pragma message("WARNING: size_t to ptrdiff_t cast enabled") ptrdiff_t levelr, K1, K2, kr, level, I1, I2, ir, jr; #endif if(start_iter >= max_iter){ return max_iter; } total_it = start_iter; notconvergedl = 0; NBI = NI/BSIZE_I + (NI % BSIZE_I > 0); NBJ = NJ/BSIZE_J + (NJ % BSIZE_J > 0); NBK = NK/BSIZE_K + (NK % BSIZE_K > 0); #pragma omp parallel default(none) \ shared(BSIZE_I, BSIZE_J, BSIZE_K, NBI, NBJ, NBK, \ start_iter, total_it, notconvergedl, NI, NJ, NK, \ U, V, HI, HJ, HK, max_iter, EPS) \ private(it, REVI, REVJ, REVK, notconvergedt, \ levelr, K1, K2, level, I1, I2, ir, jr, kr) { for(it = start_iter; it < max_iter; ++it){ #pragma omp single nowait { ++total_it; notconvergedl = 0; } notconvergedt = 0; OpenST_FSM3D_GetSweepOrder(it, &REVI, &REVJ, &REVK); for(levelr = 0; levelr < NBI + NBJ + NBK - 2; ++levelr){ K1 = (NBI + NBJ - 2 < levelr) ? (levelr - NBI - NBJ + 2) : 0; K2 = (NBK - 1 > levelr) ? levelr : NBK - 1; for(kr = K1; kr <= K2; ++kr){ level = levelr - kr; I1 = (NBJ - 1 < level) ? (level - NBJ + 1) : 0; I2 = (NBI - 1 > level) ? level : NBI - 1; #pragma omp for nowait schedule(dynamic,1) for(ir = I1; ir <= I2; ++ir){ jr = level - ir; if(OpenST_FSM3D_BlockSerial(U, V, NI, NJ, NK, HI, HJ, HK, REVI, REVJ, REVK, ir * BSIZE_I, jr * BSIZE_J, kr * BSIZE_K, BSIZE_I, BSIZE_J, BSIZE_K, EPS)){ notconvergedt = 1; } } } #pragma omp barrier } #pragma omp atomic notconvergedl += notconvergedt; #pragma omp barrier #pragma omp flush (notconvergedl) if(!notconvergedl){ break; } #pragma omp barrier } } *converged = (notconvergedl == 0); return total_it; }
boomerAMG.c
#include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include <mpi.h> #include "omp.h" #include "boomerAMG.h" static double boomerAMGParam[BOOMERAMG_NPARAM]; #ifdef HYPRE #include "_hypre_utilities.h" #include "HYPRE_parcsr_ls.h" #include "_hypre_parcsr_ls.h" #include "HYPRE.h" typedef struct hypre_data { MPI_Comm comm; HYPRE_Solver solver; HYPRE_IJMatrix A; HYPRE_IJVector b; HYPRE_IJVector x; HYPRE_BigInt ilower; HYPRE_BigInt *ii; HYPRE_Real *bb; HYPRE_Real *xx; int nRows; int Nthreads; } hypre_data; static hypre_data *data; int boomerAMGSetup(int nrows, int nz, const long long int *Ai, const long long int *Aj, const double *Av, const int null_space, const MPI_Comm ce, int Nthreads, int deviceID, const int useFP32, const double *param) { data = (hypre_data*) malloc(sizeof(struct hypre_data)); data->Nthreads = Nthreads; MPI_Comm comm; MPI_Comm_dup(ce, &comm); data->comm = comm; int rank; MPI_Comm_rank(comm,&rank); if(sizeof(HYPRE_Real) != ((useFP32) ? sizeof(float) : sizeof(double))) { if(rank == 0) printf("HYPRE has not been built to support FP32.\n"); MPI_Abort(ce, 1); } long long rowStart = nrows; MPI_Scan(MPI_IN_PLACE, &rowStart, 1, MPI_LONG_LONG, MPI_SUM, ce); rowStart -= nrows; data->nRows = nrows; HYPRE_BigInt ilower = (HYPRE_BigInt) rowStart; data->ilower = ilower; HYPRE_BigInt iupper = ilower + (HYPRE_BigInt) nrows - 1; HYPRE_IJMatrixCreate(comm,ilower,iupper,ilower,iupper,&data->A); HYPRE_IJMatrix A_ij = data->A; HYPRE_IJMatrixSetObjectType(A_ij,HYPRE_PARCSR); HYPRE_IJMatrixInitialize(A_ij); int i; for(i=0; i<nz; i++) { HYPRE_BigInt mati = (HYPRE_BigInt)(Ai[i]); HYPRE_BigInt matj = (HYPRE_BigInt)(Aj[i]); HYPRE_Real matv = (HYPRE_Real) Av[i]; HYPRE_Int ncols = 1; // number of columns per row HYPRE_IJMatrixSetValues(A_ij, 1, &ncols, &mati, &matj, &matv); } HYPRE_IJMatrixAssemble(A_ij); //HYPRE_IJMatrixPrint(A_ij, "matrix.dat"); // Create AMG solver HYPRE_BoomerAMGCreate(&data->solver); HYPRE_Solver solver = data->solver; int uparam = (int) param[0]; // Set AMG parameters if (uparam) { int i; for (i = 0; i < BOOMERAMG_NPARAM; i++) boomerAMGParam[i] = param[i+1]; } else { boomerAMGParam[0] = 10; /* coarsening */ boomerAMGParam[1] = 6; /* interpolation */ boomerAMGParam[2] = 1; /* number of cycles */ boomerAMGParam[3] = 6; /* smoother for crs level */ boomerAMGParam[4] = 3; /* sweeps */ boomerAMGParam[5] = -1; /* smoother */ boomerAMGParam[6] = 1; /* sweeps */ boomerAMGParam[7] = 0.25; /* threshold */ boomerAMGParam[8] = 0.0; /* non galerkin tolerance */ } HYPRE_BoomerAMGSetCoarsenType(solver,boomerAMGParam[0]); HYPRE_BoomerAMGSetInterpType(solver,boomerAMGParam[1]); //HYPRE_BoomerAMGSetKeepTranspose(solver, 1); //HYPRE_BoomerAMGSetChebyFraction(solver, 0.2); if (boomerAMGParam[5] > 0) { HYPRE_BoomerAMGSetCycleRelaxType(solver, boomerAMGParam[5], 1); HYPRE_BoomerAMGSetCycleRelaxType(solver, boomerAMGParam[5], 2); } HYPRE_BoomerAMGSetCycleRelaxType(solver, 9, 3); HYPRE_BoomerAMGSetCycleNumSweeps(solver, boomerAMGParam[6], 1); HYPRE_BoomerAMGSetCycleNumSweeps(solver, boomerAMGParam[6], 2); HYPRE_BoomerAMGSetCycleNumSweeps(solver, 1, 3); if (null_space) { HYPRE_BoomerAMGSetMinCoarseSize(solver, 2); HYPRE_BoomerAMGSetCycleRelaxType(solver, boomerAMGParam[3], 3); HYPRE_BoomerAMGSetCycleNumSweeps(solver, boomerAMGParam[4], 3); } HYPRE_BoomerAMGSetStrongThreshold(solver,boomerAMGParam[7]); if (boomerAMGParam[8] > 1e-3) { HYPRE_BoomerAMGSetNonGalerkinTol(solver,boomerAMGParam[8]); HYPRE_BoomerAMGSetLevelNonGalerkinTol(solver,0.0 , 0); 
HYPRE_BoomerAMGSetLevelNonGalerkinTol(solver,0.01, 1);
    HYPRE_BoomerAMGSetLevelNonGalerkinTol(solver,0.05, 2);
  }
  HYPRE_BoomerAMGSetAggNumLevels(solver, boomerAMGParam[9]);
  HYPRE_BoomerAMGSetMaxIter(solver,boomerAMGParam[2]); // number of V-cycles
  HYPRE_BoomerAMGSetTol(solver,0);
  HYPRE_BoomerAMGSetPrintLevel(solver,1);

  // Create and initialize rhs and solution vectors
  HYPRE_IJVectorCreate(comm,ilower,iupper,&data->b);
  HYPRE_IJVector b = data->b;
  HYPRE_IJVectorSetObjectType(b,HYPRE_PARCSR);
  HYPRE_IJVectorInitialize(b);
  HYPRE_IJVectorAssemble(b);

  HYPRE_IJVectorCreate(comm,ilower,iupper,&data->x);
  HYPRE_IJVector x = data->x;
  HYPRE_IJVectorSetObjectType(x,HYPRE_PARCSR);
  HYPRE_IJVectorInitialize(x);
  HYPRE_IJVectorAssemble(x);

  // Perform AMG setup
  HYPRE_ParVector par_b;
  HYPRE_ParVector par_x;
  HYPRE_IJVectorGetObject(b,(void**) &par_b);
  HYPRE_IJVectorGetObject(x,(void**) &par_x);
  HYPRE_ParCSRMatrix par_A;
  HYPRE_IJMatrixGetObject(data->A,(void**) &par_A);

  int _Nthreads = 1;
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    if(tid==0) _Nthreads = omp_get_num_threads();
  }
  omp_set_num_threads(data->Nthreads);
  HYPRE_BoomerAMGSetup(solver,par_A,par_b,par_x);
  omp_set_num_threads(_Nthreads);

  data->ii = (HYPRE_BigInt*) malloc(data->nRows*sizeof(HYPRE_BigInt));
  data->bb = (HYPRE_Real*) malloc(data->nRows*sizeof(HYPRE_Real));
  data->xx = (HYPRE_Real*) malloc(data->nRows*sizeof(HYPRE_Real));
  for(i=0;i<data->nRows;++i) data->ii[i] = ilower + (HYPRE_BigInt)i;

  return 0;
}

int boomerAMGSolve(void *x, void *b)
{
  int err;
  HYPRE_Real *xx = (HYPRE_Real*) x; // written below, so not declared const
  const HYPRE_Real *bb = (const HYPRE_Real*) b;

  HYPRE_ParVector par_x;
  HYPRE_ParVector par_b;
  HYPRE_ParCSRMatrix par_A;

  HYPRE_IJVectorSetValues(data->b,data->nRows,data->ii,bb);
  HYPRE_IJVectorAssemble(data->b);
  HYPRE_IJVectorGetObject(data->b,(void**) &par_b);
  HYPRE_IJVectorAssemble(data->x);
  HYPRE_IJVectorGetObject(data->x,(void **) &par_x);
  HYPRE_IJMatrixGetObject(data->A,(void**) &par_A);

  int _Nthreads = 1;
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    if(tid==0) _Nthreads = omp_get_num_threads();
  }
  omp_set_num_threads(data->Nthreads);
  err = HYPRE_BoomerAMGSolve(data->solver,par_A,par_b,par_x);
  if(err > 0) {
    int rank;
    MPI_Comm_rank(data->comm,&rank);
    if(rank == 0) printf("HYPRE_BoomerAMGSolve failed!\n");
    return 1;
  }
  omp_set_num_threads(_Nthreads);

  HYPRE_IJVectorGetValues(data->x,data->nRows,data->ii,xx);

  return 0;
}

void boomerAMGFree()
{
  HYPRE_BoomerAMGDestroy(data->solver);
  HYPRE_IJMatrixDestroy(data->A);
  HYPRE_IJVectorDestroy(data->x);
  HYPRE_IJVectorDestroy(data->b);
  free(data);
}

// Just to fix a hypre linking error
void hypre_blas_xerbla() { }
void hypre_blas_lsame() { }

#else

int boomerAMGSetup(int nrows, int nz,
                   const long long int *Ai, const long long int *Aj, const double *Av,
                   const int null_space, const MPI_Comm ce, int Nthreads,
                   int deviceID, const int useFP32, const double *param)
{
  int rank;
  MPI_Comm_rank(ce,&rank);
  if(rank == 0) printf("ERROR: Recompile with HYPRE support!\n");
  return 1;
}

int boomerAMGSolve(void *x, void *b)
{
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD,&rank); // no communicator is stored in the stub
  if(rank == 0) printf("ERROR: Recompile with HYPRE support!\n");
  return 1;
}

void boomerAMGFree()
{
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD,&rank); // no communicator is stored in the stub
  if(rank == 0) printf("ERROR: Recompile with HYPRE support!\n");
  MPI_Abort(MPI_COMM_WORLD, 1);
}

#endif
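/*
 * Typical call sequence for this wrapper (a sketch; nrows, nnz, and the COO
 * triplet arrays Ai/Aj/Av are placeholders supplied by the caller):
 */
#if 0
double param[BOOMERAMG_NPARAM + 1] = {0};   /* param[0] == 0 -> built-in defaults */
boomerAMGSetup(nrows, nnz, Ai, Aj, Av, /*null_space=*/0, MPI_COMM_WORLD,
               /*Nthreads=*/1, /*deviceID=*/0, /*useFP32=*/0, param);
boomerAMGSolve(x, b);   /* x (solution) and b (rhs): HYPRE_Real[nrows] */
boomerAMGFree();
#endif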
GB_binop__pow_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__pow_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__pow_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc64) // C=scalar+B GB (_bind1st__pow_fc64) // C=scalar+B' GB (_bind1st_tran__pow_fc64) // C=A+scalar GB (_bind2nd__pow_fc64) // C=A'+scalar GB (_bind2nd_tran__pow_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_cpow (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_cpow (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FC64 || GxB_NO_POW_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pow_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_fc64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_cpow (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_cpow (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_cpow (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_cpow (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
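/*
 * The bind1st/bind2nd kernels above partially apply the power operator:
 * bind1st computes Cx [p] = GB_cpow (x, Bx [p]) with the scalar as the base,
 * and bind2nd computes Cx [p] = GB_cpow (Ax [p], y) with the scalar as the
 * exponent. A plain C99 sketch of bind2nd follows; GxB_FC64_t typically maps
 * to double complex, and GB_cpow is approximated here by the standard cpow
 * (the library version adds extra edge-case handling):
 */
#if 0
#include <complex.h>
static void bind2nd_pow_sketch (double complex *Cx,
    const double complex *Ax, double complex y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = cpow (Ax [p], y) ;     // z = x ** y, entry by entry
    }
}
#endif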
atomic_messages.c
// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized // RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp atomic read argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } int foo() { L1: foo(); #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} { foo(); goto L1; // expected-error {{use of undeclared label 'L1'}} } goto L2; // expected-error {{use of undeclared label 'L2'}} #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} { foo(); L2: foo(); } return 0; } struct S { int a; }; int readint() { int a = 0, b = 0; // Test for atomic read #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} foo(); #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} a += b; #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected lvalue expression}} a = 0; #pragma omp atomic read a = b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} #pragma omp atomic read read a = b; return 0; } int readS() { struct S a, b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}} #pragma omp atomic read read allocate(a) // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected expression of scalar type}} a = b; return a.a; } int writeint() { int a = 0, b = 0; // Test for atomic write #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} foo(); #pragma omp atomic write // expected-error@+2 {{the 
statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} a += b; #pragma omp atomic write a = 0; #pragma omp atomic write a = b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}} #pragma omp atomic write write a = b; return 0; } int writeS() { struct S a, b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}} #pragma omp atomic write write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected expression of scalar type}} a = b; return a.a; } int updateint() { int a = 0, b = 0; // Test for atomic update #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected built-in binary or unary operator}} foo(); #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected built-in binary operator}} a = b; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} a = b || a; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} a = a && b; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = (float)a + b; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = 2 * b; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar 
type}} // expected-note@+1 {{expected in right hand side of expression}} a = b + *&a; #pragma omp atomic update *&a = *&a + 2; #pragma omp atomic update a++; #pragma omp atomic ++a; #pragma omp atomic update a--; #pragma omp atomic --a; #pragma omp atomic update a += b; #pragma omp atomic a %= b; #pragma omp atomic update a *= b; #pragma omp atomic a -= b; #pragma omp atomic update a /= b; #pragma omp atomic a &= b; #pragma omp atomic update a ^= b; #pragma omp atomic a |= b; #pragma omp atomic update a <<= b; #pragma omp atomic a >>= b; #pragma omp atomic update a = b + a; #pragma omp atomic a = a * b; #pragma omp atomic update a = b - a; #pragma omp atomic a = a / b; #pragma omp atomic update a = b & a; #pragma omp atomic a = a ^ b; #pragma omp atomic update a = b | a; #pragma omp atomic a = a << b; #pragma omp atomic a = b >> a; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}} #pragma omp atomic update update a /= b; return 0; } int captureint() { int a = 0, b = 0, c = 0; // Test for atomic capture #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected compound statement}} ; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} foo(); #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected built-in binary or unary operator}} a = b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = b || a; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} b = a = a && b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = 
(float)a + b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = 2 * b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = b + *&a; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected exactly two expression statements}} { a = b; } #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected exactly two expression statements}} {} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of the first expression}} {a = b;a = b;} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of the first expression}} {a = b; a = b || a;} #pragma omp atomic capture {b = a; a = a && b;} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = 
(float)a + b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = 2 * b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = b + *&a; #pragma omp atomic capture c = *&a = *&a + 2; #pragma omp atomic capture c = a++; #pragma omp atomic capture c = ++a; #pragma omp atomic capture c = a--; #pragma omp atomic capture c = --a; #pragma omp atomic capture c = a += b; #pragma omp atomic capture c = a %= b; #pragma omp atomic capture c = a *= b; #pragma omp atomic capture c = a -= b; #pragma omp atomic capture c = a /= b; #pragma omp atomic capture c = a &= b; #pragma omp atomic capture c = a ^= b; #pragma omp atomic capture c = a |= b; #pragma omp atomic capture c = a <<= b; #pragma omp atomic capture c = a >>= b; #pragma omp atomic capture c = a = b + a; #pragma omp atomic capture c = a = a * b; #pragma omp atomic capture c = a = b - a; #pragma omp atomic capture c = a = a / b; #pragma omp atomic capture c = a = b & a; #pragma omp atomic capture c = a = a ^ b; #pragma omp atomic capture c = a = b | a; #pragma omp atomic capture c = a = a << b; #pragma omp atomic capture c = a = b >> a; #pragma omp atomic capture { c = *&a; *&a = *&a + 2;} #pragma omp atomic capture { *&a = *&a + 2; c = *&a;} #pragma omp atomic capture {c = a; a++;} #pragma omp atomic capture {c = a; (a)++;} #pragma omp atomic capture {++a;c = a;} #pragma omp atomic capture {c = a;a--;} #pragma omp atomic capture {--a;c = a;} #pragma omp atomic capture {c = a; a += b;} #pragma omp atomic capture {c = a; (a) += b;} #pragma omp atomic capture {a %= b; c = a;} #pragma omp atomic capture {c = a; a *= b;} #pragma omp atomic capture {a -= b;c = a;} #pragma omp atomic capture {c = a; a /= b;} #pragma omp atomic capture {a &= b; c = a;} #pragma omp atomic capture {c = a; a ^= b;} #pragma omp atomic capture {a |= b; c = a;} #pragma omp atomic capture {c = a; a <<= b;} #pragma omp atomic capture {a >>= b; c = a;} #pragma omp atomic capture {c = a; a = b + a;} #pragma omp atomic capture {a = a * b; c = a;} #pragma omp atomic capture {c = a; a = b - a;} #pragma omp atomic capture {a = a / b; c = a;} #pragma omp atomic capture {c = a; a = b & a;} #pragma omp atomic capture {a = a ^ b; c = a;} #pragma omp atomic capture {c = a; a = b | a;} #pragma omp atomic capture {a = a << b; c = a;} #pragma omp atomic capture {c = a; a = b >> a;} #pragma omp atomic capture {c = a; a = foo();} // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}} #pragma omp atomic capture capture b = a /= b; return 0; }
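/*
 * For contrast with the malformed statements diagnosed above, a minimal set
 * of well-formed atomic constructs (a sketch, kept under #if 0 so the
 * -verify expectations of this test are unaffected):
 */
#if 0
void ok(int *x, int *v, int e) {
#pragma omp atomic write
  *x = e;          // x = expr
#pragma omp atomic read
  *v = *x;         // v = x
#pragma omp atomic update
  *x += e;         // x binop= expr
#pragma omp atomic capture
  *v = *x += e;    // v = x binop= expr
}
#endif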
adi.c
//-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB SP code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" void adi() { compute_rhs(); txinvr(); x_solve(); y_solve(); z_solve(); add(); } void ninvr() { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; #pragma omp target //present(rhs) #ifdef SPEC_USE_INNER_SIMD #pragma omp teams distribute parallel for collapse(2) private(i,j,k,r1,r2,r3,r4,r5,t1,t2) #else #pragma omp teams distribute parallel for simd collapse(3) private(r1,r2,r3,r4,r5,t1,t2) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(r1,r2,r3,r4,r5,t1,t2) #endif for (i = 1; i <= nx2; i++) { r1 = rhs[0][k][j][i]; r2 = rhs[1][k][j][i]; r3 = rhs[2][k][j][i]; r4 = rhs[3][k][j][i]; r5 = rhs[4][k][j][i]; t1 = bt * r3; t2 = 0.5 * ( r4 + r5 ); rhs[0][k][j][i] = -r2; rhs[1][k][j][i] = r1; rhs[2][k][j][i] = bt * ( r4 - r5 ); rhs[3][k][j][i] = -t1 + t2; rhs[4][k][j][i] = t1 + t2; } } } } void pinvr() { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; #pragma omp target //present(rhs) #ifdef SPEC_USE_INNER_SIMD #pragma omp teams distribute parallel for private(i,j,k,r1,r2,r3,r4,r5,t1,t2) collapse(2) #else #pragma omp teams distribute parallel for simd private(r1,r2,r3,r4,r5,t1,t2) collapse(3) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(r1,r2,r3,r4,r5,t1,t2) #endif for (i = 1; i <= nx2; i++) { r1 = rhs[0][k][j][i]; r2 = rhs[1][k][j][i]; r3 = rhs[2][k][j][i]; r4 = rhs[3][k][j][i]; r5 = rhs[4][k][j][i]; t1 = bt * r1; t2 = 0.5 * ( r4 + r5 ); rhs[0][k][j][i] = bt * ( r4 - r5 ); rhs[1][k][j][i] = -r3; rhs[2][k][j][i] = r2; rhs[3][k][j][i] = -t1 + t2; rhs[4][k][j][i] = t1 + t2; } } } } void tzetar() { int i, j, k; double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5; double btuz, ac2u, uzik1; #pragma omp target //present(us,vs,ws,qs,u,speed,rhs) #ifdef SPEC_USE_INNER_SIMD #pragma omp teams distribute parallel for collapse(2) private(i,j,k,t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1) #else #pragma omp teams distribute parallel for simd collapse(3) private(t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1) #endif for (k = 1; k <= nz2; k++) { for (j 
= 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1) #endif for (i = 1; i <= nx2; i++) { xvel = us[k][j][i]; yvel = vs[k][j][i]; zvel = ws[k][j][i]; ac = speed[k][j][i]; ac2u = ac*ac; r1 = rhs[0][k][j][i]; r2 = rhs[1][k][j][i]; r3 = rhs[2][k][j][i]; r4 = rhs[3][k][j][i]; r5 = rhs[4][k][j][i]; uzik1 = u[0][k][j][i]; btuz = bt * uzik1; t1 = btuz/ac * (r4 + r5); t2 = r3 + t1; t3 = btuz * (r4 - r5); rhs[0][k][j][i] = t2; rhs[1][k][j][i] = -uzik1*r2 + xvel*t2; rhs[2][k][j][i] = uzik1*r1 + yvel*t2; rhs[3][k][j][i] = zvel*t2 + t3; rhs[4][k][j][i] = uzik1*(-xvel*r2 + yvel*r1) + qs[k][j][i]*t2 + c2iv*ac2u*t1 + zvel*t3; } } } } void x_solve() { int i, j, k, i1, i2, m; int gp01,gp02,gp03,gp04; double ru1, fac1, fac2; double lhsX[5][nz2+1][IMAXP+1][IMAXP+1]; double lhspX[5][nz2+1][IMAXP+1][IMAXP+1]; double lhsmX[5][nz2+1][IMAXP+1][IMAXP+1]; double rhonX[nz2+1][IMAXP+1][PROBLEM_SIZE]; double rhsX[5][nz2+1][IMAXP+1][JMAXP+1]; int ni=nx2+1; gp01=grid_points[0]-1; gp02=grid_points[0]-2; gp03=grid_points[0]-3; gp04=grid_points[0]-4; #pragma omp target data map(alloc:lhsX[:][:][:][:],lhspX[:][:][:][:], lhsmX[:][:][:][:],rhonX[:][:][:],rhsX[:][:][:][:]) //present(rho_i,us,speed,rhs) { #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for collapse(2) private(i,j,k) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 0; k <= nz2; k++) { for (j = 0; j <= JMAXP; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 0; i <= IMAXP; i++) { rhsX[0][k][i][j] = rhs[0][k][j][i]; rhsX[1][k][i][j] = rhs[1][k][j][i]; rhsX[2][k][i][j] = rhs[2][k][j][i]; rhsX[3][k][i][j] = rhs[3][k][j][i]; rhsX[4][k][i][j] = rhs[4][k][j][i]; } } } #pragma omp target teams distribute parallel for private(k,j,m) collapse(2) for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (m = 0; m < 5; m++) { lhsX[m][k][0][j] = 0.0; lhspX[m][k][0][j] = 0.0; lhsmX[m][k][0][j] = 0.0; lhsX[m][k][ni][j] = 0.0; lhspX[m][k][ni][j] = 0.0; lhsmX[m][k][ni][j] = 0.0; } lhsX[2][k][0][j] = 1.0; lhspX[2][k][0][j] = 1.0; lhsmX[2][k][0][j] = 1.0; lhsX[2][k][ni][j] = 1.0; lhspX[2][k][ni][j] = 1.0; lhsmX[2][k][ni][j] = 1.0; } } //--------------------------------------------------------------------- // Computes the left hand side for the three x-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- #pragma omp target teams distribute parallel for collapse(2) private(i,j,k,ru1) for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #pragma omp simd private(ru1) for (i = 0; i <= gp01; i++) { ru1 = c3c4*rho_i[k][j][i]; //cv[i] = us[k][j][i]; rhonX[k][j][i] = max(max(dx2+con43*ru1,dx5+c1c5*ru1), max(dxmax+ru1,dx1)); } #pragma omp simd for (i = 1; i <= nx2; i++) { lhsX[0][k][i][j] = 0.0; // lhsX[1][k][i][j] = -dttx2 * cv[i-1] - dttx1 * rhon[i-1]; lhsX[1][k][i][j] = -dttx2 * us[k][j][i-1] - dttx1 * rhonX[k][j][i-1]; lhsX[2][k][i][j] = 1.0 + c2dttx1 * rhonX[k][j][i]; // lhsX[3][k][i][j] = dttx2 * cv[i+1] - dttx1 * rhon[i+1]; lhsX[3][k][i][j] = dttx2 * us[k][j][i+1] - dttx1 * rhonX[k][j][i+1]; lhsX[4][k][i][j] = 0.0; } } } //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- i = 1; #ifdef 
SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(k,j) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= ny2; j++) { lhsX[2][k][i][j] = lhsX[2][k][i][j] + comz5; lhsX[3][k][i][j] = lhsX[3][k][i][j] - comz4; lhsX[4][k][i][j] = lhsX[4][k][i][j] + comz1; lhsX[1][k][i+1][j] = lhsX[1][k][i+1][j] - comz4; lhsX[2][k][i+1][j] = lhsX[2][k][i+1][j] + comz6; lhsX[3][k][i+1][j] = lhsX[3][k][i+1][j] - comz4; lhsX[4][k][i+1][j] = lhsX[4][k][i+1][j] + comz1; } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 3; i <= gp04; i++) { lhsX[0][k][i][j] = lhsX[0][k][i][j] + comz1; lhsX[1][k][i][j] = lhsX[1][k][i][j] - comz4; lhsX[2][k][i][j] = lhsX[2][k][i][j] + comz6; lhsX[3][k][i][j] = lhsX[3][k][i][j] - comz4; lhsX[4][k][i][j] = lhsX[4][k][i][j] + comz1; } } } i = gp03; #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(j,k) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= ny2; j++) { lhsX[0][k][i][j] = lhsX[0][k][i][j] + comz1; lhsX[1][k][i][j] = lhsX[1][k][i][j] - comz4; lhsX[2][k][i][j] = lhsX[2][k][i][j] + comz6; lhsX[3][k][i][j] = lhsX[3][k][i][j] - comz4; lhsX[0][k][i+1][j] = lhsX[0][k][i+1][j] + comz1; lhsX[1][k][i+1][j] = lhsX[1][k][i+1][j] - comz4; lhsX[2][k][i+1][j] = lhsX[2][k][i+1][j] + comz5; } } //--------------------------------------------------------------------- // subsequently, fill the other factors (u+c), (u-c) by adding to // the first //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { lhspX[0][k][i][j] = lhsX[0][k][i][j]; lhspX[1][k][i][j] = lhsX[1][k][i][j] - dttx2 * speed[k][j][i-1]; lhspX[2][k][i][j] = lhsX[2][k][i][j]; lhspX[3][k][i][j] = lhsX[3][k][i][j] + dttx2 * speed[k][j][i+1]; lhspX[4][k][i][j] = lhsX[4][k][i][j]; lhsmX[0][k][i][j] = lhsX[0][k][i][j]; lhsmX[1][k][i][j] = lhsX[1][k][i][j] + dttx2 * speed[k][j][i-1]; lhsmX[2][k][i][j] = lhsX[2][k][i][j]; lhsmX[3][k][i][j] = lhsX[3][k][i][j] - dttx2 * speed[k][j][i+1]; lhsmX[4][k][i][j] = lhsX[4][k][i][j]; } } } //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- //--------------------------------------------------------------------- // perform the Thomas algorithm; first, FORWARD ELIMINATION //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd collapse(2) private(i,m,i1,i2,fac1) #endif for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(i1,i2,fac1) #endif for (j = 1; j <= ny2; j++) { for (i = 0; i <= gp03; i++) { i1 = i + 1; i2 = 
i + 2; fac1 = 1.0/lhsX[2][k][i][j]; lhsX[3][k][i][j] = fac1*lhsX[3][k][i][j]; lhsX[4][k][i][j] = fac1*lhsX[4][k][i][j]; for (m = 0; m < 3; m++) { rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j]; } lhsX[2][k][i1][j] = lhsX[2][k][i1][j] - lhsX[1][k][i1][j]*lhsX[3][k][i][j]; lhsX[3][k][i1][j] = lhsX[3][k][i1][j] - lhsX[1][k][i1][j]*lhsX[4][k][i][j]; for (m = 0; m < 3; m++) { rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsX[1][k][i1][j]*rhsX[m][k][i][j]; } lhsX[1][k][i2][j] = lhsX[1][k][i2][j] - lhsX[0][k][i2][j]*lhsX[3][k][i][j]; lhsX[2][k][i2][j] = lhsX[2][k][i2][j] - lhsX[0][k][i2][j]*lhsX[4][k][i][j]; for (m = 0; m < 3; m++) { rhsX[m][k][i2][j] = rhsX[m][k][i2][j] - lhsX[0][k][i2][j]*rhsX[m][k][i][j]; } } } } //--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they do not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- i = gp02; i1 = gp01; #pragma omp target teams distribute parallel for private(j,k,m,fac1,fac2) collapse(2) for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { fac1 = 1.0/lhsX[2][k][i][j]; lhsX[3][k][i][j] = fac1*lhsX[3][k][i][j]; lhsX[4][k][i][j] = fac1*lhsX[4][k][i][j]; for (m = 0; m < 3; m++) { rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j]; } lhsX[2][k][i1][j] = lhsX[2][k][i1][j] - lhsX[1][k][i1][j]*lhsX[3][k][i][j]; lhsX[3][k][i1][j] = lhsX[3][k][i1][j] - lhsX[1][k][i1][j]*lhsX[4][k][i][j]; for (m = 0; m < 3; m++) { rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsX[1][k][i1][j]*rhsX[m][k][i][j]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0/lhsX[2][k][i1][j]; for (m = 0; m < 3; m++) { rhsX[m][k][i1][j] = fac2*rhsX[m][k][i1][j]; } } } //--------------------------------------------------------------------- // do the u+c and the u-c factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd collapse(2) private(i,m,fac1,i1,i2) #endif for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,i1,i2) #endif for (j = 1; j <= ny2; j++) { for (i = 0; i <= gp03; i++) { i1 = i + 1; i2 = i + 2; m = 3; fac1 = 1.0/lhspX[2][k][i][j]; lhspX[3][k][i][j] = fac1*lhspX[3][k][i][j]; lhspX[4][k][i][j] = fac1*lhspX[4][k][i][j]; rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j]; lhspX[2][k][i1][j] = lhspX[2][k][i1][j] - lhspX[1][k][i1][j]*lhspX[3][k][i][j]; lhspX[3][k][i1][j] = lhspX[3][k][i1][j] - lhspX[1][k][i1][j]*lhspX[4][k][i][j]; rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhspX[1][k][i1][j]*rhsX[m][k][i][j]; lhspX[1][k][i2][j] = lhspX[1][k][i2][j] - lhspX[0][k][i2][j]*lhspX[3][k][i][j]; lhspX[2][k][i2][j] = lhspX[2][k][i2][j] - lhspX[0][k][i2][j]*lhspX[4][k][i][j]; rhsX[m][k][i2][j] = rhsX[m][k][i2][j] - lhspX[0][k][i2][j]*rhsX[m][k][i][j]; m = 4; fac1 = 1.0/lhsmX[2][k][i][j]; lhsmX[3][k][i][j] = fac1*lhsmX[3][k][i][j]; lhsmX[4][k][i][j] = fac1*lhsmX[4][k][i][j]; rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j]; lhsmX[2][k][i1][j] = lhsmX[2][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[3][k][i][j]; lhsmX[3][k][i1][j] = lhsmX[3][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[4][k][i][j]; rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsmX[1][k][i1][j]*rhsX[m][k][i][j]; lhsmX[1][k][i2][j] = lhsmX[1][k][i2][j] -
lhsmX[0][k][i2][j]*lhsmX[3][k][i][j]; lhsmX[2][k][i2][j] = lhsmX[2][k][i2][j] - lhsmX[0][k][i2][j]*lhsmX[4][k][i][j]; rhsX[m][k][i2][j] = rhsX[m][k][i2][j] - lhsmX[0][k][i2][j]*rhsX[m][k][i][j]; } } } //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- i = gp02; i1 = gp01; #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(j,k,m,fac1) #else #pragma omp target teams distribute parallel for simd collapse(2) private(m,fac1) #endif for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= ny2; j++) { m = 3; fac1 = 1.0/lhspX[2][k][i][j]; lhspX[3][k][i][j] = fac1*lhspX[3][k][i][j]; lhspX[4][k][i][j] = fac1*lhspX[4][k][i][j]; rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j]; lhspX[2][k][i1][j] = lhspX[2][k][i1][j] - lhspX[1][k][i1][j]*lhspX[3][k][i][j]; lhspX[3][k][i1][j] = lhspX[3][k][i1][j] - lhspX[1][k][i1][j]*lhspX[4][k][i][j]; rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhspX[1][k][i1][j]*rhsX[m][k][i][j]; m = 4; fac1 = 1.0/lhsmX[2][k][i][j]; lhsmX[3][k][i][j] = fac1*lhsmX[3][k][i][j]; lhsmX[4][k][i][j] = fac1*lhsmX[4][k][i][j]; rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j]; lhsmX[2][k][i1][j] = lhsmX[2][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[3][k][i][j]; lhsmX[3][k][i1][j] = lhsmX[3][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[4][k][i][j]; rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsmX[1][k][i1][j]*rhsX[m][k][i][j]; //--------------------------------------------------------------------- // Scale the last row immediately //--------------------------------------------------------------------- rhsX[3][k][i1][j] = rhsX[3][k][i1][j]/lhspX[2][k][i1][j]; rhsX[4][k][i1][j] = rhsX[4][k][i1][j]/lhsmX[2][k][i1][j]; } } //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- i = gp02; i1 = gp01; #pragma omp target teams distribute parallel for private(j,k,m) collapse(2) for (k = 1; k <= nz2; k++) { for (j = 1; j <= ny2; j++) { for (m = 0; m < 3; m++) { rhsX[m][k][i][j] = rhsX[m][k][i][j] - lhsX[3][k][i][j]*rhsX[m][k][i1][j]; } rhsX[3][k][i][j] = rhsX[3][k][i][j] - lhspX[3][k][i][j]*rhsX[3][k][i1][j]; rhsX[4][k][i][j] = rhsX[4][k][i][j] - lhsmX[3][k][i][j]*rhsX[4][k][i1][j]; } } //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd collapse(2) private(i,m,i1,i2) #endif for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(i1,i2) #endif for (j = 1; j <= ny2; j++) { for (i = gp03; i >= 0; i--) { i1 = i + 1; i2 = i + 2; for (m = 0; m < 3; m++) { rhsX[m][k][i][j] = rhsX[m][k][i][j] - lhsX[3][k][i][j]*rhsX[m][k][i1][j] - lhsX[4][k][i][j]*rhsX[m][k][i2][j]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhsX[3][k][i][j] = rhsX[3][k][i][j] - lhspX[3][k][i][j]*rhsX[3][k][i1][j] - lhspX[4][k][i][j]*rhsX[3][k][i2][j]; rhsX[4][k][i][j] = rhsX[4][k][i][j] - lhsmX[3][k][i][j]*rhsX[4][k][i1][j] - lhsmX[4][k][i][j]*rhsX[4][k][i2][j]; } } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for 
private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 0; k <= nz2; k++) { for (j = 0; j <= JMAXP; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 0; i <= IMAXP; i++) { rhs[0][k][j][i] = rhsX[0][k][i][j]; rhs[1][k][j][i] = rhsX[1][k][i][j]; rhs[2][k][j][i] = rhsX[2][k][i][j]; rhs[3][k][j][i] = rhsX[3][k][i][j]; rhs[4][k][j][i] = rhsX[4][k][i][j]; } } } }/* end omp target data */ //--------------------------------------------------------------------- // Do the block-diagonal inversion //--------------------------------------------------------------------- ninvr(); } void y_solve() { int i, j, k, j1, j2, m; int gp0, gp1, gp2; double ru1, fac1, fac2; double lhsY[5][nz2+1][IMAXP+1][IMAXP+1]; double lhspY[5][nz2+1][IMAXP+1][IMAXP+1]; double lhsmY[5][nz2+1][IMAXP+1][IMAXP+1]; double rhoqY[nz2+1][IMAXP+1][PROBLEM_SIZE]; int ni=ny2+1; gp0=grid_points[0]; gp1=grid_points[1]; gp2=grid_points[2]; #pragma omp target data map(alloc:lhsY[:][:][:][:],lhspY[:][:][:][:],lhsmY[:][:][:][:],rhoqY[:][:][:]) //present(rho_i,vs,speed,rhs) { #pragma omp target teams distribute parallel for private(i,k,m) collapse(2) for (k = 1; k <= nz2; k++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { lhsY[m][k][0][i] = 0.0; lhspY[m][k][0][i] = 0.0; lhsmY[m][k][0][i] = 0.0; lhsY[m][k][ni][i] = 0.0; lhspY[m][k][ni][i] = 0.0; lhsmY[m][k][ni][i] = 0.0; } lhsY[2][k][0][i] = 1.0; lhspY[2][k][0][i] = 1.0; lhsmY[2][k][0][i] = 1.0; lhsY[2][k][ni][i] = 1.0; lhspY[2][k][ni][i] = 1.0; lhsmY[2][k][ni][i] = 1.0; } } //--------------------------------------------------------------------- // Computes the left hand side for the three y-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- #pragma omp target teams distribute parallel for collapse(2) private(i,j,k,ru1) for (k = 1; k <= nz2; k++) { for (i = 1; i <= gp0-2; i++) { #pragma omp simd private(ru1) for (j = 0; j <= gp1-1; j++) { ru1 = c3c4*rho_i[k][j][i]; // cv[j] = vs[k][j][i]; rhoqY[k][j][i] = max(max(dy3+con43*ru1, dy5+c1c5*ru1), max(dymax+ru1, dy1)); } #pragma omp simd for (j = 1; j <= gp1-2; j++) { lhsY[0][k][j][i] = 0.0; // lhsY[1][k][j][i] = -dtty2 * cv[j-1] - dtty1 * rhoqY[j-1]; lhsY[1][k][j][i] = -dtty2 * vs[k][j-1][i] - dtty1 * rhoqY[k][j-1][i]; lhsY[2][k][j][i] = 1.0 + c2dtty1 * rhoqY[k][j][i]; // lhsY[3][k][j][i] = dtty2 * cv[j+1] - dtty1 * rhoq[j+1]; lhsY[3][k][j][i] = dtty2 * vs[k][j+1][i] - dtty1 * rhoqY[k][j+1][i]; lhsY[4][k][j][i] = 0.0; } } } //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- j = 1; #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,k) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhsY[2][k][j][i] = lhsY[2][k][j][i] + comz5; lhsY[3][k][j][i] = lhsY[3][k][j][i] - comz4; lhsY[4][k][j][i] = lhsY[4][k][j][i] + comz1; lhsY[1][k][j+1][i] = lhsY[1][k][j+1][i] - comz4; lhsY[2][k][j+1][i] = lhsY[2][k][j+1][i] + comz6; lhsY[3][k][j+1][i] = lhsY[3][k][j+1][i] - comz4; lhsY[4][k][j+1][i] = lhsY[4][k][j+1][i] + comz1; } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target 
teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= gp2-2; k++) { for (j = 3; j <= gp1-4; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhsY[0][k][j][i] = lhsY[0][k][j][i] + comz1; lhsY[1][k][j][i] = lhsY[1][k][j][i] - comz4; lhsY[2][k][j][i] = lhsY[2][k][j][i] + comz6; lhsY[3][k][j][i] = lhsY[3][k][j][i] - comz4; lhsY[4][k][j][i] = lhsY[4][k][j][i] + comz1; } } } j = gp1-3; #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,k) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhsY[0][k][j][i] = lhsY[0][k][j][i] + comz1; lhsY[1][k][j][i] = lhsY[1][k][j][i] - comz4; lhsY[2][k][j][i] = lhsY[2][k][j][i] + comz6; lhsY[3][k][j][i] = lhsY[3][k][j][i] - comz4; lhsY[0][k][j+1][i] = lhsY[0][k][j+1][i] + comz1; lhsY[1][k][j+1][i] = lhsY[1][k][j+1][i] - comz4; lhsY[2][k][j+1][i] = lhsY[2][k][j+1][i] + comz5; } } //--------------------------------------------------------------------- // subsequently, do the other two factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= gp2-2; k++) { for (j = 1; j <= gp1-2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhspY[0][k][j][i] = lhsY[0][k][j][i]; lhspY[1][k][j][i] = lhsY[1][k][j][i] - dtty2 * speed[k][j-1][i]; lhspY[2][k][j][i] = lhsY[2][k][j][i]; lhspY[3][k][j][i] = lhsY[3][k][j][i] + dtty2 * speed[k][j+1][i]; lhspY[4][k][j][i] = lhsY[4][k][j][i]; lhsmY[0][k][j][i] = lhsY[0][k][j][i]; lhsmY[1][k][j][i] = lhsY[1][k][j][i] + dtty2 * speed[k][j-1][i]; lhsmY[2][k][j][i] = lhsY[2][k][j][i]; lhsmY[3][k][j][i] = lhsY[3][k][j][i] - dtty2 * speed[k][j+1][i]; lhsmY[4][k][j][i] = lhsY[4][k][j][i]; } } } //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- #pragma omp target teams distribute parallel for private(i,j,k,m,fac1,j1,j2) for (k = 1; k <= gp2-2; k++) { for (j = 0; j <= gp1-3; j++) { j1 = j + 1; j2 = j + 2; for (i = 1; i <= gp0-2; i++) { fac1 = 1.0/lhsY[2][k][j][i]; lhsY[3][k][j][i] = fac1*lhsY[3][k][j][i]; lhsY[4][k][j][i] = fac1*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsY[2][k][j1][i] = lhsY[2][k][j1][i] - lhsY[1][k][j1][i]*lhsY[3][k][j][i]; lhsY[3][k][j1][i] = lhsY[3][k][j1][i] - lhsY[1][k][j1][i]*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsY[1][k][j1][i]*rhs[m][k][j][i]; } lhsY[1][k][j2][i] = lhsY[1][k][j2][i] - lhsY[0][k][j2][i]*lhsY[3][k][j][i]; lhsY[2][k][j2][i] = lhsY[2][k][j2][i] - lhsY[0][k][j2][i]*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j2][i] = rhs[m][k][j2][i] - lhsY[0][k][j2][i]*rhs[m][k][j][i]; } } } } //--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they do not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- j = gp1-2; j1 = gp1-1; #pragma omp target teams
distribute parallel for private(i,k,m,fac1,fac2) collapse(2) for (k = 1; k <= gp2-2; k++) { for (i = 1; i <= gp0-2; i++) { fac1 = 1.0/lhsY[2][k][j][i]; lhsY[3][k][j][i] = fac1*lhsY[3][k][j][i]; lhsY[4][k][j][i] = fac1*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsY[2][k][j1][i] = lhsY[2][k][j1][i] - lhsY[1][k][j1][i]*lhsY[3][k][j][i]; lhsY[3][k][j1][i] = lhsY[3][k][j1][i] - lhsY[1][k][j1][i]*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsY[1][k][j1][i]*rhs[m][k][j][i]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0/lhsY[2][k][j1][i]; for (m = 0; m < 3; m++) { rhs[m][k][j1][i] = fac2*rhs[m][k][j1][i]; } } } //--------------------------------------------------------------------- // do the u+c and the u-c factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(j,m,fac1,j1,j2) collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,j1,j2) #endif for (i = 1; i <= gp0-2; i++) { for (j = 0; j <= gp1-3; j++) { j1 = j + 1; j2 = j + 2; m = 3; fac1 = 1.0/lhspY[2][k][j][i]; lhspY[3][k][j][i] = fac1*lhspY[3][k][j][i]; lhspY[4][k][j][i] = fac1*lhspY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspY[2][k][j1][i] = lhspY[2][k][j1][i] - lhspY[1][k][j1][i]*lhspY[3][k][j][i]; lhspY[3][k][j1][i] = lhspY[3][k][j1][i] - lhspY[1][k][j1][i]*lhspY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhspY[1][k][j1][i]*rhs[m][k][j][i]; lhspY[1][k][j2][i] = lhspY[1][k][j2][i] - lhspY[0][k][j2][i]*lhspY[3][k][j][i]; lhspY[2][k][j2][i] = lhspY[2][k][j2][i] - lhspY[0][k][j2][i]*lhspY[4][k][j][i]; rhs[m][k][j2][i] = rhs[m][k][j2][i] - lhspY[0][k][j2][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmY[2][k][j][i]; lhsmY[3][k][j][i] = fac1*lhsmY[3][k][j][i]; lhsmY[4][k][j][i] = fac1*lhsmY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmY[2][k][j1][i] = lhsmY[2][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[3][k][j][i]; lhsmY[3][k][j1][i] = lhsmY[3][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsmY[1][k][j1][i]*rhs[m][k][j][i]; lhsmY[1][k][j2][i] = lhsmY[1][k][j2][i] - lhsmY[0][k][j2][i]*lhsmY[3][k][j][i]; lhsmY[2][k][j2][i] = lhsmY[2][k][j2][i] - lhsmY[0][k][j2][i]*lhsmY[4][k][j][i]; rhs[m][k][j2][i] = rhs[m][k][j2][i] - lhsmY[0][k][j2][i]*rhs[m][k][j][i]; } } } //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- j = gp1-2; j1 = gp1-1; #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,k,m,fac1) #else #pragma omp target teams distribute parallel for simd private(m,fac1) collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(m, fac1) #endif for (i = 1; i <= gp0-2; i++) { m = 3; fac1 = 1.0/lhspY[2][k][j][i]; lhspY[3][k][j][i] = fac1*lhspY[3][k][j][i]; lhspY[4][k][j][i] = fac1*lhspY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspY[2][k][j1][i] = lhspY[2][k][j1][i] - lhspY[1][k][j1][i]*lhspY[3][k][j][i]; lhspY[3][k][j1][i] = lhspY[3][k][j1][i] - lhspY[1][k][j1][i]*lhspY[4][k][j][i]; rhs[m][k][j1][i] =
rhs[m][k][j1][i] - lhspY[1][k][j1][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmY[2][k][j][i]; lhsmY[3][k][j][i] = fac1*lhsmY[3][k][j][i]; lhsmY[4][k][j][i] = fac1*lhsmY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmY[2][k][j1][i] = lhsmY[2][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[3][k][j][i]; lhsmY[3][k][j1][i] = lhsmY[3][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsmY[1][k][j1][i]*rhs[m][k][j][i]; //--------------------------------------------------------------------- // Scale the last row immediately //--------------------------------------------------------------------- rhs[3][k][j1][i] = rhs[3][k][j1][i]/lhspY[2][k][j1][i]; rhs[4][k][j1][i] = rhs[4][k][j1][i]/lhsmY[2][k][j1][i]; } } //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- j = gp1-2; j1 = gp1-1; #pragma omp target teams distribute parallel for private(i,k,m) collapse(2) for (k = 1; k <= gp2-2; k++) { for (i = 1; i <= gp0-2; i++) { for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsY[3][k][j][i]*rhs[m][k][j1][i]; } rhs[3][k][j][i] = rhs[3][k][j][i] - lhspY[3][k][j][i]*rhs[3][k][j1][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmY[3][k][j][i]*rhs[4][k][j1][i]; } } //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(j,m,j1,j2) collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(j1,j2) #endif for (i = 1; i <= gp0-2; i++) { for (j = gp1-3; j >= 0; j--) { j1 = j + 1; j2 = j + 2; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsY[3][k][j][i]*rhs[m][k][j1][i] - lhsY[4][k][j][i]*rhs[m][k][j2][i]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhs[3][k][j][i] = rhs[3][k][j][i] - lhspY[3][k][j][i]*rhs[3][k][j1][i] - lhspY[4][k][j][i]*rhs[3][k][j2][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmY[3][k][j][i]*rhs[4][k][j1][i] - lhsmY[4][k][j][i]*rhs[4][k][j2][i]; } } } }/* end omp target data */ pinvr(); } void z_solve() { int i, j, k, k1, k2, m; int gp21,gp22,gp23; double ru1, fac1, fac2; double lhsZ[5][ny2+1][IMAXP+1][IMAXP+1]; double lhspZ[5][ny2+1][IMAXP+1][IMAXP+1]; double lhsmZ[5][ny2+1][IMAXP+1][IMAXP+1]; double rhosZ[ny2+1][IMAXP+1][PROBLEM_SIZE]; int ni=nz2+1; gp21=grid_points[2]-1; gp22=grid_points[2]-2; gp23=grid_points[2]-3; #pragma omp target data map(alloc:lhsZ[:][:][:][:],lhspZ[:][:][:][:],lhsmZ[:][:][:][:],rhosZ[:][:][:]) //present(rho_i,ws,speed,rhs) { #pragma omp target teams distribute parallel for private(i,j,m) collapse(2) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { lhsZ[m][j][0][i] = 0.0; lhspZ[m][j][0][i] = 0.0; lhsmZ[m][j][0][i] = 0.0; lhsZ[m][j][ni][i] = 0.0; lhspZ[m][j][ni][i] = 0.0; lhsmZ[m][j][ni][i] = 0.0; } lhsZ[2][j][0][i] = 1.0; lhspZ[2][j][0][i] = 1.0; lhsmZ[2][j][0][i] = 1.0; lhsZ[2][j][ni][i] = 1.0; lhspZ[2][j][ni][i] = 1.0; lhsmZ[2][j][ni][i] = 1.0; } } //--------------------------------------------------------------------- // Computes the left hand side for the three z-factors 
//--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,ru1) collapse(2) #else #pragma omp target teams distribute parallel for simd private(ru1) collapse(3) #endif for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (k = 0; k <= nz2+1; k++) { ru1 = c3c4*rho_i[k][j][i]; // cv[k] = ws[k][j][i]; rhosZ[j][i][k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1)); } } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (k = 1; k <= nz2; k++) { lhsZ[0][j][k][i] = 0.0; // lhs[k][i][1] = -dttz2 * cv[k-1] - dttz1 * rhos[k-1]; lhsZ[1][j][k][i] = -dttz2 * ws[k-1][j][i] - dttz1 * rhosZ[j][i][k-1]; lhsZ[2][j][k][i] = 1.0 + c2dttz1 * rhosZ[j][i][k]; // lhs[k][i][3] = dttz2 * cv[k+1] - dttz1 * rhos[k+1]; lhsZ[3][j][k][i] = dttz2 * ws[k+1][j][i] - dttz1 * rhosZ[j][i][k+1]; lhsZ[4][j][k][i] = 0.0; } } } //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) #else #pragma omp target teams distribute parallel for simd private(k) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { k = 1; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz5; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; lhsZ[4][j][k][i] = lhsZ[4][j][k][i] + comz1; k = 2; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz6; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; lhsZ[4][j][k][i] = lhsZ[4][j][k][i] + comz1; } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (j = 1; j <= ny2; j++) { for (k = 3; k <= nz2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { lhsZ[0][j][k][i] = lhsZ[0][j][k][i] + comz1; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz6; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; lhsZ[4][j][k][i] = lhsZ[4][j][k][i] + comz1; } } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) #else #pragma omp target teams distribute parallel for simd private(k) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { k = nz2-1; lhsZ[0][j][k][i] = lhsZ[0][j][k][i] + comz1; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz6; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; k = nz2; lhsZ[0][j][k][i] = lhsZ[0][j][k][i] + comz1; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz5; } } //--------------------------------------------------------------------- // subsequently, fill the other factors (u+c), (u-c) 
//--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (j = 1; j <= ny2; j++) { for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { lhspZ[0][j][k][i] = lhsZ[0][j][k][i]; lhspZ[1][j][k][i] = lhsZ[1][j][k][i] - dttz2 * speed[k-1][j][i]; lhspZ[2][j][k][i] = lhsZ[2][j][k][i]; lhspZ[3][j][k][i] = lhsZ[3][j][k][i] + dttz2 * speed[k+1][j][i]; lhspZ[4][j][k][i] = lhsZ[4][j][k][i]; lhsmZ[0][j][k][i] = lhsZ[0][j][k][i]; lhsmZ[1][j][k][i] = lhsZ[1][j][k][i] + dttz2 * speed[k-1][j][i]; lhsmZ[2][j][k][i] = lhsZ[2][j][k][i]; lhsmZ[3][j][k][i] = lhsZ[3][j][k][i] - dttz2 * speed[k+1][j][i]; lhsmZ[4][j][k][i] = lhsZ[4][j][k][i]; } } } //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(k,m,fac1,k1,k2) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,k1,k2) #endif for (i = 1; i <= nx2; i++) { for (k = 0; k <= gp23; k++) { k1 = k + 1; k2 = k + 2; fac1 = 1.0/lhsZ[2][j][k][i]; lhsZ[3][j][k][i] = fac1*lhsZ[3][j][k][i]; lhsZ[4][j][k][i] = fac1*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsZ[2][j][k1][i] = lhsZ[2][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[3][j][k][i]; lhsZ[3][j][k1][i] = lhsZ[3][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsZ[1][j][k1][i]*rhs[m][k][j][i]; } lhsZ[1][j][k2][i] = lhsZ[1][j][k2][i] - lhsZ[0][j][k2][i]*lhsZ[3][j][k][i]; lhsZ[2][j][k2][i] = lhsZ[2][j][k2][i] - lhsZ[0][j][k2][i]*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k2][j][i] = rhs[m][k2][j][i] - lhsZ[0][j][k2][i]*rhs[m][k][j][i]; } } } } //--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they do not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- k = gp22; k1 = gp21; #pragma omp target teams distribute parallel for private(m,fac1,fac2) collapse(2) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { fac1 = 1.0/lhsZ[2][j][k][i]; lhsZ[3][j][k][i] = fac1*lhsZ[3][j][k][i]; lhsZ[4][j][k][i] = fac1*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsZ[2][j][k1][i] = lhsZ[2][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[3][j][k][i]; lhsZ[3][j][k1][i] = lhsZ[3][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsZ[1][j][k1][i]*rhs[m][k][j][i]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0/lhsZ[2][j][k1][i]; for (m = 0; m < 3; m++) { rhs[m][k1][j][i] = fac2*rhs[m][k1][j][i]; } } } //--------------------------------------------------------------------- // do the u+c and the u-c factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute
parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(k,m,fac1,k1,k2) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,k1,k2) #endif for (i = 1; i <= nx2; i++) { for (k = 0; k <= gp23; k++) { k1 = k + 1; k2 = k + 2; m = 3; fac1 = 1.0/lhspZ[2][j][k][i]; lhspZ[3][j][k][i] = fac1*lhspZ[3][j][k][i]; lhspZ[4][j][k][i] = fac1*lhspZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspZ[2][j][k1][i] = lhspZ[2][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[3][j][k][i]; lhspZ[3][j][k1][i] = lhspZ[3][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhspZ[1][j][k1][i]*rhs[m][k][j][i]; lhspZ[1][j][k2][i] = lhspZ[1][j][k2][i] - lhspZ[0][j][k2][i]*lhspZ[3][j][k][i]; lhspZ[2][j][k2][i] = lhspZ[2][j][k2][i] - lhspZ[0][j][k2][i]*lhspZ[4][j][k][i]; rhs[m][k2][j][i] = rhs[m][k2][j][i] - lhspZ[0][j][k2][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmZ[2][j][k][i]; lhsmZ[3][j][k][i] = fac1*lhsmZ[3][j][k][i]; lhsmZ[4][j][k][i] = fac1*lhsmZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmZ[2][j][k1][i] = lhsmZ[2][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[3][j][k][i]; lhsmZ[3][j][k1][i] = lhsmZ[3][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsmZ[1][j][k1][i]*rhs[m][k][j][i]; lhsmZ[1][j][k2][i] = lhsmZ[1][j][k2][i] - lhsmZ[0][j][k2][i]*lhsmZ[3][j][k][i]; lhsmZ[2][j][k2][i] = lhsmZ[2][j][k2][i] - lhsmZ[0][j][k2][i]*lhsmZ[4][j][k][i]; rhs[m][k2][j][i] = rhs[m][k2][j][i] - lhsmZ[0][j][k2][i]*rhs[m][k][j][i]; } } } //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- k = gp22; k1 = gp21; #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,m,fac1) #else #pragma omp target teams distribute parallel for simd private(m,fac1) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(m, fac1) #endif for (i = 1; i <= nx2; i++) { m = 3; fac1 = 1.0/lhspZ[2][j][k][i]; lhspZ[3][j][k][i] = fac1*lhspZ[3][j][k][i]; lhspZ[4][j][k][i] = fac1*lhspZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspZ[2][j][k1][i] = lhspZ[2][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[3][j][k][i]; lhspZ[3][j][k1][i] = lhspZ[3][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhspZ[1][j][k1][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmZ[2][j][k][i]; lhsmZ[3][j][k][i] = fac1*lhsmZ[3][j][k][i]; lhsmZ[4][j][k][i] = fac1*lhsmZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmZ[2][j][k1][i] = lhsmZ[2][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[3][j][k][i]; lhsmZ[3][j][k1][i] = lhsmZ[3][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsmZ[1][j][k1][i]*rhs[m][k][j][i]; //--------------------------------------------------------------------- // Scale the last row immediately (some of this is overkill // if this is the last cell) //--------------------------------------------------------------------- rhs[3][k1][j][i] = rhs[3][k1][j][i]/lhspZ[2][j][k1][i]; rhs[4][k1][j][i] = rhs[4][k1][j][i]/lhsmZ[2][j][k1][i]; } } //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- k = gp22; k1 = gp21; #pragma omp target teams distribute parallel for private(i,j,m) collapse(2) for (j = 1; j <= ny2; j++) { for 
(i = 1; i <= nx2; i++) { for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsZ[3][j][k][i]*rhs[m][k1][j][i]; } rhs[3][k][j][i] = rhs[3][k][j][i] - lhspZ[3][j][k][i]*rhs[3][k1][j][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmZ[3][j][k][i]*rhs[4][k1][j][i]; } } //--------------------------------------------------------------------- // Whether or not this is the last processor, we always have // to complete the back-substitution //--------------------------------------------------------------------- //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(k,m,k1,k2) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(k1,k2) #endif for (i = 1; i <= nx2; i++) { for (k = gp23; k >= 0; k--) { k1 = k + 1; k2 = k + 2; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsZ[3][j][k][i]*rhs[m][k1][j][i] - lhsZ[4][j][k][i]*rhs[m][k2][j][i]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhs[3][k][j][i] = rhs[3][k][j][i] - lhspZ[3][j][k][i]*rhs[3][k1][j][i] - lhspZ[4][j][k][i]*rhs[3][k2][j][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmZ[3][j][k][i]*rhs[4][k1][j][i] - lhsmZ[4][j][k][i]*rhs[4][k2][j][i]; } } } }/* end omp target data */ tzetar(); }
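//---------------------------------------------------------------------
// Editor's note: x_solve, y_solve, and z_solve above all apply the same
// two-phase scheme along each grid line: a forward-elimination sweep that
// scales the pivot row (fac1, fac2) and cancels the sub-diagonal entries
// of the following rows, then back substitution in reverse order. Below
// is a minimal, self-contained sketch of that scheme for a single
// tridiagonal system (the solvers above eliminate two sub-diagonals per
// row instead of one). The name thomas_solve and the arrays a, b, c, d
// are illustrative only and are not part of the benchmark.
//---------------------------------------------------------------------
// Solves a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = d[i] for i = 0..n-1,
// with a[0] == 0 and c[n-1] == 0; on return d[] holds the solution.
static void thomas_solve(int n, const double *a, double *b, double *c, double *d)
{
  // forward elimination: normalize row i by its pivot, then cancel the
  // sub-diagonal entry a[i+1] of the following row
  for (int i = 0; i < n - 1; i++) {
    double fac = 1.0 / b[i];
    c[i] *= fac;
    d[i] *= fac;
    b[i+1] -= a[i+1] * c[i];
    d[i+1] -= a[i+1] * d[i];
  }
  d[n-1] /= b[n-1];   // scale the last row immediately
  // back substitution: each normalized row i only couples to row i+1
  for (int i = n - 2; i >= 0; i--) {
    d[i] -= c[i] * d[i+1];
  }
}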
GrB_Matrix_nvals.c
//------------------------------------------------------------------------------ // GrB_Matrix_nvals: number of entries in a sparse matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GrB_Matrix_nvals // get the number of entries in a matrix ( GrB_Index *nvals, // matrix has nvals entries const GrB_Matrix A // matrix to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Matrix_nvals (&nvals, A)") ; GB_BURBLE_START ("GrB_Matrix_nvals") ; GB_RETURN_IF_NULL_OR_FAULTY (A) ; //-------------------------------------------------------------------------- // get the number of entries //-------------------------------------------------------------------------- GrB_Info info = GB_nvals (nvals, A, Context) ; GB_BURBLE_END ; #pragma omp flush return (info) ; }
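//------------------------------------------------------------------------------
// Editor's note: a minimal usage sketch for GrB_Matrix_nvals, not part of the
// library source. It assumes a standard GraphBLAS installation that provides
// GraphBLAS.h; the block is kept under #if 0 so it does not introduce a second
// entry point into this translation unit.
//------------------------------------------------------------------------------
#if 0
#include <stdio.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A = NULL ;
    GrB_Matrix_new (&A, GrB_FP64, 4, 4) ;           // empty 4-by-4 matrix
    GrB_Matrix_setElement_FP64 (A, 3.14, 1, 2) ;    // A(1,2) = 3.14
    GrB_Index nvals = 0 ;
    if (GrB_Matrix_nvals (&nvals, A) == GrB_SUCCESS)
    {
        printf ("nvals: %lu\n", (unsigned long) nvals) ;    // prints 1
    }
    GrB_Matrix_free (&A) ;
    GrB_finalize ( ) ;
    return (0) ;
}
#endif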
7025.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(4) { /* E := A*B */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(static, 1) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
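/* Editor's note: an illustrative serial reference for the kernel above, not
   part of PolyBench. It computes the same product chain G = (A*B)*(C*D) with
   plain C99 variable-length arrays so the output of the OpenMP kernel can be
   spot-checked; the name ref_3mm and its signature are assumptions. Kept
   under #if 0 to avoid an unused function in this translation unit. */
#if 0
static void ref_3mm(int ni, int nj, int nk, int nl, int nm,
                    double E[ni][nj], double A[ni][nk], double B[nk][nj],
                    double F[nj][nl], double C[nj][nm], double D[nm][nl],
                    double G[ni][nl])
{
  /* E := A*B */
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < nj; j++) {
      E[i][j] = 0;
      for (int k = 0; k < nk; ++k)
        E[i][j] += A[i][k] * B[k][j];
    }
  /* F := C*D */
  for (int i = 0; i < nj; i++)
    for (int j = 0; j < nl; j++) {
      F[i][j] = 0;
      for (int k = 0; k < nm; ++k)
        F[i][j] += C[i][k] * D[k][j];
    }
  /* G := E*F */
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < nl; j++) {
      G[i][j] = 0;
      for (int k = 0; k < nj; ++k)
        G[i][j] += E[i][k] * F[k][j];
    }
}
#endif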
GB_binop__ge_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__ge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_uint16) // A*D function (colscale): GB (_AxD__ge_uint16) // D*A function (rowscale): GB (_DxB__ge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__ge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__ge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_uint16) // C=scalar+B GB (_bind1st__ge_uint16) // C=scalar+B' GB (_bind1st_tran__ge_uint16) // C=A+scalar GB (_bind2nd__ge_uint16) // C=A'+scalar GB (_bind2nd_tran__ge_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_UINT16 || GxB_NO_GE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) 
{ // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
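//------------------------------------------------------------------------------
// Editor's note: the bind1st/bind2nd kernels above specialize the generic
// apply-with-scalar operation for z = (x >= y) on uint16_t. The standalone
// sketch below shows the same idea with the GBB/GBX bitmap and iso-value
// macros stripped out; the name ge_uint16_bind2nd and its signature are
// illustrative only. Kept under #if 0 so it stays out of the build.
//------------------------------------------------------------------------------
#if 0
#include <stdint.h>
#include <stdbool.h>

// Cx [p] = (Ax [p] >= y) for every entry p; the generated kernel above
// additionally skips entries absent from the bitmap Ab and parallelizes
// the loop with OpenMP.
static void ge_uint16_bind2nd (bool *Cx, const uint16_t *Ax, uint16_t y,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= y) ;
    }
}
#endif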
OMP-Jacobi-1D-Sliced-Diamond-Tiling.test.c
//#include "jacobi1d.h" //#include "trapezoidTiling.h" #include <stdio.h> #include <omp.h> #include <time.h> #include <stdlib.h> #include <unistd.h> #include <getopt.h> #include <ctype.h> //#include "trapezoidTiling.h" //#include "jacobi1d.h" #include <stdbool.h> #include <assert.h> //#include "jacobi1d.h" #include <stdbool.h> #include <stdio.h> #include <omp.h> #include <time.h> #include <stdlib.h> #include <assert.h> #define stencil(read,write,x) space[write][x] = (space[read][x-1] + space[read][x] + space[read][x+1])/3; bool initedJacobi = false; int globalSeed = -1; int cores = -1; int problemSize = -1, T = -1, lowerBound = -1, upperBound = -1; double* space[2] = { NULL, NULL }; // space[t][x] for (t,x) in { {0,1} X {lowerBound, ... , upperBound} }; int min( int a, int b){ return (a <= b)? a : b; } int max( int a, int b){ return (a >= b)? a : b; } /* void stencil( int read, int write, int x ){ // stencil operation space[write][x] = (space[read][x-1] + space[read][x] + space[read][x+1])/3; } */ void initJacobi(){ // if init has not already been called (preserves things like the global seed). if( ! initedJacobi ){ // note the convention someVar = ( someVar == -1 )? defaultValue : someVar ; // this allows us to use the cmd line flags to set variables, AND have an init call. // all values are initialized with -1 in global space, so if someVar == -1, then it has // not been set, and can be given a default value. // seed for random number generator. // allows all initSpace calls to generate the same initial values globalSeed = (globalSeed== -1)? time(NULL) : globalSeed; // problemSpace parameters T = (T == -1)? 100 : T; problemSize = (problemSize == -1)? 1000000 : problemSize; lowerBound = 1; upperBound = lowerBound + problemSize - 1; cores = (cores == -1)? omp_get_num_procs() : cores ; omp_set_num_threads( cores ); // set initialization flag initedJacobi = true; } } // initialize space array void initSpace(){ // if space has been previously allocated, free up space. if( space[0] != NULL ){ free( space[0] ); } if( space[1] != NULL ) { free( space[1] ); } /* // allocate space space = (double**) malloc( 2 * sizeof(double*) ); if( space == NULL ){ printf( "Could not allocate space array\n" ); exit(0); } */ // allocate time-steps 0 and 1 space[0] = (double*) malloc( (problemSize + 2) * sizeof(double)); space[1] = (double*) malloc( (problemSize + 2) * sizeof(double)); if( space[0] == NULL || space[1] == NULL ){ printf( "Could not allocate space array\n" ); exit(0); } // use global seed to seed the random number gen (will be constant) srand(globalSeed); // seed the space.
int x; for( x = lowerBound; x <= upperBound; ++x ){ space[0][x] = rand() / (double)rand(); } // set halo values (sanity) space[0][0] = 0; space[0][upperBound+1] = 0; space[1][0] = 0; space[1][upperBound+1] = 0; } // parse int abstraction from strtol int parseInt( char* string ){ return (int) strtol( string, NULL, 10 ); } bool verifyResult( bool verbose ){ assert( space[0] != NULL && space[1] != NULL ); double* endSpace = (double*) malloc( (problemSize + 2) * sizeof(double) ); for( int x = 0; x < problemSize + 2; ++x ){ endSpace[x] = space[T & 1][x]; } initSpace(); int read = 0, write = 1; for( int t = 1; t <= T; ++t ){ for( int x = lowerBound; x <= upperBound; ++x ){ stencil(read, write, x); } read = write; write = 1 - write; } bool failed = false; for( int x = lowerBound; x <= upperBound; ++x ){ if( endSpace[x] != space[T & 1][x] ){ failed = true; if( verbose ) printf( "FAILED\n");// %f != %f at %d\n", endSpace[x], space[T & 1][x], x ); break; } } if( verbose && !failed ) printf( "SUCCESS\n" ); free( endSpace ); return !failed; } bool initedTrapezoid = false; int timeBand = -1, width_max = -1, width_min = -1; int tiles_A_start = -1, tiles_B_start = -1, betweenTiles = -1; int count_A_tiles = 0, count_B_tiles = 0; int A_tiles_per_core = 0, B_tiles_per_core = 0; int countTiles( char type ){ int x0; int count = 0; if( type == 0 || type == 'a' || type == 'A' ){ for( x0 = tiles_A_start; x0 <= upperBound; x0 += betweenTiles ){ count += 1; } } else if( type == 1 || type == 'b' || type == 'B' ){ for( x0 = tiles_B_start; x0 <= upperBound; x0 += betweenTiles ){ count += 1; } } return count; } void initTrapezoid(){ // if init has not already been called (preserves things like the global seed). if( initedJacobi && !initedTrapezoid ){ // note the convention someVar = ( someVar == -1 )? defaultValue : someVar ; // this allows us to use the cmd line flags to set variables, AND have an init call. // all values are initialized with -1 in global space, so if someVar == -1, then it has // not been set, and can be given a default value. // tile size parameters timeBand = (timeBand == -1)? 100 : timeBand; width_max = (width_max == -1)?
54701 : width_max ; width_min = (width_max + -1 * timeBand) - (0 + 1 * timeBand) +1; tiles_A_start = lowerBound - timeBand + 1; // starting point for doing 'A' tiles loops tiles_B_start = tiles_A_start + width_max; // starting point for doing 'B' tiles loop betweenTiles = width_min + width_max; // width between the first x0 point and next x0 point // assert that this is a valid tile assert( width_min >= 1 && width_max >= width_min ); count_A_tiles = countTiles( 'a' ); count_B_tiles = countTiles( 'b' ); A_tiles_per_core = max( 1, count_A_tiles / cores ); B_tiles_per_core = max( 1, count_B_tiles / cores ); //printf("count A tiles: %d\n", count_A_tiles ); //printf("count B tiles: %d\n", count_B_tiles ); // set initialization flag initedTrapezoid = true; } } // Parallel Tiling double test_1(){ double start_time = omp_get_wtime(); int write, read; // read and write buffers int t0, t1, x0, x1, dx0, dx1; // most values of the tile tuples int t, x; // indices into space (t,x) // for all t0 in t0..T by timeBand for( t0 = 1; t0 <= T; t0 += timeBand ) { // set and clamp t1 from t0 t1 = min(t0 + timeBand - 1, T); // Do A-tiles // set dx0 and dx1 to correct A-tile values dx0 = 1; dx1 = -1; // iterate over all x0 points for A-tiles #pragma omp parallel for private( x0, x1, write, read, t, x) schedule(dynamic, A_tiles_per_core) for( x0 = tiles_A_start; x0 <= upperBound; x0 += betweenTiles ){ x1 = x0 + width_max - 1; // set x1 from x0 // Set read and write buffer. // this is equivalent to (t0 - 1) % 2 but assumed faster read = (t0 - 1) & 1; write = 1 - read; // if x0 is at or below lower bound (left edge tile) if( x0 <= lowerBound ) { //printf("%d, %d, %d, %d, %d, %d\n", lowerBound, 0, x1, dx1, t0, t1 ); // for t in t0 ... t1 for( t = t0; t<= t1; ++t ){ //#pragma omp parallel for private( x ) schedule(static) // for x in lowerBound ... x1'ish int minVal = min(x1 + dx1 * (t - t0), upperBound ); for( x = lowerBound; x <= minVal; ++x){ stencil( read, write, x ); // stencil computation }// for x // flip write buffer read = write; write = 1 - write; }// for t }// if x0 <= lowerBound // if x1 is at or above upper bound (right edge tile) else if( x1 >= upperBound ){ //printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, upperBound, 0, t0, t1 ); // for t in t0...t1 for( t = t0; t<= t1; ++t ){ //#pragma omp parallel for private( x ) schedule(static) // for x in x0'ish ... upperbound for( x = max(x0 + dx0 * (t - t0), lowerBound); x <= upperBound; ++x){ stencil( read, write, x ); // stencil computation }// for x // flip write buffer read = write; write = 1 - write; }// for t }// else if x1 >= upperBound // otherwise regular ol' tile else { //printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, x1, dx1, t0, t1 ); // for t in t0 ... t1 for( t = t0; t<= t1; ++t ){ //#pragma omp parallel for private( x ) schedule(static) // for x in x0'ish ... x1'ish int minVal = min(x1 + dx1 * (t - t0), upperBound ); for( x = max(x0 + dx0 * (t - t0), lowerBound); x <= minVal; ++x){ stencil( read, write, x ); // stencil computation }// for x // flip write buffer read = write; write = 1 - write; }// for t }// else }// for A-tiles // Do B-tiles // set dx0 and dx1 to correct B-tile values dx0 = -1; dx1 = 1; // iterate over x0 points for B-tiles #pragma omp parallel for private( x0, x1, write, read, t, x) schedule(dynamic,B_tiles_per_core) for( x0 = tiles_B_start; x0 <= upperBound; x0 += betweenTiles ){ x1 = x0 + width_min - 1; // set x1 from x0 // Set write buffer.
            // this is equivalent to (t0 - 1) % 2, but assumed faster
            read = (t0 - 1) & 1;
            write = 1 - read;

            // if x1 is at or above upper bound (right edge tile)
            if( x1 >= upperBound ){
                //printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, upperBound, 0, t0, t1 );
                // for t in t0 ... t1
                for( t = t0; t <= t1; ++t ){
                    //#pragma omp parallel for private( x ) schedule(static)
                    // for x in x0'ish ... upper bound
                    for( x = max( x0 + dx0 * (t - t0), lowerBound); x <= upperBound; ++x){
                        stencil( read, write, x ); // stencil computation
                    }// for x
                    // flip write buffer
                    read = write;
                    write = 1 - write;
                }// for t
            }// if x1 >= upperBound
            // regular ol' tile
            else {
                //printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, x1, dx1, t0, t1 );
                // for t in t0 ... t1
                for( t = t0; t <= t1; ++t ){
                    //#pragma omp parallel for private( x ) schedule(static)
                    // for x in x0'ish ... x1'ish
                    int minVal = min(x1 + dx1 * (t - t0), upperBound);
                    for( x = max(x0 + dx0 * (t - t0), lowerBound); x <= minVal; ++x){
                        stencil( read, write, x ); // stencil computation
                    }// for x
                    // flip write buffer
                    read = write;
                    write = 1 - write;
                } // for t
            }// else
        } // for B-tiles
    }// for t0

    double end_time = omp_get_wtime();
    return (end_time - start_time);
}

int main( int argc, char* argv[] ){
    setbuf(stdout, NULL); // set buffer to null, so prints ALWAYS print (for debug purposes mainly)

    bool verify = false;
    bool printtime = true;

    // Command line parsing
    int c; // int, not char: getopt() returns an int and signals the end of options with -1
    while ((c = getopt (argc, argv, "nc:s:p:T:t:w:hv")) != -1){
        switch( c ) {
            case 'n': // print time
                printtime = false;
                break;

            case 'c': // cores
                cores = parseInt( optarg );
                if( cores <= 0 ){
                    fprintf(stderr, "cores must be greater than 0: %d\n", cores);
                    exit( 0 );
                }
                break;

            case 'p': // problem size
                problemSize = parseInt( optarg );
                if( problemSize <= 0 ){
                    fprintf(stderr, "problemSize must be greater than 0: %d\n", problemSize);
                    exit( 0 );
                }
                break;

            case 'T': // T (time steps)
                T = parseInt( optarg );
                if( T <= 0 ){
                    fprintf(stderr, "T must be greater than 0: %d\n", T);
                    exit( 0 );
                }
                break;

            case 't': // timeBand
                timeBand = parseInt( optarg );
                if( timeBand <= 0 ){
                    fprintf(stderr, "t must be greater than 0: %d\n", timeBand);
                    exit( 0 );
                }
                break;

            case 'w': // width
                width_max = parseInt( optarg );
                if( width_max <= 0 ){
                    fprintf(stderr, "w must be greater than 0: %d\n", width_max);
                    exit( 0 );
                }
                break;

            case 'h': // help
                printf("usage: %s\n-n \t don't print time \n-p <problem size> \t problem size in elements \n-T <time steps>\t number of time steps\n-c <cores>\tnumber of threads\n-w <tile width>\t the width of the tile\n-t <tile height>\t the number of timesteps in a tile\n-h\tthis dialogue\n-v\tverify output\n", argv[0]);
                exit(0);

            case 'v': // verify
                verify = true;
                break;

            case '?':
                if (optopt == 'p')
                    fprintf (stderr, "Option -%c requires positive int argument: problem size.\n", optopt);
                else if (optopt == 'T')
                    fprintf (stderr, "Option -%c requires positive int argument: T.\n", optopt);
                else if (optopt == 's')
                    fprintf (stderr, "Option -%c requires int argument: subset_s.\n", optopt);
                else if (optopt == 'c')
                    fprintf (stderr, "Option -%c requires int argument: number of cores.\n", optopt);
                else if (isprint (optopt))
                    fprintf (stderr, "Unknown option `-%c'.\n", optopt);
                else
                    fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
                exit(0);

            default:
                exit(0);
        }
    }

    initJacobi();
    initTrapezoid();
    initSpace();

    double time = test_1();
    if( printtime ){
        printf( "Time: %f\n", time );
    }

    if( verify ){
        verifyResult( true );
    }
}
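// Hedged standalone sketch (not part of the benchmark above, names are illustrative):
// demonstrates the buffer-parity trick used throughout test_1(), where
// read = (t0 - 1) & 1 selects the read buffer. For nonnegative t0 - 1, the
// bitmask (t0 - 1) & 1 equals (t0 - 1) % 2, so it is a branch-free substitute
// for the modulo when alternating between the two space[] buffers.
#include <cassert>

int bufferParityDemo() {
    for( int t0 = 1; t0 <= 1000; ++t0 ){
        int read  = (t0 - 1) & 1;   // parity via bitmask
        int write = 1 - read;       // the other buffer
        assert( read == (t0 - 1) % 2 );
        assert( read != write );
    }
    return 0;
}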
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. */ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 // Transpose flag of the right-hand side dense vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); using ET1 = ElementType_t<VT1>; using ET2 = ElementType_t<VT2>; constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> ); constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size ); const bool lhsAligned( (~lhs).isAligned() ); const bool rhsAligned( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && lhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && rhsAligned ) { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \param op The (compound) assignment operation. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 // Transpose flag of the right-hand side sparse vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); assign( ~lhs, ~rhs ); } /*! 
\endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ assign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ addAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. 
Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case both operands are SMP-assignable and the element // types of both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ subAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! 
\cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. // \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ multAssign( a, b ); } ); } } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); divAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. // \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { divAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, []( auto& a, const auto& b ){ divAssign( a, b ); } ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
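// Hedged usage sketch (not part of the header above): assuming Blaze was built
// with OpenMP shared-memory parallelization enabled (the BLAZE_OPENMP_PARALLEL_MODE
// configuration this header statically asserts) and the vectors are large enough
// to pass Blaze's SMP size thresholds, the plain assignments below dispatch
// through smpAssign()/smpAddAssign() into the openmpAssign() kernels defined here.
#include <blaze/Math.h>

int denseVectorSmpDemo() {
    blaze::DynamicVector<double> a( 1000000UL, 1.0 );
    blaze::DynamicVector<double> b( 1000000UL, 2.0 );
    blaze::DynamicVector<double> c;
    c  = a + b;  // plain assignment    -> smpAssign()
    c += a;      // addition assignment -> smpAddAssign()
    return static_cast<int>( c[0UL] );  // 4.0
}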
GB_binop__remainder_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB_AaddB__remainder_fp32
// A.*B function (eWiseMult): GB_AemultB__remainder_fp32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__remainder_fp32
// C+=b function (dense accum): GB_Cdense_accumb__remainder_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__remainder_fp32
// C=scalar+B GB_bind1st__remainder_fp32
// C=scalar+B' GB_bind1st_tran__remainder_fp32
// C=A+scalar GB_bind2nd__remainder_fp32
// C=A'+scalar GB_bind2nd_tran__remainder_fp32

// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = remainderf (aij, bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    float bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = remainderf (x, y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_REMAINDER || GxB_NO_FP32 || GxB_NO_REMAINDER_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__remainder_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__remainder_fp32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__remainder_fp32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *GB_RESTRICT Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__remainder_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__remainder_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__remainder_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = remainderf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__remainder_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = remainderf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = remainderf (x, aij) ; \ } GrB_Info GB_bind1st_tran__remainder_fp32 ( GrB_Matrix C, const GB_void 
*x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = remainderf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__remainder_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
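// Hedged standalone sketch (not part of the generated kernel above): the scalar
// operation this file applies per entry is remainderf() from <cmath>, the IEEE 754
// remainder. It rounds the quotient to the nearest integer (ties to even), so the
// result can be negative even for positive operands, unlike std::fmodf(), which
// truncates the quotient toward zero.
#include <cmath>
#include <cstdio>

int remainderFp32Demo() {
    std::printf( "remainderf(5.5f, 2.0f) = %g\n", std::remainderf( 5.5f, 2.0f ) ); // -0.5 (rounded quotient 3)
    std::printf( "fmodf(5.5f, 2.0f)      = %g\n", std::fmodf( 5.5f, 2.0f ) );      //  1.5 (truncated quotient 2)
    return 0;
}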
bug_set_schedule_0.c
// RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdlib.h> // for exit()
#include <omp.h>
#include "omp_testsuite.h"

/* Test that the chunk size is set to default (1) when chunk size <= 0
   is specified */
int a = 0;

int test_set_schedule_0()
{
  int i;
  a = 0;
  omp_set_schedule(omp_sched_dynamic,0);

  #pragma omp parallel
  {
    #pragma omp for schedule(runtime)
    for(i = 0; i < 10; i++) {
      #pragma omp atomic
      a++;
      if(a > 10)
        exit(1);
    }
  }
  return a==10;
}

int main()
{
  int i;
  int num_failed=0;

  for(i = 0; i < REPETITIONS; i++) {
    if(!test_set_schedule_0()) {
      num_failed++;
    }
  }
  return num_failed;
}
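// Hedged companion sketch (not part of the test above): omp_get_schedule() reads
// back the schedule set by omp_set_schedule(). Per the OpenMP spec, a chunk size
// <= 0 passed with omp_sched_dynamic means "use the default chunk size" (1 for
// dynamic); whether the readback reports that default or the raw stored value is
// runtime-dependent.
#include <omp.h>
#include <cstdio>

int scheduleReadbackDemo() {
    omp_set_schedule( omp_sched_dynamic, 0 ); // chunk <= 0 -> default chunk
    omp_sched_t kind;
    int chunk;
    omp_get_schedule( &kind, &chunk );
    std::printf( "kind=%d chunk=%d\n", static_cast<int>( kind ), chunk );
    return 0;
}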
calculate_discontinuous_distance_to_skin_process.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Pooyan Dadvand
//                   Ruben Zorrilla
//
//  Collaborators:   Franziska Wahl
//

#if !defined(KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED )
#define KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED

// System includes
#include <string>
#include <iostream>

// External includes

// Project includes
#include "geometries/plane_3d.h"
#include "includes/checks.h"
#include "processes/process.h"
#include "processes/find_intersected_geometrical_objects_process.h"
#include "utilities/variable_utils.h"
#include "utilities/pointer_communicator.h"

namespace Kratos
{
///@addtogroup Kratos Core
///@{

///@name Kratos Classes
///@{

class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcessFlags
{
public:
    KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_ELEMENTAL_EDGE_DISTANCES);              /// Local flag to switch on/off the elemental edge distances storage
    KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_ELEMENTAL_EDGE_DISTANCES_EXTRAPOLATED); /// Local flag to switch on/off the extrapolated elemental edge distances storage
    KRATOS_DEFINE_LOCAL_FLAG(USE_POSITIVE_EPSILON_FOR_ZERO_VALUES);            /// Local flag to switch from positive (true) to negative (false) epsilon when replacing zero distance values.
};

/// This only calculates the distance. Calculating the inside/outside should be done by a derived class of this.
/** This process takes a volume model part (with tetrahedra mesh) and a skin model part (with triangle mesh)
    and calculates the distance to the skin for all the elements and nodes of the volume model part.
*/
template<std::size_t TDim = 3>
class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcess : public Process
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of CalculateDiscontinuousDistanceToSkinProcess
    KRATOS_CLASS_POINTER_DEFINITION(CalculateDiscontinuousDistanceToSkinProcess);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Constructor to be used.
    CalculateDiscontinuousDistanceToSkinProcess(
        ModelPart& rVolumePart,
        ModelPart& rSkinPart);

    /// Constructor with option
    CalculateDiscontinuousDistanceToSkinProcess(
        ModelPart& rVolumePart,
        ModelPart& rSkinPart,
        const Flags rOptions);

    /// Constructor with parameters
    CalculateDiscontinuousDistanceToSkinProcess(
        ModelPart& rVolumePart,
        ModelPart& rSkinPart,
        Parameters& rParameters);

    /// Destructor.
    ~CalculateDiscontinuousDistanceToSkinProcess() override;

    ///@}
    ///@name Deleted
    ///@{

    /// Default constructor.
    CalculateDiscontinuousDistanceToSkinProcess() = delete;

    /// Copy constructor.
    CalculateDiscontinuousDistanceToSkinProcess(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete;

    /// Assignment operator.
    CalculateDiscontinuousDistanceToSkinProcess& operator=(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete;

    FindIntersectedGeometricalObjectsProcess mFindIntersectedObjectsProcess;

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Initializes discontinuous distance computation process
     * This method initializes the TO_SPLIT flag, the DISTANCE and
     * ELEMENTAL_DISTANCES variables as well as the EMBEDDED_VELOCITY
     */
    virtual void Initialize();

    /**
     * @brief Calls the FindIntersectedObjectsProcess to find the intersections
     * This method calls the FindIntersectedObjectsProcess FindIntersections method.
     */
    virtual void FindIntersections();

    /**
     * @brief Get the array containing the intersecting objects
     * This method returns an array containing pointers to the intersecting geometries
     * @return std::vector<PointerVector<GeometricalObject>>&
     */
    virtual std::vector<PointerVector<GeometricalObject>>& GetIntersections();

    /**
     * @brief Computes the elemental distance values
     * Given an intersecting objects vector, this method computes the elemental distance field
     * @param rIntersectedObjects array containing pointers to the intersecting geometries
     */
    virtual void CalculateDistances(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects);

    /**
     * @brief Calls the FindIntersectedObjects Clear() method
     * This method calls the FindIntersectedObjects Clear() to empty the intersecting objects geometries array
     */
    void Clear() override;

    /**
     * @brief Executes the CalculateDiscontinuousDistanceToSkinProcess
     * This method automatically does all the calls required to compute the discontinuous distance function.
     */
    void Execute() override;

    /**
     * @brief Calculate embedded variable from skin double specialization
     * This method calls the specialization method for two double variables
     * @param rVariable origin double variable in the skin mesh
     * @param rEmbeddedVariable elemental double variable in the volume mesh to be computed
     */
    void CalculateEmbeddedVariableFromSkin(
        const Variable<double> &rVariable,
        const Variable<double> &rEmbeddedVariable);

    /**
     * @brief Calculate embedded variable from skin array specialization
     * This method calls the specialization method for two array variables
     * @param rVariable origin array variable in the skin mesh
     * @param rEmbeddedVariable elemental array variable in the volume mesh to be computed
     */
    void CalculateEmbeddedVariableFromSkin(
        const Variable<array_1d<double,3>> &rVariable,
        const Variable<array_1d<double,3>> &rEmbeddedVariable);

    /**
     * @brief Obtain the default parameters to construct the class.
     */
    const Parameters GetDefaultParameters() const override;

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override;

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override;

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override;

    ///@}
protected:

    const Variable<Vector>* mpElementalDistancesVariable = &ELEMENTAL_DISTANCES;

    ///@name Protected Operations
    ///@{

    /**
     * @brief Set the Intersection Plane object
     * This method returns the plane that defines the element intersection. The 2D
     * case is considered to be a simplification of the 3D one, so a "fake" extra
     * point is created by extruding the first point in the z-direction.
     * @param rIntPtsVector array containing the intersecting points coordinates
     * @return Plane3D the plane defined by the given intersecting points coordinates
     */
    Plane3D SetIntersectionPlane(const std::vector<array_1d<double,3>> &rIntPtsVector);

    /**
     * @brief Calculates the domain characteristic length
     * This method computes the domain characteristic length as the norm of
     * the diagonal vector that joins the maximum and minimum coordinates
     * @return double the calculated characteristic length
     */
    double CalculateCharacteristicLength();

    ///@}
private:
    ///@name Member Variables
    ///@{

    ModelPart& mrSkinPart;
    ModelPart& mrVolumePart;

    Flags mOptions;

    static const std::size_t mNumNodes = TDim + 1;
    static const std::size_t mNumEdges = (TDim == 2) ?
3 : 6; const double mZeroToleranceMultiplier = 1e3; bool mDetectedZeroDistanceValues = false; bool mAreNeighboursComputed = false; bool mCalculateElementalEdgeDistances = false; bool mCalculateElementalEdgeDistancesExtrapolated = false; bool mUsePositiveEpsilonForZeroValues = true; const Variable<Vector>* mpElementalEdgeDistancesVariable = &ELEMENTAL_EDGE_DISTANCES; const Variable<Vector>* mpElementalEdgeDistancesExtrapolatedVariable = &ELEMENTAL_EDGE_DISTANCES_EXTRAPOLATED; const Variable<array_1d<double, 3>>* mpEmbeddedVelocityVariable = &EMBEDDED_VELOCITY; ///@} ///@name Private Operations ///@{ /** * @brief Computes the discontinuous distance in one element * This method computes the discontinuous distance field for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries */ void CalculateElementalDistances( Element& rElement1, PointerVector<GeometricalObject>& rIntersectedObjects); /** * @brief Computes the discontinuous edge-based distance in one element * This method computes the discontinuous edge-based distance field for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries */ void CalculateElementalAndEdgeDistances( Element& rElement1, PointerVector<GeometricalObject>& rIntersectedObjects); /** * @brief Computes the edges intersections in one element * Provided a list of elemental intersecting geometries, this * method computes the edge intersections for a given element * @param rElement1 reference to the element of interest * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the average intersection points of the extrapolated geometry * @param rIntersectionPointsArray array containing the edges intersection points * @return unsigned int number of cut edges */ unsigned int ComputeEdgesIntersections( Element& rElement1, const PointerVector<GeometricalObject>& rIntersectedObjects, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, array_1d<double,mNumEdges> &rCutEdgesRatioVector, array_1d<double,mNumEdges> &rCutExtraEdgesRatioVector, std::vector<array_1d <double,3> > &rIntersectionPointsArray); /** * @brief Computes the intersection of a single edge * This method computes the intersection of a given edge with the candidate * intersecting geometry. 
This operation is performed according to the working
     * space dimension using the intersection utilities implemented in intersection_utilities.h
     * @param rIntObjGeometry candidate intersecting geometry
     * @param rEdgePoint1 edge origin point
     * @param rEdgePoint2 edge end point
     * @param rIntersectionPoint intersection point
     * @return int type of intersection id (see intersection_utilities.h)
     */
    int ComputeEdgeIntersection(
        const Element::GeometryType& rIntObjGeometry,
        const Element::NodeType& rEdgePoint1,
        const Element::NodeType& rEdgePoint2,
        Point& rIntersectionPoint);

    /**
     * @brief Checks if rIntersectionPoint is already present in the
     * intersection point list in rIntersectionPointsVector for the tolerance rEdgeTolerance.
     * @param rIntersectionPoint reference to the intersection point
     * @param rIntersectionPointsVector reference to the list of already computed intersected points
     * @param rEdgeTolerance tolerance to compare two points and assess if they are equal
     * @return bool if rIntersectionPoint is present in rIntersectionPointsVector
     */
    bool CheckIfPointIsRepeated(
        const array_1d<double,3>& rIntersectionPoint,
        const std::vector<array_1d<double,3>>& rIntersectionPointsVector,
        const double& rEdgeTolerance);

    /**
     * @brief Computes the element intersection unit normal
     * This method computes the element intersection unit normal vector using the distance function gradient.
     * @param rGeometry reference to the geometry of the element of interest
     * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values
     * @param rNormal obtained unit normal vector
     */
    void ComputeIntersectionNormal(
        const Element::GeometryType& rGeometry,
        const Vector& rElementalDistances,
        array_1d<double,3> &rNormal);

    /**
     * @brief Computes the nodal distances to the intersection plane
     * This method creates a plane from the intersection points and then calculates the nodal distances
     * to the intersection plane.
     * In presence of multiple intersections, it performs a least squares approximation of the intersection plane.
     * @param rElement Element to calculate the ELEMENTAL_DISTANCES
     * @param rIntersectedObjects Intersected objects container
     * @param rIntersectionPointsCoordinates The edges intersection points coordinates
     */
    void ComputeIntersectionPlaneElementalDistances(
        Element& rElement,
        const PointerVector<GeometricalObject>& rIntersectedObjects,
        const std::vector<array_1d<double,3>>& rIntersectionPointsCoordinates);

    /**
     * @brief Computes the intersection plane approximation
     * For complex intersection patterns, this method takes a list containing
     * all the intersecting points and computes the plane that minimizes the
     * distance from all these points in a least squares sense. The approximated
     * plane is defined in terms of an origin point and its normal vector.
     * @param rElement1 reference to the element of interest
     * @param rPointsCoord list containing the coordinates of all the intersecting points
     * @param rPlaneBasePointCoords base point defining the approximated plane
     * @param rPlaneNormal normal vector defining the approximated plane
     */
    void ComputePlaneApproximation(
        const Element& rElement1,
        const std::vector< array_1d<double,3> >& rPointsCoord,
        array_1d<double,3>& rPlaneBasePointCoords,
        array_1d<double,3>& rPlaneNormal);

    /**
     * @brief Computes the elemental distances from the approximation
     * plane defined by the set of points in rPointVector.
* @param rElement reference to the element of interest * @param rElementalDistances reference to the elemental distances container to be filled * @param rPointVector reference to the vector containing the points that define the approximation plane */ void ComputeElementalDistancesFromPlaneApproximation( Element& rElement, Vector& rElementalDistances, const std::vector<array_1d<double,3>>& rPointVector); /** * @brief Checks and replaces the values of the ELEMENTAL_DISTANCES vector that are * zero. The values are replaced by an epsilon (whose sign depends on a flag) * that is a fixed factor of the double precision machine epsilon. Can be deactivated by a flag. * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values */ void ReplaceZeroDistances(Vector& rElementalDistances); /** * @brief Checks (and corrects if needed) the intersection normal orientation * This method checks the orientation of the previously computed intersection normal. * To do that, the normal vector to each one of the intersecting geometries is * computed and its direction is compared against the current one. If the negative * votes win, the current normal vector orientation is switched. * @param rGeometry element of interest geometry * @param rIntersectedObjects reference to the array containing the intersecting geometries of the element of interest * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values */ void CorrectDistanceOrientation( const Element::GeometryType& rGeometry, const PointerVector<GeometricalObject>& rIntersectedObjects, Vector& rElementalDistances); /** * @brief Computes the normal vector to an intersecting object geometry * This method computes the normal vector to an intersecting object geometry. * @param rGeometry reference to the geometry of the intersecting object * @param rIntObjNormal reference to the intersecting object normal vector */ void inline ComputeIntersectionNormalFromGeometry( const Element::GeometryType &rGeometry, array_1d<double,3> &rIntObjNormal); /** * @brief Checks if the element is incised and then computes the uncut edges intersections of the element * with an averaged and extrapolated geometry. To do so, it calls 'ComputeExtrapolatedGeometryIntersections'. * Note: for uncut or completely cut elements no ratios of the extrapolated geometry will be calculated. * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rNumCutEdges number of cut edges of the element (by the non-extrapolated geometry) * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rExtraGeomNormal array as normal vector of the averaged and extrapolated geometry * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry */ void ComputeExtrapolatedEdgesIntersectionsIfIncised( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, unsigned int &rNumCutEdges, array_1d<double,mNumEdges>& rCutEdgesRatioVector, array_1d<double,3> &rExtraGeomNormal, array_1d<double,mNumEdges>& rCutExtraEdgesRatioVector); /** * @brief Computes the uncut edges intersections of one element with an averaged and extrapolated geometry. * To do so, it calls 'IntersectionUtilities'.
* It saves the edge intersections as ratios of the edge's length in rCutExtraEdgesRatioVector. * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rNumCutEdges number of cut edges of the element * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @param rExtraGeomNormal normal of the averaged and extrapolated geometry * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry */ void ComputeExtrapolatedGeometryIntersections( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, unsigned int& rNumCutEdges, array_1d<double,mNumEdges>& rCutEdgesRatioVector, array_1d<double,3>& rExtraGeomNormal, array_1d<double,mNumEdges>& rCutExtraEdgesRatioVector); /** * @brief Converts edge ratios and edge ratios of the extrapolated geometry to elemental (node) distances * @param rElement reference to the element of interest * @param rIntersectedObjects reference to the array containing the intersecting geometries of the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * (ELEMENTAL_EDGE_DISTANCES) * @param rCutExtraEdgesRatioVector array that stores the relative positions from node zero of the additional * average intersection points of the extrapolated geometry (ELEMENTAL_EDGE_DISTANCES_EXTRAPOLATED) */ void ComputeElementalDistancesFromEdgeRatios( Element& rElement, const PointerVector<GeometricalObject>& rIntersectedObjects, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges> &rCutEdgesRatioVector, const array_1d<double,mNumEdges> &rCutExtraEdgesRatioVector); /** * @brief Computes the intersection points from the intersection ratios of the edges of the element of interest * @param rGeometry reference to geometry of the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rEdgeRatiosVector array containing the intersection ratios of an element's edges * @param rIntersectionPointsVector vector containing the intersection point arrays */ void ConvertRatiosToIntersectionPoints( const Element::GeometryType& rGeometry, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges> &rEdgeRatiosVector, std::vector<array_1d <double,3> > &rIntersectionPointsVector); /** * @brief Converts the average intersection point at an edge to its relative position (ratio) along the edge from node zero * @param rEdge reference to the edge of interest * @param rIntersectionPoint average intersection point at the edge * @return double calculated relative position of the intersection point along the edge from node zero */ double ConvertIntersectionPointToEdgeRatio( const Geometry<Node<3> >& rEdge, const array_1d<double,3>& rIntersectionPoint); /** * @brief Converts a relative position (ratio) along an edge to the corresponding average intersection point * @param rEdge reference to the edge of interest * @param rEdgeRatio relative position of the intersection point along the edge from node zero * @return rIntersectionPoint calculated average intersection point at the edge */ array_1d<double,3> ConvertEdgeRatioToIntersectionPoint( const Geometry<Node<3>
>& rEdge, const double& rEdgeRatio); /** * @brief Checks whether the edges of an element, which are cut, all share one node * @param rElement reference to the element of interest * @param rEdgesContainer reference to the array containing the edges of the element of interest * @param rCutEdgesRatioVector array that stores the relative positions from node zero of the average intersection points * @return boolean true if cut edges share one node */ bool CheckIfCutEdgesShareNode( const Element& rElement, const Element::GeometryType::GeometriesArrayType& rEdgesContainer, const array_1d<double,mNumEdges>& rCutEdgesRatioVector) const; /** * @brief Computes the value of any embedded variable * For a given array variable in the skin mesh, this method calculates the value * of such variable in the embedded mesh. This is done in each element of the volume * mesh by computing the average value of all the edges intersections. This value * is averaged again according to the number of intersected edges. * @tparam TVarType variable type * @param rVariable origin variable in the skin mesh * @param rEmbeddedVariable elemental variable in the volume mesh to be computed */ template<class TVarType> void CalculateEmbeddedVariableFromSkinSpecialization( const Variable<TVarType> &rVariable, const Variable<TVarType> &rEmbeddedVariable) { const auto &r_int_obj_vect = this->GetIntersections(); const int n_elems = mrVolumePart.NumberOfElements(); KRATOS_ERROR_IF((mrSkinPart.NodesBegin())->SolutionStepsDataHas(rVariable) == false) << "Skin model part solution step data missing variable: " << rVariable << std::endl; // Initialize embedded variable value VariableUtils().SetNonHistoricalVariableToZero(rEmbeddedVariable, mrVolumePart.Elements()); // Compute the embedded variable value for each element #pragma omp parallel for schedule(dynamic) for (int i_elem = 0; i_elem < n_elems; ++i_elem) { // Check if the current element has intersecting entities if (r_int_obj_vect[i_elem].size() != 0) { // Initialize the element values unsigned int n_int_edges = 0; auto it_elem = mrVolumePart.ElementsBegin() + i_elem; auto &r_geom = it_elem->GetGeometry(); const auto edges = r_geom.GenerateEdges(); // Loop the element of interest edges for (unsigned int i_edge = 0; i_edge < r_geom.EdgesNumber(); ++i_edge) { // Initialize edge values unsigned int n_int_obj = 0; TVarType i_edge_val = rEmbeddedVariable.Zero(); // Check the edge intersection against all the candidates for (auto &r_int_obj : r_int_obj_vect[i_elem]) { Point intersection_point; const int is_intersected = this->ComputeEdgeIntersection( r_int_obj.GetGeometry(), edges[i_edge][0], edges[i_edge][1], intersection_point); // Compute the variable value in the intersection point if (is_intersected == 1) { n_int_obj++; array_1d<double,3> local_coords; r_int_obj.GetGeometry().PointLocalCoordinates(local_coords, intersection_point); Vector int_obj_N; r_int_obj.GetGeometry().ShapeFunctionsValues(int_obj_N, local_coords); for (unsigned int i_node = 0; i_node < r_int_obj.GetGeometry().PointsNumber(); ++i_node) { i_edge_val += r_int_obj.GetGeometry()[i_node].FastGetSolutionStepValue(rVariable) * int_obj_N[i_node]; } } } // Check if the edge is intersected if (n_int_obj != 0) { // Update the element intersected edges counter n_int_edges++; // Add the average edge value (there might exist cases where // more than one geometry intersects the edge of interest).
it_elem->GetValue(rEmbeddedVariable) += i_edge_val / n_int_obj; } } // Average between all the intersected edges if (n_int_edges != 0) { it_elem->GetValue(rEmbeddedVariable) /= n_int_edges; } } } }; /** * @brief Set the TO_SPLIT Kratos flag * This function sets the TO_SPLIT flag in the provided element according to the ELEMENTAL_DISTANCES values * Note that the zero distance case is avoided by checking the positivity and negativity of the nodal values * @param rElement Element to set the TO_SPLIT flag * @param ZeroTolerance Tolerance to check the zero distance values */ void SetToSplitFlag( Element& rElement, const double ZeroTolerance); /** * @brief Checks the elemental edge distances if zero distance values * are detected. This ensures that the elements detected as incised and intersected * are consistent with the zero-correction applied by the process. */ void CheckAndCorrectEdgeDistances(); /** * @brief Creates the global pointer communicator that contains all neighbouring elements. In MPI, this * allows getting information from neighbouring elements that are not in the same partition. */ GlobalPointerCommunicator<Element>::Pointer CreatePointerCommunicator(); ///@} }; // Class CalculateDiscontinuousDistanceToSkinProcess ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >> ( std::istream& rIStream, CalculateDiscontinuousDistanceToSkinProcess<>& rThis); /// output stream function inline std::ostream& operator << ( std::ostream& rOStream, const CalculateDiscontinuousDistanceToSkinProcess<>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED defined
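// ---------------------------------------------------------------------------
// Illustrative usage sketch (appended note; not part of the header above).
// It shows how the process declared above is typically driven: construct it
// from a volume ModelPart and a skin ModelPart, call Execute(), then read the
// results back from the volume elements. The function name
// ComputeDiscontinuousDistances and the surrounding setup are hypothetical
// placeholders; CalculateDiscontinuousDistanceToSkinProcess, Execute(),
// TO_SPLIT and ELEMENTAL_DISTANCES come from the header and Kratos itself.
//
// void ComputeDiscontinuousDistances(
//     Kratos::ModelPart& rVolumeModelPart,
//     Kratos::ModelPart& rSkinModelPart)
// {
//     // 3D specialization of the process declared above.
//     Kratos::CalculateDiscontinuousDistanceToSkinProcess<3> dist_process(
//         rVolumeModelPart, rSkinModelPart);
//     dist_process.Execute();
//
//     // After execution, cut elements carry the TO_SPLIT flag and the
//     // ELEMENTAL_DISTANCES vector holds one signed distance per node.
//     for (auto& r_element : rVolumeModelPart.Elements()) {
//         if (r_element.Is(Kratos::TO_SPLIT)) {
//             const Kratos::Vector& r_distances =
//                 r_element.GetValue(Kratos::ELEMENTAL_DISTANCES);
//             // ... use the discontinuous distance values ...
//         }
//     }
// }
// ---------------------------------------------------------------------------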
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; // TODO SYCL Integration header approach relies on an assumption that kernel // lambda objects created by the host compiler and any of the device compilers // will be identical with respect to field types, order and offsets. Some verification // mechanism should be developed to enforce that. // TODO FIXME SYCL Support for SYCL in FE should be refactored: // - kernel identification and generation should be made a separate pass over // AST. RecursiveASTVisitor + VisitFunctionTemplateDecl + // FunctionTemplateDecl::getSpecializations() mechanism could be used for that. // - All SYCL stuff on Sema level should be encapsulated into a single Sema // field // - Move SYCL stuff into a separate header // Represents contents of a SYCL integration header file produced by a SYCL // device compiler and used by SYCL host compiler (via forced inclusion into // compiled SYCL source): // - SYCL kernel names // - SYCL kernel parameters and offsets of corresponding actual arguments class SYCLIntegrationHeader { public: // Kind of kernel's parameters as captured by the compiler in the // kernel lambda or function object enum kernel_param_kind_t { kind_first, kind_accessor = kind_first, kind_std_layout, kind_sampler, kind_pointer, kind_last = kind_pointer }; public: SYCLIntegrationHeader(DiagnosticsEngine &Diag); /// Emits contents of the header into given stream. void emit(raw_ostream &Out); /// Emits contents of the header into a file with given name. /// Returns true/false on success/failure. bool emit(const StringRef &MainSrc); /// Signals that subsequent parameter descriptor additions will go to /// the kernel with given name. Starts new kernel invocation descriptor. void startKernel(StringRef KernelName, QualType KernelNameType); /// Adds a kernel parameter descriptor to current kernel invocation /// descriptor. void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset); /// Signals that addition of parameter descriptors to current kernel /// invocation descriptor has finished. void endKernel(); private: // Kernel actual parameter descriptor. struct KernelParamDesc { // Represents a parameter kind. kernel_param_kind_t Kind = kind_last; // If Kind is kind_scalar or kind_struct, then // denotes parameter size in bytes (includes padding for structs) // If Kind is kind_accessor // denotes access target; possible access targets are defined in // access/access.hpp int Info = 0; // Offset of the captured parameter value in the lambda or function object. unsigned Offset = 0; KernelParamDesc() = default; }; // Kernel invocation descriptor struct KernelDesc { /// Kernel name. std::string Name; /// Kernel name type. QualType NameType; /// Descriptor of kernel actual parameters. SmallVector<KernelParamDesc, 8> Params; KernelDesc() = default; }; /// Returns the latest invocation descriptor started by /// SYCLIntegrationHeader::startKernel KernelDesc *getCurKernelDesc() { return KernelDescs.size() > 0 ?
&KernelDescs[KernelDescs.size() - 1] : nullptr; } /// Emits a forward declaration for the given declaration. void emitFwdDecl(raw_ostream &O, const Decl *D); /// Emits forward declarations of classes and template classes on which /// the declaration of the given type depends. See example in the comments for the /// implementation. /// \param O /// stream to emit to /// \param T /// type to emit forward declarations for /// \param Emitted /// a set of declarations for which forward declarations have already been emitted void emitForwardClassDecls(raw_ostream &O, QualType T, llvm::SmallPtrSetImpl<const void*> &Emitted); private: /// Keeps invocation descriptors for each kernel invocation started by /// SYCLIntegrationHeader::startKernel SmallVector<KernelDesc, 4> KernelDescs; /// Used for emitting diagnostics. DiagnosticsEngine &Diag; }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This makes it /// possible to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function: as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// for which it could not be proven whether they mismatch with the new-expression /// used in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before being declared. Rare. May alias another /// identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before being declared.
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to look up file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields.
This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. 
/// We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
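  // Illustrative sketch (not part of this file): a builder is obtained from
  // Sema::Diag and streamed into before it dies at the end of the
  // full-expression, at which point the destructor below dispatches to
  // Sema::EmitCurrentDiagnostic. The diagnostic ID and arguments here are
  // hypothetical:
  //
  //   S.Diag(Loc, diag::err_example) << SomeDecl << SomeSourceRange;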
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
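///
/// A minimal usage sketch (the variable names are illustrative; the
/// declarations follow below):
/// \code
///   PoppedFunctionScopePtr Scope = PopFunctionScopeInfo(&WP, FD);
///   // *Scope remains usable here until the smart pointer is destroyed.
/// \endcode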
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
/// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
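///
/// A sketch of a concrete diagnoser (the diagnostic ID is hypothetical;
/// most call sites use the BoundTypeDiagnoser below instead):
/// \code
///   struct MyDiagnoser final : TypeDiagnoser {
///     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
///       S.Diag(Loc, diag::err_example_incomplete) << T;
///     }
///   };
/// \endcode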
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }

template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if their
/// addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
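///
/// A sketch of a common pattern (illustrative only): check visibility first,
/// then fall back to the recovery path declared further below, e.g.
/// \code
///   if (!isVisible(ND))
///     diagnoseMissingImport(Loc, ND, MissingImportKind::Declaration);
/// \endcode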
bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
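///
/// A parser-side sketch (illustrative; \c Classification is a hypothetical
/// \c Sema::NameClassification result): callers typically switch on the kind:
/// \code
///   switch (Classification.getKind()) {
///   case Sema::NC_Type:       /* form a type-id */           break;
///   case Sema::NC_Expression: /* continue as an expression */ break;
///   default:                                                  break;
///   }
/// \endcode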
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. 
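///
/// Sketch of intended use (the diagnostic ID is hypothetical): the value is
/// typically streamed into a diagnostic so it can drive a %select:
/// \code
///   Diag(Loc, diag::err_example_template_kind)
///       << (int)getTemplateNameKindForDiagnostics(Name);
/// \endcode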
enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. 
NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. 
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                       QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has left a submodule.
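///
/// Begin/end notifications are expected to pair up as the preprocessor
/// enters and leaves submodules, e.g. (a sketch):
/// \code
///   ActOnModuleBegin(DirectiveLoc, Mod);
///   // ... act on the declarations seen inside Mod ...
///   ActOnModuleEnd(DirectiveLoc, Mod);
/// \endcode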
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
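///
/// A sketch of the intended use (the argument order streamed into the
/// diagnostic is illustrative):
/// \code
///   NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, TTK);
///   Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK << TTK;
/// \endcode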
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. 
/// Unlike in C++, we actually parse the body and reject or error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,

  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return the new attribute if one was added,
/// otherwise null.
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
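// A sketch of how the result below is typically consumed (variable names
// are illustrative):
//   NamedDecl *OldD = nullptr;
//   switch (CheckOverload(S, NewFD, Previous, OldD, /*IsForUsingDecl=*/false)) {
//   case Ovl_Overload:    /* NewFD joins the overload set */  break;
//   case Ovl_Match:       /* redeclaration of OldD */         break;
//   case Ovl_NonFunction: /* conflicts with a non-function */ break;
//   }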
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. 
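///
/// A sketch of a call site (variable names are illustrative):
/// \code
///   llvm::APSInt Value;
///   ExprResult Converted =
///       CheckConvertedConstantExpression(ArgE, ParamTy, Value,
///                                        CCEK_TemplateArg);
/// \endcode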
enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
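// Illustrative Objective-C literal forms for the kinds below (a sketch,
// not part of the original header):
//   @[ @1, @2 ]      // LK_Array
//   @{ @"k" : @1 }   // LK_Dictionary
//   @42              // LK_Numeric
//   @(expr)          // LK_Boxed
//   @"str"           // LK_String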
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType
*Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all templates and non-templates identified by // the expression E void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal.
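/// A hypothetical case (sketch, assuming Clang's enable_if attribute):
/// \code
///   void f(int n) __attribute__((enable_if(n > 0, "")));
///   auto *p = &f;  // the address of an enable_if function cannot be
///                  // taken; diagnosed here when Complain is true
/// \endcode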
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Lookup of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Lookup of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for the nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if a method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is the main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar /// which backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If a method is a property setter/getter and /// its property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns the ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjCPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and /// returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// Returns instance or factory methods in the global method pool for the /// given selector. It checks the desired kind first; if none is found and the /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, the function returns false; otherwise, /// it returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// Returns the method which best matches the given argument list, or /// nullptr if none could be found. ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaration for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the /// global pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// An RAII object to enter the scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops a function scope on exit.
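/// Hypothetical usage (a sketch; 'SemaRef' and the surrounding logic are
/// assumed for illustration):
/// \code
///   FunctionScopeRAII FuncScope(SemaRef);  // pops the scope on destruction
///   if (ScopeShouldOutliveThisBlock)
///     FuncScope.disable();                 // suppresses the pop
/// \endcode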
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS, SourceLocation DotDotDotLoc, ExprResult RHS, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); 
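// The @try / @catch / @finally callbacks around here correspond to the
// Objective-C exception-handling statement (an illustrative sketch):
//   @try { work(); }
//   @catch (NSException *e) { handle(e); }
//   @finally { cleanup(); }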
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null /// statement as its \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr.
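/// For instance (sketch):
/// \code
///   void *p = 0;  // implicit 0 -> nullptr conversion; may be diagnosed
///                 // under -Wzero-as-null-pointer-constant
/// \endcode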
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD. void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier).
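// A classic illustration (a sketch, not from the original header):
//   struct S { static const int N = 3; };
//   int a = S::N;          // not an odr-use: only the constant value is read
//   const int *p = &S::N;  // odr-use: a definition of S::N is required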
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
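/// A typical recovery (illustrative sketch, hypothetical names):
/// \code
///   if (obj.isEmpty)    // 'isEmpty' names a zero-argument member function
///   if (obj.isEmpty())  // the call this routine tries to recover towards
/// \endcode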
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
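/// For example (sketch): within an unevaluated operand such as
/// \code
///   sizeof(x)
/// \endcode
/// a reference to 'x' is not odr-used (NOUR_Unevaluated).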
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildUniqueStableName(SourceLocation OpLoc, TypeSourceInfo *Operand); ExprResult BuildUniqueStableName(SourceLocation OpLoc, Expr *E); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L, SourceLocation R, ParsedType Ty); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation L, SourceLocation R, Expr *Operand); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
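// Illustrative usage sketch (guarded out; not part of this header): chaining
// the primary-expression builders above while propagating ExprResult errors.
// `SemaRef`, `LLoc` and `RLoc` are assumed to come from the caller.
#if 0
static ExprResult buildParenthesizedInt(Sema &SemaRef, SourceLocation LLoc,
                                        SourceLocation RLoc, uint64_t Val) {
  ExprResult Lit = SemaRef.ActOnIntegerConstant(LLoc, Val);
  if (Lit.isInvalid())
    return ExprError(); // a diagnostic has already been emitted
  return SemaRef.ActOnParenExpr(LLoc, RLoc, Lit.get());
}
#endif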
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
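// Illustrative usage sketch (guarded out; not part of this header): building
// a sizeof expression over an existing operand with
// CreateUnaryExprOrTypeTraitExpr, declared above. UETT_SizeOf is the
// UnaryExprOrTypeTrait enumerator selecting sizeof.
#if 0
static ExprResult buildSizeofExpr(Sema &SemaRef, Expr *Operand,
                                  SourceLocation OpLoc) {
  // The overload taking an Expr* validates the operand itself (via the
  // CheckUnaryExprOrTypeTraitOperand helpers) before forming the expression.
  return SemaRef.CreateUnaryExprOrTypeTraitExpr(Operand, OpLoc, UETT_SizeOf);
}
#endif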
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr,
                         bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc,
                                 ArrayRef<Expr *> Arg,
                                 SourceLocation RParenLoc,
                                 Expr *Config = nullptr,
                                 bool IsExecConfig = false,
                                 ADLCallKind UsesADL = ADLCallKind::NotADL);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D,
                         ParsedType &Ty, SourceLocation RParenLoc,
                         Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty,
                               SourceLocation RParenLoc, Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                                SourceLocation RParenLoc, Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc,
                                      bool GNUSyntax, ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc, Expr *CondExpr,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
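// Illustrative usage sketch (guarded out; not part of this header): forming
// `LHS + RHS` through BuildBinOp, which applies the usual conversions and
// overload resolution before creating the expression. BO_Add is the
// BinaryOperatorKind enumerator for '+'.
#if 0
static ExprResult buildAddition(Sema &SemaRef, Scope *CurScope,
                                SourceLocation OpLoc, Expr *LHS, Expr *RHS) {
  return SemaRef.BuildBinOp(CurScope, OpLoc, BO_Add, LHS, RHS);
}
#endif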
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
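// Illustrative usage sketch (guarded out; not part of this header): filling
// in an OffsetOfComponent, defined above, for a single `.field` designator
// and handing it to BuildBuiltinOffsetOf. `TInfo` (the record type) and
// `FieldII` (the field's IdentifierInfo) are assumptions of the sketch.
#if 0
static ExprResult buildSimpleOffsetOf(Sema &SemaRef,
                                      SourceLocation BuiltinLoc,
                                      TypeSourceInfo *TInfo,
                                      IdentifierInfo *FieldII,
                                      SourceLocation FieldLoc,
                                      SourceLocation RParenLoc) {
  Sema::OffsetOfComponent Comp;
  Comp.LocStart = FieldLoc;
  Comp.LocEnd = FieldLoc;
  Comp.isBrackets = false;    // '.field', not '[expr]'
  Comp.U.IdentInfo = FieldII;
  // A single component converts implicitly to the ArrayRef parameter.
  return SemaRef.BuildBuiltinOffsetOf(BuiltinLoc, TInfo, Comp, RParenLoc);
}
#endif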
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                           SourceLocation BuiltinLoc,
                           SourceLocation RParenLoc);

//===---------------------------- C++ Features --------------------------===//

// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                             SourceLocation NamespaceLoc,
                             SourceLocation IdentLoc, IdentifierInfo *Ident,
                             SourceLocation LBrace,
                             const ParsedAttributesView &AttrList,
                             UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();

CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;

private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;

ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                       CXXScopeSpec &SS,
                                       ParsedType TemplateTypeTy,
                                       IdentifierInfo *MemberOrBase);

public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType, NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor,
                                 MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType,
                                 CXXConstructorDecl *Constructor,
                                 bool Elidable, MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType, NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor,
                                 bool Elidable, MultiExprArg Exprs,
                                 bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit,
                                 unsigned ConstructKind,
                                 SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. 
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
                              CXXDestructorDecl *Destructor);

/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);

/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
                                 CXXConstructorDecl *Constructor);

/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
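// Illustrative usage sketch (guarded out; not part of this header): the
// declare-then-define pattern for the implicit special members listed above.
// `SemaRef`, `ClassDecl` and `UseLoc` are assumed to be supplied by the
// caller.
#if 0
static void defineImplicitCopyCtor(Sema &SemaRef, CXXRecordDecl *ClassDecl,
                                   SourceLocation UseLoc) {
  CXXConstructorDecl *CopyCtor =
      SemaRef.DeclareImplicitCopyConstructor(ClassDecl);
  // Only ask for a definition if the declaration succeeded and the member
  // was not implicitly deleted.
  if (CopyCtor && !CopyCtor->isDeleted())
    SemaRef.DefineImplicitCopyConstructor(UseLoc, CopyCtor);
}
#endif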
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr, SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II,
                             SourceLocation NameLoc, Scope *S,
                             CXXScopeSpec &SS, ParsedType ObjectType,
                             bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                    bool IsDereference, SourceRange Range);

/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             SourceLocation LAngleBracketLoc, Declarator &D,
                             SourceLocation RAngleBracketLoc,
                             SourceLocation LParenLoc, Expr *E,
                             SourceLocation RParenLoc);

ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
                             TypeSourceInfo *Ty, Expr *E,
                             SourceRange AngleBrackets, SourceRange Parens);

ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
                                   ExprResult Operand,
                                   SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
                                   Expr *Operand, SourceLocation RParenLoc);

ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);

ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          TypeSourceInfo *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc,
                          Expr *Operand, SourceLocation RParenLoc);

/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
                          bool isType, void *TyOrExpr,
                          SourceLocation RParenLoc);
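// Illustrative usage sketch (guarded out; not part of this header): creating
// a static_cast through BuildCXXNamedCast, declared above. tok::kw_static_cast
// selects which of the four named casts to build; `DestTSI` describes the
// destination type.
#if 0
static ExprResult buildStaticCast(Sema &SemaRef, SourceLocation OpLoc,
                                  TypeSourceInfo *DestTSI, Expr *E,
                                  SourceRange AngleBrackets,
                                  SourceRange Parens) {
  return SemaRef.BuildCXXNamedCast(OpLoc, tok::kw_static_cast, DestTSI, E,
                                   AngleBrackets, Parens);
}
#endif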
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
///
/// \returns true if the capture failed, false if it succeeded.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
                         bool BuildAndDiagnose = true,
                         const unsigned *const FunctionScopeIndexToStopAt =
                             nullptr,
                         bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

ExprResult ActOnObjCAvailabilityCheckExpr(
    llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
    SourceLocation RParen);

/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

/// ActOnCXXThrow - Parse throw expressions.
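// Illustrative usage sketch (guarded out; not part of this header): checking
// that 'this' is usable before building a CXXThisExpr with the entry points
// above. `Loc` is assumed to be the use site.
#if 0
static Expr *buildImplicitThis(Sema &SemaRef, SourceLocation Loc) {
  QualType ThisTy = SemaRef.getCurrentThisType();
  // getCurrentThisType returns a null type when 'this' is unavailable, and
  // CheckCXXThisCapture returns true if the capture failed.
  if (ThisTy.isNull() || SemaRef.CheckCXXThisCapture(Loc))
    return nullptr;
  return SemaRef.BuildCXXThisExpr(Loc, ThisTy, /*IsImplicit=*/true);
}
#endif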
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
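// Illustrative usage sketch (guarded out; not part of this header): looking
// up a class-specific `operator delete` with FindDeallocationFunction,
// declared above, without emitting diagnostics. Forming the DeclarationName
// via Context.DeclarationNames is an assumption of this sketch, not something
// this header prescribes.
#if 0
static FunctionDecl *findOperatorDelete(Sema &SemaRef, CXXRecordDecl *RD,
                                        SourceLocation Loc) {
  // Assumed helper path for the name of 'operator delete'.
  DeclarationName Name =
      SemaRef.Context.DeclarationNames.getCXXOperatorName(OO_Delete);
  FunctionDecl *OperatorDelete = nullptr;
  SemaRef.FindDeallocationFunction(Loc, RD, Name, OperatorDelete,
                                   /*Diagnose=*/false);
  return OperatorDelete; // null if no suitable function was found
}
#endif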
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
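// Illustrative usage sketch (guarded out; not part of this header): packaging
// an `identifier::` into a NestedNameSpecInfo and extending a CXXScopeSpec
// with ActOnCXXNestedNameSpecifier, as documented above. `II`, `IdLoc` and
// `CCLoc` are assumed to come from the parser.
#if 0
static bool extendScopeSpec(Sema &SemaRef, Scope *CurScope,
                            IdentifierInfo *II, SourceLocation IdLoc,
                            SourceLocation CCLoc, CXXScopeSpec &SS) {
  Sema::NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);
  // Returns true if an error occurred; SS is updated in place on success.
  return SemaRef.ActOnCXXNestedNameSpecifier(CurScope, IdInfo,
                                             /*EnteringContext=*/false, SS);
}
#endif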
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier. CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void
ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
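///
/// For reference, the ill-formed pattern this detects (illustrative names):
/// \code
/// struct A { virtual void f() final; };
/// struct B : A { void f(); }; // error: 'f' overrides a 'final' function
/// \endcode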
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
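///
/// For reference, a deduction-guide declaration in which such a name
/// appears (a sketch; names are illustrative):
/// \code
/// template<typename T> struct Wrapper { Wrapper(T); };
/// template<typename T> Wrapper(T) -> Wrapper<T>; // 'Wrapper' is the guide name
/// \endcode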
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
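///
/// For reference, two of the contexts distinguished below (illustrative
/// mapping):
/// \code
/// template<typename T> struct S;                       // TPC_ClassTemplate
/// struct X { template<typename U> friend void f(U); }; // TPC_FriendFunctionTemplate
/// \endcode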
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
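///
/// For reference, a sketch of such a specifier in context (expanding the
/// example above):
/// \code
/// template<typename T> void g() {
///   class T::template apply<int> *p; // elaborated-type-specifier + template-id
/// }
/// \endcode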
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
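///
/// For reference, a sketch of the construct these parameters describe
/// (expanding the example above; names are illustrative):
/// \code
/// template<typename MetaFun, typename T1, typename T2>
/// using Result = typename MetaFun::template apply<T1, T2>;
/// \endcode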
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); // Concepts Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression. UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
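///
/// For reference, a nested-name-specifier containing an unexpanded
/// parameter pack (illustrative):
/// \code
/// template<typename ...Ts> struct S {
///   void f(typename Ts::type...); // the pattern 'Ts::type' contains the pack 'Ts'
/// };
/// \endcode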
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
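///
/// For reference, a call that fails with one such result (illustrative;
/// this maps to TDK_Inconsistent below):
/// \code
/// template<typename T> void h(T, T);
/// void use() { h(1, 2.0); } // T deduced as both 'int' and 'double'
/// \endcode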
enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
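///
/// For reference, a call that produces such records (illustrative; a
/// decomposed parameter arises when deducing from an initializer list):
/// \code
/// #include <initializer_list>
/// template<typename T> void k(std::initializer_list<T>);
/// void use() { k({1, 2, 3}); } // each element participates in deduction separately
/// \endcode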
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
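///
/// For reference, the kind of code whose class template argument deduction
/// relies on these guides (illustrative names):
/// \code
/// template<typename T> struct Pair { Pair(T, T); };
/// Pair p(1, 2); // deduces Pair<int> via the implicitly declared guides
/// \endcode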
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template arguments determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. /// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// List of active code synthesis contexts.
  /// List of active code synthesis contexts.
  ///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating
  /// a template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;
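  // Illustrative note (added commentary): with a pack Ts = {int, char}, an
  // ArgumentPackSubstitutionIndex of 0 substitutes 'int', an index of 1
  // substitutes 'char', and -1 leaves the pack to be instantiated as itself.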
  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
  class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self),
          OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionIndexRAII;
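  // Illustrative usage sketch (added commentary, not a quote from callers;
  // assumes a Sema instance SemaRef is in scope):
  //
  //   {
  //     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 1);
  //     // ... substitutions in here use the second element of each pack ...
  //   } // the previous index is restored on scope exit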
  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
      SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and evaluates to true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionTemplateDecl *FunctionTemplate,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          CodeSynthesisContext::SynthesisKind Kind,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template declaration.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ClassTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a variable template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          VarTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument for a function
    /// parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ParmVarDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting prior template arguments into a
    /// non-type parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are substituting prior template arguments into a
    /// template template parameter.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          NamedDecl *Template, TemplateTemplateParmDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we are checking the default template argument
    /// against the template parameter for a given template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template, NamedDecl *Param,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange);

    /// Note that we have finished instantiating this template.
    void Clear();

    ~InstantiatingTemplate() { Clear(); }

    /// Determines whether we have exceeded the maximum
    /// recursive template instantiations.
    bool isInvalid() const { return Invalid; }

    /// Determine whether we are already instantiating this
    /// specialization in some surrounding active instantiation.
    bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

  private:
    Sema &SemaRef;
    bool Invalid;
    bool AlreadyInstantiating;

    bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                                 SourceRange InstantiationRange);

    InstantiatingTemplate(
        Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
        SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
        Decl *Entity, NamedDecl *Template = nullptr,
        ArrayRef<TemplateArgument> TemplateArgs = None,
        sema::TemplateDeductionInfo *DeductionInfo = nullptr);

    InstantiatingTemplate(const InstantiatingTemplate&) = delete;
    InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
  };

  void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
  void popCodeSynthesisContext();

  /// Determine whether we are currently performing template instantiation.
  bool inTemplateInstantiation() const {
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();
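  // Illustrative usage sketch (added commentary) for InstantiatingTemplate
  // above: callers typically push an instantiation record and bail out when
  // the depth limit was exceeded or recursion is detected:
  //
  //   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
  //   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
  //     return true; // an error has already been produced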
  /// Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;
    bool PrevLastDiagnosticIgnored;

  public:
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
        : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
          PrevInNonInstantiationSFINAEContext(
              SemaRef.InNonInstantiationSFINAEContext),
          PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
          PrevLastDiagnosticIgnored(
              SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    ~SFINAETrap() {
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext =
          PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
      SemaRef.getDiagnostics().setLastDiagnosticIgnored(
          PrevLastDiagnosticIgnored);
    }

    /// Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;

  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;
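  // Illustrative usage sketch (added commentary) for the SFINAETrap above:
  //
  //   SFINAETrap Trap(*this);
  //   // ... perform substitution / deduction work ...
  //   if (Trap.hasErrorOccurred())
  //     /* treat as a deduction failure instead of emitting the errors */;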
  /// An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation),
  /// but it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };
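  // Illustrative usage sketch (added commentary) for the eager-instantiation
  // scopes above:
  //
  //   {
  //     GlobalEagerInstantiationScope Scope(*this, /*Enabled=*/true);
  //     // ... work that may queue implicit instantiations ...
  //     Scope.perform(); // define used vtables, flush the pending queue
  //   } // saved queues are swapped back on scope exit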
  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
    const FunctionProtoType::ExtParameterInfo *
    getPointerOrNull(unsigned numParams) {
      if (!HasInteresting) return nullptr;
      Infos.resize(numParams);
      return Infos.data();
    }
  };

  void PerformPendingInstantiations(bool LocalOnly = false);

  TypeSourceInfo *SubstType(TypeSourceInfo *T,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc, DeclarationName Entity,
                            bool AllowDeducedTST = false);

  QualType SubstType(QualType T,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     SourceLocation Loc, DeclarationName Entity);

  TypeSourceInfo *SubstType(TypeLoc TL,
                            const MultiLevelTemplateArgumentList &TemplateArgs,
                            SourceLocation Loc, DeclarationName Entity);

  TypeSourceInfo *SubstFunctionDeclType(
      TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
      SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
      Qualifiers ThisTypeQuals);

  void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                          const MultiLevelTemplateArgumentList &Args);

  bool SubstExceptionSpec(SourceLocation Loc,
                          FunctionProtoType::ExceptionSpecInfo &ESI,
                          SmallVectorImpl<QualType> &ExceptionStorage,
                          const MultiLevelTemplateArgumentList &Args);

  ParmVarDecl *
  SubstParmVarDecl(ParmVarDecl *D,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   int indexAdjustment, Optional<unsigned> NumExpansions,
                   bool ExpectParameterPack);

  bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                      const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                      const MultiLevelTemplateArgumentList &TemplateArgs,
                      SmallVectorImpl<QualType> &ParamTypes,
                      SmallVectorImpl<ParmVarDecl *> *OutParams,
                      ExtParameterInfoBuilder &ParamInfos);

  ExprResult SubstExpr(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs);
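  // Illustrative note (added commentary) on the Subst* entry points above:
  //
  //   template <typename T> void f(T *p);
  //   // With T = int deduced for a call f(&i), SubstType rewrites the
  //   // parameter type 'T *' into 'int *'.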
  /// Substitute the given template arguments into a list of
  /// expressions, expanding pack expansions if required.
  ///
  /// \param Exprs The list of expressions to substitute into.
  ///
  /// \param IsCall Whether this is some form of call, in which case
  /// default arguments will be dropped.
  ///
  /// \param TemplateArgs The set of template arguments to substitute.
  ///
  /// \param Outputs Will receive all of the substituted arguments.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                  const MultiLevelTemplateArgumentList &TemplateArgs,
                  SmallVectorImpl<Expr *> &Outputs);

  StmtResult SubstStmt(Stmt *S,
                       const MultiLevelTemplateArgumentList &TemplateArgs);

  TemplateParameterList *
  SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

  Decl *SubstDecl(Decl *D, DeclContext *Owner,
                  const MultiLevelTemplateArgumentList &TemplateArgs);

  ExprResult
  SubstInitializer(Expr *E,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   bool CXXDirectInit);

  bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                           CXXRecordDecl *Pattern,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

  bool InstantiateClass(SourceLocation PointOfInstantiation,
                        CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                        TemplateSpecializationKind TSK, bool Complain = true);

  bool InstantiateEnum(SourceLocation PointOfInstantiation,
                       EnumDecl *Instantiation, EnumDecl *Pattern,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateSpecializationKind TSK);

  bool InstantiateInClassInitializer(
      SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
      FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

  struct LateInstantiatedAttribute {
    const Attr *TmplAttr;
    LocalInstantiationScope *Scope;
    Decl *NewDecl;

    LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                              Decl *D)
        : TmplAttr(A), Scope(S), NewDecl(D) { }
  };
  typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

  void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);

  void InstantiateAttrsForDecl(
      const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern,
      Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr,
      LocalInstantiationScope *OuterMostScope = nullptr);

  bool usesPartialOrExplicitSpecialization(
      SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);

  bool InstantiateClassTemplateSpecialization(
      SourceLocation PointOfInstantiation,
      ClassTemplateSpecializationDecl *ClassTemplateSpec,
      TemplateSpecializationKind TSK, bool Complain = true);

  void
  InstantiateClassMembers(SourceLocation PointOfInstantiation,
                          CXXRecordDecl *Instantiation,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          TemplateSpecializationKind TSK);

  void InstantiateClassTemplateSpecializationMembers(
      SourceLocation PointOfInstantiation,
      ClassTemplateSpecializationDecl *ClassTemplateSpec,
      TemplateSpecializationKind TSK);

  NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(
      NestedNameSpecifierLoc NNS,
      const MultiLevelTemplateArgumentList &TemplateArgs);

  DeclarationNameInfo
  SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

  TemplateName
  SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                    SourceLocation Loc,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

  bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
             TemplateArgumentListInfo &Result,
             const MultiLevelTemplateArgumentList &TemplateArgs);

  void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                                FunctionDecl *Function);

  FunctionDecl *
  InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                 const TemplateArgumentList *Args,
                                 SourceLocation Loc);
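  // Illustrative note (added commentary) on pack expansion in the SubstExprs
  // entry point above:
  //
  //   template <typename... Ts> void g(Ts... ts) { h(ts...); }
  //   // Substituting Ts = {int, char} expands the single argument 'ts...'
  //   // into two substituted expressions in Outputs.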
  void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                     FunctionDecl *Function,
                                     bool Recursive = false,
                                     bool DefinitionRequired = false,
                                     bool AtEndOfTU = false);

  VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
      VarTemplateDecl *VarTemplate, VarDecl *FromVar,
      const TemplateArgumentList &TemplateArgList,
      const TemplateArgumentListInfo &TemplateArgsInfo,
      SmallVectorImpl<TemplateArgument> &Converted,
      SourceLocation PointOfInstantiation, void *InsertPos,
      LateInstantiatedAttrVec *LateAttrs = nullptr,
      LocalInstantiationScope *StartingScope = nullptr);

  VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
      VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
      const MultiLevelTemplateArgumentList &TemplateArgs);

  void BuildVariableInstantiation(
      VarDecl *NewVar, VarDecl *OldVar,
      const MultiLevelTemplateArgumentList &TemplateArgs,
      LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner,
      LocalInstantiationScope *StartingScope,
      bool InstantiatingVarTemplate = false,
      VarTemplateSpecializationDecl *PrevVTSD = nullptr);

  void InstantiateVariableInitializer(
      VarDecl *Var, VarDecl *OldVar,
      const MultiLevelTemplateArgumentList &TemplateArgs);

  void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                     VarDecl *Var, bool Recursive = false,
                                     bool DefinitionRequired = false,
                                     bool AtEndOfTU = false);

  void InstantiateMemInitializers(
      CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl,
      const MultiLevelTemplateArgumentList &TemplateArgs);

  NamedDecl *
  FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       bool FindingInstantiatedContext = false);

  DeclContext *
  FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

  // Objective-C declarations.
  enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
  };
  ObjCContainerKind getObjCContainerKind() const;

  DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                                SourceLocation varianceLoc, unsigned index,
                                IdentifierInfo *paramName,
                                SourceLocation paramLoc,
                                SourceLocation colonLoc, ParsedType typeBound);

  ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                            ArrayRef<Decl *> typeParams,
                                            SourceLocation rAngleLoc);

  void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

  Decl *ActOnStartClassInterface(
      Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *SuperName, SourceLocation SuperLoc,
      ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  void ActOnSuperClassOfClassInterface(
      Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl,
      IdentifierInfo *ClassName, SourceLocation ClassLoc,
      IdentifierInfo *SuperName, SourceLocation SuperLoc,
      ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange);

  void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                               SmallVectorImpl<SourceLocation> &ProtocolLocs,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc);

  Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                                IdentifierInfo *AliasName,
                                SourceLocation AliasLocation,
                                IdentifierInfo *ClassName,
                                SourceLocation ClassLocation);
  bool CheckForwardProtocolDeclarationForCircularDependency(
      IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
      const ObjCList<ObjCProtocolDecl> &PList);

  Decl *ActOnStartProtocolInterface(
      SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
      SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
      unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
      SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryInterface(
      SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                      IdentifierInfo *ClassName,
                                      SourceLocation ClassLoc,
                                      IdentifierInfo *SuperClassname,
                                      SourceLocation SuperClassLoc,
                                      const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc,
                                         const ParsedAttributesView &AttrList);

  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  DeclGroupPtrTy ActOnForwardClassDeclaration(
      SourceLocation Loc, IdentifierInfo **IdentList,
      SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists,
      unsigned NumElts);

  DeclGroupPtrTy
  ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                  ArrayRef<IdentifierLocPair> IdentList,
                                  const ParsedAttributesView &attrList);

  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               ArrayRef<IdentifierLocPair> ProtocolId,
                               SmallVectorImpl<Decl *> &Protocols);

  void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                    SourceLocation ProtocolLoc,
                                    IdentifierInfo *TypeArgId,
                                    SourceLocation TypeArgLoc,
                                    bool SelectProtocolFirst = false);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
      Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
      ArrayRef<IdentifierInfo *> identifiers,
      ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
      SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
      SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
      SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
      bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
      SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
      ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
      Scope *S, SourceLocation Loc, ParsedType BaseType,
      SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
      SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
      ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
      SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);
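  // Illustrative note (added commentary): the Objective-C types built by the
  // functions above include constructs such as
  //
  //   id<NSCopying> obj;            // protocol-qualified 'id'
  //   NSArray<NSString *> *strings; // type arguments plus protocol position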
  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Ensure attributes are consistent with type.
  ///
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
  void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);

  /// Process the specified property declaration and create decls for the
  /// setters and getters as needed.
  /// \param property The property declaration being processed.
  void ProcessPropertyDecl(ObjCPropertyDecl *property);

  void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                                ObjCPropertyDecl *SuperProperty,
                                const IdentifierInfo *Name,
                                bool OverridingProtocolProperty);

  void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                        ObjCInterfaceDecl *ID);

  Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                   ArrayRef<Decl *> allMethods = None,
                   ArrayRef<DeclGroupPtrTy> allTUVars = None);

  Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
                      FieldDeclarator &FD, ObjCDeclSpec &ODS,
                      Selector GetterSel, Selector SetterSel,
                      tok::ObjCKeywordKind MethodImplKind,
                      DeclContext *lexicalDC = nullptr);

  Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc,
                              SourceLocation PropertyLoc, bool ImplKind,
                              IdentifierInfo *PropertyId,
                              IdentifierInfo *PropertyIvar,
                              SourceLocation PropertyIvarLoc,
                              ObjCPropertyQueryKind QueryKind);

  enum ObjCSpecialMethodKind {
    OSMK_None,
    OSMK_Alloc,
    OSMK_New,
    OSMK_Copy,
    OSMK_RetainingInit,
    OSMK_NonRetainingInit
  };

  struct ObjCArgInfo {
    IdentifierInfo *Name;
    SourceLocation NameLoc;
    // The Type is null if no type was specified, and the DeclSpec is invalid
    // in this case.
    ParsedType Type;
    ObjCDeclSpec DeclSpec;

    /// ArgAttrs - Attribute list for this argument.
    ParsedAttributesView ArgAttrs;
  };

  Decl *ActOnMethodDeclaration(
      Scope *S,
      SourceLocation BeginLoc, // location of the + or -.
      SourceLocation EndLoc,   // location of the ; or {.
      tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
      ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
      // optional arguments. The number of types/arguments is obtained
      // from the Sel.getNumArgs().
      ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
      unsigned CNumArgs, // c-style args
      const ParsedAttributesView &AttrList,
      tok::ObjCKeywordKind MethodImplKind, bool isVariadic,
      bool MethodDefinition);

  ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                              const ObjCObjectPointerType *OPT,
                                              bool IsInstance);
  ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                           bool IsInstance);

  bool CheckARCMethodDecl(ObjCMethodDecl *method);
  bool inferObjCARCLifetime(ValueDecl *decl);

  ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                       Expr *BaseExpr, SourceLocation OpLoc,
                                       DeclarationName MemberName,
                                       SourceLocation MemberLoc,
                                       SourceLocation SuperLoc,
                                       QualType SuperType, bool Super);

  ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                       IdentifierInfo &propertyName,
                                       SourceLocation receiverNameLoc,
                                       SourceLocation propertyNameLoc);

  ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
  /// Describes the kind of message expression indicated by a message
  /// send that starts with an identifier.
  enum ObjCMessageKind {
    /// The message is sent to 'super'.
    ObjCSuperMessage,
    /// The message is an instance message.
    ObjCInstanceMessage,
    /// The message is a class message, and the identifier is a type
    /// name.
    ObjCClassMessage
  };

  ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
                                     SourceLocation NameLoc, bool IsSuper,
                                     bool HasTrailingDot,
                                     ParsedType &ReceiverType);

  ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc, MultiExprArg Args);

  ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                               QualType ReceiverType, SourceLocation SuperLoc,
                               Selector Sel, ObjCMethodDecl *Method,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc, MultiExprArg Args,
                               bool isImplicit = false);

  ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                       bool isSuperReceiver,
                                       SourceLocation Loc, Selector Sel,
                                       ObjCMethodDecl *Method,
                                       MultiExprArg Args);

  ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc, MultiExprArg Args);

  ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
                                  SourceLocation SuperLoc, Selector Sel,
                                  ObjCMethodDecl *Method,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc, MultiExprArg Args,
                                  bool isImplicit = false);

  ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                          QualType ReceiverType,
                                          SourceLocation Loc, Selector Sel,
                                          ObjCMethodDecl *Method,
                                          MultiExprArg Args);

  ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc, MultiExprArg Args);

  ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  TypeSourceInfo *TSInfo, Expr *SubExpr);

  ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  ParsedType Type, SourceLocation RParenLoc,
                                  Expr *SubExpr);

  void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);

  void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

  bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                     CastKind &Kind);

  bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType,
                                        QualType SrcType,
                                        ObjCInterfaceDecl *&RelatedClass,
                                        ObjCMethodDecl *&ClassMethod,
                                        ObjCMethodDecl *&InstanceMethod,
                                        TypedefNameDecl *&TDNDecl, bool CfToNs,
                                        bool Diagnose = true);

  bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType,
                                         QualType SrcType, Expr *&SrcExpr,
                                         bool Diagnose = true);

  bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
                                          bool Diagnose = true);

  bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

  /// Check whether the given new method is a valid override of the
  /// given overridden method, and set any properties that should be inherited.
  void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                               const ObjCMethodDecl *Overridden);
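  // Illustrative note (added commentary): the three ObjCMessageKind forms
  // distinguished above correspond to message sends such as
  //
  //   [super init];       // ObjCSuperMessage
  //   [anObject copy];    // ObjCInstanceMessage
  //   [NSString string];  // ObjCClassMessage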
  /// Describes the compatibility of a result type with its method.
  enum ResultTypeCompatibilityKind {
    RTC_Compatible,
    RTC_Incompatible,
    RTC_Unknown
  };

  void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                                ObjCInterfaceDecl *CurrentClass,
                                ResultTypeCompatibilityKind RTC);

  enum PragmaOptionsAlignKind {
    POAK_Native,  // #pragma options align=native
    POAK_Natural, // #pragma options align=natural
    POAK_Packed,  // #pragma options align=packed
    POAK_Power,   // #pragma options align=power
    POAK_Mac68k,  // #pragma options align=mac68k
    POAK_Reset    // #pragma options align=reset
  };

  /// ActOnPragmaClangSection - Called on well formed \#pragma clang section
  void ActOnPragmaClangSection(SourceLocation PragmaLoc,
                               PragmaClangSectionAction Action,
                               PragmaClangSectionKind SecKind,
                               StringRef SecName);

  /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
  void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                               SourceLocation PragmaLoc);

  /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
  void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                       StringRef SlotLabel, Expr *Alignment);

  enum class PragmaPackDiagnoseKind {
    NonDefaultStateAtInclude,
    ChangedStateAtExit
  };

  void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                    SourceLocation IncludeLoc);
  void DiagnoseUnterminatedPragmaPack();

  /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
  void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

  /// ActOnPragmaMSComment - Called on well formed
  /// \#pragma comment(kind, "arg").
  void ActOnPragmaMSComment(SourceLocation CommentLoc,
                            PragmaMSCommentKind Kind, StringRef Arg);

  /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
  /// pointers_to_members(representation method[, general purpose
  /// representation]).
  void ActOnPragmaMSPointersToMembers(
      LangOptions::PragmaMSPointersToMembersKind Kind,
      SourceLocation PragmaLoc);

  /// Called on well formed \#pragma vtordisp().
  void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                             SourceLocation PragmaLoc,
                             MSVtorDispAttr::Mode Value);

  enum PragmaSectionKind {
    PSK_DataSeg,
    PSK_BSSSeg,
    PSK_ConstSeg,
    PSK_CodeSeg,
  };

  bool UnifySection(StringRef SectionName, int SectionFlags,
                    DeclaratorDecl *TheDecl);
  bool UnifySection(StringRef SectionName, int SectionFlags,
                    SourceLocation PragmaSectionLocation);

  /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
  void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                        PragmaMsStackAction Action,
                        llvm::StringRef StackSlotLabel,
                        StringLiteral *SegmentName,
                        llvm::StringRef PragmaName);

  /// Called on well formed \#pragma section().
  void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                            StringLiteral *SegmentName);

  /// Called on well-formed \#pragma init_seg().
  void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                            StringLiteral *SegmentName);

  /// Called on #pragma clang __debug dump II
  void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

  /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
  void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                                 StringRef Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                         SourceLocation PragmaLoc);
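  // Illustrative note (added commentary): a typical pragma sequence handled
  // by ActOnPragmaPack above:
  //
  //   #pragma pack(push, 4)
  //   struct Packed { char c; int i; };
  //   #pragma pack(pop)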
  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

  /// ActOnPragmaFEnvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
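  // Illustrative note (added commentary): the weak-symbol pragmas handled
  // above look like
  //
  //   #pragma weak func              // ActOnPragmaWeakID
  //   #pragma weak alias = target    // ActOnPragmaWeakAlias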
  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  template <typename AttrType>
  bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
                                   ExprResult &Result);

  template <typename AttrType>
  void AddOneConstantValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                               unsigned SpellingListIndex);

  template <typename AttrType>
  void AddOneConstantPowerTwoValueAttr(SourceRange AttrRange, Decl *D,
                                       Expr *E, unsigned SpellingListIndex);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                            unsigned SpellingListIndex);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                         unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                         unsigned SpellingListIndex);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                           Expr *MinBlocks, unsigned SpellingListIndex);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                   unsigned SpellingListIndex, bool InInstantiation = false);

  void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                           unsigned SpellingListIndex);

  enum class RetainOwnershipKind { NS, CF, OS };
  void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
                                      Expr *Min, Expr *Max,
                                      unsigned SpellingListIndex);
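  // Illustrative note (added commentary): attributes added through these
  // entry points include
  //
  //   int buf[4] __attribute__((aligned(16)));             // AddAlignedAttr
  //   void *make(void) __attribute__((assume_aligned(64))); // AddAssumeAlignedAttr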
  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.
  void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                               Expr *Max, unsigned SpellingListIndex);

  bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines TS
  //
  bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                               StringRef Keyword);
  ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

  ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      bool IsImplicit = false);
  ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                        UnresolvedLookupExpr* Lookup);
  ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
  StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                               bool IsImplicit = false);
  StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
  bool buildCoroutineParameterMoves(SourceLocation Loc);
  VarDecl *buildCoroutinePromise(SourceLocation Loc);
  void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
  ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                           SourceLocation FuncLoc);

  //===--------------------------------------------------------------------===//
  // OpenCL extensions.
  //
private:
  std::string CurrOpenCLExtension;

  /// Extensions required by an OpenCL type.
  llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;

  /// Extensions required by an OpenCL declaration.
  llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;

public:
  llvm::StringRef getCurrentOpenCLExtension() const {
    return CurrOpenCLExtension;
  }

  /// Check if a function declaration \p FD is associated with any
  /// extensions present in OpenCLDeclExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

  /// Check if a function type \p FT is associated with any
  /// extensions present in OpenCLTypeExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

  /// Find an extension in an appropriate extension map and return its name.
  template<typename T, typename MapT>
  std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

  void setCurrentOpenCLExtension(llvm::StringRef Ext) {
    CurrOpenCLExtension = Ext;
  }

  /// Set OpenCL extensions for a type which can only be used when these
  /// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

  /// Set OpenCL extensions for a declaration which can only be
  /// used when these OpenCL extensions are enabled. If \p Exts is empty, do
  /// nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

  /// Set current OpenCL extensions for a type which can only be used
  /// when these OpenCL extensions are enabled. If current OpenCL extension is
  /// empty, do nothing.
  void setCurrentOpenCLExtensionForType(QualType T);

  /// Set current OpenCL extensions for a declaration which
  /// can only be used when these OpenCL extensions are enabled. If current
  /// OpenCL extension is empty, do nothing.
  void setCurrentOpenCLExtensionForDecl(Decl *FD);

  bool isOpenCLDisabledDecl(Decl *FD);
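  // Illustrative note (added commentary): OpenCL code guarded by these
  // extension maps looks like
  //
  //   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
  //   double d; // 'double' is permitted only while cl_khr_fp64 is enabled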
  /// Check if type \p T corresponding to declaration specifier \p DS
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

  /// Check if declaration \p D used by expression \p E
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

  //===--------------------------------------------------------------------===//
  // OpenMP directives and clauses.
  //
private:
  void *VarDataSharingAttributesStack;

  /// Number of nested '#pragma omp declare target' directives.
  unsigned DeclareTargetNestingLevel = 0;

  /// Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();

  ExprResult
  VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
                                        bool StrictlyPositive = true);

  /// Returns OpenMP nesting level for current directive.
  unsigned getOpenMPNestingLevel() const;

  /// Adjusts the function scopes index for the target-based regions.
  void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                    unsigned Level) const;

  /// Push new OpenMP function region for non-capturing function.
  void pushOpenMPFunctionRegion();

  /// Pop OpenMP function region for non-capturing function.
  void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);

  /// Check if the expression is allowed to be used in expressions for the
  /// OpenMP devices.
  void checkOpenMPDeviceExpr(const Expr *E);

  /// Checks if a type or a declaration is disabled due to the owning extension
  /// being disabled, and emits diagnostic messages if it is disabled.
  /// \param D type or declaration to be checked.
  /// \param DiagLoc source location for the diagnostic message.
  /// \param DiagInfo information to be emitted for the diagnostic message.
  /// \param SrcRange source range of the declaration.
  /// \param Map maps type or declaration to the extensions.
  /// \param Selector selects diagnostic message: 0 for type and 1 for
  /// declaration.
  /// \return true if the type or declaration is disabled.
  template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
  bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                     MapT &Map, unsigned Selector = 0,
                                     SourceRange SrcRange = SourceRange());

public:
  /// Function tries to capture lambda's captured variables in the OpenMP
  /// region before the original lambda is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a D should be captured by
  /// reference.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;

  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// If the current region is a loop-based region, mark the start of the loop
  /// construct.
  void startOpenMPLoop();
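  // Illustrative note (added commentary): a variable captured into an OpenMP
  // region; isOpenMPCapturedByRef above decides how 'a' is passed:
  //
  //   int a = 0;
  //   #pragma omp parallel shared(a)
  //   { a += 1; }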
  /// Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
  /// for \p FD based on DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);

  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);

  /// End analysis of clauses.
  void EndOpenMPClause();

  /// Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);

  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);

  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);

  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);

  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy
  ActOnOpenMPRequiresDirective(SourceLocation Loc,
                               ArrayRef<OMPClause *> ClauseList);

  /// Check restrictions on the Requires directive.
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);

  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);

  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);

  /// Initialize declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);

  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);
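  // Illustrative note (added commentary): the construct parsed by the
  // declare-reduction callbacks above:
  //
  //   #pragma omp declare reduction(mymax : int :                         \
  //       omp_out = omp_out > omp_in ? omp_out : omp_in)                  \
  //       initializer(omp_priv = INT_MIN)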
  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);

  /// Check if the specified type is allowed to be used in 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);

  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);

  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);

  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);

  /// Called on the start of target region i.e. '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);

  /// Called at the end of target region i.e.
  /// '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();

  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                    const DeclarationNameInfo &Id,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    NamedDeclSetType &SameDirectiveDecls);

  /// Check declaration inside target region.
  void
  checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                   SourceLocation IdLoc = SourceLocation());

  /// Return true inside OpenMP declare target region.
  bool isInOpenMPDeclareTargetContext() const {
    return DeclareTargetNestingLevel > 0;
  }

  /// Return true inside OpenMP target region.
  bool isInOpenMPTargetExecutionDirective() const;

  /// Return the number of captured regions created for an OpenMP directive.
  static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

  /// Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

  /// End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S,
                                  ArrayRef<OMPClause *> Clauses);

  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  using VarsWithInheritedDSAType =
      llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;

  /// Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                           SourceLocation StartLoc, SourceLocation EndLoc,
                           VarsWithInheritedDSAType &VarsWithImplicitDSA);
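  // Illustrative note (added commentary): the declare-target region handled
  // earlier in this block:
  //
  //   #pragma omp declare target
  //   int device_fn(int);
  //   #pragma omp end declare target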
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. 
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);
  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
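  // Illustrative mapping from clause syntax to the callbacks above (a sketch,
  // not normative): for a directive like
  //
  //   #pragma omp for schedule(dynamic, 4) nowait
  //
  // the parser calls ActOnOpenMPScheduleClause for 'schedule(dynamic, 4)' and
  // ActOnOpenMPNowaitClause for 'nowait', and the resulting OMPClause objects
  // are collected into the Clauses list handed to the directive callback.
  /// Called on well-formed 'dynamic_allocators' clause.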
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'atomic_default_mem_order' clause. OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. 
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. CCK_FunctionalCast, /// A cast other than a C-style cast. CCK_OtherCast, /// A conversion for an operand of a builtin overloaded operator. 
    CCK_ForBuiltinOverloadedOp
  };
  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }
  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// The result of the cast has the value kind \p VK.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                   = CCK_ImplicitConversion);
  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);
  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);
  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);
  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);
  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);
  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);
  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);
  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };
  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);
  // Used for determining in which context a type is allowed to be passed to
  // a vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };
  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);
  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);
  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);
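  // Worked example for DefaultArgumentPromotion (the snippet is illustrative
  // caller code, not part of this header): given 'void f(int, ...);' and
  // 'float g; char c;', the call 'f(1, g, c)' promotes 'g' to double and 'c'
  // to int before the call, per C99 6.5.2.2p6/p7.
  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.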
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);
  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);
  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);
  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,
    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,
    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,
    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,
    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,
    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,
    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,
    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,
    /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
    /// changes address spaces in nested pointer types which is not allowed.
    /// For instance, converting __private int ** to __generic int ** is
    /// illegal even though __private could be converted to __generic.
    IncompatibleNestedPointerAddressSpaceMismatch,
    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,
    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,
    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,
    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,
    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For
    /// example, "id <XXX>" = "Foo *", where "Foo *" doesn't implement the
    /// XXX protocol.
    IncompatibleObjCQualifiedId,
    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,
    /// Incompatible - We reject this conversion outright; it is invalid to
    /// represent it in the AST.
    Incompatible
  };
  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);
  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;
  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);
  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);
  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS, CastKind &Kind,
                                               bool ConvertRHS = true);
  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  ///        for assignability. If a diagnostic is produced, \p RHS will be
  ///        set to ExprError(). Note that this function may still return
  ///        without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  ///        in an audited Core Foundation API and does not need to be checked
  ///        for ARC retain issues.
  /// \param ConvertRHS If \c true, \p RHS will be updated to model the
  ///        conversions necessary to perform the assignment. If \c false,
  ///        \p Diagnose must also be \c false.
  AssignConvertType CheckSingleAssignmentConstraints(
      QualType LHSType, ExprResult &RHS, bool Diagnose = true,
      bool DiagnoseCFAudited = false, bool ConvertRHS = true);
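  // Illustrative instances of the extension cases above: given
  // 'int *ip; unsigned int *uip; int i;', the assignment 'ip = uip;' is
  // classified as IncompatiblePointerSign and 'i = ip;' as PointerToInt;
  // both are diagnosed as warnings rather than rejected outright.
  // If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.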
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);
  /// Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for
  // this. We allow casting between vectors and integer datatypes of the same
  // size, or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);
  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);
  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
  /// Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);
  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);
  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);
  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);
  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                                 MultiExprArg Args, Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);
  /// Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(const Expr *Receiver,
                                    QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);
  /// If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);
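  // Illustrative instance of the vector cast rule above: with
  // 'typedef int v4i __attribute__((vector_size(16)));', a cast between
  // 'v4i' and a 128-bit integer type such as '__int128' is accepted (same
  // size), while a cast to plain 'int' is rejected by CheckVectorCast.
  /// Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.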
  void EmitRelatedResultTypeNoteForReturn(QualType destType);
  class ConditionResult {
    Decl *ConditionVar;
    FullExprArg Condition;
    bool Invalid;
    bool HasKnownValue;
    bool KnownValue;
    friend class Sema;
    ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                    bool IsConstexpr)
        : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
          HasKnownValue(IsConstexpr && Condition.get() &&
                        !Condition.get()->isValueDependent()),
          KnownValue(HasKnownValue &&
                     !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
    explicit ConditionResult(bool Invalid)
        : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
          HasKnownValue(false), KnownValue(false) {}
  public:
    ConditionResult() : ConditionResult(false) {}
    bool isInvalid() const { return Invalid; }
    std::pair<VarDecl *, Expr *> get() const {
      return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                            Condition.get());
    }
    llvm::Optional<bool> getKnownValue() const {
      if (!HasKnownValue)
        return None;
      return KnownValue;
    }
  };
  static ConditionResult ConditionError() { return ConditionResult(true); }
  enum class ConditionKind {
    Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
    ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
    Switch       ///< An integral condition for a 'switch' statement.
  };
  ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                                 ConditionKind CK);
  ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                         SourceLocation StmtLoc,
                                         ConditionKind CK);
  DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
  ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                    SourceLocation StmtLoc, ConditionKind CK);
  ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
  /// CheckBooleanCondition - Diagnose problems involving the use of
  /// the given expression as a boolean condition (e.g. in an if
  /// statement). Also performs the standard function and array
  /// decays, possibly changing the input variable.
  ///
  /// \param Loc - A location associated with the condition, e.g. the
  /// 'if' keyword.
  /// \return The checked and converted condition expression, or an error.
  ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                   bool IsConstexpr = false);
  /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an
  /// expression found in an explicit(bool) specifier.
  ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
  /// Returns true if the explicit specifier is now resolved.
  bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
  /// DiagnoseAssignmentAsCondition - Given that an expression is
  /// being used as a boolean condition, warn if it's an assignment.
  void DiagnoseAssignmentAsCondition(Expr *E);
  /// Redundant parentheses over an equality comparison can indicate
  /// that the user intended an assignment used as condition.
  void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
  /// CheckCXXBooleanCondition - Returns an error result if conversion to
  /// bool is invalid.
  ExprResult CheckCXXBooleanCondition(Expr *CondExpr,
                                      bool IsConstexpr = false);
  /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
  /// the specified width and sign. If an overflow occurs, detect it and emit
  /// the specified diagnostic.
  void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                          unsigned NewWidth, bool NewSign,
                                          SourceLocation Loc, unsigned DiagID);
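  // Sketch of how a condition flows through these helpers (illustrative):
  // for 'if (p)' where 'p' has type 'int *', the parser calls ActOnCondition
  // with ConditionKind::Boolean; CheckBooleanCondition then performs the
  // lvalue-to-rvalue conversion and the pointer-to-bool conversion, and the
  // resulting ConditionResult carries the converted expression.
  /// Checks that the Objective-C declaration is declared in the global scope.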
  /// Emits an error and marks the declaration as invalid if it's not declared
  /// in the global scope.
  bool CheckObjCDeclScope(Decl *D);
  /// Abstract base class used for diagnosing integer constant
  /// expression violations.
  class VerifyICEDiagnoser {
  public:
    bool Suppress;
    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) {}
    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                                SourceRange SR) = 0;
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() {}
  };
  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns the converted
  /// expression on success, or an error result otherwise.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);
  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns the bit-width expression on success, or an error result
  /// otherwise.
  /// Can optionally return whether the bit-field is of width 0.
  ExprResult VerifyBitField(SourceLocation FieldLoc,
                            IdentifierInfo *FieldName, QualType FieldTy,
                            bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();
  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();
  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      DeviceDeferredDiags;
  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as
  /// the key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };
  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting
  /// the same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      DeviceKnownEmittedFns;
  /// A partial call graph maintained during CUDA/OpenMP device code
  /// compilation to support deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they
  /// are not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from
  /// this set and add those functions to DeviceKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<
                     CanonicalDeclPtr<FunctionDecl>, SourceLocation>>
      DeviceCallGraph;
  /// Diagnostic builder for CUDA/OpenMP device errors which may or may not
  /// be deferred.
  ///
  /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
  /// which are not allowed to appear inside __device__ functions and are
  /// allowed to appear in __host__ __device__ functions only if the
  /// host+device function is never codegen'ed.
  ///
  /// To handle this, we use the notion of "deferred diagnostics", where we
  /// attach a diagnostic to a FunctionDecl that's emitted iff it's
  /// codegen'ed.
  ///
  /// This class lets you emit either a regular diagnostic, a deferred
  /// diagnostic, or no diagnostic at all, according to an argument you pass
  /// to its constructor, thus simplifying the process of creating these
  /// "maybe deferred" diagnostics.
  class DeviceDiagBuilder {
  public:
    enum Kind {
      /// Emit no diagnostics.
      K_Nop,
      /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
      K_Immediate,
      /// Emit the diagnostic immediately, and, if it's a warning or error,
      /// also emit a call stack showing how this function can be reached by
      /// an a priori known-emitted function.
      K_ImmediateWithCallStack,
      /// Create a deferred diagnostic, which is emitted only if the function
      /// it's attached to is codegen'ed. Also emit a call stack as with
      /// K_ImmediateWithCallStack.
      K_Deferred
    };
    DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                      FunctionDecl *Fn, Sema &S);
    DeviceDiagBuilder(DeviceDiagBuilder &&D);
    DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
    ~DeviceDiagBuilder();
    /// Convertible to bool: True if we immediately emitted an error, false if
    /// we didn't emit an error or we created a deferred error.
    ///
    /// Example usage:
    ///
    ///   if (DeviceDiagBuilder(...) << foo << bar)
    ///     return ExprError();
    ///
    /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you
    /// probably want to use these instead of creating a DeviceDiagBuilder
    /// yourself.
    operator bool() const { return ImmediateDiag.hasValue(); }
    template <typename T>
    friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                               const T &Value) {
      if (Diag.ImmediateDiag.hasValue())
        *Diag.ImmediateDiag << Value;
      else if (Diag.PartialDiagId.hasValue())
        Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
            << Value;
      return Diag;
    }
  private:
    Sema &S;
    SourceLocation Loc;
    unsigned DiagID;
    FunctionDecl *Fn;
    bool ShowCallStack;
    // Invariant: At most one of these Optionals has a value.
    // FIXME: Switch these to a Variant once that exists.
    llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
    llvm::Optional<unsigned> PartialDiagId;
  };
  /// Indicate that this function (and thus everything it transitively calls)
  /// will be codegen'ed, and emit any deferred diagnostics on this function
  /// and its (transitive) callees.
  void markKnownEmitted(
      Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
      SourceLocation OrigLoc,
      const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
  /// Creates a DeviceDiagBuilder that emits the diagnostic if the current
  /// context is "used as device code".
  ///
  /// - If CurContext is a __host__ function, does not emit any diagnostics.
  /// - If CurContext is a __device__ or __global__ function, emits the
  ///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. 
bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with the highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits an appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckIntelFPGABuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains a 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container that /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we treat the comma as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements.
If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; private: // We store SYCL Kernels here and handle separately -- which is a hack. // FIXME: It would be best to refactor this. SmallVector<Decl*, 4> SyclDeviceDecls; // SYCL integration header instance for current compilation unit this Sema // is associated with. std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader; public: void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.push_back(d); } SmallVectorImpl<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; } /// Lazily creates and returns SYCL integration header instance. SYCLIntegrationHeader &getSyclIntegrationHeader() { if (SyclIntHeader == nullptr) SyclIntHeader = llvm::make_unique<SYCLIntegrationHeader>( getDiagnostics()); return *SyclIntHeader.get(); } enum SYCLRestrictKind { KernelGlobalVariable, KernelRTTI, KernelNonConstStaticDataVariable, KernelCallVirtualFunction, KernelUseExceptions, KernelCallRecursiveFunction, KernelCallFunctionPointer, KernelAllocateStorage, KernelUseAssembly, KernelHavePolymorphicClass, KernelCallVariadicFunction }; DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc); void MarkDevice(void); bool CheckSYCLCall(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. 
if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
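/* A minimal standalone C sketch (not Clang code) of the caller/callee
 * ranking described by the CUDAFunctionPreference comments above for
 * IdentifyCUDAPreference(). Kernels (__global__) and implicitly added
 * host-device attributes are deliberately ignored, and every name below
 * is a hypothetical illustration only. */
#include <stdio.h>

enum Target { T_Host, T_Device, T_HostDevice };
enum Pref { P_Never, P_WrongSide, P_HostDevice, P_SameSide, P_Native };

/* compiling_for_device stands in for the "current compilation mode" */
static enum Pref preference(enum Target caller, enum Target callee,
                            int compiling_for_device)
{
    if (callee == T_HostDevice)
        return P_HostDevice;    /* any call to a host-device function */
    if (caller == callee)
        return P_Native;        /* host-to-host or device-to-device */
    if (caller == T_HostDevice) /* HD caller: callee side vs. current mode */
        return ((callee == T_Device) == compiling_for_device) ? P_SameSide
                                                              : P_WrongSide;
    return P_Never;             /* e.g. plain host code calling __device__ */
}

int main(void)
{
    /* a wrong-side call is legal only if it is never codegen'ed, which is
       exactly what the deferred-diagnostics machinery above enforces */
    printf("HD->device while compiling for host: %s\n",
           preference(T_HostDevice, T_Device, 0) == P_WrongSide
               ? "wrong side" : "?");
    return 0;
}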
StarFM_compute.c
/** * computation functions for StarFM * Revision 1.2.1 10/2014 Feng Gao * - enabled OpenMP for multi-thread processing * * Revision 1.2.0 06/2014 Feng Gao * - modified to allow multiple-date predictions * - does not support highest occurrences * - does not support LUT improvement * * Revision 1.1.x 11/2012 Feng Gao added processing for subwindow * Revision 1.1 01/2008 Feng Gao */ #include "StarFM.h" /** * do statistics on the input Landsat data: * find min, max, mean and stdev, and then * compute the slice distance based on the defined * number of classes (NUM_CLS) for homogeneity testing */ void doStatistics(SENSOR_PAIR *psensor[], CONTROL_PARAMETER *par) { int ip, irow, icol; unsigned char tmpchar; short int tmpint, min, max; double sumx, sumx2, num; /* compute mean and stdev for each input Landsat SR file */ for(ip=0; ip<par->NUM_PAIRS; ip++) { #ifdef DEBUG printf("\n\tfor input %s ", psensor[ip]->landsat.fname); #endif min = psensor[ip]->landsat.range[1]; max = psensor[ip]->landsat.range[0]; sumx = 0.0; sumx2 = 0.0; num = 0.0; for(irow=0; irow<psensor[ip]->landsat.nrows; irow++) { for(icol=0; icol<psensor[ip]->landsat.ncols; icol++) { fread(&tmpint, sizeof(short int), 1, psensor[ip]->landsat.fp); if(psensor[ip]->landsat.mfp != NULL) fread(&tmpchar, sizeof(char), 1, psensor[ip]->landsat.mfp); else /* assume all valid if no mask file */ tmpchar = VALID; if(irow<psensor[ip]->start_irow||irow>psensor[ip]->end_irow||icol<psensor[ip]->start_icol||icol>psensor[ip]->end_icol) continue; /* only use valid values */ if(tmpchar == 1 && tmpint > psensor[ip]->landsat.range[0] && tmpint < psensor[ip]->landsat.range[1]) { if(tmpint > max) max = tmpint; if(tmpint < min) min = tmpint; sumx += tmpint; sumx2 += tmpint * tmpint; num++; } /* end of valid pixels */ } /* end of icol */ } /* end of irow */ /* compute basic statistics */ psensor[ip]->landsat.min = min; psensor[ip]->landsat.max = max; psensor[ip]->landsat.mean = sumx/num; psensor[ip]->landsat.stdev = sqrt(sumx2/num - (sumx/num) * (sumx/num)); /* slice width per class: covers +/-2 stdev around the mean, i.e. (2 stdev * 2 sides) / (NUM_CLS * 2) = 2*stdev/NUM_CLS */ psensor[ip]->landsat.slice_value = (int)(psensor[ip]->landsat.stdev * 2.0 / par->NUM_CLS + 0.5); #ifdef DEBUG printf("\n\tmin=%4d max=%4d mean=%4d stdev=%4d slice_dis=%4d", psensor[ip]->landsat.min, psensor[ip]->landsat.max, psensor[ip]->landsat.mean, psensor[ip]->landsat.stdev, psensor[ip]->landsat.slice_value); #endif rewind(psensor[ip]->landsat.fp); if(psensor[ip]->landsat.mfp != NULL) rewind(psensor[ip]->landsat.mfp); } } /** * do prediction based on the StarFM algorithm (Gao et al., 2006) * write prediction and sample basis point (1/10000) to outputs * save best prediction to look-up-table for later improvement */ int doPrediction(SENSOR_PAIR *psensor[], CONTROL_PARAMETER *par, BEST_PREDICTION **bplut) { int i, j, k; short int **row_psr; /* predicted Landsat SR in short int (for store to file) */ /*FILE *fp[MAX_NPAIRS]; char tmpname[200];*/ /* open qa file for write */ /* for(k=0; k<par->NUM_PREDICTIONS; k++) { sprintf(tmpname, "%s.psam", psensor[par->NUM_PAIRS+k]->landsat.fname); if((fp[k]=fopen(tmpname, "wb"))==NULL) { printf("\nOpen QA file %s error!", tmpname); return FAILURE; } }*/ alloc_2dim_contig ((void ***) (&row_psr), psensor[0]->landsat.ncols, par->NUM_PREDICTIONS, sizeof(short int)); loadFirstBlock(psensor, par); printf("\n\tprocessing irow ... "); for(i=0; i<psensor[0]->landsat.nrows; i++) { if(i<psensor[0]->start_irow||i>psensor[0]->end_irow) continue; printf("%5d\b\b\b\b\b", i+1); #pragma omp parallel for shared(psensor, par) for(j=0; j<psensor[0]->landsat.ncols; j++) { /* process each pixel (i,j) */ if(j<psensor[0]->start_icol||j>psensor[0]->end_icol) continue; doOnePixel(psensor, par, i, j, row_psr[j]); } /* end of icol loop */ /* save results for one row after all parallel computations finish */ for(j=0; j<psensor[0]->landsat.ncols; j++) for(k=0; k<par->NUM_PREDICTIONS; k++) fwrite(&(row_psr[j][k]), sizeof(short int), 1, psensor[par->NUM_PAIRS+k]->landsat.fp); loadNextRow(psensor, par, i); } /* end of irow (i) */ free_2dim_contig((void **)row_psr); /*for(k=0; k<par->NUM_PREDICTIONS; k++) fclose(fp[k]);*/ return SUCCESS; } /** * do prediction for one pixel based on the StarFM algorithm (Gao et al., 2006) * separated out for OpenMP processing */ int doOnePixel(SENSOR_PAIR *psensor[], CONTROL_PARAMETER *par, int i, int j, short int *all_psr) { int ican; /* index of selected candidates */ int rel_n; /* relative n (index of column within searching window) */ int ip; /* index of input periods (0, NPAIRS) */ int k, m, n, modv, lndv; int nsam[MAX_NPAIRS]; short int diff; short int psam; /* basis point (1/10000) of samples used within searching distance */ int total_candidates; /* total number of potential Landsat and MODIS SR pairs */ int usable_nsam; /* total number of usable samples */ int is_similar; /* 1 means a spectrally similar type */ float s_dis, t_dis, dis_x, dis_y, sum_reverse_dis; /* temporary variables used to compute spatial and temporal distance */ float fsr; /* final predicted Landsat SR in float */ short int psr; /* predicted Landsat SR in short int (for store to file) */ short int min_r_msr[MAX_NPAIRS], min_r_lsr; /* minimum SR differences on r_msr and r_lsr */ int r_msr_uncertain, r_lsr_uncertain; /* uncertainty of SR differences (assumed independent of each other) */ int msr_uncer1, msr_uncer2, lsr_uncer; /* temporary variable to compute uncertainty */ int combined_uncertain; CANDIDATE_PIXEL candidate[MAX_NSAMPLES]; /* decide surface reflectance uncertainties from QA and AOT */ /* here I just use predefined values */ msr_uncer1 = psensor[0]->modis.uncertainty; msr_uncer2 = psensor[0]->modis.uncertainty; lsr_uncer = psensor[0]->landsat.uncertainty; /* compute uncertainty for the combined variables (assumed independent) */ r_msr_uncertain = sqrt(msr_uncer1*msr_uncer1 + msr_uncer2*msr_uncer2); r_lsr_uncertain = sqrt(msr_uncer1*msr_uncer1 + lsr_uncer*lsr_uncer); combined_uncertain = sqrt(r_msr_uncertain*r_msr_uncertain + r_lsr_uncertain* r_lsr_uncertain); ican = 0; /* save working pixel to index 0, all other candidates start from 1 */ candidate[ican].loc[0] = j; candidate[ican].loc[1] = i; for(k=0; k<par->NUM_PREDICTIONS; k++) /* save MODIS data for later replacement if no prediction (temporary storage) */ candidate[ican].r_msr[k] = psensor[par->NUM_PAIRS+k]->modis.data[par->WIN_SIZE/2][j]; candidate[ican].lsr = psensor[0]->landsat.data[par->WIN_SIZE/2][j]; ican++; for(k=0; k<par->NUM_PREDICTIONS; k++) min_r_msr[k] = psensor[0]->modis.range[1]; min_r_lsr = psensor[0]->landsat.range[1]; for(ip=0; ip<par->NUM_PAIRS; ip++) { /* save current location to the head of array, ip=0 to ican=1 and ip=1 to ican=2 */ candidate[ican].loc[0] = j; candidate[ican].loc[1] = i; candidate[ican].msr = psensor[ip]->modis.data[par->WIN_SIZE/2][j]; candidate[ican].lsr = psensor[ip]->landsat.data[par->WIN_SIZE/2][j]; candidate[ican].mask = 
psensor[ip]->modis.mask[par->WIN_SIZE/2][j] * psensor[ip]->landsat.mask[par->WIN_SIZE/2][j]; for(k=0; k<par->NUM_PREDICTIONS; k++) { candidate[ican].r_msr[k] = psensor[ip]->modis.data[par->WIN_SIZE/2][j] - psensor[par->NUM_PAIRS+k]->modis.data[par->WIN_SIZE/2][j]; if(abs(candidate[ican].r_msr[k]) < min_r_msr[k]) min_r_msr[k] = abs(candidate[ican].r_msr[k]); } candidate[ican].r_lsr = psensor[ip]->modis.data[par->WIN_SIZE/2][j] - psensor[ip]->landsat.data[par->WIN_SIZE/2][j]; if(abs(candidate[ican].r_lsr) < min_r_lsr) min_r_lsr = abs(candidate[ican].r_lsr); ican++; } if(par->USE_SPATIAL_FLAG) { /* if using spatial info, search all pixels including the current one; reset index to 1 */ ican = 1; /* search small window and find potential candidates */ for(ip=0; ip<par->NUM_PAIRS; ip++) { nsam[ip] = 0; for(m=0; m<par->WIN_SIZE; m++) for(n=0; n<par->WIN_SIZE; n++) { rel_n = n - par->WIN_SIZE/2 + j; /* convert to actual icol */ /* valid image range test */ if(rel_n < 0 || rel_n >= psensor[0]->landsat.ncols) continue; /* check MODIS data */ modv = psensor[ip]->modis.data[m][rel_n]; lndv = psensor[ip]->landsat.data[m][rel_n]; if( modv == psensor[0]->modis.fillv || modv < psensor[0]->modis.range[0] || modv > psensor[0]->modis.range[1]) continue; /* check Landsat data */ if(lndv == psensor[0]->landsat.fillv || lndv < psensor[0]->landsat.range[0] || lndv > psensor[0]->landsat.range[1] ) continue; is_similar = 0; if(par->USE_CLUSTER_MAP) { /* use defined classification map */ if(psensor[ip]->cls[m][rel_n] == psensor[ip]->cls[par->WIN_SIZE/2][j]) is_similar = 1; } else { /* do spectral similarity test if no classification map is defined */ diff = abs(psensor[ip]->landsat.data[m][rel_n] - psensor[ip]->landsat.data[par->WIN_SIZE/2][j]); /* spectrally similar - a single-band slice test is used here */ if(diff < psensor[ip]->landsat.slice_value) is_similar = 1; } if(is_similar) { /* put to the candidate list */ candidate[ican].loc[0] = rel_n; candidate[ican].loc[1] = m - par->WIN_SIZE/2 + i; candidate[ican].msr = psensor[ip]->modis.data[m][rel_n]; candidate[ican].lsr = psensor[ip]->landsat.data[m][rel_n]; candidate[ican].mask = psensor[ip]->modis.mask[m][rel_n] * psensor[ip]->landsat.mask[m][rel_n]; for(k=0; k<par->NUM_PREDICTIONS; k++) { modv = psensor[par->NUM_PAIRS+k]->modis.data[m][rel_n]; if(modv == psensor[0]->modis.fillv || modv < psensor[0]->modis.range[0] || modv > psensor[0]->modis.range[1]) candidate[ican].r_msr[k] = psensor[0]->modis.fillv; else candidate[ican].r_msr[k] = psensor[ip]->modis.data[m][rel_n] - modv; } candidate[ican].r_lsr = candidate[ican].msr - candidate[ican].lsr; ican++; nsam[ip]++; } /* endif spectrally similar */ } /* end of moving window search */ } /* end of foreach input pair */ } /* end of use spatial (neighbor) information */ total_candidates = ican; /* do predictions for all MODIS dates (new feature v1.2.0) */ for(k=0; k<par->NUM_PREDICTIONS; k++) { /* data validity check */ for(m=1; m<total_candidates; m++) { candidate[m].valid = 1; /* data range checking */ modv = candidate[m].r_msr[k]; /* accept differences within the observed minimum plus one uncertainty */ if((abs(candidate[m].r_lsr) > (min_r_lsr+r_lsr_uncertain) && abs(modv) > (min_r_msr[k]+r_msr_uncertain)) || candidate[m].mask == INVALID || modv == psensor[0]->modis.fillv) candidate[m].valid = 0; /*if( candidate[m].mask == INVALID || modv == psensor[0]->modis.fillv || abs(modv) > (min_r_msr[k]+r_msr_uncertain)) candidate[m].valid = 0;*/ } /* do prediction using weighting approach (default) */ sum_reverse_dis = 0.0; usable_nsam = 0; 
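/* Weighting (Gao et al., 2006): each valid candidate gets a combined
 * weight that decreases with (a) its spectral/temporal difference t_dis,
 * built from the |r_msr| and |r_lsr| differences computed above, and
 * (b) its spatial distance s_dis from the centre pixel, rescaled by the
 * pixel resolution relative to MAX_SEARCH_DIS:
 *     r_dis = 1 / (t_dis * (1 + s_dis * res / MAX_SEARCH_DIS))
 * The r_dis values are normalized by sum_reverse_dis below, so the final
 * prediction is a convex combination of (lsr - r_msr) candidate terms. */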
for(m=1; m<total_candidates; m++) { if(candidate[m].valid == 1) { /* compute distance to the central pixel */ dis_x = candidate[m].loc[0]-candidate[0].loc[0]; dis_y = candidate[m].loc[1]-candidate[0].loc[1]; s_dis = sqrt(dis_x*dis_x + dis_y*dis_y); /* consider a small uncertainty for the difference of reflectance */ /* to avoid zero from either of them (and so give a biased large weight) */ if(par->NUM_PAIRS == 1) /* if input only contains one pair of Landsat and MODIS, then only use the difference between MODIS and Landsat - Feng (10/21/08) */ t_dis = (abs(candidate[m].r_lsr) + 1); else t_dis = (abs(candidate[m].r_msr[k]) + 1) * (abs(candidate[m].r_lsr) + 1); if(t_dis < combined_uncertain) /* very close (less than uncertainty), gets maximum weight */ candidate[m].r_dis = 1.0; else /* otherwise, compute combined weight */ candidate[m].r_dis = 1.0/(t_dis*(1.0+s_dis * psensor[0]->landsat.res / par->MAX_SEARCH_DIS)); sum_reverse_dis += candidate[m].r_dis; usable_nsam++; } } if(usable_nsam == 0) /* use MODIS SR if no good candidate */ /*fsr = candidate[0].r_msr[k]; */ /* use fill value */ fsr = psensor[0]->landsat.fillv; else { /* initialize value for sum */ fsr = 0.0; for(m=1; m<total_candidates; m++) { /* normalize weight */ if(candidate[m].valid == 1) candidate[m].weight = candidate[m].r_dis/sum_reverse_dis; else candidate[m].weight = 0.0; /* predict reflectance using weighting function */ fsr += candidate[m].weight * (candidate[m].lsr - candidate[m].r_msr[k]); } } /* end of prediction using weighting approach */ /* convert to integer */ if(fsr > psensor[0]->landsat.range[0] && fsr < psensor[0]->landsat.range[1]) psr = (int)(fsr + 0.5); else psr = psensor[0]->landsat.fillv; /* compute basis point of samples used (keep precision with basis point instead of percentage) */ psam = (short int)((10000.0*usable_nsam)/(par->WIN_SIZE*par->WIN_SIZE*par->NUM_PAIRS) + 0.5); /* save prediction for k date */ all_psr[k] = psr; #ifdef DEBUG if(i==DEBUG_irow && j==DEBUG_icol) { printf("\nfile=%s icol=%d irow=%d", psensor[par->NUM_PAIRS+k]->modis.fname, candidate[0].loc[0], candidate[0].loc[1]); printf("\npsr=%f", fsr); printf("\nmodis=%d min_r_msr=%d min_r_lsr=%d prediction=%d use_nsam=%d psam=%d(basis_point)", candidate[0].r_msr[k], min_r_msr[k], min_r_lsr, psr, usable_nsam, psam); printf("\nr_msr_uncertain=%d r_lsr_uncertain=%d combined_uncertain=%d\n", r_msr_uncertain, r_lsr_uncertain, combined_uncertain); for (m=1; m<total_candidates; m++) /*if(candidate[m].valid == 1) */ printf("%3d x=%4d y=%4d modis=%4d diff_modis=%4d landsat=%4d weight=%6.4f mask=%d valid=%d\n", m, candidate[m].loc[0], candidate[m].loc[1], candidate[m].msr, candidate[m].r_msr[k], candidate[m].lsr, candidate[m].weight, candidate[m].mask, candidate[m].valid); } #endif } /* end of k loop for each MODIS date */ return SUCCESS; }
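/* A self-contained toy of the weighting scheme used in doOnePixel() above,
 * assuming three already-filtered candidates; the sample values, res = 30 m
 * and max_search_dis = 750 m are made up for illustration only. */
#include <stdio.h>

int main(void)
{
    const double res = 30.0, max_search_dis = 750.0;
    /* per candidate: spatial distance (pixels) and (|r_msr|+1)*(|r_lsr|+1) */
    const double s_dis[3] = {0.0, 5.0, 12.0};
    const double t_dis[3] = {(3 + 1) * (2 + 1),
                             (10 + 1) * (8 + 1),
                             (25 + 1) * (20 + 1)};
    const double lsr[3]   = {1200, 1150, 1300};  /* Landsat SR at pair date */
    const double r_msr[3] = {3, 10, -25};        /* MODIS change to target date */

    double r_dis[3], sum = 0.0, fsr = 0.0;
    for (int m = 0; m < 3; m++) {
        r_dis[m] = 1.0 / (t_dis[m] * (1.0 + s_dis[m] * res / max_search_dis));
        sum += r_dis[m];
    }
    for (int m = 0; m < 3; m++)           /* normalized weighted prediction */
        fsr += (r_dis[m] / sum) * (lsr[m] - r_msr[m]);
    printf("predicted SR = %.1f\n", fsr);
    return 0;
}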
conv_kernel_mips.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: qtang@openailab.com */ #include "conv_kernel_mips.h" #include "wino_conv_kernel_mips.h" #include <stdint.h> #include <stdlib.h> #include <math.h> #if __mips_msa #include <msa.h> #endif #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) static int get_private_mem_size(struct tensor* filter) { return filter->elem_num * filter->elem_size; // caution } static void interleave(struct tensor* filter, struct conv_priv_info* priv_info) { /* simply copy the data */ memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size); } void im2col(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int outc, int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw) { const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; *(out++) = *in; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } static void im2col_ir(struct tensor* input, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group) { int input_chan = param->input_channel / param->group; int image_size = input->dims[1] * input->dims[2] * input->dims[3]; int group_size = input_chan * input->dims[2] * input->dims[3]; void* input_base = input->data + (n * image_size + group * group_size) * input->elem_size; void* im2col_buf = priv_info->im2col_buffer; int input_zero = 0; if (input->data_type == TENGINE_DT_UINT8) input_zero = input->zero_point; im2col(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3], output->dims[1] / param->group, param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w); } void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread) { int nn_size = N >> 2; int remian_size_start = nn_size << 2; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] 
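/* Pack layout note: columns of the K x N matrix pB are regrouped into
 * panels of four consecutive columns. Panel p (columns 4p..4p+3) is
 * stored contiguously as K groups of 4, so element (k, 4p+c) lands at
 * pB_t[p*4*K + k*4 + c]; the sgemm kernels below then fetch one 4-wide
 * group per k with a single load. */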
#pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const float* img = pB + i; float* tmp = pB_t + (i / 4) * 4 * K; for (int j = 0; j < K; j++) { #if __mips_msa __msa_st_w(__msa_ld_w(img, 0), tmp, 0); #else tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; #endif // __mips_msa tmp += 4; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const float* img = pB + i; float* tmp = pB_t + (i / 4 + i % 4) * 4 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } // unloop output M, unloop N, packet 4x4, using intrinsic static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 2; remain_outch_start = nn_outch << 2; // output ch0 - ch3 #pragma omp parallel for num_threads(num_thread) for (int pp=0; pp<nn_outch; pp++) { int i = pp * 4; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; int j = 0; for (; j + 3 < N; j += 4) { float* va = pA_t + (i / 4) * 4 * K; float* vb = pB_t + (j / 4) * 4 * K; #if __mips_msa v4f32 _sum0 = {0.f}; v4f32 _sum1 = {0.f}; v4f32 _sum2 = {0.f}; v4f32 _sum3 = {0.f}; for (int k = 0; k < K; k++) { // k0 v4f32 _vb = (v4f32)__msa_ld_w(vb, 0); v4f32 _va0 = {va[0], va[0], va[0], va[0]}; v4f32 _va1 = {va[1], va[1], va[1], va[1]}; v4f32 _va2 = {va[2], va[2], va[2], va[2]}; v4f32 _va3 = {va[3], va[3], va[3], va[3]}; _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb, _va0)); // sum0 = (a00-a03) * k00 _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_vb, _va1)); // sum1 = (a00-a03) * k10 _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_vb, _va2)); // sum2 = (a00-a03) * k20 _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_vb, _va3)); // sum3 = (a00-a03) * k30 va += 4; vb += 4; } __msa_st_w((v4i32)_sum0, output0, 0); __msa_st_w((v4i32)_sum1, output1, 0); __msa_st_w((v4i32)_sum2, output2, 0); __msa_st_w((v4i32)_sum3, output3, 0); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif // __mips_msa output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { float* va = pA_t + (i / 4) * 4 * K; float* vb = pB_t + (j / 4 + j % 4) * 4 * K; #if __mips_msa v4f32 _sum0_3 = {0.f}; v4f32 _sum0 = {0.f}; v4f32 _sum1 = {0.f}; v4f32 _sum2 = {0.f}; v4f32 _sum3 = {0.f}; int k = 0; for (; k + 3 < K; k = k + 4) { v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]}; v4f32 _vb1 = {vb[1], vb[1], vb[1], vb[1]}; v4f32 _vb2 = {vb[2], vb[2], vb[2], vb[2]}; v4f32 _vb3 = {vb[3], vb[3], vb[3], vb[3]}; v4f32 _va0 = (v4f32)__msa_ld_w(va, 0); v4f32 _va1 = (v4f32)__msa_ld_w(va + 4, 0); v4f32 _va2 = (v4f32)__msa_ld_w(va + 8, 0); v4f32 _va3 = (v4f32)__msa_ld_w(va + 12, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_va0, _vb0)); // sum0 += (k00-k30) * a00 _sum1 = __msa_fadd_w(_sum1, __msa_fmul_w(_va1, _vb1)); // sum1 += (k01-k31) * a10 _sum2 = __msa_fadd_w(_sum2, __msa_fmul_w(_va2, _vb2)); // sum2 += (k02-k32) * a20 _sum3 = __msa_fadd_w(_sum3, __msa_fmul_w(_va3, _vb3)); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = 
__msa_fadd_w(_sum0, _sum1); _sum2 = __msa_fadd_w(_sum2, _sum3); _sum0_3 = __msa_fadd_w(_sum2, _sum0); // _sum0_3 = __msa_fadd_w(_sum0_3, _sum2); for (; k < K; k++) { v4f32 _vb0 = {vb[0], vb[0], vb[0], vb[0]}; v4f32 _va = (v4f32)__msa_ld_w(va, 0); _sum0_3 = __msa_fadd_w(_sum0_3, __msa_fmul_w(_va, _vb0)); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } output0[0] = _sum0_3[0]; output1[0] = _sum0_3[1]; output2[0] = _sum0_3[2]; output3[0] = _sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __mips_msa output0++; output1++; output2++; output3++; } } // output ch0 #pragma omp parallel for num_threads(num_thread) for (int i=remain_outch_start; i<M; i++) { float* output = pC + i * N; int j = 0; for (; j + 3 < N; j += 4) { float* va = pA_t + (i / 4 + i % 4) * 4 * K; float* vb = pB_t + (j / 4) * 4 * K; #if __mips_msa v4f32 _sum0 = {0.f}; int k = 0; for (; k + 3 < K; k = k + 4) { // k0 v4f32 _va0 = {va[0], va[0], va[0], va[0]}; v4f32 _va1 = {va[1], va[1], va[1], va[1]}; v4f32 _va2 = {va[2], va[2], va[2], va[2]}; v4f32 _va3 = {va[3], va[3], va[3], va[3]}; v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0); v4f32 _vb1 = (v4f32)__msa_ld_w(vb + 4, 0); v4f32 _vb2 = (v4f32)__msa_ld_w(vb + 8, 0); v4f32 _vb3 = (v4f32)__msa_ld_w(vb + 12, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0)); // sum0 = (a00-a03) * k00 _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb1, _va1)); // sum0 += (a10-a13) * k01 _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb2, _va2)); // sum0 += (a20-a23) * k02 _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb3, _va3)); // sum0 += (a30-a33) * k03 va += 4; vb += 16; } for (; k < K; k++) { // k0 v4f32 _va0 = {va[0]}; v4f32 _vb0 = (v4f32)__msa_ld_w(vb, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_vb0, _va0)); // sum0 = (a00-a03) * k00 va += 1; vb += 4; } __msa_st_w((v4i32)_sum0, output, 0); #else float sum[4] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = sum[n]; } #endif // __mips_msa output += 4; } for (; j < N; j++) { float* va = pA_t + (i / 4 + i % 4) * 4 * K; float* vb = pB_t + (j / 4 + j % 4) * 4 * K; int k = 0; #if __mips_msa v4f32 _sum0 = {0.f}; for (; k + 3 < K; k += 4) { v4f32 _p0 = (v4f32)__msa_ld_w(vb, 0); v4f32 _k0 = (v4f32)__msa_ld_w(va, 0); _sum0 = __msa_fadd_w(_sum0, __msa_fmul_w(_p0, _k0)); va += 4; vb += 4; } float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #else float sum0 = 0.f; #endif // __mips_msa for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * 
out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } /* check whether this conv should use the winograd implementation */ static int winograd_support(struct conv_param* param, int in_h, int in_w) { int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int input_chan = param->input_channel; int output_chan = param->output_channel; int group = param->group; if (in_h <= 10 && in_w <= 10) return 0; if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 || dilation_w != 1 || input_chan < 16 || output_chan < 16) return 0; return 1; } int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param) { int group = param->group; int input_chan = param->input_channel / group; int kernel_size = input_chan * param->kernel_h * param->kernel_w; int output_xy = output->dims[2] * output->dims[3]; int elem_size = input->elem_size; return elem_size * output_xy * kernel_size; } int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param) { int K = filter->elem_num / filter->dims[0]; int N = output->dims[2] * output->dims[3]; int elem_size = filter->elem_size; return (4 * K * (N / 4 + N % 4)) * elem_size; } int conv_hcl_get_interleave_pack4_size(int M, int K, struct tensor* filter) { int size = 4 * K * (M / 4 + M % 4) * filter->elem_size; return size; } void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info) { float* pA = ( float* )priv_info->interleave_buffer; float* pA_t = ( float* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 2; int remain_outch_start = nn_outch << 2; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; float* ktmp = pA_t + (p / 4) * 4 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } for (int p = remain_outch_start; p < M; p++) { const float* k0 = pA + (p + 0) * K; float* ktmp = pA_t + (p / 4 + p % 4) * 4 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* 
param) { int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; /* check winograd implement, only for conv3x3s1 */ priv_info->winograd = winograd_support(param, in_h, in_w); if (priv_info->winograd) { return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); } if (!priv_info->external_im2col_mem) { int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; } if (!priv_info->external_im2col_pack4_mem) { int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; } if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } interleave(filter_tensor, priv_info); if (priv_info->external_interleave_pack4_mem) { int M = filter_tensor->dims[0]; int K = filter_tensor->elem_num / filter_tensor->dims[0]; int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer_pack4 = mem; priv_info->interleave_buffer_pack4_size = mem_size; conv_hcl_interleave_pack4(M, K, priv_info); if (!priv_info->external_interleave_mem && priv_info->interleave_buffer) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } } return 0; } int conv_hcl_postrun(struct conv_priv_info* priv_info) { if (priv_info->winograd) { return wino_conv_hcl_postrun(priv_info); } if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL) { sys_free(priv_info->im2col_buffer); priv_info->im2col_buffer = NULL; } if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL) { sys_free(priv_info->im2col_buffer_pack4); priv_info->im2col_buffer_pack4 = NULL; } if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } return 0; } int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { int group = param->group; int type = input_tensor->data_type; if (priv_info->winograd) { return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); } for (int i = 0; i < input_tensor->dims[0]; i++) // batch size { for (int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * output_tensor->dims[3]; float* im2col_fp32 = priv_info->im2col_buffer; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; input_pack4(K, N, im2col_fp32, im2col_pack4_fp32, num_thread); if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info* 
priv_info, void* mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
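/*
 * Usage sketch (illustrative, not part of the original source): how a caller
 * can drive the interface above with externally managed im2col scratch
 * memory, so conv_hcl_prerun() skips its internal sys_malloc() for that
 * buffer.  The num_thread/cpu_affinity values are placeholders; tensor and
 * param setup is assumed to happen elsewhere in the engine.
 */
static int run_conv_with_external_scratch(struct tensor* input, struct tensor* filter,
                                          struct tensor* bias, struct tensor* output,
                                          struct conv_priv_info* info, struct conv_param* param)
{
    /* size the shared im2col buffer once and hand it to the kernel */
    int size = conv_hcl_get_shared_mem_size(input, output, param);
    void* scratch = sys_malloc(size);
    if (!scratch)
        return -1;
    conv_hcl_set_shared_mem(info, scratch, size);

    if (conv_hcl_prerun(input, filter, output, info, param) != 0)
    {
        sys_free(scratch);
        return -1;
    }

    int ret = conv_hcl_run(input, filter, bias, output, info, param, 4 /*num_thread*/, 0 /*cpu_affinity*/);

    /* postrun only frees buffers it allocated itself; external memory
       stays owned by the caller */
    conv_hcl_postrun(info);
    sys_free(scratch);
    return ret;
}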
common.c
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.1 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2015. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <time.h> #include <omp.h> #include "common.h" #define CONF95 1.96 int nthreads = -1; // Number of OpenMP threads int delaylength = -1; // The number of iterations to delay for int outerreps = -1; // Outer repetitions double delaytime = -1.0; // Length of time to delay for in microseconds double targettesttime = 0.0; // The length of time in microseconds that the test // should run for. unsigned long innerreps; // Inner repetitions double *times; // Array of doubles storing the benchmark times in microseconds double referencetime; // The average reference time in microseconds to perform // outerreps runs double referencesd; // The standard deviation in the reference time in // microseconds for outerreps runs. double testtime; // The average test time in microseconds for // outerreps runs double testsd; // The standard deviation in the test time in // microseconds for outerreps runs. 
void usage(char *argv[]) {
    printf("Usage: %s.x \n"
           "\t--outer-repetitions <outer-repetitions> (default %d)\n"
           "\t--test-time <target-test-time> (default %0.2f microseconds)\n"
           "\t--delay-time <delay-time> (default %0.4f microseconds)\n"
           "\t--delay-length <delay-length> "
           "(default auto-generated based on processor speed)\n",
           argv[0],
           DEFAULT_OUTER_REPS, DEFAULT_TEST_TARGET_TIME, DEFAULT_DELAY_TIME);
}

void parse_args(int argc, char *argv[]) {
    // Parse the parameters
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--delay-time") == 0) {
            delaytime = atof(argv[++arg]);
            if (delaytime == 0.0) {
                printf("Invalid float:--delay-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--outer-repetitions") == 0) {
            outerreps = atoi(argv[++arg]);
            if (outerreps == 0) {
                printf("Invalid integer:--outer-repetitions: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--test-time") == 0) {
            targettesttime = atof(argv[++arg]);
            if (targettesttime == 0.0) {
                printf("Invalid float:--test-time: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}

int getdelaylengthfromtime(double delaytime) {
    int i, reps;
    double lapsedtime, starttime; // seconds

    reps = 1000;
    lapsedtime = 0.0;

    delaytime = delaytime/1.0E6; // convert from microseconds to seconds
    // Note: delaytime is local to this function and thus the conversion
    // does not propagate to the main code.

    // Here we want to use the delaytime in microseconds to find the
    // delaylength in iterations. We start with delaylength=0 and
    // increase until we get a large enough delaytime, return delaylength
    // in iterations.

    delaylength = 0;
    delay(delaylength);

    while (lapsedtime < delaytime) {
        delaylength = delaylength * 1.1 + 1;
        starttime = getclock();
        for (i = 0; i < reps; i++) {
            delay(delaylength);
        }
        lapsedtime = (getclock() - starttime) / (double) reps;
    }

    return delaylength;
}

unsigned long getinnerreps(void (*test)(void)) {
    innerreps = 1L; // some initial value
    double time = 0.0;

    double start = getclock();
    test();
    time = (getclock() - start) * 1.0e6;
    if (time > targettesttime) {
        return innerreps;
    }

    while (time < targettesttime) {
        double start = getclock();
        test();
        time = (getclock() - start) * 1.0e6;
        innerreps *= 2;

        // Test to stop code if compiler is optimising reference time expressions away
        if (innerreps > (targettesttime*1.0e15)) {
            printf("Compiler has optimised reference loop away, STOP! \n");
            printf("Try recompiling with lower optimisation level \n");
            exit(1);
        }
    }
    return innerreps;
}

void printheader(char *name) {
    printf("\n");
    printf("--------------------------------------------------------\n");
    printf("Computing %s time using %lu reps\n", name, innerreps);
}

void stats(double *mtp, double *sdp) {
    double meantime, totaltime, sumsq, mintime, maxtime, sd, cutoff;
    int i, nr;

    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;

    for (i = 1; i <= outerreps; i++) {
        mintime = (mintime < times[i]) ? mintime : times[i];
        maxtime = (maxtime > times[i]) ?
                  maxtime : times[i];
        totaltime += times[i];
    }

    meantime = totaltime / outerreps;
    sumsq = 0;

    for (i = 1; i <= outerreps; i++) {
        sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
    sd = sqrt(sumsq / (outerreps - 1));

    cutoff = 3.0 * sd;
    nr = 0;

    for (i = 1; i <= outerreps; i++) {
        if (fabs(times[i] - meantime) > cutoff)
            nr++;
    }

    printf("\n");
    printf("Sample_size Average Min Max S.D. Outliers\n");
    printf(" %d %f %f %f %f %d\n",
           outerreps, meantime, mintime, maxtime, sd, nr);
    printf("\n");

    *mtp = meantime;
    *sdp = sd;
}

void printfooter(char *name, double testtime, double testsd,
                 double referencetime, double refsd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, testtime, CONF95*testsd);
    printf("%s overhead = %f microseconds +/- %f\n",
           name, testtime-referencetime, CONF95*(testsd+refsd));
}

void printreferencefooter(char *name, double referencetime, double referencesd) {
    printf("%s time = %f microseconds +/- %f\n",
           name, referencetime, CONF95 * referencesd);
}

void init(int argc, char **argv)
{
#pragma omp parallel
    {
#pragma omp master
        {
            nthreads = omp_get_num_threads();
        }
    }

    parse_args(argc, argv);

    if (outerreps == -1) {
        outerreps = DEFAULT_OUTER_REPS;
    }
    if (targettesttime == 0.0) {
        targettesttime = DEFAULT_TEST_TARGET_TIME;
    }
    if (delaytime == -1.0) {
        delaytime = DEFAULT_DELAY_TIME;
    }

    delaylength = getdelaylengthfromtime(delaytime); // Always need to compute delaylength in iterations

    times = malloc((outerreps+1) * sizeof(double));

    printf("Running OpenMP benchmark version 3.1\n"
           "\t%d thread(s)\n"
           "\t%d outer repetitions\n"
           "\t%0.2f test time (microseconds)\n"
           "\t%d delay length (iterations) \n"
           "\t%f delay time (microseconds)\n",
           nthreads,
           outerreps, targettesttime,
           delaylength, delaytime);
}

void finalise(void) {
    free(times);
}

void initreference(char *name) {
    printheader(name);
}

/* Calculate the reference time. */
void reference(char *name, void (*refer)(void)) {
    int k;
    double start;

    // Calculate the required number of innerreps
    innerreps = getinnerreps(refer);

    initreference(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        refer();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisereference(name);
}

void finalisereference(char *name) {
    stats(&referencetime, &referencesd);
    printreferencefooter(name, referencetime, referencesd);
}

void inittest(char *name) {
    printheader(name);
}

void finalisetest(char *name) {
    stats(&testtime, &testsd);
    printfooter(name, testtime, testsd, referencetime, referencesd);
}

/* Function to run a microbenchmark test*/
void benchmark(char *name, void (*test)(void))
{
    int k;
    double start;

    // Calculate the required number of innerreps; getinnerreps() also
    // updates the global innerreps, so after four trials keep the maximum
    // to smooth out timing noise.
    unsigned long innerreps1 = getinnerreps(test);
    unsigned long innerreps2 = getinnerreps(test);
    unsigned long innerreps3 = getinnerreps(test);
    unsigned long innerreps4 = getinnerreps(test);
    innerreps = innerreps1 > innerreps ? innerreps1 : innerreps;
    innerreps = innerreps2 > innerreps ? innerreps2 : innerreps;
    innerreps = innerreps3 > innerreps ? innerreps3 : innerreps;
    innerreps = innerreps4 > innerreps ? innerreps4 : innerreps;

    inittest(name);

    for (k = 0; k <= outerreps; k++) {
        start = getclock();
        test();
        times[k] = (getclock() - start) * 1.0e6 / (double) innerreps;
    }

    finalisetest(name);
}

// For the Cray compiler on HECToR we need to turn off optimisation
// for the delay and array_delay functions. Other compilers should
// not be affected.
#pragma _CRI noopt void delay(int delaylength) { int i; float a = 0.; for (i = 0; i < delaylength; i++) a += i; if (a < 0) printf("%f \n", a); } void array_delay(int delaylength, double a[1]) { int i; a[0] = 1.0; for (i = 0; i < delaylength; i++) a[0] += i; if (a[0] < 0) printf("%f \n", a[0]); } // Re-enable optimisation for remainder of source. #pragma _CRI opt // Linux-specific, but much more precise double getclock() { struct timespec nowtime; clock_gettime(CLOCK_MONOTONIC, &nowtime); return nowtime.tv_sec + 1.0e-9 * nowtime.tv_nsec; } int returnfalse() { return 0; }
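/*
 * Minimal driver sketch (not part of the original suite) showing how a new
 * microbenchmark plugs into this harness, in the style of syncbench.  The
 * test body must execute its construct innerreps times, because benchmark()
 * divides the elapsed time by the global innerreps; refer() supplies the
 * matching reference workload.  Assumes common.h declares these functions
 * and the innerreps/delaylength globals, as in the released suite.
 */
#include <omp.h>
#include "common.h"

void refer(void) {
    unsigned long j;
    for (j = 0; j < innerreps; j++) {
        delay(delaylength);
    }
}

void testparallel(void) {
    unsigned long j;
    for (j = 0; j < innerreps; j++) {
#pragma omp parallel
        {
            delay(delaylength);
        }
    }
}

int main(int argc, char **argv) {
    init(argc, argv);                     // parse args, derive delaylength, allocate times[]
    reference("reference time", &refer);  // sets referencetime and referencesd
    benchmark("PARALLEL", &testparallel); // reports overhead = test time - reference time
    finalise();
    return 0;
}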
androidfde_fmt_plug.c
/* androidfde.c * * hashkill - a hash cracking tool * Copyright (C) 2010 Milen Rangelov <gat3way@gat3way.eu> * * Modified for JtR and made stuff more generic * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_fde; #elif FMT_REGISTERS_H john_register_one(&fmt_fde); #else #include <stdio.h> #include <string.h> #include <assert.h> #include <errno.h> #include "os.h" #include "stdint.h" #include <stdlib.h> #include <sys/types.h> #include "aes.h" #include <string.h> #include "arch.h" #include "johnswap.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "memory.h" #include "pbkdf2_hmac_sha1.h" // NOTE, this format FAILS for generic sha2. It could be due to interaction between openssl/aes and generic sha2 code. #include "sha2.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define FORMAT_TAG "$fde$" #define TAG_LENGTH 5 #define FORMAT_LABEL "fde" #define FORMAT_NAME "Android FDE" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME " SHA256/AES" #else #define ALGORITHM_NAME "PBKDF2-SHA1 SHA256/AES 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define PLAINTEXT_LENGTH 64 #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #define SALT_SIZE sizeof(struct custom_salt) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests fde_tests[] = { 
{"$fde$16$04b36d4290b56e0fcca9778b74719ab8$16$b45f0f051f13f84872d1ef1abe0ada59$0f61d28f7466c0435040cc845a67e6734500de15df3ba6f48d2534ca2a7b8f910d7547357e8f1ec7364bab41383f5df9b5fb43fcd4a1e06189ce3c6ba77ec908b066e73a508e201c941fb409e9abdc051c3c052a735b01e56be61efa635e82cbceab18db1ba645b93f7befb83155852f0004a7c7d6800e9fa5f0d3c133dd2496f92110c3cdcfb16dcf57df8de830969e18514a34d4917de14597da19f9f7dc81eca2d7d461c91e0a8aeac06bafe89866d24f2b4991b4295b6277d0ff4ad97f1fa58e20f8a24e2062f84c318eb36cfbb4671117bc3522afcf7737353589cae0dce0d7c3341f457af654543758f3f005bd4d68fa2b35777cb2ea5f8f69c4debcfb1d8b2a601320e4f8621dc6e99434007388bdc0ceebc722f9ed44cbce3914bf144db332276e719f6b48108cde55916d861d19dc8c03ac76a2dad322457073111e441488228f13649073aa3aadfab51dadf89a0827acba284154a9e18d926facef43852a0733660a1fbcca8e81d2f41efd9f645a61f9395b75fc7ad446885d304808d511f2ba2e7c6138588c4292aee4ef6f2537bb00c7b015cee4a91d2defa87b67abc1315e71f0489e271673b36412377219e93aba6af3cfd504bf3f6bc24f2b6148536339d91ddd2f013314544650c1c11e7317028a7014909d0c850f78692e476c4f57da586fe26786504130aba22ba5261b989aeb47483d8cb9d5052120a4e5690b5b0cd009aadaadc351db7b6a230ebc1fa771651cb64d78daa56b7a6c6808db3b688afee9b7edaa617d8cb16ac7290465987bd443ea41ce38aa14e0c88874fb2707394b83679de82134efe351b4d021c63b2992a8314b2e93908906400628a7f753c9a4d85e917a207561b7840ce121800fab4026508d1b00fe8e7e756573743e11380f76f6bb7c0e528cb98875e6ad88bff51236601e6942964e37ffe0316b1a1f7bc0d84334fa024bf03c261bd06a07c01f099ad23fb9a1d8c98447463b8988cb33f3e1fb7d7a7c547f9a6d51cf7b75649d3c8cb5bf93be79eba1a961659b5fe928a1c7e80aca857825c6bc11493cb230e66126ef7b7284abe0823b5735bb1dfe844029f175c63442ca774784b775ecf02e48d029ac0f236813be91aca66905640666b89bd08118e3c18c75764bc49d00d1fe53ee92ccaa487852c613cba91f637b6de06dcaa1953a7cfb5333df573273a67f0157b63fbbf48c48f16c423caefaf29cdb5d34b19ac0f57b972b9e5ff1bc5cf25bdcdf8d29fb75865c4501458f19bfd64c844fd52a27feec97dc31ba922aea75706404d853071707d0c6001c59664676be6426ca5c7efbfc09ffa9acac91441f9175fd3148fb046c31a49d7c7ad10bf3c4b413dd148666b72b5a533f600cb02d7623270e5d1ad33355dd318d06aa8b3d7517cb7d5be40d222a026380cfbf5b79014e7631d677b07bcd805d9ea7103cf1d057bf883b29fb99b064c4e3cb4271596a74895c1c3f7c7c49d2be54b1435af4440ecd019dde11cee14a320712c9275bef339a15d3a18d9f38918d7af0a50a35199980429d74d4cc2a16dea619619a7c19827f4f78d3ebaf13340abf6717cec6bff8399b067fb17f11cdb1f9909c51253f7466ee769546d1d96319bcc1b04a6b1f8d8068f96b959d507c9004d75717792733fadb7a94a2d5db514a61cbd90eef89d1ace5a3138120168d62f1ebef5efbbd4e7f7e987834db81fe8c4877f3edcc71c61e96b20ca26c5a91e28fa11e484c1dcbfd5a0461065fe52f042ee9a09687d800c90a0a792f3dbe257965247f8eecd122b9b234b734454fa1477212a0295a347ae44463de4de405bf4fd91cde400b63d7fced6d7ccd20d79a4899139a79085f8742c3dfe7fbadca56c4e8aa95ce7841ad9675659349f6671d047efa0951feb9c61381f5f9e39182c1ec0a3ebd2ef5e036312c6ed6a0e59777813229ffdac771788e609c7d9f96848f63b428789c55e85c509068df8d5a0a7fc066be8c76205860d86d6c5bb7c2bc85a922a2ad86e6a791fe238420eedd1cf7ac770dd8316ca30c9577441a34873cdf0c5dc2103457a93fa0dd42da5eb2d6f82e9ff47b4bb6cd1d3fcba5645caace577a89c7bd70ff432f8dae113a7877a41a41043dac4c0d21860ad8198a1b9640d979322a20d4b90caa77a5d2b31c5bd06e", "strongpassword"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static int max_cracked; static struct custom_salt { int loaded; unsigned char *cipherbuf; int keysize; int iterations; // NOTE, not used. 
Hard coded to 2000 for FDE from droid <= 4.3 (PBKDF2-sha1) int saltlen; unsigned char data[512 * 3]; unsigned char salt[16]; unsigned char mkey[64]; unsigned char iv[16]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif max_cracked = self->params.max_keys_per_crypt; saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; int saltlen, keysize; char *p; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) goto err; if (!isdec(p)) goto err; saltlen = atoi(p); if (saltlen > 16) /* saltlen */ goto err; if ((p = strtokm(NULL, "$")) == NULL) /* salt */ goto err; if (hexlenl(p) != saltlen * 2) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* keysize */ goto err; if (!isdec(p)) goto err; keysize = atoi(p); if (keysize > 64) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* key */ goto err; if (hexlenl(p) != keysize * 2) goto err; if ((p = strtokm(NULL, "$")) == NULL) /* data */ goto err; if (hexlenl(p) != 512 * 3 * 2) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; // int res; int i; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "$"); cs.saltlen = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.saltlen; i++) { cs.salt[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } p = strtokm(NULL, "$"); cs.keysize = atoi(p); p = strtokm(NULL, "$"); for (i = 0; i < cs.keysize; i++) { cs.mkey[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } p = strtokm(NULL, "$"); for (i = 0; i < 512 * 3; i++) { cs.data[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } // Not reference implementation - this is modified for use by androidfde! 
static void AES_cbc_essiv(unsigned char *src, unsigned char *dst, unsigned char *key, int startsector,int size) { AES_KEY aeskey; unsigned char essiv[16]; unsigned char essivhash[32]; SHA256_CTX ctx; unsigned char sectorbuf[16]; unsigned char zeroiv[16]; SHA256_Init(&ctx); SHA256_Update(&ctx, key, cur_salt->keysize); SHA256_Final(essivhash, &ctx); memset(sectorbuf,0,16); memset(zeroiv,0,16); memset(essiv,0,16); memcpy(sectorbuf,&startsector,4); AES_set_encrypt_key(essivhash, 256, &aeskey); AES_cbc_encrypt(sectorbuf, essiv, 16, &aeskey, zeroiv, AES_ENCRYPT); AES_set_decrypt_key(key, cur_salt->keysize*8, &aeskey); AES_cbc_encrypt(src, dst, size, &aeskey, essiv, AES_DECRYPT); } // cracked[index] = hash_plugin_check_hash(saved_key[index]); void hash_plugin_check_hash(int index) { unsigned char keycandidate2[255]; unsigned char decrypted1[512]; // FAT unsigned char decrypted2[512]; // ext3/4 AES_KEY aeskey; uint16_t v2,v3,v4; uint32_t v1,v5; int j = 0; #ifdef SIMD_COEF_32 unsigned char *keycandidate, Keycandidate[SSE_GROUP_SZ_SHA1][255]; int lens[SSE_GROUP_SZ_SHA1], i; unsigned char *pin[SSE_GROUP_SZ_SHA1]; union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = (ARCH_WORD_32*)(Keycandidate[i]); } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, 16, 2000, &(x.poutc), cur_salt->keysize + 16, 0); #else unsigned char keycandidate[255]; char *password = saved_key[index]; pbkdf2_sha1((const uint8_t*)password, strlen(password), (const uint8_t*)(cur_salt->salt), 16, 2000, keycandidate, cur_salt->keysize + 16, 0); #endif j = 0; #ifdef SIMD_COEF_32 for (; j < SSE_GROUP_SZ_SHA1; ++j) { keycandidate = Keycandidate[j]; #endif AES_set_decrypt_key(keycandidate, cur_salt->keysize*8, &aeskey); AES_cbc_encrypt(cur_salt->mkey, keycandidate2, 16, &aeskey, keycandidate+16, AES_DECRYPT); AES_cbc_essiv(cur_salt->data, decrypted1, keycandidate2,0,32); AES_cbc_essiv(cur_salt->data + 1024, decrypted2, keycandidate2,2,128); // Check for FAT if ((memcmp(decrypted1+3,"MSDOS5.0",8)==0)) cracked[index+j] = 1; else { // Check for extfs memcpy(&v1,decrypted2+72,4); memcpy(&v2,decrypted2+0x3a,2); memcpy(&v3,decrypted2+0x3c,2); memcpy(&v4,decrypted2+0x4c,2); memcpy(&v5,decrypted2+0x48,4); #if !ARCH_LITTLE_ENDIAN v1 = JOHNSWAP(v1); v2 = JOHNSWAP(v2); v3 = JOHNSWAP(v3); v4 = JOHNSWAP(v4); v5 = JOHNSWAP(v5); #endif if ((v1<5)&&(v2<4)&&(v3<5)&&(v4<2)&&(v5<5)) cracked[index+j] = 1; } #ifdef SIMD_COEF_32 } #endif } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0])*max_cracked); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { hash_plugin_check_hash(index); } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void fde_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_fde = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, 
BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, fde_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, fde_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
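/*
 * Standalone sketch of the checking chain this format implements, written
 * against OpenSSL's legacy AES/SHA APIs instead of JtR's bundled aes.h /
 * sha2.h / pbkdf2_hmac_sha1.h (an assumption, for the sake of a
 * self-contained example).  The 2000-iteration count, the key||IV split of
 * the PBKDF2 output, and the "MSDOS5.0" FAT check mirror the code above;
 * keysize is 16 for Android <= 4.3 images.
 */
#include <string.h>
#include <openssl/aes.h>
#include <openssl/sha.h>
#include <openssl/evp.h>

/* decrypt the first `size` bytes of a sector with AES-CBC-ESSIV:SHA256, as
   AES_cbc_essiv() does above; assumes a little-endian host, like the plugin */
static void essiv_decrypt(const unsigned char *src, unsigned char *dst,
                          const unsigned char *key, int keysize, int sector, int size)
{
    unsigned char essiv_key[SHA256_DIGEST_LENGTH];
    unsigned char essiv[16], sectorbuf[16] = {0}, zeroiv[16] = {0};
    AES_KEY ak;

    SHA256(key, keysize, essiv_key);      /* ESSIV salt = SHA256(master key) */
    memcpy(sectorbuf, &sector, 4);
    AES_set_encrypt_key(essiv_key, 256, &ak);
    AES_cbc_encrypt(sectorbuf, essiv, 16, &ak, zeroiv, AES_ENCRYPT); /* IV = E(salt, sector#) */

    AES_set_decrypt_key(key, keysize * 8, &ak);
    AES_cbc_encrypt(src, dst, size, &ak, essiv, AES_DECRYPT);
}

/* returns 1 when `password` decrypts sector 0 to a FAT boot sector */
static int fde_try_password(const char *password, const unsigned char *salt,
                            const unsigned char *enc_mkey, const unsigned char *sector0,
                            int keysize)
{
    unsigned char kdf_out[80], iv[16], mkey[64], plain[32];
    AES_KEY ak;

    /* PBKDF2-SHA1(password, salt, 2000) -> AES key (keysize bytes) || IV (16 bytes) */
    PKCS5_PBKDF2_HMAC_SHA1(password, (int)strlen(password), salt, 16, 2000,
                           keysize + 16, kdf_out);

    memcpy(iv, kdf_out + keysize, 16);    /* AES_cbc_encrypt mutates the IV buffer */
    AES_set_decrypt_key(kdf_out, keysize * 8, &ak);
    AES_cbc_encrypt(enc_mkey, mkey, 16, &ak, iv, AES_DECRYPT);

    essiv_decrypt(sector0, plain, mkey, keysize, 0, 32);
    return memcmp(plain + 3, "MSDOS5.0", 8) == 0;
}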
csr_matop.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matrix operation functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "seq_mv.h" #include "csr_matrix.h" /*-------------------------------------------------------------------------- * hypre_CSRMatrixAdd: * adds two CSR Matrices A and B and returns a CSR Matrix C; * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_CSRMatrixAddHost ( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_Int *C_j; HYPRE_Int ia, ib, ic, jcol, num_nonzeros; HYPRE_Int pos; HYPRE_Int *marker; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (nrows_A != nrows_B || ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
incompatible matrix dimensions!\n"); return NULL; } marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } num_nonzeros = 0; C_i[0] = 0; for (ic = 0; ic < nrows_A; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; marker[jcol] = ic; num_nonzeros++; } for (ib = B_i[ic]; ib < B_i[ic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != ic) { marker[jcol] = ic; num_nonzeros++; } } C_i[ic+1] = num_nonzeros; } C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); C_j = hypre_CSRMatrixJ(C); C_data = hypre_CSRMatrixData(C); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } pos = 0; for (ic = 0; ic < nrows_A; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[ic]; ib < B_i[ic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[ic]) { C_j[pos] = jcol; C_data[pos] = B_data[ib]; marker[jcol] = pos; pos++; } else { C_data[marker[jcol]] += B_data[ib]; } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); return C; } hypre_CSRMatrix* hypre_CSRMatrixAdd( hypre_CSRMatrix *A, hypre_CSRMatrix *B) { hypre_CSRMatrix *C = NULL; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A), hypre_CSRMatrixMemoryLocation(B) ); if (exec == HYPRE_EXEC_DEVICE) { C = hypre_CSRMatrixAddDevice(A, B); } else #endif { C = hypre_CSRMatrixAddHost(A, B); } return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixBigAdd: * adds two CSR Matrices A and B and returns a CSR Matrix C; * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A, hypre_CSRMatrix *B ) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_BigInt *A_j = hypre_CSRMatrixBigJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_BigInt *B_j = hypre_CSRMatrixBigJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_BigInt *C_j; HYPRE_Int ia, ib, ic, num_nonzeros; HYPRE_BigInt jcol; HYPRE_Int pos; HYPRE_Int *marker; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (nrows_A != nrows_B || ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
incompatible matrix dimensions!\n"); return NULL; } marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } num_nonzeros = 0; C_i[0] = 0; for (ic = 0; ic < nrows_A; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; marker[jcol] = ic; num_nonzeros++; } for (ib = B_i[ic]; ib < B_i[ic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != ic) { marker[jcol] = ic; num_nonzeros++; } } C_i[ic+1] = num_nonzeros; } C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 1, memory_location_C); C_j = hypre_CSRMatrixBigJ(C); C_data = hypre_CSRMatrixData(C); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } pos = 0; for (ic = 0; ic < nrows_A; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = A_data[ia]; marker[jcol] = pos; pos++; } for (ib = B_i[ic]; ib < B_i[ic+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[ic]) { C_j[pos] = jcol; C_data[pos] = B_data[ib]; marker[jcol] = pos; pos++; } else { C_data[marker[jcol]] += B_data[ib]; } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixMultiply * multiplies two CSR Matrices A and B and returns a CSR Matrix C; * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix* hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A, hypre_CSRMatrix *B) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_Int *C_j; HYPRE_Int ia, ib, ic, ja, jb, num_nonzeros=0; HYPRE_Int row_start, counter; HYPRE_Complex a_entry, b_entry; HYPRE_Int allsquare = 0; HYPRE_Int max_num_threads; HYPRE_Int *jj_count; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (ncols_A != nrows_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
incompatible matrix dimensions!\n"); return NULL; } if (nrows_A == ncols_B) { allsquare = 1; } C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C); max_num_threads = hypre_NumThreads(); jj_count = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, row_start, counter, a_entry, b_entry) #endif { HYPRE_Int *B_marker = NULL; HYPRE_Int ns, ne, ii, jj; HYPRE_Int size, rest, num_threads; HYPRE_Int i1; ii = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); size = nrows_A/num_threads; rest = nrows_A - size*num_threads; if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST); for (ib = 0; ib < ncols_B; ib++) B_marker[ib] = -1; num_nonzeros = 0; for (ic = ns; ic < ne; ic++) { C_i[ic] = num_nonzeros; if (allsquare) { B_marker[ic] = ic; num_nonzeros++; } for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { ja = A_j[ia]; for (ib = B_i[ja]; ib < B_i[ja+1]; ib++) { jb = B_j[ib]; if (B_marker[jb] != ic) { B_marker[jb] = ic; num_nonzeros++; } } } } jj_count[ii] = num_nonzeros; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { jj = jj_count[0]; for (i1 = 1; i1 < ii; i1++) jj += jj_count[i1]; for (i1 = ns; i1 < ne; i1++) C_i[i1] += jj; } else { C_i[nrows_A] = 0; for (i1 = 0; i1 < num_threads; i1++) C_i[nrows_A] += jj_count[i1]; C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); C_j = hypre_CSRMatrixJ(C); C_data = hypre_CSRMatrixData(C); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ib = 0; ib < ncols_B; ib++) B_marker[ib] = -1; counter = C_i[ns]; for (ic = ns; ic < ne; ic++) { row_start = C_i[ic]; if (allsquare) { B_marker[ic] = counter; C_data[counter] = 0; C_j[counter] = ic; counter++; } for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { ja = A_j[ia]; a_entry = A_data[ia]; for (ib = B_i[ja]; ib < B_i[ja+1]; ib++) { jb = B_j[ib]; b_entry = B_data[ib]; if (B_marker[jb] < row_start) { B_marker[jb] = counter; C_j[B_marker[jb]] = jb; C_data[B_marker[jb]] = a_entry*b_entry; counter++; } else C_data[B_marker[jb]] += a_entry*b_entry; } } } hypre_TFree(B_marker, HYPRE_MEMORY_HOST); } /*end parallel region */ hypre_TFree(jj_count, HYPRE_MEMORY_HOST); return C; } hypre_CSRMatrix* hypre_CSRMatrixMultiply( hypre_CSRMatrix *A, hypre_CSRMatrix *B) { hypre_CSRMatrix *C = NULL; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A), hypre_CSRMatrixMemoryLocation(B) ); if (exec == HYPRE_EXEC_DEVICE) { C = hypre_CSRMatrixMultiplyDevice(A,B); } else #endif { C = hypre_CSRMatrixMultiplyHost(A,B); } return C; } hypre_CSRMatrix * hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A, HYPRE_Real tol) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); hypre_CSRMatrix *B; HYPRE_Complex *B_data; HYPRE_Int *B_i; HYPRE_Int *B_j; HYPRE_Int zeros; HYPRE_Int i, j; HYPRE_Int pos_A, pos_B; zeros = 0; for (i=0; i < num_nonzeros; i++) if (hypre_cabs(A_data[i]) <= tol) zeros++; if (zeros) { B = hypre_CSRMatrixCreate(nrows_A,ncols_A,num_nonzeros-zeros); hypre_CSRMatrixInitialize(B); B_i = hypre_CSRMatrixI(B); B_j 
= hypre_CSRMatrixJ(B); B_data = hypre_CSRMatrixData(B); B_i[0] = 0; pos_A = 0; pos_B = 0; for (i=0; i < nrows_A; i++) { for (j = A_i[i]; j < A_i[i+1]; j++) { if (hypre_cabs(A_data[j]) <= tol) { pos_A++; } else { B_data[pos_B] = A_data[pos_A]; B_j[pos_B] = A_j[pos_A]; pos_B++; pos_A++; } } B_i[i+1] = pos_B; } return B; } else return NULL; } /****************************************************************************** * * Finds transpose of a hypre_CSRMatrix * *****************************************************************************/ /** * idx = idx2*dim1 + idx1 * -> ret = idx1*dim2 + idx2 * = (idx%dim1)*dim2 + idx/dim1 */ static inline HYPRE_Int transpose_idx(HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2) { return idx%dim1*dim2 + idx/dim1; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixTranspose *--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixTransposeHost(hypre_CSRMatrix *A, hypre_CSRMatrix **AT, HYPRE_Int data) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rowsA = hypre_CSRMatrixNumRows(A); HYPRE_Int num_colsA = hypre_CSRMatrixNumCols(A); HYPRE_Int num_nonzerosA = hypre_CSRMatrixNumNonzeros(A); HYPRE_Complex *AT_data; /*HYPRE_Int *AT_i;*/ HYPRE_Int *AT_j; HYPRE_Int num_rowsAT; HYPRE_Int num_colsAT; HYPRE_Int num_nonzerosAT; HYPRE_Int max_col; HYPRE_Int i, j; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); /*-------------------------------------------------------------- * First, ascertain that num_cols and num_nonzeros has been set. * If not, set them. *--------------------------------------------------------------*/ if (!num_nonzerosA) { num_nonzerosA = A_i[num_rowsA]; } if (num_rowsA && num_nonzerosA && ! 
num_colsA) { max_col = -1; for (i = 0; i < num_rowsA; ++i) { for (j = A_i[i]; j < A_i[i+1]; j++) { if (A_j[j] > max_col) { max_col = A_j[j]; } } } num_colsA = max_col+1; } num_rowsAT = num_colsA; num_colsAT = num_rowsA; num_nonzerosAT = num_nonzerosA; *AT = hypre_CSRMatrixCreate(num_rowsAT, num_colsAT, num_nonzerosAT); hypre_CSRMatrixMemoryLocation(*AT) = memory_location; if (0 == num_colsA) { // JSP: parallel counting sorting breaks down // when A has no columns hypre_CSRMatrixInitialize(*AT); return 0; } AT_j = hypre_CTAlloc(HYPRE_Int, num_nonzerosAT, memory_location); hypre_CSRMatrixJ(*AT) = AT_j; if (data) { AT_data = hypre_CTAlloc(HYPRE_Complex, num_nonzerosAT, memory_location); hypre_CSRMatrixData(*AT) = AT_data; } /*----------------------------------------------------------------- * Parallel count sort *-----------------------------------------------------------------*/ HYPRE_Int *bucket = hypre_TAlloc(HYPRE_Int, (num_colsA + 1)*hypre_NumThreads(), HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A); HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A); hypre_assert(iBegin <= iEnd); hypre_assert(iBegin >= 0 && iBegin <= num_rowsA); hypre_assert(iEnd >= 0 && iEnd <= num_rowsA); HYPRE_Int i, j; memset(bucket + my_thread_num*num_colsA, 0, sizeof(HYPRE_Int)*num_colsA); /*----------------------------------------------------------------- * Count the number of entries that will go into each bucket * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array *-----------------------------------------------------------------*/ for (j = A_i[iBegin]; j < A_i[iEnd]; ++j) { HYPRE_Int idx = A_j[j]; bucket[my_thread_num*num_colsA + idx]++; } /*----------------------------------------------------------------- * Parallel prefix sum of bucket with length num_colsA * num_threads * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads] *-----------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = my_thread_num*num_colsA + 1; i < (my_thread_num + 1)*num_colsA; ++i) { HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA); HYPRE_Int transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_colsA); bucket[transpose_i] += bucket[transpose_i_minus_1]; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { for (i = 1; i < num_threads; ++i) { HYPRE_Int j0 = num_colsA*i - 1, j1 = num_colsA*(i + 1) - 1; HYPRE_Int transpose_j0 = transpose_idx(j0, num_threads, num_colsA); HYPRE_Int transpose_j1 = transpose_idx(j1, num_threads, num_colsA); bucket[transpose_j1] += bucket[transpose_j0]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { HYPRE_Int transpose_i0 = transpose_idx(num_colsA*my_thread_num - 1, num_threads, num_colsA); HYPRE_Int offset = bucket[transpose_i0]; for (i = my_thread_num*num_colsA; i < (my_thread_num + 1)*num_colsA - 1; ++i) { HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA); bucket[transpose_i] += offset; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /*---------------------------------------------------------------- * Load the data and column numbers of AT *----------------------------------------------------------------*/ if (data) { for (i = iEnd - 1; i >= iBegin; --i) { for (j = A_i[i + 1] - 1; j >= A_i[i]; --j) { HYPRE_Int 
idx = A_j[j]; --bucket[my_thread_num*num_colsA + idx]; HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx]; AT_data[offset] = A_data[j]; AT_j[offset] = i; } } } else { for (i = iEnd - 1; i >= iBegin; --i) { for (j = A_i[i + 1] - 1; j >= A_i[i]; --j) { HYPRE_Int idx = A_j[j]; --bucket[my_thread_num*num_colsA + idx]; HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx]; AT_j[offset] = i; } } } } /*end parallel region */ hypre_CSRMatrixI(*AT) = hypre_TAlloc(HYPRE_Int, num_colsA + 1, memory_location); hypre_TMemcpy(hypre_CSRMatrixI(*AT), bucket, HYPRE_Int, num_colsA + 1, memory_location, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(*AT)[num_colsA] = num_nonzerosA; hypre_TFree(bucket, HYPRE_MEMORY_HOST); return (0); } HYPRE_Int hypre_CSRMatrixTranspose(hypre_CSRMatrix *A, hypre_CSRMatrix **AT, HYPRE_Int data) { HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_CSRMatrixTransposeDevice(A, AT, data); } else #endif { ierr = hypre_CSRMatrixTransposeHost(A, AT, data); } return ierr; } /* RL: TODO add memory locations */ HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix *Bs_ext, HYPRE_BigInt first_col_diag_B, HYPRE_BigInt last_col_diag_B, HYPRE_Int num_cols_offd_B, HYPRE_BigInt *col_map_offd_B, HYPRE_Int *num_cols_offd_C_ptr, HYPRE_BigInt **col_map_offd_C_ptr, hypre_CSRMatrix **Bext_diag_ptr, hypre_CSRMatrix **Bext_offd_ptr) { HYPRE_Complex *Bs_ext_data = hypre_CSRMatrixData(Bs_ext); HYPRE_Int *Bs_ext_i = hypre_CSRMatrixI(Bs_ext); HYPRE_BigInt *Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext); HYPRE_Int num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext); HYPRE_Int B_ext_diag_size = 0; HYPRE_Int B_ext_offd_size = 0; HYPRE_Int *B_ext_diag_i = NULL; HYPRE_Int *B_ext_diag_j = NULL; HYPRE_Complex *B_ext_diag_data = NULL; HYPRE_Int *B_ext_offd_i = NULL; HYPRE_Int *B_ext_offd_j = NULL; HYPRE_Complex *B_ext_offd_data = NULL; HYPRE_Int *my_diag_array; HYPRE_Int *my_offd_array; HYPRE_BigInt *temp; HYPRE_Int max_num_threads; HYPRE_Int cnt = 0; hypre_CSRMatrix *Bext_diag = NULL; hypre_CSRMatrix *Bext_offd = NULL; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int num_cols_offd_C = 0; B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST); B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST); max_num_threads = hypre_NumThreads(); my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int size, rest, ii; HYPRE_Int ns, ne; HYPRE_Int i1, i, j; HYPRE_Int my_offd_size, my_diag_size; HYPRE_Int cnt_offd, cnt_diag; HYPRE_Int num_threads = hypre_NumActiveThreads(); size = num_rows_Bext/num_threads; rest = num_rows_Bext - size*num_threads; ii = hypre_GetThreadNum(); if (ii < rest) { ns = ii*size+ii; ne = (ii+1)*size+ii+1; } else { ns = ii*size+rest; ne = (ii+1)*size+rest; } my_diag_size = 0; my_offd_size = 0; for (i=ns; i < ne; i++) { B_ext_diag_i[i] = my_diag_size; B_ext_offd_i[i] = my_offd_size; for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { my_offd_size++; } else { my_diag_size++; } } } my_diag_array[ii] = my_diag_size; my_offd_array[ii] = my_offd_size; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii) { my_diag_size = my_diag_array[0]; my_offd_size = my_offd_array[0]; for (i1 = 1; i1 < ii; i1++) 
{ my_diag_size += my_diag_array[i1]; my_offd_size += my_offd_array[i1]; } for (i1 = ns; i1 < ne; i1++) { B_ext_diag_i[i1] += my_diag_size; B_ext_offd_i[i1] += my_offd_size; } } else { B_ext_diag_size = 0; B_ext_offd_size = 0; for (i1 = 0; i1 < num_threads; i1++) { B_ext_diag_size += my_diag_array[i1]; B_ext_offd_size += my_offd_array[i1]; } B_ext_diag_i[num_rows_Bext] = B_ext_diag_size; B_ext_offd_i[num_rows_Bext] = B_ext_offd_size; if (B_ext_diag_size) { B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST); B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size) { B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST); B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST); } if (B_ext_offd_size || num_cols_offd_B) { temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif cnt_offd = B_ext_offd_i[ns]; cnt_diag = B_ext_diag_i[ns]; for (i = ns; i < ne; i++) { for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++) { if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B) { temp[cnt_offd] = Bs_ext_j[j]; B_ext_offd_j[cnt_offd] = Bs_ext_j[j]; B_ext_offd_data[cnt_offd++] = Bs_ext_data[j]; } else { B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B; B_ext_diag_data[cnt_diag++] = Bs_ext_data[j]; } } } /* This computes the mappings */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (ii == 0) { cnt = 0; if (B_ext_offd_size || num_cols_offd_B) { cnt = B_ext_offd_size; for (i=0; i < num_cols_offd_B; i++) { temp[cnt++] = col_map_offd_B[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; HYPRE_BigInt value = temp[0]; for (i = 1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) { col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); } for (i = 0; i < num_cols_offd_C; i++) { col_map_offd_C[i] = temp[i]; } hypre_TFree(temp, HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = ns; i < ne; i++) { for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++) { B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j], num_cols_offd_C); } } } /* end parallel region */ hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST); hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST); Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B-first_col_diag_B+1, B_ext_diag_size); hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST; Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size); hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(Bext_diag) = B_ext_diag_i; hypre_CSRMatrixJ(Bext_diag) = B_ext_diag_j; hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data; hypre_CSRMatrixI(Bext_offd) = B_ext_offd_i; hypre_CSRMatrixJ(Bext_offd) = B_ext_offd_j; hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data; *col_map_offd_C_ptr = col_map_offd_C; *Bext_diag_ptr = Bext_diag; *Bext_offd_ptr = Bext_offd; *num_cols_offd_C_ptr = num_cols_offd_C; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixReorder: * Reorders the column and data arrays of a square CSR matrix, such that the * first entry in each row is the diagonal one. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_CSRMatrixReorder(hypre_CSRMatrix *A) { HYPRE_Int i, j, tempi, row_size; HYPRE_Complex tempd; HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int num_rowsA = hypre_CSRMatrixNumRows(A); HYPRE_Int num_colsA = hypre_CSRMatrixNumCols(A); /* the matrix should be square */ if (num_rowsA != num_colsA) return -1; for (i = 0; i < num_rowsA; i++) { row_size = A_i[i+1]-A_i[i]; for (j = 0; j < row_size; j++) { if (A_j[j] == i) { if (j != 0) { tempi = A_j[0]; A_j[0] = A_j[j]; A_j[j] = tempi; tempd = A_data[0]; A_data[0] = A_data[j]; A_data[j] = tempd; } break; } /* diagonal element is missing */ if (j == row_size-1) return -2; } A_j += row_size; A_data += row_size; } return 0; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixAddPartial: * adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i] * defines to which row of A the i-th row of B is added, and returns a CSR Matrix C; * Note: The routine does not check for 0-elements which might be generated * through cancellation of elements in A and B or already contained * in A and B. To remove those, use hypre_CSRMatrixDeleteZeros *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A, hypre_CSRMatrix *B, HYPRE_Int *row_nums) { HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int nrows_A = hypre_CSRMatrixNumRows(A); HYPRE_Int ncols_A = hypre_CSRMatrixNumCols(A); HYPRE_Complex *B_data = hypre_CSRMatrixData(B); HYPRE_Int *B_i = hypre_CSRMatrixI(B); HYPRE_Int *B_j = hypre_CSRMatrixJ(B); HYPRE_Int nrows_B = hypre_CSRMatrixNumRows(B); HYPRE_Int ncols_B = hypre_CSRMatrixNumCols(B); hypre_CSRMatrix *C; HYPRE_Complex *C_data; HYPRE_Int *C_i; HYPRE_Int *C_j; HYPRE_Int ia, ib, ic, jcol, num_nonzeros; HYPRE_Int pos, i, i2, j, cnt; HYPRE_Int *marker; HYPRE_Int *map; HYPRE_Int *temp; HYPRE_MemoryLocation memory_location_A = hypre_CSRMatrixMemoryLocation(A); HYPRE_MemoryLocation memory_location_B = hypre_CSRMatrixMemoryLocation(B); /* RL: TODO cannot guarantee, maybe should never assert hypre_assert(memory_location_A == memory_location_B); */ /* RL: in the case of A=H, B=D, or A=D, B=H, let C = D, * not sure if this is the right thing to do. * Also, need something like this in other places * TODO */ HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B); if (ncols_A != ncols_B) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! 
incompatible matrix dimensions!\n"); return NULL; } map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST); temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST); for (i=0; i < nrows_B; i++) { map[i] = i; temp[i] = row_nums[i]; } hypre_qsort2i(temp,map,0,nrows_B-1); marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST); C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, memory_location_C); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } num_nonzeros = 0; C_i[0] = 0; cnt = 0; for (ic = 0; ic < nrows_A; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; marker[jcol] = ic; num_nonzeros++; } if (cnt < nrows_B && temp[cnt] == ic) { for (j = cnt; j < nrows_B; j++) { if (temp[j] == ic) { i2 = map[cnt++]; for (ib = B_i[i2]; ib < B_i[i2+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] != ic) { marker[jcol] = ic; num_nonzeros++; } } } else { break; } } } C_i[ic+1] = num_nonzeros; } C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros); hypre_CSRMatrixI(C) = C_i; hypre_CSRMatrixInitialize_v2(C, 0, memory_location_C); C_j = hypre_CSRMatrixJ(C); C_data = hypre_CSRMatrixData(C); for (ia = 0; ia < ncols_A; ia++) { marker[ia] = -1; } cnt = 0; pos = 0; for (ic = 0; ic < nrows_A; ic++) { for (ia = A_i[ic]; ia < A_i[ic+1]; ia++) { jcol = A_j[ia]; C_j[pos] = jcol; C_data[pos] = A_data[ia]; marker[jcol] = pos; pos++; } if (cnt < nrows_B && temp[cnt] == ic) { for (j = cnt; j < nrows_B; j++) { if (temp[j] == ic) { i2 = map[cnt++]; for (ib = B_i[i2]; ib < B_i[i2+1]; ib++) { jcol = B_j[ib]; if (marker[jcol] < C_i[ic]) { C_j[pos] = jcol; C_data[pos] = B_data[ib]; marker[jcol] = pos; pos++; } else { C_data[marker[jcol]] += B_data[ib]; } } } else { break; } } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); hypre_TFree(map, HYPRE_MEMORY_HOST); hypre_TFree(temp, HYPRE_MEMORY_HOST); return C; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixSumElts: * Returns the sum of all matrix elements. *--------------------------------------------------------------------------*/ HYPRE_Complex hypre_CSRMatrixSumElts( hypre_CSRMatrix *A ) { HYPRE_Complex sum = 0; HYPRE_Complex *data = hypre_CSRMatrixData( A ); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int i; for ( i = 0; i < num_nonzeros; ++i ) { sum += data[i]; } return sum; } HYPRE_Real hypre_CSRMatrixFnorm( hypre_CSRMatrix *A ) { HYPRE_Complex sum = 0; HYPRE_Complex *data = hypre_CSRMatrixData( A ); HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A); HYPRE_Int i, nrows, *A_i; nrows = hypre_CSRMatrixNumRows(A); A_i = hypre_CSRMatrixI(A); hypre_assert(num_nonzeros == A_i[nrows]); for ( i = 0; i < num_nonzeros; ++i ) { HYPRE_Complex v = data[i]; sum += v * v; } return sqrt(sum); } /* type == 0, sum, * 1, abs sum * 2, square sum */ void hypre_CSRMatrixComputeRowSumHost( hypre_CSRMatrix *A, HYPRE_Int *CF_i, HYPRE_Int *CF_j, HYPRE_Complex *row_sum, HYPRE_Int type, HYPRE_Complex scal, const char *set_or_add) { HYPRE_Int nrows = hypre_CSRMatrixNumRows(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int i, j; for (i = 0; i < nrows; i++) { HYPRE_Complex row_sum_i = set_or_add[0] == 's' ? 
0.0 : row_sum[i]; for (j = A_i[i]; j < A_i[i+1]; j++) { if (CF_i && CF_j && CF_i[i] != CF_j[A_j[j]]) { continue; } if (type == 0) { row_sum_i += scal * A_data[j]; } else if (type == 1) { row_sum_i += scal * fabs(A_data[j]); } else if (type == 2) { row_sum_i += scal * A_data[j] * A_data[j]; } } row_sum[i] = row_sum_i; } } void hypre_CSRMatrixComputeRowSum( hypre_CSRMatrix *A, HYPRE_Int *CF_i, HYPRE_Int *CF_j, HYPRE_Complex *row_sum, HYPRE_Int type, HYPRE_Complex scal, const char *set_or_add) { hypre_assert( (CF_i && CF_j) || (!CF_i && !CF_j) ); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_CSRMatrixComputeRowSumDevice(A, CF_i, CF_j, row_sum, type, scal, set_or_add); } else #endif { hypre_CSRMatrixComputeRowSumHost(A, CF_i, CF_j, row_sum, type, scal, set_or_add); } } void hypre_CSRMatrixExtractDiagonalHost( hypre_CSRMatrix *A, HYPRE_Complex *d, HYPRE_Int type) { HYPRE_Int nrows = hypre_CSRMatrixNumRows(A); HYPRE_Complex *A_data = hypre_CSRMatrixData(A); HYPRE_Int *A_i = hypre_CSRMatrixI(A); HYPRE_Int *A_j = hypre_CSRMatrixJ(A); HYPRE_Int i, j; HYPRE_Complex d_i; for (i = 0; i < nrows; i++) { d_i = 0.0; for (j = A_i[i]; j < A_i[i+1]; j++) { if (A_j[j] == i) { if (type == 0) { d_i = A_data[j]; } else if (type == 1) { d_i = fabs(A_data[j]); } break; } } d[i] = d_i; } } /* type 0: diag * 1: abs diag */ void hypre_CSRMatrixExtractDiagonal( hypre_CSRMatrix *A, HYPRE_Complex *d, HYPRE_Int type) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { hypre_CSRMatrixExtractDiagonalDevice(A, d, type); } else #endif { hypre_CSRMatrixExtractDiagonalHost(A, d, type); } }
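/*
 * The Add/BigAdd/AddPartial/Multiply routines above all follow the same
 * two-pass pattern: a symbolic pass that uses a column `marker` array to
 * count the nonzeros of each output row, then a numeric pass that reuses
 * `marker` to either append a fresh entry or accumulate into one already
 * placed in the current row.  A self-contained sketch of that pattern on
 * plain arrays (hypothetical tiny matrices, no hypre types):
 */
#include <stdio.h>
#include <stdlib.h>

/* C = A + B for two CSR matrices of shape nrows x ncols */
static void csr_add(int nrows, int ncols,
                    const int *A_i, const int *A_j, const double *A_x,
                    const int *B_i, const int *B_j, const double *B_x,
                    int **C_i_out, int **C_j_out, double **C_x_out)
{
    int *marker = malloc(ncols * sizeof(int));
    int *C_i = malloc((nrows + 1) * sizeof(int));
    int i, j, col, nnz = 0, pos = 0;

    for (j = 0; j < ncols; j++) marker[j] = -1;

    /* symbolic pass: marker[col] == i means column col is already counted in row i */
    C_i[0] = 0;
    for (i = 0; i < nrows; i++) {
        for (j = A_i[i]; j < A_i[i+1]; j++) { marker[A_j[j]] = i; nnz++; }
        for (j = B_i[i]; j < B_i[i+1]; j++)
            if (marker[B_j[j]] != i) { marker[B_j[j]] = i; nnz++; }
        C_i[i+1] = nnz;
    }

    int *C_j = malloc(nnz * sizeof(int));
    double *C_x = malloc(nnz * sizeof(double));

    /* numeric pass: marker[col] now stores the position of col in the current row;
       a stale value from an earlier row is always < C_i[i] */
    for (j = 0; j < ncols; j++) marker[j] = -1;
    for (i = 0; i < nrows; i++) {
        for (j = A_i[i]; j < A_i[i+1]; j++) {
            col = A_j[j];
            C_j[pos] = col; C_x[pos] = A_x[j]; marker[col] = pos; pos++;
        }
        for (j = B_i[i]; j < B_i[i+1]; j++) {
            col = B_j[j];
            if (marker[col] < C_i[i]) {     /* not yet present in row i of C */
                C_j[pos] = col; C_x[pos] = B_x[j]; marker[col] = pos; pos++;
            } else {
                C_x[marker[col]] += B_x[j]; /* duplicate column: accumulate */
            }
        }
    }

    free(marker);
    *C_i_out = C_i; *C_j_out = C_j; *C_x_out = C_x;
}

int main(void)
{
    /* A = [1 0; 2 3], B = [0 4; 5 0] in CSR form */
    int A_i[] = {0, 1, 3}, A_j[] = {0, 0, 1}; double A_x[] = {1, 2, 3};
    int B_i[] = {0, 1, 2}, B_j[] = {1, 0};    double B_x[] = {4, 5};
    int *C_i, *C_j; double *C_x;

    csr_add(2, 2, A_i, A_j, A_x, B_i, B_j, B_x, &C_i, &C_j, &C_x);
    for (int i = 0; i < 2; i++)
        for (int j = C_i[i]; j < C_i[i+1]; j++)
            printf("C(%d,%d) = %g\n", i, C_j[j], C_x[j]); /* prints A+B = [1 4; 7 3] */
    free(C_i); free(C_j); free(C_x);
    return 0;
}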
GB_unop__log_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log_fp32_fp32 // op(A') function: GB_unop_tran__log_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = logf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = logf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = logf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = logf (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
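/*
 * Caller-level sketch (not generated code): this kernel is what runs when a
 * GrB_FP32 matrix is applied with the natural-log operator.  The operator
 * name GxB_LOG_FP32 follows the v3.x GxB_<OP>_<TYPE> naming scheme and is
 * assumed here; error handling is trimmed for brevity.
 */
#include "GraphBLAS.h"

GrB_Info apply_log_fp32 (GrB_Matrix *C, GrB_Matrix A)
{
    GrB_Index nrows, ncols ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix_new (C, GrB_FP32, nrows, ncols) ;
    // with no mask, no accum, and A not transposed, this call dispatches to
    // GB_unop_apply__log_fp32_fp32 above
    return (GrB_Matrix_apply (*C, NULL, NULL, GxB_LOG_FP32, A, NULL)) ;
}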
zgbsv.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_gbsv
 *
 *  Computes the solution to a system of linear equations A * X = B,
 *  where A is an n-by-n band matrix and X and B are n-by-nrhs matrices.
 *  The LU decomposition with partial pivoting and row interchanges,
 *  as computed by plasma_zgbtrf, is used to factor A; the factored
 *  form of A is then used to solve the system.
 *
 *******************************************************************************
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0.
 *
 * @param[in] kl
 *          The number of subdiagonals within the band of A. kl >= 0.
 *
 * @param[in] ku
 *          The number of superdiagonals within the band of A. ku >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of
 *          columns of the matrix B. nrhs >= 0.
 *
 * @param[in,out] AB
 *          On entry, the matrix A in band storage.
 *          On exit, details of the LU factorization of the band matrix A,
 *          as computed by plasma_zgbtrf.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= 1+kl+ku.
 *
 * @param[out] ipiv
 *          The pivot indices; for 1 <= i <= min(m,n), row i of the
 *          matrix was interchanged with row ipiv(i).
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 ******************************************************************************/
int plasma_zgbsv(int n, int kl, int ku, int nrhs,
                 plasma_complex64_t *pAB, int ldab, int *ipiv,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -2;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrix.
    plasma_desc_t AB;
    plasma_desc_t B;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm  = (tku+tkl+1)*nb;  // since we use zgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, kl, ku,
                                             &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Create sequence.
plasma_sequence_t *sequence = NULL; retval = plasma_sequence_create(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); return retval; } // Initialize request. plasma_request_t request = PlasmaRequestInitializer; #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zpb2desc(pAB, ldab, AB, sequence, &request); plasma_omp_zge2desc(pB, ldb, B, sequence, &request); } #pragma omp parallel #pragma omp master { // Call the tile async function. plasma_omp_zgbsv(AB, ipiv, B, sequence, &request); } #pragma omp parallel #pragma omp master { // Translate back to LAPACK layout. plasma_omp_zdesc2pb(AB, pAB, ldab, sequence, &request); plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request); } // Free matrices in tile layout. plasma_desc_destroy(&B); plasma_desc_destroy(&AB); // Return status. int status = sequence->status; plasma_sequence_destroy(sequence); return status; } /***************************************************************************//** * * Computes the solution to a system of linear equations A * X = B, * using the LU factorization computed by plasma_zgbtrf. * Non-blocking tile version of plasma_zgbsv(). * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in,out] AB * Descriptor of matrix A. * * @param[out] ipiv * The pivot indices; for 1 <= i <= min(m,n), row i of the * matrix was interchanged with row ipiv(i). * * @param[in,out] B * Descriptor of right-hand-sides B. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************/ void plasma_omp_zgbsv(plasma_desc_t AB, int *ipiv, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if (plasma_desc_check(AB) != PlasmaSuccess) { plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); plasma_error("invalid AB"); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_fatal_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_fatal_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (AB.n == 0 || B.n == 0) return; // Call the parallel function. 
plasma_pzgbtrf(AB, ipiv, sequence, request); plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit, 1.0, AB, B, ipiv, sequence, request); plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit, 1.0, AB, B, ipiv, sequence, request); }
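A sketch of how a caller might drive plasma_zgbsv, assuming plasma_init()/plasma_finalize() as the session entry points (as in recent PLASMA releases) and LAPACK-style band storage with room for pivoting fill-in (ldab >= 1+2*kl+ku is the usual gbtrf choice); the band fill itself is elided:

#include <stdlib.h>
#include "plasma.h"

/* Sketch only: solve a banded system A*X = B with plasma_zgbsv. */
int solve_band_example(void)
{
    int n = 1000, kl = 5, ku = 5, nrhs = 1;
    int ldab = 1 + 2*kl + ku;   // room for fill-in from row pivoting
    int ldb  = n;

    plasma_complex64_t *AB = malloc((size_t)ldab*n*sizeof(plasma_complex64_t));
    plasma_complex64_t *B  = malloc((size_t)ldb*nrhs*sizeof(plasma_complex64_t));
    int *ipiv = malloc((size_t)n*sizeof(int));

    plasma_init();
    // ... fill AB in band storage and B with the right-hand side ...
    int info = plasma_zgbsv(n, kl, ku, nrhs, AB, ldab, ipiv, B, ldb);
    plasma_finalize();

    free(AB); free(B); free(ipiv);
    return info;   // PlasmaSuccess (0) on success
}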
GB_unaryop__identity_uint8_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint8_fp64 // op(A') function: GB_tran__identity_uint8_fp64 // C type: uint8_t // A type: double // cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint8_fp64 ( uint8_t *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint8_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
AntOMP48.c
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<time.h>
#include<omp.h>

double dist[48][48];

double aleatorioEntero(int li, int ls);
double aleatorio();
int probabilidad(double visi[], double fero[], int vector[], int cities);
void gettingMatrix();
void printMatrix();

int main(){ // Main function
    int i, j, cities, ants, iter;
    cities=48;
    ants=1000;
    iter=10000;
    srand(time(NULL));
    int ant[ants][cities+1]; // one tour per ant (row), plus the closing return to the start city
    double fero[cities][cities], visi[cities][cities], prob[cities][cities];
    double aux, random;
    int k, m, cityNow, vector[cities];
    double feroIter[cities], visiIter[cities];
    double recorrido[ants], best; // tour lengths and best tour so far
    best=10000000;
    double rho=0.0001, Q=500; // pheromone evaporation rate and deposit constant

    gettingMatrix();
    //printMatrix();

    // Initialize pheromone and visibility (scaled 1/distance heuristic) matrices.
    for (i=0; i<cities; i++){
        for (j=0; j<cities; j++){
            fero[i][j]=0.1;
            if(i!=j){
                visi[i][j]=500/dist[i][j];
            }
            else{
                visi[i][j]=0;
            }
        }
    }

    //------------------- Probability ------------
    double sumVF;
    for (i=0;i<cities;i++){
        sumVF=0;
        for(j=0;j<cities;j++){
            sumVF+=visi[i][j]*fero[i][j];
        }
        for(j=0;j<cities;j++){
            prob[i][j]=((visi[i][j]*fero[i][j])/(sumVF));
        }
    }

    //------------ Ant solutions ---------------
    for(m=0;m<iter;m++){
        // Reset every ant's tour (all ants, not only the first rows).
        for(k=0;k<ants;k++){
            for(j=0;j<=cities;j++){
                ant[k][j]=0;
            }
        }
        #pragma omp parallel for private(i,j,aux,random, feroIter, visiIter, vector, cityNow)
        for(k=0;k<ants;k++){
            ant[k][0]=0; // start at city zero
            random=aleatorio();
            aux=0;
            ant[k][cities]=0; // close the tour by returning to the start
            for(j=0;j<cities;j++){ // initialize the list with the N cities
                vector[j]=j;
            }
            j=1;
            do{
                aux+=prob[0][j];
                if(random<=aux){
                    ant[k][1]=j; // select the first city, starting from the initial city
                    vector[j]=0; // remove the city from the list
                }
                j++;
            }while(random>aux && j<cities);
            //------------------ Remaining cities ----------------------
            for(i=2;i<cities;i++){
                cityNow=ant[k][i-1];
                for(j=0;j<cities;j++){
                    feroIter[j]=fero[cityNow][j];
                    visiIter[j]=visi[cityNow][j];
                }
                ant[k][i]=probabilidad(visiIter, feroIter, vector, cities);
                vector[ant[k][i]]=0;
            }
        }
        //-------------- Evaluation of the solutions ---------------
        #pragma omp parallel for private(i) shared(best)
        for(k=0;k<ants;k++){
            recorrido[k]=0;
            for(i=0;i<cities;i++){ // cities edges, including the return to city 0
                recorrido[k]+=dist[ant[k][i]][ant[k][i+1]];
            }
            if(recorrido[k]<best){
                #pragma omp critical
                {
                    if(recorrido[k]<best){ // re-check inside the critical section
                        best=recorrido[k];
                        //printf("\n Best = %.lf, ant %i, iteration %i", best,k,m);
                    }
                }
            }
        }
        //----------------- Pheromone update
        #pragma omp parallel for private(i) shared(fero)
        for(k=0;k<ants;k++){
            for(i=0;i<cities;i++){
                fero[ant[k][i]][ant[k][i+1]]+=Q/recorrido[k];
                fero[ant[k][i+1]][ant[k][i]]+=Q/recorrido[k];
            }
        }
        // Evaporation, with a floor so selection probabilities stay well defined.
        #pragma omp parallel for private(j) shared(fero)
        for(i=0;i<cities;i++){
            for(j=0;j<cities;j++){
                fero[i][j]=fero[i][j]*(1-rho);
                if(fero[i][j]<0.01){
                    fero[i][j]=0.01;
                }
            }
        }
    } // end of iterations
    printf("\nThe shortest distance was %.lf\n", best);
    return 0;
}

double aleatorioEntero(int li, int ls){ // random integer in [li, ls] (unused helper)
    double numero;
    numero=li+rand() % ((ls+1)-li);
    return numero;
}

double aleatorio(){ // uniform random double in [0, 1]
    // NB: rand() uses shared state; rand_r() would be needed for
    // reproducible multi-threaded runs.
    return (double)rand() / (double)RAND_MAX ;
}

// Roulette-wheel selection of the next city among those still in vector[].
int probabilidad(double visi[], double fero[], int vector[], int cities){
    int i, j, city, condicion, contador;
    double sumVF, aux, probRel, number;
    sumVF=0;
    contador=0;
    for(j=0;j<cities;j++){
        if(vector[j]!=0){
            sumVF+=visi[j]*fero[j];
        }
        if(fero[j]<=0.000001){
            contador++; // count near-zero pheromone entries, for the error dump below
        }
    }
    if(sumVF<=0){printf("\n ERROR IN SUM \n");}
    number=aleatorio();
    aux=0;
    city=-1;
    condicion=0;
    j=0;
    while(j<cities && condicion==0){
        if(vector[j]!=0){
            probRel=(visi[j]*fero[j])/(sumVF);
            aux+=probRel;
            if(aux!=aux || aux==INFINITY){ // NaN/overflow guard
                for(i=0;i<cities;i++){
                    printf("\n visibility %.6lf || fero %.6lf || sum %.6lf || vector %i || %i", visi[i],fero[i],sumVF,vector[i],contador);
                }
                exit(-1);
            }
            if(number<=aux+0.0001){
                city=j;
                condicion=1;
            }
        }
        j++;
    }
    if(city==-1){
        printf("No city assigned");
        printf("\n %.8lf - %.8lf iter=%i",aux, number,j-1);
        exit(-1);
    }
    return city;
}

void gettingMatrix(){
    int i, j;
    FILE *inputFile;
    char help[300], *token;
    inputFile= fopen("matrix.txt", "r");
    if(inputFile==NULL){
        printf("could not open matrix.txt\n");
        exit(-1);
    }
    i=0;
    // Read one comma-separated row per token, at most 48 rows.
    while(i<48 && fscanf(inputFile, "%299s", help)==1){
        token=strtok(help,",");
        j=0;
        while(token != NULL && j<48){
            dist[i][j]=atof(token);
            token=strtok(NULL, ",");
            j++;
        }
        i++;
    }
    fclose(inputFile);
}

void printMatrix(){
    int i, j;
    for(i=0; i<48;i++){
        for(j=0;j<48;j++){
            printf("%.lf ",dist[i][j]);
        }
        printf("\n");
    }
}
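A plausible build and run line for the listing above; the flags are an assumption, not taken from the source, and matrix.txt must hold the 48x48 comma-separated distance matrix:

/* Possible build/run, assuming gcc with OpenMP support:
 *   gcc -O2 -fopenmp AntOMP48.c -lm -o ant48
 *   OMP_NUM_THREADS=8 ./ant48    # expects matrix.txt in the working directory
 */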
sidh_vow_base.c
#include <assert.h> #include "sidh_vow_base.h" #include "types/triples.h" #include "types/state.h" #include "curve_math.h" #include "bintree.h" #include "triples.h" // Functions for swig interface #include "swig_helpers.c" #include "state.c" /* Initializations */ static st_t init_st(uint64_t nwords_state) { st_t s; s.words = calloc(nwords_state, sizeof(digit_t)); return s; } static void free_st(st_t *s) { free(s->words); } static void ConcatArray(unsigned long *cat, unsigned long *A, unsigned long lenA, unsigned long *B, unsigned long lenB) { unsigned int i; for (i = 1; i < lenA + 1; i++) cat[i] = A[i - 1]; for (i = 0; i < lenB; i++) cat[lenA + i + 1] = B[i]; } static unsigned long OptStrat(unsigned long *strat, const unsigned long n, const double p, const double q) { unsigned int i, j, b, ctr = 2; /* Maximum size of strategy = 250 */ double C[250], newCpqs[250], newCpq, tmpCpq; unsigned long S[250][250]; unsigned long lens[250]; lens[1] = 0; S[1][0] = 0; C[1] = 0; for (i = 2; i < n + 1; i++) { for (b = 1; b < i; b++) newCpqs[b] = C[i - b] + C[b] + b * p + (i - b) * q; newCpq = newCpqs[1]; b = 1; for (j = 2; j < i; j++) { tmpCpq = newCpqs[j]; if (newCpq > tmpCpq) { newCpq = tmpCpq; b = j; } } S[ctr][0] = b; ConcatArray(S[ctr], S[i - b], lens[i - b], S[b], lens[b]); C[ctr] = newCpqs[b]; lens[ctr] = 1 + lens[i - b] + lens[b]; ctr++; } for (i = 0; i < lens[ctr - 1]; i++) strat[i] = S[ctr - 1][i]; return lens[ctr - 1]; } static void xDBL_affine(const point_proj_t P, point_proj_t Q, const f2elm_t a24) { f2elm_t t0, t1; fp2sub(P->X, P->Z, t0); // t0 = X1-Z1 fp2add(P->X, P->Z, t1); // t1 = X1+Z1 fp2sqr_mont(t0, t0); // t0 = (X1-Z1)^2 fp2sqr_mont(t1, t1); // t1 = (X1+Z1)^2 fp2mul_mont(t0, t1, Q->X); // X2 = C24*(X1-Z1)^2*(X1+Z1)^2 fp2sub(t1, t0, t1); // t1 = (X1+Z1)^2-(X1-Z1)^2 fp2mul_mont(a24, t1, Q->Z); // t0 = A24plus*[(X1+Z1)^2-(X1-Z1)^2] fp2add(Q->Z, t0, Q->Z); // Z2 = A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2 fp2mul_mont(Q->Z, t1, Q->Z); // Z2 = [A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2]*[(X1+Z1)^2-(X1-Z1)^2] } static void xDBLe_affine(const point_proj_t P, point_proj_t Q, const f2elm_t a24, const int e) { // Computes [2^e](X:Z) on Montgomery curve with projective constant via e repeated doublings. // Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP and Montgomery curve constants A+2C and 4C. // Output: projective Montgomery x-coordinates Q <- (2^e)*P. 
copy_words((digit_t *)P, (digit_t *)Q, 2 * 2 * NWORDS_FIELD); for (int i = 0; i < e; i++) xDBL_affine(Q, Q, a24); } static void GetFourIsogenyWithKernelXeqZ(point_proj_t A24, const f2elm_t a24) { fp2copy(a24, A24->X); fp2copy(a24, A24->Z); fpsub((digit_t *)A24->Z, (digit_t *)&Montgomery_one, (digit_t *)A24->Z); } static void EvalFourIsogenyWithKernelXeqZ(point_proj_t S, const f2elm_t a24) { f2elm_t T0, T1, T2; fpzero(T1[1]); // Set RZ = 1 fpcopy((digit_t *)&Montgomery_one, T1[0]); fp2add(S->X, S->Z, T0); fp2sub(S->X, S->Z, T2); fp2sqr_mont(T0, T0); fp2sqr_mont(T2, T2); fp2sub(T1, a24, T1); fp2add(T1, T1, T1); fp2add(T1, T1, T1); fp2mul_mont(T1, S->X, T1); fp2mul_mont(T1, S->Z, T1); fp2mul_mont(T1, T2, S->Z); fp2sub(T0, T1, T1); fp2mul_mont(T0, T1, S->X); } static void GetFourIsogenyWithKernelXeqMinZ(point_proj_t A24, const f2elm_t a24) { fp2copy(a24, A24->X); fp2copy(a24, A24->Z); fpsub((digit_t *)A24->X, (digit_t *)&Montgomery_one, (digit_t *)A24->X); } static void EvalFourIsogenyWithKernelXeqMinZ(point_proj_t S, const f2elm_t a24) { f2elm_t T0, T1, T2; fp2add(S->X, S->Z, T2); fp2sub(S->X, S->Z, T0); fp2sqr_mont(T2, T2); fp2sqr_mont(T0, T0); fp2add(a24, a24, T1); fp2add(T1, T1, T1); fp2mul_mont(T1, S->X, T1); fp2mul_mont(T1, S->Z, T1); fp2mul_mont(T1, T2, S->Z); fp2neg(S->Z); fp2add(T0, T1, T1); fp2mul_mont(T0, T1, S->X); } static void FourwayInv(f2elm_t X, f2elm_t Y, f2elm_t Z, f2elm_t T) { f2elm_t Xt, Zt, XY, ZT, XYZT; fp2copy(X, Xt); fp2copy(Z, Zt); fp2mul_mont(X, Y, XY); fp2mul_mont(Z, T, ZT); fp2mul_mont(XY, ZT, XYZT); fp2inv_mont(XYZT); /* 1 / XYZT */ fp2mul_mont(ZT, XYZT, ZT); /* 1 / XY */ fp2mul_mont(XY, XYZT, XY); /* 1 / ZT */ fp2mul_mont(ZT, Y, X); fp2mul_mont(ZT, Xt, Y); fp2mul_mont(XY, T, Z); fp2mul_mont(XY, Zt, T); } /* Simple functions on states */ static unsigned char GetC_SIDH(const st_t *s) { return (s->bytes[0] & 0x01); } static unsigned char GetB_SIDH(const st_t *s) { return ((s->bytes[0] & 0x06) >> 1); } static void SetB_SIDH(st_t *s, const unsigned char t) { /* Assumes that b is set to 0x03 */ s->bytes[0] &= ((t << 1) | 0xF9); } static void copy_st(st_t *r, const st_t *s, const uint64_t nwords_state) { for (unsigned int i = 0; i < nwords_state; i++) r->words[i] = s->words[i]; } static void copy_st2uint64(uint64_t *r, const st_t *s, const uint64_t nwords_state) { for (unsigned int i = 0; i < nwords_state; i++) r[i] = s->words[i]; } static void copy_uint642st(st_t *r, const uint64_t *s, const uint64_t nwords_state) { for (unsigned int i = 0; i < nwords_state; i++) r->words[i] = s[i]; } static void SwapStSIDH(st_t *r, st_t *s, uint64_t nwords_state) { st_t t = init_st(nwords_state); copy_st(&t, r, nwords_state); copy_st(r, s, nwords_state); copy_st(s, &t, nwords_state); free_st(&t); } bool is_equal_st(const st_t *s, const st_t *t, const uint64_t nwords_state) { for (unsigned int i = 0; i < nwords_state; i++) { if (s->words[i] != t->words[i]) return false; } return true; } static bool is_equal_st_words(const st_t *s, const uint64_t *r, const uint64_t nwords_state) { for (unsigned int i = 0; i < nwords_state; i++) { if (s->words[i] != r[i]) return false; } return true; } bool IsEqualJinvSIDH(unsigned char j0[FP2_ENCODED_BYTES], unsigned char j1[FP2_ENCODED_BYTES]) { for (unsigned int i = 0; i < FP2_ENCODED_BYTES; i++) { if (j0[i] != j1[i]) return false; } return true; } void copy_trip(trip_t *s, const trip_t *t, const uint64_t nwords_state) { copy_st(&s->current_state, &t->current_state, nwords_state); s->current_steps = t->current_steps; copy_st(&s->initial_state, 
&t->initial_state, nwords_state); } /* Functions for vOW */ bool DistinguishedSIDH(private_state_t *private_state) { /* Divide distinguishedness over interval to avoid bad cases */ assert(private_state->MEMORY_LOG_SIZE > EXTRA_MEM_LOG_SIZE); uint64_t val; val = private_state->current.current_state.words[0] >> (private_state->MEMORY_LOG_SIZE + EXTRA_MEM_LOG_SIZE); val += (uint64_t)private_state->function_version * (uint64_t)private_state->DIST_BOUND; val &= (((uint64_t)1 << (private_state->NBITS_STATE - EXTRA_MEM_LOG_SIZE - private_state->MEMORY_LOG_SIZE)) - 1); return (val <= (uint64_t)private_state->DIST_BOUND); } uint64_t MemIndexSIDH(private_state_t *private_state) { assert(private_state->MEMORY_SIZE <= (pow(2, RADIX - 3) - 1)); /* Assumes that MEMORY_SIZE <= 2^RADIX */ return (uint64_t)(((private_state->current.current_state.words[0] >> EXTRA_MEM_LOG_SIZE) + private_state->random_functions) & private_state->MEMORY_SIZE_MASK); } static unsigned int GetMSBSIDH(const unsigned char *m, unsigned long nbits_state) { int msb = nbits_state; int bit = (m[(msb - 1) >> 3] >> ((msb - 1) & 0x07)) & 1; while ((bit == 0) && (msb > 0)) { msb--; bit = (m[(msb - 1) >> 3] >> ((msb - 1) & 0x07)) & 1; } return msb; } void UpdateSIDH(private_state_t *private_state) { uint64_t i, temp; unsigned char j[FP2_ENCODED_BYTES]; UpdateStSIDH(j, &private_state->current.current_state, &private_state->current.current_state, private_state); private_state->number_steps_collect += 1; if (private_state->HANSEL_GRETEL) { if (private_state->crumbs.num_crumbs < private_state->crumbs.max_crumbs) { copy_st2uint64(&private_state->crumbs.crumbs[private_state->crumbs.position], &private_state->current.current_state, private_state->NWORDS_STATE); private_state->crumbs.positions[private_state->crumbs.position] = private_state->crumbs.position; private_state->crumbs.index_crumbs[private_state->crumbs.position] = private_state->crumbs.position; private_state->crumbs.num_crumbs++; } else if (private_state->crumbs.position - private_state->crumbs.positions[private_state->crumbs.max_crumbs - 1] == private_state->crumbs.max_dist) { temp = private_state->crumbs.index_crumbs[private_state->crumbs.index_position]; for (i = private_state->crumbs.index_position; i < private_state->crumbs.max_crumbs - 1; i++) { // Updating table with crumb indices for the crump table private_state->crumbs.index_crumbs[i] = private_state->crumbs.index_crumbs[i + 1]; } private_state->crumbs.index_crumbs[private_state->crumbs.max_crumbs - 1] = temp; private_state->crumbs.index_position++; if (private_state->crumbs.index_position > private_state->crumbs.max_crumbs - 1) private_state->crumbs.index_position = 0; copy_st2uint64(&private_state->crumbs.crumbs[temp], &private_state->current.current_state, private_state->NWORDS_STATE); // Inserting a new crumb at the end of the crumb table for (i = private_state->crumbs.scratch_position; i < private_state->crumbs.max_crumbs - 1; i++) { // Updating table with crumb positions private_state->crumbs.positions[i] = private_state->crumbs.positions[i + 1]; } private_state->crumbs.positions[private_state->crumbs.max_crumbs - 1] = private_state->crumbs.position; private_state->crumbs.swap_position += 2 * private_state->crumbs.real_dist; private_state->crumbs.scratch_position++; if (private_state->crumbs.swap_position > private_state->crumbs.max_crumbs - 1) { // Kind of cumbersome, maybe this can be simplified (but not time critical) private_state->crumbs.swap_position = 0; private_state->crumbs.real_dist <<= 1; } if 
(private_state->crumbs.scratch_position > private_state->crumbs.max_crumbs - 1) { private_state->crumbs.scratch_position = 0; private_state->crumbs.max_dist <<= 1; private_state->crumbs.swap_position = private_state->crumbs.max_dist; } } private_state->crumbs.position++; } } void UpdateRandomFunctionSIDH(shared_state_t *S, private_state_t *private_state) { private_state->function_version++; // reset "resync done" flag if (private_state->thread_id == 0) { S->resync_cores[0] = 0; } } static inline bool BacktrackSIDH_core(trip_t *c0, trip_t *c1, shared_state_t *S, private_state_t *private_state) { unsigned long L, i; st_t c0_, c1_; unsigned char jinv0[FP2_ENCODED_BYTES], jinv1[FP2_ENCODED_BYTES]; f2elm_t jinv; (void)private_state; // Make c0 have the largest number of steps if (c0->current_steps < c1->current_steps) { SwapStSIDH(&c0->initial_state, &c1->initial_state, private_state->NWORDS_STATE); L = (unsigned long)(c1->current_steps - c0->current_steps); } else { L = (unsigned long)(c0->current_steps - c1->current_steps); } // Catch up the trails for (i = 0; i < L; i++) { UpdateStSIDH(jinv0, &c0->initial_state, &c0->initial_state, private_state); private_state->number_steps_locate += 1; } if (is_equal_st(&c0->initial_state, &c1->initial_state, private_state->NWORDS_STATE)) return false; // Robin Hood c0_ = init_st(private_state->NWORDS_STATE); c1_ = init_st(private_state->NWORDS_STATE); for (i = 0; i < c1->current_steps + 1; i++) { UpdateStSIDH(jinv0, &c0_, &c0->initial_state, private_state); private_state->number_steps_locate += 1; UpdateStSIDH(jinv1, &c1_, &c1->initial_state, private_state); private_state->number_steps_locate += 1; if (IsEqualJinvSIDH(jinv0, jinv1)) { /* Record collision */ private_state->collisions += 1; if (private_state->collect_vow_stats) { #pragma omp critical { insertTree(&S->dist_cols, c0->initial_state, c1->initial_state, private_state->NWORDS_STATE); } } // free tmp states free_st(&c0_); free_st(&c1_); if (GetC_SIDH(&c0->initial_state) == GetC_SIDH(&c1->initial_state)) { return false; } else { fp2_decode(jinv0, jinv); assert(fp2_is_equal(jinv, private_state->jinv)); /* Verify that we found the right one*/ return true; } } else { copy_st(&c0->initial_state, &c0_, private_state->NWORDS_STATE); copy_st(&c1->initial_state, &c1_, private_state->NWORDS_STATE); } } /* Should never reach here */ return false; } static inline bool BacktrackSIDH_Hansel_Gretel(trip_t *c_mem, trip_t *c_crumbs, shared_state_t *S, private_state_t *private_state) { uint64_t L; trip_t c0_, cmem; uint64_t i, k, index; uint64_t crumb; bool resp, equal; unsigned char j[FP2_ENCODED_BYTES]; cmem = init_trip(private_state->NWORDS_STATE); copy_trip(&cmem, c_mem, private_state->NWORDS_STATE); // Make the memory trail (without crumbs) at most the length of the crumbs trail. if (cmem.current_steps > c_crumbs->current_steps) { L = cmem.current_steps - c_crumbs->current_steps; for (i = 0; i < L; i++) { UpdateStSIDH(j, &cmem.initial_state, &cmem.initial_state, private_state); private_state->number_steps_locate += 1; } cmem.current_steps = c_crumbs->current_steps; } // Check for Robin Hood if (is_equal_st(&cmem.initial_state, &c_crumbs->initial_state, private_state->NWORDS_STATE)) return false; // The memory path is L steps shorter than the crumbs path. L = c_crumbs->current_steps - cmem.current_steps; k = 0; // Since there has been at least one step, there is at least one crumb. // Crumbs only store intermediate points, not the initial state and not // necessarily the current state. 
index = private_state->crumbs.positions[0] + 1; while ((L > index) && (k + 1 < private_state->crumbs.num_crumbs)) { // There are still crumbs to check and we haven't found the next crumb to reach. k++; index = private_state->crumbs.positions[k] + 1; } // Either have found the next crumb or ran out of crumbs to check. if (L > index) { // Ran out of crumbs to check, i.e. already in the interval beyond the last crumb. // Trails collide after last crumb. // Call original BacktrackGen on memory trail and shortened crumbs trail. copy_uint642st(&c_crumbs->initial_state, &private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k]], private_state->NWORDS_STATE); c_crumbs->current_steps -= (private_state->crumbs.positions[k] + 1); resp = BacktrackSIDH_core(&cmem, c_crumbs, S, private_state); } else { // Next crumb to check lies before (or is) the last crumb. c0_ = init_trip(private_state->NWORDS_STATE); copy_trip(&c0_, &cmem, private_state->NWORDS_STATE); do { cmem.current_steps = c0_.current_steps; copy_st(&cmem.initial_state, &c0_.initial_state, private_state->NWORDS_STATE); crumb = private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k]]; index = private_state->crumbs.positions[k] + 1; L = cmem.current_steps - (c_crumbs->current_steps - index); for (i = 0; i < L; i++) { UpdateStSIDH(j, &c0_.initial_state, &c0_.initial_state, private_state); private_state->number_steps_locate += 1; } c0_.current_steps -= L; k++; equal = is_equal_st_words(&c0_.initial_state, &crumb, private_state->NWORDS_STATE); } while (!equal && k < private_state->crumbs.num_crumbs); // Either found the colliding crumb or moved to the interval beyond the last crumb. if (equal) { // Have a colliding crumb copy_uint642st(&cmem.current_state, &crumb, private_state->NWORDS_STATE); cmem.current_steps -= c0_.current_steps; if (k == 1) { c0_.current_steps = private_state->crumbs.positions[0] + 1; copy_uint642st(&c0_.initial_state, c_crumbs->initial_state.words, private_state->NWORDS_STATE); } else { c0_.current_steps = private_state->crumbs.positions[k - 1] - private_state->crumbs.positions[k - 2]; copy_uint642st(&c0_.initial_state, &private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k - 2]], private_state->NWORDS_STATE); } copy_uint642st(&c0_.current_state, &crumb, private_state->NWORDS_STATE); } else { // Collision happens after the last crumb. cmem.current_steps = c0_.current_steps; copy_uint642st(&cmem.initial_state, &crumb, private_state->NWORDS_STATE); } resp = BacktrackSIDH_core(&cmem, &c0_, S, private_state); free_trip(&c0_); } free_trip(&cmem); return resp; } bool BacktrackSIDH(trip_t *c0, trip_t *c1, shared_state_t *S, private_state_t *private_state) { // Backtrack function selection if (private_state->HANSEL_GRETEL) return BacktrackSIDH_Hansel_Gretel(c0, c1, S, private_state); else return BacktrackSIDH_core(c0, c1, S, private_state); }
GB_unop__conj_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__conj_fc64_fc64) // op(A') function: GB (_unop_tran__conj_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = conj (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = conj (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = conj (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CONJ || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__conj_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = conj (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = conj (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__conj_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__times_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__times_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__times_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__times_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__times_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc64) // A*D function (colscale): GB (_AxD__times_fc64) // D*A function (rowscale): GB (_DxB__times_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__times_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__times_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc64) // C=scalar+B GB (_bind1st__times_fc64) // C=scalar+B' GB (_bind1st_tran__times_fc64) // C=A+scalar GB (_bind2nd__times_fc64) // C=A'+scalar GB (_bind2nd_tran__times_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 0 // BinaryOp: cij = GB_FC64_mul (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_mul (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_FC64 || GxB_NO_TIMES_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__times_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__times_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__times_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__times_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__times_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__times_fc64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__times_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = 
(*((GxB_FC64_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__times_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__times_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__times_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__times_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__times_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_mul (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__times_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_mul (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_mul (x, aij) ; \ } GrB_Info GB (_bind1st_tran__times_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t 
*restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_mul (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__times_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
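As with the unary kernels, user code reaches these workers through the generic eWise entry points; a sketch, assuming the built-in GxB_TIMES_FC64 binary operator is available in this build:

#include "GraphBLAS.h"

/* Sketch: C = A .* B, elementwise complex multiply.
   Assumes GxB_TIMES_FC64 exists in this build; error handling elided. */
GrB_Info emult_times_fc64(GrB_Matrix C, GrB_Matrix A, GrB_Matrix B)
{
    return GrB_Matrix_eWiseMult_BinaryOp(C, NULL, NULL, GxB_TIMES_FC64,
                                         A, B, NULL) ;
}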
trsm_cmpt.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "mkl.h" #include "sys/time.h" #include "math.h" #define min(X,Y) (((X) < (Y)) ? (X) : (Y)) #define TRSM_BATCH cblas_dtrsm_batch #define TRSM_COMPUTE_BATCH cblas_dtrsm_compute_batch #define fabst fabs #define FPTYPE double #define VECTOR_LENGTH 8 #define REPEAT 1000 #define MEM_ALIGNMENT 64 #define GRP_COUNT 1 int main(int argc, char *argv[]) { int i, j, ii, jj, nthr, grp; #pragma omp parallel { nthr = omp_get_num_threads(); } unsigned long op_count = 0; double startt, stopt, mint, mints; mint = 1e5; mints = 1e5; int group_count = GRP_COUNT; int pack_length = VECTOR_LENGTH; int m_init, grp_init; if (argc > 1) m_init = atoi(argv[1]); else m_init = 5; if (argc > 2) grp_init = atoi(argv[2]); else grp_init = 512; MKL_INT m[GRP_COUNT] = {m_init}; MKL_INT lda[GRP_COUNT] = {m_init}; MKL_INT ldb[GRP_COUNT] = {m_init}; FPTYPE alpha[GRP_COUNT] = {1.0}; MKL_INT size_per_grp[GRP_COUNT] = {grp_init}; CBLAS_SIDE SIDE[GRP_COUNT] = {CblasRight}; CBLAS_UPLO UPLO[GRP_COUNT] = {CblasUpper}; CBLAS_TRANSPOSE TRANSA[GRP_COUNT] = {CblasNoTrans}; CBLAS_DIAG DIAG[GRP_COUNT] = {CblasNonUnit}; int num_pointers = 0; for (i = 0; i < GRP_COUNT; i++) num_pointers += size_per_grp[i]; int a_total = 0; int b_total = 0; for (i = 0; i < GRP_COUNT; i++) a_total += m[i] * m[i] * size_per_grp[i]; for (i = 0; i < GRP_COUNT; i++) b_total += m[i] * m[i] * size_per_grp[i]; FPTYPE *a, *b, *c; a = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT ); b = (FPTYPE *)_mm_malloc( b_total * sizeof(FPTYPE), MEM_ALIGNMENT ); c = (FPTYPE *)_mm_malloc( b_total * sizeof(FPTYPE), MEM_ALIGNMENT ); for (i = 0; i < a_total; i++) a[i] = (FPTYPE) rand() / (FPTYPE) RAND_MAX + .5; for (i = 0; i < b_total; i++) b[i] = (FPTYPE) rand() / (FPTYPE) RAND_MAX + .5; for (i = 0; i < b_total; i++) c[i] = b[i]; FPTYPE *a_array[num_pointers], *b_array[num_pointers]; int a_idx = 0; int b_idx = 0; int p_num = 0; for (i = 0; i < GRP_COUNT; i++) { for (j = 0; j < size_per_grp[i]; j++) { a_array[p_num] = &a[ a_idx ]; b_array[p_num] = &b[ b_idx ]; p_num++; a_idx += m[i] * m[i]; b_idx += m[i] * m[i]; op_count += m[i] * m[i] * m[i]; } } for (i = 0; i < GRP_COUNT; i++) { int idx_matrix = 0; for (ii = 0; ii < i; ii++) idx_matrix += size_per_grp[ii]; for (j = 0; j < size_per_grp[i]; j++) { for (ii = 0; ii < m[i]; ii++) { a_array[idx_matrix][ii*lda[i] + ii] = 10.0; } idx_matrix++; } } // setup packed arrays int grp_idx = 0; int A_p_idx = 0; int B_p_idx = 0; int i_grp, i_format, format_tail; FPTYPE *a_packed, *b_packed; a_packed = (FPTYPE *)_mm_malloc( a_total * sizeof(FPTYPE), MEM_ALIGNMENT ); b_packed = (FPTYPE *)_mm_malloc( b_total * sizeof(FPTYPE), MEM_ALIGNMENT ); // setup compact_t structs compact_t A_p, B_p, C_p; A_p.layout = CblasColMajor; A_p.rows = m; A_p.cols = m; A_p.stride = lda; A_p.group_count = GRP_COUNT; A_p.size_per_group = size_per_grp; A_p.format = VECTOR_LENGTH; A_p.mat = a_packed; B_p.layout = CblasColMajor; B_p.rows = m; B_p.cols = m; B_p.stride = ldb; B_p.group_count = GRP_COUNT; B_p.size_per_group = size_per_grp; B_p.format = VECTOR_LENGTH; B_p.mat = b_packed; for (i_grp=0; i_grp<GRP_COUNT; i_grp++) { for (i_format=0; i_format<(size_per_grp[i_grp]/VECTOR_LENGTH)*VECTOR_LENGTH; i_format+=VECTOR_LENGTH) { for (j=0; j<A_p.cols[i_grp]; j++) { for (i=0; i<A_p.rows[i_grp]; i++) { for (ii=0; ii<VECTOR_LENGTH; ii++) { a_packed[ii + A_p_idx] = a_array[ii + grp_idx + i_format][A_p.rows[i_grp]*j + i]; } A_p_idx+=VECTOR_LENGTH; } } for (j=0; j<B_p.cols[i_grp]; j++) { for 
(i=0; i<B_p.rows[i_grp]; i++) { for (ii=0; ii<VECTOR_LENGTH; ii++) { b_packed[ii + B_p_idx] = b_array[ii + grp_idx + i_format][B_p.rows[i_grp]*j + i]; } B_p_idx+=VECTOR_LENGTH; } } } // tail handling format_tail = size_per_grp[i_grp] - i_format; i_format = (size_per_grp[i_grp]/VECTOR_LENGTH)*VECTOR_LENGTH; if (format_tail > 0) { for (j=0; j<A_p.cols[i_grp]; j++) { for (i=0; i<A_p.rows[i_grp]; i++) { for (ii=0; ii<format_tail; ii++) { a_packed[ii + A_p_idx] = a_array[ii + grp_idx + i_format][A_p.rows[i_grp]*j + i]; } A_p_idx+=format_tail; } } for (j=0; j<B_p.cols[i_grp]; j++) { for (i=0; i<B_p.rows[i_grp]; i++) { for (ii=0; ii<format_tail; ii++) { b_packed[ii + B_p_idx] = b_array[ii + grp_idx + i_format][B_p.rows[i_grp]*j + i]; } B_p_idx+=format_tail; } } } grp_idx += size_per_grp[i_grp]; } printf("\n THREADS --- m = n --- TRSM_BATCH --- TRSM_BATCH_COMPUTE\n"); for (i = 0; i < REPEAT; i++) { startt = omp_get_wtime(); TRSM_BATCH( CblasColMajor, SIDE, UPLO, TRANSA, DIAG, m, m, alpha, (const FPTYPE**)a_array, lda, b_array, ldb, GRP_COUNT, size_per_grp ); stopt = omp_get_wtime(); mint = min(mint, (stopt - startt)); } for (i = 0; i < REPEAT; i++) { startt = omp_get_wtime(); TRSM_COMPUTE_BATCH( SIDE, UPLO, TRANSA, DIAG, alpha, &A_p, &B_p ); stopt = omp_get_wtime(); mints = min(mints, (stopt - startt)); } printf(" %d --- %d --- %5.2f --- %5.2f\n", nthr, m_init, (double)op_count/(mint*1e9), (double)op_count/(mints*1e9)); _mm_free(a_packed); _mm_free(b_packed); _mm_free(a); _mm_free(b); _mm_free(c); return 0; }
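cblas_dtrsm_compute_batch and the compact_t descriptor come from Intel MKL's compact batch (preview) interface, so the build line below is an assumption about a suitable MKL installation rather than part of the source:

/* Possible build, assuming an MKL version that ships the compact
 * batch preview API used above:
 *   icc -O2 -qopenmp trsm_cmpt.c -mkl -o trsm_cmpt
 *   ./trsm_cmpt 5 512    # matrices of order 5, 512 per group
 */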
iRCCE_lib.h
// // Copyright 2010 Intel Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // [2010-10-25] added support for non-blocking send/recv operations // - iRCCE_isend(), ..._test(), ..._wait(), ..._push() // - iRCCE_irecv(), ..._test(), ..._wait(), ..._push() // by Carsten Clauss, Chair for Operating Systems, // RWTH Aachen University // // [2010-11-12] extracted non-blocking code into separate library // by Carsten Scholtes // // [2011-04-19] added wildcard mechanism (iRCCE_ANY_SOURCE) for receiving // a message from an arbitrary remote rank // by Simon Pickartz, Chair for Operating Systems, // RWTH Aachen University // // [2011-06-27] merged iRCCE_ANY_SOURCE branch with trunk (iRCCE_ANY_LENGTH) // #ifndef IRCCE_LIB_H #define IRCCE_LIB_H #include "RCCE_lib.h" #include "iRCCE.h" #ifdef AIR #define FPGA_BASE 0xf9000000 #define BACKOFF_MIN 8 #define BACKOFF_MAX 256 extern iRCCE_AIR iRCCE_atomic_inc_regs[]; extern int iRCCE_atomic_alloc_counter; extern iRCCE_AIR* iRCCE_atomic_barrier[2]; #endif extern iRCCE_SEND_REQUEST* iRCCE_isend_queue; extern iRCCE_RECV_REQUEST* iRCCE_irecv_queue[RCCE_MAXNP]; extern iRCCE_RECV_REQUEST* iRCCE_irecv_any_source_queue; extern int iRCCE_recent_source; extern int iRCCE_recent_length; #if defined(_OPENMP) && !defined(__hermit__) #pragma omp threadprivate (iRCCE_isend_queue, iRCCE_irecv_queue, iRCCE_irecv_any_source_queue, iRCCE_recent_source, iRCCE_recent_length) #endif int iRCCE_test_flag(RCCE_FLAG, RCCE_FLAG_STATUS, int *); int iRCCE_push_ssend_request(iRCCE_SEND_REQUEST *request); int iRCCE_push_srecv_request(iRCCE_RECV_REQUEST *request); #endif
Gemm_MT_Loop5_MRxNRKernel_ver2_a.c
#include <stdio.h> #include <stdlib.h> #include<immintrin.h> #include "omp.h" #define alpha( i,j ) A[ (j)*ldA + (i) ] // map alpha( i,j ) to array A #define beta( i,j ) B[ (j)*ldB + (i) ] // map beta( i,j ) to array B #define gamma( i,j ) C[ (j)*ldC + (i) ] // map gamma( i,j ) to array C #define min( x, y ) ( ( x ) < ( y ) ? x : y ) void LoopFive( int, int, int, double *, int, double *, int, double *, int ); void LoopFour( int, int, int, double *, int, double *, int, double *, int ); void LoopThree( int, int, int, double *, int, double *, double *, int ); void LoopTwo( int, int, int, double *, double *, double *, int ); void LoopOne( int, int, int, double *, double *, double *, int ); void Gemm_MRxNRKernel_Packed( int, double *, double *, double *, int ); void PackBlockA_MCxKC( int, int, double *, int, double * ); void PackPanelB_KCxNC( int, int, double *, int, double * ); void MyGemm( int m, int n, int k, double *A, int ldA, double *B, int ldB, double *C, int ldC ) { if ( m % MR != 0 || MC % MR != 0 ){ printf( "m and MC must be multiples of MR\n" ); exit( 0 ); } if ( n % NR != 0 || NC % NR != 0 ){ printf( "n and NC must be multiples of NR\n" ); exit( 0 ); } LoopFive( m, n, k, A, ldA, B, ldB, C, ldC ); } void LoopFive( int m, int n, int k, double *A, int ldA, double *B, int ldB, double *C, int ldC ) { int max_threads = omp_get_max_threads(); /* Resize NC so that each thread uses at most NC/max_threads. Make sure it is a multiple of NR */ // int NC_per_thread = ( ( NC / max_threads ) / NR ) * NR; /* Compute the width of C that is a multiple of NC_per_thread and the number of threads. Notice that / performs integer division. */ int loadbalanced_part = ( n / ( NC * max_threads ) ) * NC * max_threads; /* Compute the remainder */ int remainder = n - loadbalanced_part; /* Compute the remainder per thread. 
But it needs to be a multiple of NR */ int remainder_per_thread = ( ( remainder / max_threads ) / NR ) * NR; if ( remainder_per_thread == 0 ) remainder_per_thread = NR; /* Compute the loadbalanced part in parallel */ #pragma omp parallel for for ( int j=0; j<loadbalanced_part; j+=NC ) LoopFour( m, NC, k, A, ldA, &beta( 0,j ), ldB, &gamma( 0,j ), ldC ); /* Compute the rest in parallel */ #pragma omp parallel for for ( int j=loadbalanced_part; j<n; j+=remainder_per_thread ) { int jb = min( remainder_per_thread, n-j ); /* Last loop may not involve a full block */ LoopFour( m, jb, k, A, ldA, &beta( 0,j ), ldB, &gamma( 0,j ), ldC ); } } void LoopFour( int m, int n, int k, double *A, int ldA, double *B, int ldB, double *C, int ldC ) { double *Btilde = ( double * ) _mm_malloc( KC * NC * sizeof( double ), 64 ); for ( int p=0; p<k; p+=KC ) { int pb = min( KC, k-p ); /* Last loop may not involve a full block */ PackPanelB_KCxNC( pb, n, &beta( p, 0 ), ldB, Btilde ); LoopThree( m, n, pb, &alpha( 0, p ), ldA, Btilde, C, ldC ); } _mm_free( Btilde); } void LoopThree( int m, int n, int k, double *A, int ldA, double *Btilde, double *C, int ldC ) { double *Atilde = ( double * ) _mm_malloc( MC * KC * sizeof( double ), 64 ); for ( int i=0; i<m; i+=MC ) { int ib = min( MC, m-i ); /* Last loop may not involve a full block */ PackBlockA_MCxKC( ib, k, &alpha( i, 0 ), ldA, Atilde ); LoopTwo( ib, n, k, Atilde, Btilde, &gamma( i,0 ), ldC ); } _mm_free( Atilde); } void LoopTwo( int m, int n, int k, double *Atilde, double *Btilde, double *C, int ldC ) { for ( int j=0; j<n; j+=NR ) { int jb = min( NR, n-j ); LoopOne( m, jb, k, Atilde, &Btilde[ j*k ], &gamma( 0,j ), ldC ); } } void LoopOne( int m, int n, int k, double *Atilde, double *MicroPanelB, double *C, int ldC ) { for ( int i=0; i<m; i+=MR ) { int ib = min( MR, m-i ); Gemm_MRxNRKernel_Packed( k, &Atilde[ i*k ], MicroPanelB, &gamma( i,0 ), ldC ); } }
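This file expects the blocking parameters MR, NR, MC, NC, and KC as compile-time macros, and the packing routines plus the micro-kernel to be linked in from elsewhere; a plausible build line in that style, with illustrative (not prescribed) values:

/* Possible build, with blocking parameters chosen at compile time:
 *   gcc -O3 -march=native -fopenmp \
 *       -DMR=8 -DNR=6 -DMC=96 -DKC=256 -DNC=2016 \
 *       Gemm_MT_Loop5_MRxNRKernel_ver2_a.c pack.c kernel.c driver.c -o gemm
 * MyGemm requires m and MC to be multiples of MR, and n and NC to be
 * multiples of NR. pack.c, kernel.c, and driver.c are hypothetical names
 * for the files providing PackBlockA_MCxKC, PackPanelB_KCxNC,
 * Gemm_MRxNRKernel_Packed, and main().
 */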
matmul.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

const int N = 128*2;

int main(int argc, char **argv)
{
  float matA[N][N];
  float matB[N][N];
  float matC[N][N];

  /* Initialize the inputs so the offloaded kernel works on known values
     rather than indeterminate stack contents. */
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      matA[i][j] = 1.0f;
      matB[i][j] = 2.0f;
    }
  }

  fprintf(stderr, "Starting matmul\n");

#pragma omp target data map(to: matA, matB) map(from: matC)
  {
    float tmp;
#pragma omp target teams distribute parallel for private(tmp) map(to:N) //collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < N; j++) {
        tmp = 0.0;
        for (int k = 0; k < N; k++) {
          tmp += matA[i][k] * matB[k][j];
        }
        matC[i][j] = tmp;
      }
    }
  }

  fprintf(stderr, "Passed\n");

  return 0;
}
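/*
 * Optional host-side check (a sketch, not in the original source): with
 * matA = 1 and matB = 2 as initialized above, every element of matC must
 * equal 2*N after the offloaded kernel, so "Passed" can be backed by an
 * actual comparison:
 *
 *   int ok = 1;
 *   for (int i = 0; i < N && ok; i++)
 *     for (int j = 0; j < N && ok; j++)
 *       if (matC[i][j] != 2.0f * N) ok = 0;
 *   fprintf(stderr, ok ? "Passed\n" : "FAILED\n");
 */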
GB_binop__isge_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isge_int8 // A.*B function (eWiseMult): GB_AemultB__isge_int8 // A*D function (colscale): GB_AxD__isge_int8 // D*A function (rowscale): GB_DxB__isge_int8 // C+=B function (dense accum): GB_Cdense_accumB__isge_int8 // C+=b function (dense accum): GB_Cdense_accumb__isge_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_int8 // C=scalar+B GB_bind1st__isge_int8 // C=scalar+B' GB_bind1st_tran__isge_int8 // C=A+scalar GB_bind2nd__isge_int8 // C=A'+scalar GB_bind2nd_tran__isge_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_INT8 || GxB_NO_ISGE_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isge_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isge_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isge_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isge_int8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isge_int8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isge_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isge_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isge_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isge_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__isge_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__isge_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
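/*
 * Standalone illustration (not part of the generated file): GB_bind1st above
 * applies cij = (x >= bij) elementwise, with the result stored in the operand
 * type int8_t rather than bool. A minimal sketch of that pattern:
 */
#ifdef GB_ISGE_INT8_EXAMPLE
#include <stdint.h>
#include <stdio.h>
int main (void)
{
    int8_t Bx [4] = { -2, 0, 3, 7 } ;
    int8_t Cx [4] ;
    int8_t x = 3 ;
    #pragma omp parallel for schedule(static)
    for (int p = 0 ; p < 4 ; p++)
    {
        Cx [p] = (x >= Bx [p]) ;    // GB_BINOP: z = (x >= y)
    }
    // prints 1 1 1 0
    printf ("%d %d %d %d\n", Cx [0], Cx [1], Cx [2], Cx [3]) ;
    return (0) ;
}
#endif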
hypre_merge_sort.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "../seq_mv/HYPRE_seq_mv.h" //#define DBG_MERGE_SORT #ifdef DBG_MERGE_SORT #include <algorithm> #include <unordered_map> #endif #define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0) /*-------------------------------------------------------------------------- * hypre_union2 * * Union of two sorted (in ascending order) array arr1 and arr2 into arr3 * * Assumptions: * 1) no duplicates in arr1 and arr2 * 2) arr3 should have enough space on entry * 3) map1 and map2 map arr1 and arr2 to arr3 *--------------------------------------------------------------------------*/ void hypre_union2( HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2 ) { HYPRE_Int i = 0, j = 0, k = 0; while (i < n1 && j < n2) { if (arr1[i] < arr2[j]) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } else if (arr1[i] > arr2[j]) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } else /* == */ { if (map1) { map1[i] = k; } if (map2) { map2[j] = k; } arr3[k++] = arr1[i++]; j++; } } while (i < n1) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } while (j < n2) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } *n3 = k; } /*-------------------------------------------------------------------------- * hypre_merge *--------------------------------------------------------------------------*/ static void hypre_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out ) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } /*-------------------------------------------------------------------------- * hypre_big_merge *--------------------------------------------------------------------------*/ static void hypre_big_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out ) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } /*-------------------------------------------------------------------------- * kth_element_ *--------------------------------------------------------------------------*/ static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] 
&& (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ /*-------------------------------------------------------------------------- * kth_element * * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements *--------------------------------------------------------------------------*/ static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_Int *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } /*-------------------------------------------------------------------------- * big_kth_element_ *--------------------------------------------------------------------------*/ static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT hypre_assert(left <= right && right <= k); hypre_assert(i < k); // i == k implies left == right == k that can never happen hypre_assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /*-------------------------------------------------------------------------- * big_kth_element * * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements *--------------------------------------------------------------------------*/ static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k 
>= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_BigInt *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT hypre_assert(*out1 + *out2 == k); #endif } /*-------------------------------------------------------------------------- * hypre_parallel_merge * * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zer0-based) among the threads that * participate in this merge *--------------------------------------------------------------------------*/ static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num ) { HYPRE_Int n1 = last1 - first1; HYPRE_Int n2 = last2 - first2; HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_merge( first1 + begin1, first1 + end1, first2 + begin2, first2 + end2, out + begin1 + begin2); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } /*-------------------------------------------------------------------------- * hypre_big_parallel_merge * * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zero-based) among the threads that * participate in this merge *--------------------------------------------------------------------------*/ static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = (HYPRE_Int)(last1 - first1); HYPRE_Int n2 = (HYPRE_Int)(last2 - first2); HYPRE_Int n = n1 + n2; 
HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(first1, last1)); hypre_assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT hypre_assert(begin1 <= end1); hypre_assert(begin2 <= end2); #endif hypre_big_merge( first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1, first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2, out + (HYPRE_BigInt)(begin1 + begin2)); #ifdef DBG_MERGE_SORT hypre_assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } /*-------------------------------------------------------------------------- * hypre_merge_sort *--------------------------------------------------------------------------*/ void hypre_merge_sort( HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out ) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_qsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_Int *in_buf = in; HYPRE_Int *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_parallel_merge( in_buf + in_group1_begin, in_buf + in_group1_end, in_buf + in_group2_begin, in_buf + in_group2_end, out_buf + in_group1_begin, num_threads_in_group, id_in_group); HYPRE_Int *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef 
HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } /*-------------------------------------------------------------------------- * hypre_sort_and_create_inverse_map * * Sort array "in" with length len and put result in array "out" * "in" will be deallocated unless in == *out * inverse_map is an inverse hash table s.t. * inverse_map[i] = j iff (*out)[j] = i *--------------------------------------------------------------------------*/ void hypre_sort_and_create_inverse_map(HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST); hypre_merge_sort(in, temp, len, out); hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } /*-------------------------------------------------------------------------- * hypre_big_merge_sort *--------------------------------------------------------------------------*/ void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_BigQsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_BigInt *in_buf = in; HYPRE_BigInt *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); 
HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_big_parallel_merge( in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end, in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end, out_buf + (HYPRE_BigInt)in_group1_begin, num_threads_in_group, id_in_group); HYPRE_BigInt *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT hypre_assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } /*-------------------------------------------------------------------------- * hypre_big_sort_and_create_inverse_map *--------------------------------------------------------------------------*/ void hypre_big_sort_and_create_inverse_map(HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_big_merge_sort(in, temp, len, out); hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i); hypre_assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); hypre_assert(false); } } hypre_assert(hypre_UnorderedBigIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } /* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
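/*
 * Illustration (not part of the original file): hypre_parallel_merge above
 * gives each task an output range [begin_rank, end_rank) and then calls
 * kth_element to locate where that range starts in each sorted input. A
 * minimal standalone sketch of the rank computation:
 */
#ifdef HYPRE_MERGE_EXAMPLE
#include <stdio.h>
int main(void)
{
   int n = 16, num_threads = 4;
   int n_per_thread = (n + num_threads - 1)/num_threads;
   for (int t = 0; t < num_threads; t++)
   {
      int begin_rank = n_per_thread*t;
      int end_rank = begin_rank + n_per_thread;
      if (begin_rank > n) { begin_rank = n; }   /* the hypre_min clamps */
      if (end_rank > n) { end_rank = n; }
      printf("task %d merges output ranks [%d, %d)\n", t, begin_rank, end_rank);
   }
   return 0;
}
#endif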
computeP_mex.c
#include "mex.h" #include "omp.h" #include "math.h" #include "string.h" void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Inputs double *E; // relabelled ensemble matrix long long M, N; long long nClust, maxElem; // Outputs double *P; double *nElemInCls; // Internal long long i, Erow, label, Ntmp, Ninc, nbytes, destination_offset, bytes_to_copy; mxArray *Ptmp_mat, *newSpace_mat; double *Ptmp, *newSpace, *newptr; /* Check for proper number of input and output arguments */ if (nrhs != 2) { mexErrMsgTxt("Two inputs argument required."); } if(nlhs != 2){ mexErrMsgTxt("2 outputs required."); } // E matrix [M X N] N = mxGetM(prhs[0]); // number of data points M = mxGetN(prhs[0]); // number of ensemble members E = mxGetPr(prhs[0]); // Get size of ensemble M nClust = (long long)mxGetScalar(prhs[1]); // Create temp P matrix, half size of N; if we will need more space, we will allocate another matrix Ntmp = (long long)floor(N/2.0); Ninc = (long long)ceil(N*0.2); // 20% increase size if out of space Ptmp_mat = mxCreateDoubleMatrix(nClust,Ntmp,mxREAL); Ptmp = mxGetPr(Ptmp_mat); // Create vector for storing number of elements in each cluster plhs[1] = mxCreateDoubleMatrix(nClust,1,mxREAL); nElemInCls = mxGetPr(plhs[1]); //------------------------------------------------ // Compute P and sum of cols maxElem = 0; //#pragma omp parallel for shared(E, N, M) private(i,Erow,label) for (i=0; i<N*M; i++){ Erow = i % N; if(E[i]>0){ label = (long long)E[i]-1; // Need to add some space to Ptmp? if(nElemInCls[label] >= Ntmp){ newSpace_mat = mxCreateDoubleMatrix(nClust,Ninc,mxREAL); newSpace = mxGetPr(newSpace_mat); nbytes = (nClust) * (Ntmp + Ninc)* sizeof(double);//size of new array destination_offset = nClust * Ntmp; //start offset for copying bytes_to_copy = nClust * Ninc * sizeof(double); //ptr = mxGetPr(prhs[0]); newptr = (double *)mxRealloc(Ptmp, nbytes);//reallocate array mxSetPr(Ptmp_mat,newptr); //ptr = mxGetPr(prhs[1]); memcpy(newptr+destination_offset,newSpace,bytes_to_copy);//actual copy mxSetN(Ptmp_mat,Ntmp + Ninc);//fix dimension Ptmp = newptr; Ntmp += Ninc; mxDestroyArray(newSpace_mat); } Ptmp[label+(long long)nElemInCls[label]*nClust] = (double)Erow; nElemInCls[label]++; if(nElemInCls[label] > maxElem){ maxElem = (long long)nElemInCls[label]; } } } // Create P matrix - trim Ptmp plhs[0] = mxCreateDoubleMatrix(nClust,maxElem,mxREAL); P = mxGetPr(plhs[0]); memcpy(P,Ptmp,maxElem*nClust*sizeof(double)); // copy contents of Ptmp, only relevant mxDestroyArray(Ptmp_mat); }
initialise_chunk_kernel_c.c
/*Crown Copyright 2012 AWE.
 *
 * This file is part of CloverLeaf.
 *
 * CloverLeaf is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 3 of the License, or (at your option)
 * any later version.
 *
 * CloverLeaf is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * CloverLeaf. If not, see http://www.gnu.org/licenses/.
 */

/**
 * @brief Driver for chunk initialisation.
 * @author Wayne Gaudin
 * @details Invokes the user specified chunk initialisation kernel.
 */

#include <stdio.h>
#include <stdlib.h>
#include "ftocmacros.h"
#include <math.h>

void initialise_chunk_kernel_c_(int *xmin,int *xmax,int *ymin,int *ymax,
                                double *minx, double *miny, double *dx, double *dy,
                                double *vertexx, double *vertexdx,
                                double *vertexy, double *vertexdy,
                                double *cellx, double *celldx,
                                double *celly, double *celldy,
                                double *volume, double *xarea, double *yarea)
{
  int x_min=*xmin;
  int x_max=*xmax;
  int y_min=*ymin;
  int y_max=*ymax;
  double min_x=*minx;
  double min_y=*miny;
  double d_x=*dx;
  double d_y=*dy;

  int j,k;

#pragma omp parallel
 {
#pragma omp for private(j)
#pragma ivdep
  for (j=x_min-2;j<=x_max+3;j++) {
    vertexx[FTNREF1D(j,x_min-2)]=min_x+d_x*(double)(j-x_min);
  }

#pragma omp for private(j)
#pragma ivdep
  for (j=x_min-2;j<=x_max+3;j++) {
    vertexdx[FTNREF1D(j,x_min-2)]=d_x;
  }

#pragma omp for private(k)
#pragma ivdep
  for (k=y_min-2;k<=y_max+3;k++) {
    vertexy[FTNREF1D(k,y_min-2)]=min_y+d_y*(double)(k-y_min);
  }

#pragma omp for private(k)
#pragma ivdep
  for (k=y_min-2;k<=y_max+3;k++) {
    vertexdy[FTNREF1D(k,y_min-2)]=d_y;
  }

#pragma omp for private(j)
#pragma ivdep
  for (j=x_min-2;j<=x_max+2;j++) {
    cellx[FTNREF1D(j,x_min-2)]=0.5*(vertexx[FTNREF1D(j,x_min-2)]+vertexx[FTNREF1D(j+1,x_min-2)]);
  }

#pragma omp for private(j)
#pragma ivdep
  for (j=x_min-2;j<=x_max+2;j++) {
    celldx[FTNREF1D(j,x_min-2)]=d_x;
  }

#pragma omp for private(k)
#pragma ivdep
  for (k=y_min-2;k<=y_max+2;k++) {
    celly[FTNREF1D(k,y_min-2)]=0.5*(vertexy[FTNREF1D(k,y_min-2)]+vertexy[FTNREF1D(k+1,y_min-2)]);
  }

#pragma omp for private(k)
#pragma ivdep
  for (k=y_min-2;k<=y_max+2;k++) {
    celldy[FTNREF1D(k,y_min-2)]=d_y;
  }

#pragma omp for private(j,k)
  for (k=y_min-2;k<=y_max+2;k++) {
#pragma ivdep
    for (j=x_min-2;j<=x_max+2;j++) {
      volume[FTNREF2D(j,k,x_max+4,x_min-2,y_min-2)]=d_x*d_y;
    }
  }

#pragma omp for private(j,k)
  for (k=y_min-2;k<=y_max+2;k++) {
#pragma ivdep
    for (j=x_min-2;j<=x_max+2;j++) {
      xarea[FTNREF2D(j,k,x_max+5,x_min-2,y_min-2)]=celldy[FTNREF1D(k,y_min-2)];
    }
  }

#pragma omp for private(j,k)
  for (k=y_min-2;k<=y_max+2;k++) {
#pragma ivdep
    for (j=x_min-2;j<=x_max+2;j++) {
      yarea[FTNREF2D(j,k,x_max+4,x_min-2,y_min-2)]=celldx[FTNREF1D(j,x_min-2)];
    }
  }
 }
}
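/*
 * Reference note (not part of the original file): FTNREF1D and FTNREF2D come
 * from ftocmacros.h and translate Fortran-style index ranges with arbitrary
 * lower bounds into flat C offsets. Plausible definitions, shown only as a
 * reading aid:
 *
 *   #define FTNREF1D(i, i_lb)  ((i) - (i_lb))
 *   #define FTNREF2D(i, j, i_size, i_lb, j_lb) \
 *           ((i_size) * ((j) - (j_lb)) + (i) - (i_lb))
 *
 * so vertexx[FTNREF1D(j, x_min-2)] addresses the element whose logical index
 * is j in an array whose first logical index is x_min-2.
 */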
rose_reduction_max.c
#include "omp.h" double a[10]; int foo() { double max_val = - 1e99; double min_val = 1e99; int i; #pragma omp parallel for private (i) reduction (max:max_val) reduction (min:min_val) for (i = 0; i <= 9; i += 1) { if (a[i] > max_val) { max_val = a[i]; } if (a[i] < min_val) min_val = a[i]; } }
fill_int2c.c
/* * */ #include <stdlib.h> #include <complex.h> #include "config.h" #include "cint.h" #include "np_helper/np_helper.h" #define PLAIN 0 #define HERMITIAN 1 #define ANTIHERMI 2 #define SYMMETRIC 3 int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * mat(naoi,naoj,comp) in F-order */ void GTOint2c(int (*intor)(), double *mat, int comp, int hermi, int *shls_slice, int *ao_loc, CINTOpt *opt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const int cache_size = GTOmax_cache_size(intor, shls_slice, 2, atm, natm, bas, nbas, env); #pragma omp parallel default(none) \ shared(intor, mat, comp, hermi, ao_loc, opt, atm, natm, bas, nbas, env) { int dims[] = {naoi, naoj}; int ish, jsh, ij, di, dj, i0, j0; int shls[2]; double *cache = malloc(sizeof(double) * cache_size); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; if (hermi != PLAIN && ish > jsh) { // fill up only upper triangle of F-array continue; } ish += ish0; jsh += jsh0; shls[0] = ish; shls[1] = jsh; i0 = ao_loc[ish] - ao_loc[ish0]; j0 = ao_loc[jsh] - ao_loc[jsh0]; (*intor)(mat+j0*naoi+i0, dims, shls, atm, natm, bas, nbas, env, opt, cache); } free(cache); } if (hermi != PLAIN) { // lower triangle of F-array int ic; for (ic = 0; ic < comp; ic++) { NPdsymm_triu(naoi, mat+ic*naoi*naoi, hermi); } } } void GTOint2c_spinor(int (*intor)(), double complex *mat, int comp, int hermi, int *shls_slice, int *ao_loc, CINTOpt *opt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const int cache_size = GTOmax_cache_size(intor, shls_slice, 2, atm, natm, bas, nbas, env); #pragma omp parallel default(none) \ shared(intor, mat, comp, hermi, ao_loc, opt, atm, natm, bas, nbas, env) { int dims[] = {naoi, naoj}; int ish, jsh, ij, di, dj, i0, j0; int shls[2]; double *cache = malloc(sizeof(double) * cache_size); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; if (hermi != PLAIN && ish > jsh) { continue; } ish += ish0; jsh += jsh0; shls[0] = ish; shls[1] = jsh; i0 = ao_loc[ish] - ao_loc[ish0]; j0 = ao_loc[jsh] - ao_loc[jsh0]; (*intor)(mat+j0*naoi+i0, dims, shls, atm, natm, bas, nbas, env, opt, cache); } free(cache); } if (hermi != PLAIN) { int ic; for (ic = 0; ic < comp; ic++) { NPzhermi_triu(naoi, mat+ic*naoi*naoi, hermi); } } }
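/*
 * Illustration (not part of the original file): for hermi != PLAIN the loops
 * above skip shell pairs with ish > jsh, so only the upper triangle of the
 * F-ordered matrix is computed, and NPdsymm_triu / NPzhermi_triu mirror it
 * afterwards. A standalone sketch of the same fill-then-mirror pattern on a
 * plain n x n column-major array:
 */
#ifdef GTO_INT2C_EXAMPLE
#include <stdio.h>
#define EXN 3
int main(void)
{
        double mat[EXN*EXN] = {0};
        int i, j;
        /* fill the upper triangle only (i <= j), like the ish > jsh skip */
        for (j = 0; j < EXN; j++) {
                for (i = 0; i <= j; i++) {
                        mat[j*EXN+i] = 10*i + j;
                }
        }
        /* mirror to the lower triangle, standing in for NPdsymm_triu */
        for (j = 0; j < EXN; j++) {
                for (i = j+1; i < EXN; i++) {
                        mat[j*EXN+i] = mat[i*EXN+j];
                }
        }
        for (i = 0; i < EXN; i++) {
                for (j = 0; j < EXN; j++) {
                        printf("%5.0f", mat[j*EXN+i]);
                }
                printf("\n");
        }
        return 0;
}
#endif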
tiled_blas_l1.c
/******************************************************************************* * Copyright 2019 UChicago Argonne, LLC. * (c.f. AUTHORS, LICENSE) * * This file is part of the AML project. * For more info, see https://github.com/anlsys/aml * * SPDX-License-Identifier: BSD-3-Clause ******************************************************************************/ #include <assert.h> #include <errno.h> #include <stdio.h> #include <unistd.h> #include "aml.h" #include "aml/area/linux.h" #include "aml/layout/dense.h" #include "aml/tiling/resize.h" #include "blas_l1_kernel.h" #include "utils.h" #include "verify_blas_l1.h" #define DEFAULT_ARRAY_SIZE (1UL << 20) #define DEFAULT_TILE_SIZE (1UL << 8) #ifdef NTIMES #if NTIMES <= 1 #define NTIMES 10 #endif #endif #ifndef NTIMES #define NTIMES 10 #endif #define OFFSET 0 #ifndef MIN #define MIN(x, y) ((x) < (y) ? (x) : (y)) #endif #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif static double *pt; double run_dasum(size_t tilesize, size_t ntiles, struct aml_tiling *ta, struct aml_tiling *tb, struct aml_tiling *tc, double scalar) { (void)*tb; (void)*tc; double asum = 0; #pragma omp parallel for reduction(+ : asum) for (size_t i = 0; i < ntiles; i++) { double *at, *bt, *ct; double temp; at = aml_tiling_rawptr(ta, (size_t[]){i}); bt = pt; ct = pt; temp = dasum(tilesize, at, bt, ct, scalar); asum += temp; } return asum; } double run_daxpy(size_t tilesize, size_t ntiles, struct aml_tiling *ta, struct aml_tiling *tb, struct aml_tiling *tc, double scalar) { #pragma omp parallel for for (size_t i = 0; i < ntiles; i++) { double *at, *bt, *ct; at = aml_tiling_rawptr(ta, (size_t[]){i}); bt = aml_tiling_rawptr(tb, (size_t[]){i}); ct = aml_tiling_rawptr(tc, (size_t[]){i}); daxpy(tilesize, at, bt, ct, scalar); } return 1; } double run_dcopy(size_t tilesize, size_t ntiles, struct aml_tiling *ta, struct aml_tiling *tb, struct aml_tiling *tc, double scalar) { (void)*tc; #pragma omp parallel for for (size_t i = 0; i < ntiles; i++) { double *at, *bt, *ct; at = aml_tiling_rawptr(ta, (size_t[]){i}); bt = aml_tiling_rawptr(tb, (size_t[]){i}); ct = pt; dcopy(tilesize, at, bt, ct, scalar); } return 1; } double run_ddot(size_t tilesize, size_t ntiles, struct aml_tiling *ta, struct aml_tiling *tb, struct aml_tiling *tc, double scalar) { (void)*tc; double dot = 0.0; #pragma omp parallel for reduction(+ : dot) for (size_t i = 0; i < ntiles; i++) { double *at, *bt, *ct; double temp; at = aml_tiling_rawptr(ta, (size_t[]){i}); bt = aml_tiling_rawptr(tb, (size_t[]){i}); ct = pt; temp = ddot(tilesize, at, bt, ct, scalar); dot += temp; } return dot; } double run_dnrm2(size_t tilesize, size_t ntiles, struct aml_tiling *ta, struct aml_tiling *tb, struct aml_tiling *tc, double scalar) { (void)*tb; (void)*tc; double nrm2 = 0; #pragma omp parallel for reduction(+ : nrm2) for (size_t i = 0; i < ntiles; i++) { double *at, *bt, *ct; double nrm; at = aml_tiling_rawptr(ta, (size_t[]){i}); bt = pt; ct = pt; nrm = dnrm2(tilesize, at, bt, ct, scalar); nrm2 += pow(nrm, 2); } return sqrt(nrm2); } double run_dscal(size_t tilesize, size_t ntiles, struct aml_tiling *ta, struct aml_tiling *tb, struct aml_tiling *tc, double scalar) { (void)*tc; #pragma omp parallel for for (size_t i = 0; i < ntiles; i++) { double *at, *bt, *ct; at = aml_tiling_rawptr(ta, (size_t[]){i}); bt = aml_tiling_rawptr(tb, (size_t[]){i}); ct = pt; dscal(tilesize, at, bt, ct, scalar); } return 1; } double run_dswap(size_t tilesize, size_t ntiles, struct aml_tiling *ta, 
                 struct aml_tiling *tb,
                 struct aml_tiling *tc,
                 double scalar)
{
	(void)*tc;
	#pragma omp parallel for
	for (size_t i = 0; i < ntiles; i++) {
		double *at, *bt, *ct;
		at = aml_tiling_rawptr(ta, (size_t[]){i});
		bt = aml_tiling_rawptr(tb, (size_t[]){i});
		ct = pt;
		dswap(tilesize, at, bt, ct, scalar);
	}
	return 1;
}

double run_idmax(size_t tilesize,
                 size_t ntiles,
                 struct aml_tiling *ta,
                 struct aml_tiling *tb,
                 struct aml_tiling *tc,
                 double scalar)
{
	(void)*tb;
	(void)*tc;
	size_t maxid = 0;
	double max = 0.0;
	#pragma omp parallel
	{
		double local_max = -DBL_MAX;
		size_t local_maxid = 0;
		/* work-share the tiles across the enclosing team; each
		 * thread keeps a local winner that is combined in the
		 * critical section below */
		#pragma omp for
		for (size_t i = 0; i < ntiles; i++) {
			double *at, *bt, *ct;
			double maxl;
			size_t maxidl;
			at = aml_tiling_rawptr(ta, (size_t[]){i});
			bt = pt;
			ct = pt;
			maxidl = idmax(tilesize, at, bt, ct, scalar);
			maxl = abs(at[maxidl]);
			maxidl += i * tilesize;
			if (local_max < maxl) {
				local_max = maxl;
				local_maxid = maxidl;
			}
		}
		#pragma omp critical
		if (max < local_max) {
			max = local_max;
			maxid = local_maxid;
		}
	}
	return maxid;
}

double run_drot(size_t tilesize,
                size_t ntiles,
                struct aml_tiling *ta,
                struct aml_tiling *tb,
                struct aml_tiling *tc,
                double x,
                double y)
{
	(void)*tc;
	#pragma omp parallel for
	for (size_t i = 0; i < ntiles; i++) {
		double *at, *bt;
		at = aml_tiling_rawptr(ta, (size_t[]){i});
		bt = aml_tiling_rawptr(tb, (size_t[]){i});
		drot(tilesize, at, bt, x, y);
	}
	return 1;
}

// TODO implement drotg(x, y, w, s);

double run_drotm(size_t tilesize,
                 size_t ntiles,
                 struct aml_tiling *ta,
                 struct aml_tiling *tb,
                 struct aml_tiling *tc,
                 double *param)
{
	(void)*tc;
	#pragma omp parallel for
	for (size_t i = 0; i < ntiles; i++) {
		double *at, *bt;
		at = aml_tiling_rawptr(ta, (size_t[]){i});
		bt = aml_tiling_rawptr(tb, (size_t[]){i});
		drotm(tilesize, at, bt, param);
	}
	return 1;
}

// TODO implement drotmg(d1, d2, x, y, param);

typedef double (*r)(size_t, size_t, struct aml_tiling *, struct aml_tiling *,
                    struct aml_tiling *, double);

r run_f[8] = {&run_dcopy, &run_dscal, &run_daxpy, &run_dasum,
              &run_ddot,  &run_dnrm2, &run_dswap, &run_idmax};
v verify_f[8] = {&verify_dcopy, &verify_dscal, &verify_daxpy, &verify_dasum,
                 &verify_ddot,  &verify_dnrm2, &verify_dswap, &verify_idmax};

int main(int argc, char *argv[])
{
	aml_init(&argc, &argv);
	struct aml_area *area = &aml_area_linux;

	size_t nb_reps;
	size_t memsize, tilesize, ntiles;
	size_t i, j, k;
	long long int timing;
	aml_time_t start, end;
	double *a, *b, *c;
	struct aml_layout *la, *lb, *lc;
	struct aml_tiling *ta, *tb, *tc;
	double res;
	double scalar = 1.0;
	double scal2 = 2.0;
	double param[5];

	param[0] = -1.0;
	for (size_t i = 1; i < 5; i++)
		param[i] = i;

	long long int sumtime[10] = {0}, maxtime[10] = {0},
	              mintime[10] = {LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX,
	                             LONG_MAX, LONG_MAX, LONG_MAX, LONG_MAX,
	                             LONG_MAX, LONG_MAX};
	char *label[10] = {
	        "Copy: ", "Scale: ", "Triad: ", "Asum: ", "Dot: ",
	        "Norm: ", "Swap: ", "Max ID: ", "RotP: ", "RotM: "};

	if (argc == 1) {
		memsize = DEFAULT_ARRAY_SIZE;
		tilesize = DEFAULT_TILE_SIZE;
		nb_reps = NTIMES;
	} else {
		/* three arguments: log2(array size), log2(tile size),
		 * repetition count */
		assert(argc == 4);
		memsize = 1UL << atoi(argv[1]);
		tilesize = 1UL << atoi(argv[2]);
		nb_reps = atoi(argv[3]);
	}
	printf("Each kernel will be executed %zu times.\n", nb_reps);

	#pragma omp parallel
	{
		#pragma omp master
		{
			k = omp_get_num_threads();
			printf("Number of threads required = %zu\n", k);
		}
	}
	k = 0;
	#pragma omp parallel
	#pragma omp atomic
	k++;
	printf("Number of threads counted = %zu\n", k);

	// AML code
	a = aml_area_mmap(area, memsize * sizeof(double), NULL);
	b = aml_area_mmap(area, memsize * sizeof(double), NULL);
	c = aml_area_mmap(area, memsize * sizeof(double), NULL);
	assert(a != NULL && b != NULL && c != NULL);

	/* layouts */
	assert(!aml_layout_dense_create(&la, a, AML_LAYOUT_ORDER_C,
	                                sizeof(double), 1, (size_t[]){memsize},
	                                NULL, NULL));
	assert(!aml_layout_dense_create(&lb, b, AML_LAYOUT_ORDER_C,
	                                sizeof(double), 1, (size_t[]){memsize},
	                                NULL, NULL));
	assert(!aml_layout_dense_create(&lc, c, AML_LAYOUT_ORDER_C,
	                                sizeof(double), 1, (size_t[]){memsize},
	                                NULL, NULL));
	assert(la != NULL && lb != NULL && lc != NULL);

	/* tilings */
	assert(!aml_tiling_resize_create(&ta, AML_TILING_ORDER_C, la, 1,
	                                 (size_t[]){tilesize}));
	assert(!aml_tiling_resize_create(&tb, AML_TILING_ORDER_C, lb, 1,
	                                 (size_t[]){tilesize}));
	assert(!aml_tiling_resize_create(&tc, AML_TILING_ORDER_C, lc, 1,
	                                 (size_t[]){tilesize}));
	assert(ta != NULL && tb != NULL && tc != NULL);

	aml_tiling_dims(ta, &ntiles);

	/* MAIN LOOP - repeat test cases nb_reps times */
	for (k = 0; k < nb_reps; k++) {
		// Array-of-functions dispatch for the first 8 kernels
		for (i = 0; i < 8; i++) {
			init_arrays(memsize, a, b, c);
			aml_gettime(&start);
			res = run_f[i](tilesize, ntiles, ta, tb, tc, scalar);
			aml_gettime(&end);
			timing = aml_timediff(start, end);
			verify_f[i](memsize, a, b, c, scalar, res);
			sumtime[i] += timing;
			mintime[i] = MIN(mintime[i], timing);
			maxtime[i] = MAX(maxtime[i], timing);
		}

		// Rotations
		init_arrays(memsize, a, b, c);
		aml_gettime(&start);
		res = run_drot(tilesize, ntiles, ta, tb, tc, scal2, scalar);
		aml_gettime(&end);
		timing = aml_timediff(start, end);
		verify_drot(memsize, a, b, c, scal2, scalar, res);
		sumtime[8] += timing;
		mintime[8] = MIN(mintime[8], timing);
		maxtime[8] = MAX(maxtime[8], timing);

		init_arrays(memsize, a, b, c);
		aml_gettime(&start);
		res = run_drotm(tilesize, ntiles, ta, tb, tc, param);
		aml_gettime(&end);
		timing = aml_timediff(start, end);
		verify_drotm(memsize, a, b, c, scal2, scalar, res);
		sumtime[9] += timing;
		mintime[9] = MIN(mintime[9], timing);
		maxtime[9] = MAX(maxtime[9], timing);

		/* Add the rotation generations later, + 2 functions
		   drotg(x, y, dc, ds);
		   drotmg(d1, d2, x, y, param);
		*/
	}

	/* SUMMARY: all nb_reps repetitions are included in sumtime */
	printf("Function Avg time Min time Max time\n");
	for (j = 0; j < 10; j++) {
		double avg = (double)sumtime[j] / (double)nb_reps;
		printf("%s\t%11.6f\t%lld\t%lld\n", label[j], avg, mintime[j],
		       maxtime[j]);
	}

	/* destroy everything */
	aml_tiling_resize_destroy(&ta);
	aml_tiling_resize_destroy(&tb);
	aml_tiling_resize_destroy(&tc);
	aml_layout_destroy(&la);
	aml_layout_destroy(&lb);
	aml_layout_destroy(&lc);
	aml_area_munmap(area, a, memsize * sizeof(double));
	aml_area_munmap(area, b, memsize * sizeof(double));
	aml_area_munmap(area, c, memsize * sizeof(double));
	aml_finalize();
	return 0;
}
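/*
 * Usage sketch (not part of the original file; binary name illustrative):
 *
 *   ./tiled_blas_l1              # defaults: 2^20 doubles, 2^8-element tiles,
 *                                # NTIMES repetitions
 *   ./tiled_blas_l1 22 10 20     # 2^22 doubles, 2^10-element tiles, 20 reps
 *
 * OMP_NUM_THREADS controls the team size reported above and used by the
 * "#pragma omp parallel for" loops in the run_* kernels.
 */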
GB_subref_slice.c
//------------------------------------------------------------------------------ // GB_subref_slice: construct coarse/fine tasks for C = A(I,J) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Determine the tasks for computing C=A(I,J). The matrix C has Cnvec vectors, // and these are divided into coarse and fine tasks. A coarse task will // compute one or more whole vectors of C. A fine task operates on a slice of // a single vector of C. The slice can be done by the # of entries in the // corresponding vector of A, or by the list of indices I, depending on how the // work is done for that method. // The (kC)th vector will access A(imin:imax,kA) in Ai,Ax [pA:pA_end-1], where // pA = Ap_start [kC] and pA_end = Ap_end [kC]. // The computation of each vector C(:,kC) = A(I,kA) is by done using one of 12 // different cases, depending on the vector, as determined by GB_subref_method. // Not all vectors in C are computed using the same method. // Note that J can have duplicates. kC is unique (0:Cnvec-1) but the // corresponding vector kA in A may repeat, if J has duplicates. Duplicates in // J are not exploited, since the coarse/fine tasks are constructed by slicing // slicing the list of vectors Ch of size Cnvec, not the vectors of A. // Compare this function with GB_ewise_slice, which constructs coarse/fine // tasks for the eWise operations (C=A+B, C=A.*B, and C<M>=Z). #define GB_FREE_WORK \ { \ GB_WERK_POP (Coarse, int64_t) ; \ GB_FREE_WERK (&Cwork, Cwork_size) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORK ; \ GB_FREE_WERK (&TaskList, TaskList_size) ; \ GB_FREE_WERK (&Mark, Mark_size) ; \ GB_FREE_WERK (&Inext, Inext_size) ; \ } #include "GB_subref.h" GrB_Info GB_subref_slice ( // output: GB_task_struct **p_TaskList, // array of structs size_t *p_TaskList_size, // size of TaskList int *p_ntasks, // # of tasks constructed int *p_nthreads, // # of threads for subref operation bool *p_post_sort, // true if a final post-sort is needed int64_t *restrict *p_Mark, // for I inverse, if needed; size avlen size_t *p_Mark_size, int64_t *restrict *p_Inext, // for I inverse, if needed; size nI size_t *p_Inext_size, int64_t *p_nduplicates, // # of duplicates, if I inverse computed // from phase0: const int64_t *restrict Ap_start, // location of A(imin:imax,kA) const int64_t *restrict Ap_end, const int64_t Cnvec, // # of vectors of C const bool need_qsort, // true if C must be sorted const int Ikind, // GB_ALL, GB_RANGE, GB_STRIDE or GB_LIST const int64_t nI, // length of I const int64_t Icolon [3], // for GB_RANGE and GB_STRIDE // original input: const int64_t avlen, // A->vlen const int64_t anz, // nnz (A) const GrB_Index *I, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (p_TaskList != NULL) ; ASSERT (p_TaskList_size != NULL) ; ASSERT (p_ntasks != NULL) ; ASSERT (p_nthreads != NULL) ; ASSERT (p_post_sort != NULL) ; ASSERT (p_Mark != NULL) ; ASSERT (p_Inext != NULL) ; ASSERT (p_nduplicates != NULL) ; ASSERT ((Cnvec > 0) == (Ap_start != NULL)) ; ASSERT ((Cnvec > 0) == (Ap_end != NULL)) ; (*p_TaskList) = NULL ; (*p_TaskList_size) = 0 ; (*p_Mark ) = NULL ; (*p_Inext ) = NULL ; int64_t *restrict Mark = NULL ; size_t Mark_size = 0 
; int64_t *restrict Inext = NULL ; size_t Inext_size = 0 ; int64_t *restrict Cwork = NULL ; size_t Cwork_size = 0 ; GB_WERK_DECLARE (Coarse, int64_t) ; // size ntasks1+1 int ntasks1 = 0 ; GrB_Info info ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // allocate the initial TaskList //-------------------------------------------------------------------------- // Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow // later, if needed. Usually, 64*nthreads_max is enough, but in a few cases // fine tasks can cause this number to be exceeded. If that occurs, // TaskList is reallocated. // When the mask is present, it is often fastest to break the work up // into tasks, even when nthreads_max is 1. GB_task_struct *restrict TaskList = NULL ; size_t TaskList_size = 0 ; int max_ntasks = 0 ; int ntasks0 = (nthreads_max == 1) ? 1 : (32 * nthreads_max) ; GB_REALLOC_TASK_WERK (TaskList, ntasks0, max_ntasks) ; //-------------------------------------------------------------------------- // determine if I_inverse can be constructed //-------------------------------------------------------------------------- // I_inverse_ok is true if I might be inverted. If false, then I will not // be inverted. I can be inverted only if the workspace for the inverse // does not exceed nnz(A). Note that if I was provided on input as an // explicit list, but consists of a contiguous range imin:imax, then Ikind // is now GB_LIST and the list I is ignored. // If I_inverse_ok is true, the inverse of I might still not be needed. // need_I_inverse becomes true if any C(:,kC) = A (I,kA) computation // requires I inverse. int64_t I_inverse_limit = GB_IMAX (4096, anz) ; bool I_inverse_ok = (Ikind == GB_LIST && ((nI > avlen / 256) || ((nI + avlen) < I_inverse_limit))) ; bool need_I_inverse = false ; bool post_sort = false ; int64_t iinc = Icolon [GxB_INC] ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- Cwork = GB_MALLOC_WERK (Cnvec+1, int64_t, &Cwork_size) ; if (Cwork == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // estimate the work required for each vector of C //-------------------------------------------------------------------------- int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ; int64_t kC ; #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) \ reduction(||:need_I_inverse) for (kC = 0 ; kC < Cnvec ; kC++) { // jC is the (kC)th vector of C = A(I,J) // int64_t jC = GBH (Ch, kC) ; // C(:,kC) = A(I,kA) will be constructed int64_t pA = Ap_start [kC] ; int64_t pA_end = Ap_end [kC] ; int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j)) int64_t work ; // amount of work for C(:,kC) = A (I,kA) bool this_needs_I_inverse ; // true if this vector needs I inverse // ndupl in I not yet known; it is found when I is inverted. For // now, assume I has no duplicate entries. All that is needed for now // is the work required for each C(:,kC), and whether or not I inverse // must be created. 
The # of duplicates has no impact on the I inverse // decision, and a minor effect on the work (which is ignored). GB_subref_method (&work, &this_needs_I_inverse, alen, avlen, Ikind, nI, I_inverse_ok, need_qsort, iinc, 0) ; // log the result need_I_inverse = need_I_inverse || this_needs_I_inverse ; Cwork [kC] = work ; } //-------------------------------------------------------------------------- // replace Cwork with its cumulative sum //-------------------------------------------------------------------------- GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork, Context) ; double cwork = (double) Cwork [Cnvec] ; //-------------------------------------------------------------------------- // determine # of threads and tasks to use for C=A(I,J) //-------------------------------------------------------------------------- int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ; ntasks1 = (nthreads == 1) ? 1 : (32 * nthreads) ; double target_task_size = cwork / (double) (ntasks1) ; target_task_size = GB_IMAX (target_task_size, chunk) ; //-------------------------------------------------------------------------- // invert I if required //-------------------------------------------------------------------------- int64_t ndupl = 0 ; if (need_I_inverse) { GB_OK (GB_I_inverse (I, nI, avlen, &Mark, &Mark_size, &Inext, &Inext_size, &ndupl, Context)) ; ASSERT (Mark != NULL) ; ASSERT (Inext != NULL) ; } //-------------------------------------------------------------------------- // check for quick return for a single task //-------------------------------------------------------------------------- if (Cnvec == 0 || ntasks1 == 1) { // construct a single coarse task that computes all of C TaskList [0].kfirst = 0 ; TaskList [0].klast = Cnvec-1 ; // free workspace and return result GB_FREE_WORK ; (*p_TaskList ) = TaskList ; (*p_TaskList_size) = TaskList_size ; (*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ; (*p_nthreads ) = 1 ; (*p_post_sort ) = false ; (*p_Mark ) = Mark ; (*p_Mark_size ) = Mark_size ; (*p_Inext ) = Inext ; (*p_Inext_size ) = Inext_size ; (*p_nduplicates) = ndupl ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // slice the work into coarse tasks //-------------------------------------------------------------------------- GB_WERK_PUSH (Coarse, ntasks1 + 1, int64_t) ; if (Coarse == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_pslice (Coarse, Cwork, Cnvec, ntasks1, false) ; //-------------------------------------------------------------------------- // construct all tasks, both coarse and fine //-------------------------------------------------------------------------- int ntasks = 0 ; for (int t = 0 ; t < ntasks1 ; t++) { //---------------------------------------------------------------------- // coarse task computes C (:,k:klast) //---------------------------------------------------------------------- int64_t k = Coarse [t] ; int64_t klast = Coarse [t+1] - 1 ; if (k >= Cnvec) { //------------------------------------------------------------------ // all tasks have been constructed //------------------------------------------------------------------ break ; } else if (k < klast) { //------------------------------------------------------------------ // coarse task has 2 or more vectors //------------------------------------------------------------------ // This is a non-empty coarse-grain task that does two or more // entire vectors of C, vectors k:klast, inclusive. 
GB_REALLOC_TASK_WERK (TaskList, ntasks + 1, max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = klast ; ntasks++ ; } else { //------------------------------------------------------------------ // coarse task has 0 or 1 vectors //------------------------------------------------------------------ // As a coarse-grain task, this task is empty or does a single // vector, k. Vector k must be removed from the work done by this // and any other coarse-grain task, and split into one or more // fine-grain tasks. for (int tt = t ; tt < ntasks1 ; tt++) { // remove k from the initial slice tt if (Coarse [tt] == k) { // remove k from task tt Coarse [tt] = k+1 ; } else { // break, k not in task tt break ; } } //------------------------------------------------------------------ // determine the # of fine-grain tasks to create for vector k //------------------------------------------------------------------ double ckwork = Cwork [k+1] - Cwork [k] ; int nfine = ckwork / target_task_size ; nfine = GB_IMAX (nfine, 1) ; // make the TaskList bigger, if needed GB_REALLOC_TASK_WERK (TaskList, ntasks + nfine, max_ntasks) ; //------------------------------------------------------------------ // create the fine-grain tasks //------------------------------------------------------------------ if (nfine == 1) { //-------------------------------------------------------------- // this is a single coarse task for all of vector k //-------------------------------------------------------------- TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = k ; ntasks++ ; } else { //-------------------------------------------------------------- // slice vector k into nfine fine tasks //-------------------------------------------------------------- // There are two kinds of fine tasks, depending on the method // used to compute C(:,kC) = A(I,kA). If the method iterates // across all entries in A(imin:imax,kA), then those entries // are sliced (of size alen). Three methods (1, 2, and 6) // iterate across all entries in I instead (of size nI). int64_t pA = Ap_start [k] ; int64_t pA_end = Ap_end [k] ; int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j)) int method = GB_subref_method (NULL, NULL, alen, avlen, Ikind, nI, I_inverse_ok, need_qsort, iinc, ndupl) ; if (method == 10) { // multiple fine tasks operate on a single vector C(:,kC) // using method 10, and so a post-sort is needed. post_sort = true ; } if (method == 1 || method == 2 || method == 6) { // slice I for this task nfine = GB_IMIN (nfine, nI) ; nfine = GB_IMAX (nfine, 1) ; for (int tfine = 0 ; tfine < nfine ; tfine++) { // flag this as a fine task, and record the method. // Methods 1, 2, and 6 slice I, not A(:,kA) TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -method ; // do not partition A(:,kA) TaskList [ntasks].pA = pA ; TaskList [ntasks].pA_end = pA_end ; // partition I for this task GB_PARTITION (TaskList [ntasks].pB, TaskList [ntasks].pB_end, nI, tfine, nfine) ; // unused TaskList [ntasks].pM = -1 ; TaskList [ntasks].pM_end = -1 ; // no post sort TaskList [ntasks].len = 0 ; ntasks++ ; } } else { // slice A(:,kA) for this task nfine = GB_IMIN (nfine, alen) ; nfine = GB_IMAX (nfine, 1) ; bool reverse = (method == 8 || method == 9) ; for (int tfine = 0 ; tfine < nfine ; tfine++) { // flag this as a fine task, and record the method. // These methods slice A(:,kA). Methods 8 and 9 // must do so in reverse order. 
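                    // (Illustrative note, not in the original source:
                    // GB_PARTITION splits the range [0,alen) into nfine
                    // nearly-equal pieces and selects piece tfine; the
                    // resulting offsets are shifted by pA below so each fine
                    // task indexes directly into A(:,kA).  For methods 8 and
                    // 9 the pieces are assigned in reverse order.)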
TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -method ; // partition the items for this task GB_PARTITION (TaskList [ntasks].pA, TaskList [ntasks].pA_end, alen, (reverse) ? (nfine-tfine-1) : tfine, nfine) ; TaskList [ntasks].pA += pA ; TaskList [ntasks].pA_end += pA ; // do not partition I TaskList [ntasks].pB = 0 ; TaskList [ntasks].pB_end = nI ; // unused TaskList [ntasks].pM = -1 ; TaskList [ntasks].pM_end = -1 ; // flag the task that does the post sort TaskList [ntasks].len = (tfine == 0 && method == 10) ; ntasks++ ; } } } } } ASSERT (ntasks > 0) ; //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; (*p_TaskList ) = TaskList ; (*p_TaskList_size) = TaskList_size ; (*p_ntasks ) = ntasks ; (*p_nthreads ) = nthreads ; (*p_post_sort ) = post_sort ; (*p_Mark ) = Mark ; (*p_Mark_size ) = Mark_size ; (*p_Inext ) = Inext ; (*p_Inext_size ) = Inext_size ; (*p_nduplicates) = ndupl ; return (GrB_SUCCESS) ; }
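
//------------------------------------------------------------------------------
// illustrative sketch (added; not part of the original source)
//------------------------------------------------------------------------------

// A minimal sketch of the work-balanced slicing that GB_pslice performs
// above, under the assumption that Cwork is the cumulative sum computed by
// GB_cumsum (monotone, with Cwork [0] == 0 and Cwork [n] equal to the total
// work).  It fills Slice [0..ntasks] so that task t owns vectors Slice [t]
// to Slice [t+1]-1 with roughly equal work.  The name pslice_sketch and this
// standalone form are hypothetical, not the library's API.

#include <stdint.h>

static void pslice_sketch
(
    int64_t *Slice,             // output: task boundaries, size ntasks+1
    const int64_t *Cwork,       // input: cumulative work per vector, size n+1
    int64_t n,                  // number of vectors of C
    int ntasks                  // number of coarse tasks
)
{
    double total = (double) Cwork [n] ;
    Slice [0] = 0 ;
    for (int t = 1 ; t < ntasks ; t++)
    {
        // binary search: first k with Cwork [k] >= t * (total / ntasks)
        double target = ((double) t) * total / ((double) ntasks) ;
        int64_t lo = Slice [t-1], hi = n ;
        while (lo < hi)
        {
            int64_t mid = lo + (hi - lo) / 2 ;
            if ((double) Cwork [mid] < target) { lo = mid + 1 ; }
            else { hi = mid ; }
        }
        Slice [t] = lo ;
    }
    Slice [ntasks] = n ;
}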
Interp1PrimFifthOrderCRWENOChar.c
/*! @file Interp1PrimFifthOrderCRWENOChar.c @author Debojyoti Ghosh @brief Characteristic-based CRWENO5 Scheme */ #include <stdio.h> #include <basic.h> #include <arrayfunctions.h> #include <mathfunctions.h> #include <interpolation.h> #include <tridiagLU.h> #include <mpivars.h> #include <hypar.h> #ifdef with_omp #include <omp.h> #endif #undef _MINIMUM_GHOSTS_ /*! \def _MINIMUM_GHOSTS_ * Minimum number of ghost points required for this interpolation * method. */ #define _MINIMUM_GHOSTS_ 3 /*! @brief 5th order CRWENO reconstruction (characteristic-based) on a uniform grid Computes the interpolated values of the first primitive of a function \f${\bf f}\left({\bf u}\right)\f$ at the interfaces from the cell-centered values of the function using the fifth order CRWENO scheme on a uniform grid. The first primitive is defined as a function \f${\bf h}\left({\bf u}\right)\f$ that satisfies: \f{equation}{ {\bf f}\left({\bf u}\left(x\right)\right) = \frac{1}{\Delta x} \int_{x-\Delta x/2}^{x+\Delta x/2} {\bf h}\left({\bf u}\left(\zeta\right)\right)d\zeta, \f} where \f$x\f$ is the spatial coordinate along the dimension of the interpolation. This function computes the 5th order CRWENO numerical approximation \f$\hat{\bf f}_{j+1/2} \approx {\bf h}_{j+1/2}\f$ as the convex combination of three 3rd order methods: \f{align}{ &\ \omega_1\ \times\ \left[ \frac{2}{3}\hat{\alpha}^k_{j-1/2} + \frac{1}{3}\hat{\alpha}^k_{j+1/2} = \frac{1}{6} \left( f_{j-1} + 5f_j \right) \right]\\ + &\ \omega_2\ \times\ \left[ \frac{1}{3}\hat{\alpha}^k_{j-1/2}+\frac{2}{3}\hat{\alpha}^k_{j+1/2} = \frac{1}{6} \left( 5f_j + f_{j+1} \right) \right] \\ + &\ \omega_3\ \times\ \left[ \frac{2}{3}\hat{\alpha}^k_{j+1/2} + \frac{1}{3}\hat{\alpha}^k_{j+3/2} = \frac{1}{6} \left( f_j + 5f_{j+1} \right) \right] \\ \Rightarrow &\ \left(\frac{2}{3}\omega_1+\frac{1}{3}\omega_2\right)\hat{\alpha}^k_{j-1/2} + \left[\frac{1}{3}\omega_1+\frac{2}{3}(\omega_2+\omega_3)\right]\hat{\alpha}^k_{j+1/2} + \frac{1}{3}\omega_3\hat{\alpha}^k_{j+3/2} = \frac{\omega_1}{6}{\alpha}^k_{j-1} + \frac{5(\omega_1+\omega_2)+\omega_3}{6}{\alpha}^k_j + \frac{\omega_2+5\omega_3}{6}{\alpha}^k_{j+1}, \f} where \f{equation}{ \alpha^k = {\bf l}_k \cdot {\bf f},\ k=1,\cdots,n \f} is the \f$k\f$-th characteristic quantity, and \f${\bf l}_k\f$ is the \f$k\f$-th left eigenvector, \f${\bf r}_k\f$ is the \f$k\f$-th right eigenvector, and \f$n\f$ is #HyPar::nvars. The nonlinear weights \f$\omega_k; k=1,2,3\f$ are the WENO weights computed in WENOFifthOrderCalculateWeightsChar(). The resulting block tridiagonal system is solved using blocktridiagLU() (see also #TridiagLU, tridiagLU.h). The final interpolated function is computed from the interpolated characteristic quantities as: \f{equation}{ \hat{\bf f}_{j+1/2} = \sum_{k=1}^n \alpha^k_{j+1/2} {\bf r}_k \f} \b Implementation \b Notes: + This method assumes a uniform grid in the spatial dimension corresponding to the interpolation. + The method described above corresponds to a left-biased interpolation. The corresponding right-biased interpolation can be obtained by reflecting the equations about interface j+1/2. + The left and right eigenvectors are computed at an averaged quantity at j+1/2. Thus, this function requires functions to compute the average state, and the left and right eigenvectors. 
These are provided by the physical model through - #HyPar::GetLeftEigenvectors() - #HyPar::GetRightEigenvectors() - #HyPar::AveragingFunction() If these functions are not provided by the physical model, then a characteristic-based interpolation cannot be used. + The function computes the interpolant for the entire grid in one call. It loops over all the grid lines along the interpolation direction and carries out the 1D interpolation along these grid lines. + Location of cell-centers and cell interfaces along the spatial dimension of the interpolation is shown in the following figure: @image html chap1_1Ddomain.png @image latex chap1_1Ddomain.eps width=0.9\textwidth \b Function \b arguments: Argument | Type | Explanation --------- | --------- | --------------------------------------------- fI | double* | Array to hold the computed interpolant at the grid interfaces. This array must have the same layout as the solution, but with \b no \b ghost \b points. Its size should be the same as u in all dimensions, except dir (the dimension along which to interpolate) along which it should be larger by 1 (number of interfaces is 1 more than the number of interior cell centers). fC | double* | Array with the cell-centered values of the flux function \f${\bf f}\left({\bf u}\right)\f$. This array must have the same layout and size as the solution, \b with \b ghost \b points. u | double* | The solution array \f${\bf u}\f$ (with ghost points). If the interpolation is characteristic based, this is needed to compute the eigendecomposition. For a multidimensional problem, the layout is as follows: u is a contiguous 1D array of size (nvars*dim[0]*dim[1]*...*dim[D-1]) corresponding to the multi-dimensional solution, with the following ordering - nvars, dim[0], dim[1], ..., dim[D-1], where nvars is the number of solution components (#HyPar::nvars), dim is the local size (#HyPar::dim_local), D is the number of spatial dimensions. x | double* | The grid array (with ghost points). This is used only by non-uniform-grid interpolation methods. For multidimensional problems, the layout is as follows: x is a contiguous 1D array of size (dim[0]+dim[1]+...+dim[D-1]), with the spatial coordinates along dim[0] stored from 0,...,dim[0]-1, the spatial coordinates along dim[1] stored along dim[0],...,dim[0]+dim[1]-1, and so forth. upw | int | Upwinding direction: if positive, a left-biased interpolant will be computed; if negative, a right-biased interpolant will be computed. If the interpolation method is central, then this has no effect. dir | int | Spatial dimension along which to interpolate (eg: 0 for 1D; 0 or 1 for 2D; 0,1 or 2 for 3D) s | void* | Solver object of type #HyPar: the following variables are needed - #HyPar::ghosts, #HyPar::ndims, #HyPar::nvars, #HyPar::dim_local. m | void* | MPI object of type #MPIVariables: this is needed only by compact interpolation method that need to solve a global implicit system across MPI ranks. uflag | int | A flag indicating if the function being interpolated \f${\bf f}\f$ is the solution itself \f${\bf u}\f$ (if 1, \f${\bf f}\left({\bf u}\right) \equiv {\bf u}\f$). \b Reference: + Ghosh, D., Baeder, J. D., Compact Reconstruction Schemes with Weighted ENO Limiting for Hyperbolic Conservation Laws, SIAM Journal on Scientific Computing, 34 (3), 2012, A1678–A1706, http://dx.doi.org/10.1137/110857659 + Ghosh, D., Constantinescu, E. 
M., Brown, J., Efficient Implementation of Nonlinear Compact Schemes on
      Massively Parallel Platforms, SIAM Journal on Scientific Computing, 37 (3), 2015,
      C354–C383, http://dx.doi.org/10.1137/140989261
*/
int Interp1PrimFifthOrderCRWENOChar(
                                     double *fI,  /*!< Array of interpolated function values at the interfaces */
                                     double *fC,  /*!< Array of cell-centered values of the function \f${\bf f}\left({\bf u}\right)\f$ */
                                     double *u,   /*!< Array of cell-centered values of the solution \f${\bf u}\f$ */
                                     double *x,   /*!< Grid coordinates */
                                     int    upw,  /*!< Upwind direction (left or right biased) */
                                     int    dir,  /*!< Spatial dimension along which to interpolate */
                                     void   *s,   /*!< Object of type #HyPar containing solver-related variables */
                                     void   *m,   /*!< Object of type #MPIVariables containing MPI-related variables */
                                     int    uflag /*!< Flag to indicate if \f$f(u) \equiv u\f$, i.e., if the solution is being reconstructed */
                                   )
{
  HyPar          *solver = (HyPar*)          s;
  MPIVariables   *mpi    = (MPIVariables*)   m;
  CompactScheme  *compact= (CompactScheme*)  solver->compact;
  WENOParameters *weno   = (WENOParameters*) solver->interp;
  TridiagLU      *lu     = (TridiagLU*)      solver->lusolver;
  int             sys,Nsys,d,v,k;
  _DECLARE_IERR_;

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local;

  /* define some constants */
  static const double one_third = 1.0/3.0;
  static const double one_sixth = 1.0/6.0;

  double *ww1, *ww2, *ww3;
  ww1 = weno->w1 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  ww2 = weno->w2 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];
  ww3 = weno->w3 + (upw < 0 ? 2*weno->size : 0) + (uflag ? weno->size : 0) + weno->offset[dir];

  /* create index and bounds for the outer loop, i.e., to loop over all 1D lines
     along dimension "dir" */
  int indexC[ndims], indexI[ndims], index_outer[ndims], bounds_outer[ndims], bounds_inter[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] =  1;
  _ArrayCopy1D_(dim,bounds_inter,ndims); bounds_inter[dir] += 1;

  /* calculate total number of block tridiagonal systems to solve */
  _ArrayProduct1D_(bounds_outer,ndims,Nsys);

  /* allocate arrays for the averaged state, eigenvectors and characteristic interpolated f */
  double R[nvars*nvars], L[nvars*nvars], uavg[nvars];

  /* Allocate arrays for tridiagonal system */
  double *A = compact->A;
  double *B = compact->B;
  double *C = compact->C;
  double *F = compact->R;

#pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
  for (sys=0; sys<Nsys; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexC,ndims);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int qm1,qm2,qm3,qp1,qp2;
      if (upw > 0) {
        indexC[dir] = indexI[dir]-3; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]-2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2);
      } else {
        indexC[dir] = indexI[dir]+2; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm3);
        indexC[dir] = indexI[dir]+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm2);
        indexC[dir] = indexI[dir]  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qm1);
        indexC[dir] = indexI[dir]-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qp1);
        indexC[dir] = indexI[dir]-2;
_ArrayIndex1D_(ndims,dim,indexC,ghosts,qp2); } int p; /* 1D index of the interface */ _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p); /* find averaged state at this interface */ IERR solver->AveragingFunction(uavg,&u[nvars*qm1],&u[nvars*qp1],solver->physics); CHECKERR(ierr); /* Get the left and right eigenvectors */ IERR solver->GetLeftEigenvectors (uavg,L,solver->physics,dir); CHECKERR(ierr); IERR solver->GetRightEigenvectors (uavg,R,solver->physics,dir); CHECKERR(ierr); for (v=0; v<nvars; v++) { /* calculate the characteristic flux components along this characteristic */ double fm3, fm2, fm1, fp1, fp2; fm3 = fm2 = fm1 = fp1 = fp2 = 0; for (k = 0; k < nvars; k++) { fm3 += L[v*nvars+k] * fC[qm3*nvars+k]; fm2 += L[v*nvars+k] * fC[qm2*nvars+k]; fm1 += L[v*nvars+k] * fC[qm1*nvars+k]; fp1 += L[v*nvars+k] * fC[qp1*nvars+k]; fp2 += L[v*nvars+k] * fC[qp2*nvars+k]; } /* Candidate stencils and their optimal weights*/ double f1, f2, f3; if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { /* Use WENO5 at the physical boundaries */ f1 = (2*one_sixth)*fm3 - (7.0*one_sixth)*fm2 + (11.0*one_sixth)*fm1; f2 = (-one_sixth)*fm2 + (5.0*one_sixth)*fm1 + (2*one_sixth)*fp1; f3 = (2*one_sixth)*fm1 + (5*one_sixth)*fp1 - (one_sixth)*fp2; } else { /* CRWENO5 at the interior points */ f1 = (one_sixth) * (fm2 + 5*fm1); f2 = (one_sixth) * (5*fm1 + fp1); f3 = (one_sixth) * (fm1 + 5*fp1); } /* calculate WENO weights */ double w1,w2,w3; w1 = *(ww1+p*nvars+v); w2 = *(ww2+p*nvars+v); w3 = *(ww3+p*nvars+v); if ( ((mpi->ip[dir] == 0 ) && (indexI[dir] == 0 )) || ((mpi->ip[dir] == mpi->iproc[dir]-1) && (indexI[dir] == dim[dir])) ) { for (k=0; k<nvars; k++) { A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0; C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = 0.0; B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = L[v*nvars+k]; } } else { if (upw > 0) { for (k=0; k<nvars; k++) { A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((2*one_third)*w1 + (one_third)*w2) * L[v*nvars+k]; B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w1 + (2*one_third)*(w2+w3)) * L[v*nvars+k]; C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w3) * L[v*nvars+k]; } } else { for (k=0; k<nvars; k++) { C[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((2*one_third)*w1 + (one_third)*w2) * L[v*nvars+k]; B[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w1 + (2*one_third)*(w2+w3)) * L[v*nvars+k]; A[(Nsys*indexI[dir]+sys)*nvars*nvars+v*nvars+k] = ((one_third)*w3) * L[v*nvars+k]; } } } F[(Nsys*indexI[dir]+sys)*nvars+v] = w1*f1 + w2*f2 + w3*f3; } } } #ifdef serial /* Solve the tridiagonal system */ IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,NULL); CHECKERR(ierr); #else /* Solve the tridiagonal system */ /* all processes except the last will solve without the last interface to avoid overlap */ if (mpi->ip[dir] != mpi->iproc[dir]-1) { IERR blocktridiagLU(A,B,C,F,dim[dir] ,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr); } else { IERR blocktridiagLU(A,B,C,F,dim[dir]+1,Nsys,nvars,lu,&mpi->comm[dir]); CHECKERR(ierr); } /* Now get the solution to the last interface from the next proc */ double *sendbuf = compact->sendbuf; double *recvbuf = compact->recvbuf; MPI_Request req[2] = {MPI_REQUEST_NULL,MPI_REQUEST_NULL}; if (mpi->ip[dir]) for (d=0; d<Nsys*nvars; d++) sendbuf[d] = F[d]; if (mpi->ip[dir] != mpi->iproc[dir]-1) MPI_Irecv(recvbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]+1,214,mpi->comm[dir],&req[0]); if (mpi->ip[dir]) 
MPI_Isend(sendbuf,Nsys*nvars,MPI_DOUBLE,mpi->ip[dir]-1,214,mpi->comm[dir],&req[1]);
  MPI_Waitall(2,&req[0],MPI_STATUSES_IGNORE);
  if (mpi->ip[dir] != mpi->iproc[dir]-1)
    for (d=0; d<Nsys*nvars; d++) F[d+Nsys*nvars*dim[dir]] = recvbuf[d];
#endif

  /* save the solution to fI */
#pragma omp parallel for schedule(auto) default(shared) private(sys,d,v,k,R,L,uavg,index_outer,indexC,indexI)
  for (sys=0; sys<Nsys; sys++) {
    _ArrayIndexnD_(ndims,sys,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexI,ndims);
    for (indexI[dir] = 0; indexI[dir] < dim[dir]+1; indexI[dir]++) {
      int p; _ArrayIndex1D_(ndims,bounds_inter,indexI,0,p);
      _ArrayCopy1D_((F+sys*nvars+Nsys*nvars*indexI[dir]),(fI+nvars*p),nvars);
    }
  }

  return(0);
}
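
/* Illustrative sketch (added; not part of the original source): the scalar
   analogue of the block-tridiagonal solve above, for one characteristic
   variable on a single rank.  The interior CRWENO5 rows have the form
   a_j x_{j-1} + b_j x_j + c_j x_{j+1} = r_j with, as assembled above,
   a = (2/3)w1 + (1/3)w2, b = (1/3)w1 + (2/3)(w2+w3), c = (1/3)w3, and
   r = w1*f1 + w2*f2 + w3*f3.  blocktridiagLU performs this elimination in
   block form (and across MPI ranks); the name thomas_solve_sketch is
   hypothetical. */
static void thomas_solve_sketch(
                                 double *a, /* sub-diagonal, a[0] unused */
                                 double *b, /* diagonal (overwritten) */
                                 double *c, /* super-diagonal, c[n-1] unused */
                                 double *r, /* right-hand side (overwritten) */
                                 double *x, /* solution */
                                 int n      /* number of interfaces */
                               )
{
  int i;
  /* forward elimination */
  for (i = 1; i < n; i++) {
    double m = a[i] / b[i-1];
    b[i] -= m * c[i-1];
    r[i] -= m * r[i-1];
  }
  /* back substitution */
  x[n-1] = r[n-1] / b[n-1];
  for (i = n-2; i >= 0; i--) x[i] = (r[i] - c[i] * x[i+1]) / b[i];
}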
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> // HLSL Change Starts #include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant namespace hlsl { struct UnusualAnnotation; } // HLSL Change Ends namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; 
class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. 
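    // (Note, not in the original source: llvm::DenseMap::operator[] value-
    // initializes the mapped FileNullability on first access, so a file with
    // no recorded nullability yields an empty record here.)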
Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; // HLSL Change Begin // The HLSL rewriter doesn't define a default matrix pack, // so we must preserve the lack of annotations to avoid changing semantics. bool HasDefaultMatrixPack = false; // Uses of #pragma pack_matrix change the default pack. bool DefaultMatrixPackRowMajor = false; // HLSL Change End. enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. 
SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. 
ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. 
ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. 
IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. 
/// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed; /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; void ReadMethodPool(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema& S) : S(S), OldFPContractState(S.FPFeatures.fp_contract) {} ~FPContractStateRAII() { S.FPFeatures.fp_contract = OldFPContractState; } private: Sema& S; bool OldFPContractState : 1; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything. 
void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. 
ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); unsigned deduceWeakPropertyFromType(QualType T) { if ((getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak()) || (getLangOpts().ObjCAutoRefCount && T.getObjCLifetime() == Qualifiers::OCL_Weak)) return ObjCDeclSpec::DQ_PR_weak; return 0; } /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. 
/// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
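/// \par Example
/// A minimal sketch (editorial addition; the diagnoser name and the diagnostic
/// ID are assumed) of a concrete diagnoser handed to RequireCompleteType(),
/// declared further below:
/// \code
///   struct IncompleteTypeDiagnoser : Sema::TypeDiagnoser {
///     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
///       S.Diag(Loc, diag::err_typecheck_decl_incomplete_type) << T;
///     }
///   };
///   IncompleteTypeDiagnoser Diagnoser;
///   S.RequireCompleteType(Loc, T, Diagnoser);
/// \endcode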
struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // /// List of decls defined in a function prototype. This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. 
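/// \par Example
/// An illustrative sketch (editorial addition; Actions, CurScope, SS, Name,
/// NameLoc and NextTok are assumed) of how a caller can dispatch on the
/// classification:
/// \code
///   Sema::NameClassification C =
///       Actions.ClassifyName(CurScope, SS, Name, NameLoc, NextTok,
///                            /*IsAddressOfOperand=*/false);
///   switch (C.getKind()) {
///   case Sema::NC_Type: {
///     ParsedType T = C.getType();        // ... parse a type from here ...
///     break;
///   }
///   case Sema::NC_Expression: {
///     ExprResult E = C.getExpression();  // ... parse an expression from here ...
///     break;
///   }
///   default:
///     break;
///   }
/// \endcode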
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); // HLSL Change Starts // This enumeration is used to determine whether a variable declaration // should shadow a prior declaration rather than merging. 
enum ShadowMergeState { ShadowMergeState_Disallowed, // shadowing is not allowed ShadowMergeState_Possible, // shadowing is possible (but may not occur) ShadowMergeState_Effective // the declaration should shadow a prior one }; // HLSL Change Ends NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC, hlsl::ParameterModifier ParamMod); // HLSL Change void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments.
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule.
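/// \par Example
/// Illustrative sketch (editorial addition; Actions, DirectiveLoc and Mod are
/// assumed): ActOnModuleBegin and ActOnModuleEnd bracket the declarations
/// parsed inside a submodule:
/// \code
///   Actions.ActOnModuleBegin(DirectiveLoc, Mod);
///   // ... parse the submodule's contents ...
///   Actions.ActOnModuleEnd(DirectiveLoc, Mod);
/// \endcode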
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, 
Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. Return true if a new attribute was added. 
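/// \par Example
/// Illustrative pattern (editorial addition; S, D and Attr are assumed): a
/// merge method returns the attribute to attach, or null when merging
/// determined that no new attribute is needed:
/// \code
///   if (MinSizeAttr *MA = S.mergeMinSizeAttr(
///           D, Attr.getRange(), Attr.getAttributeSpellingListIndex()))
///     D->addAttr(MA);
/// \endcode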
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator.
}; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. 
// TODO: make this a typesafe union. typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all templates and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions.
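// Illustrative sketch (editorial addition; S, CurScope, OpLoc, LHS and RHS are
// assumed): resolving a binary '+' through overload resolution. The candidate
// set is seeded with the visible 'operator+' declarations via
// LookupOverloadedOperatorName, declared in the name-lookup section below.
//
//   UnresolvedSet<16> Fns;
//   S.LookupOverloadedOperatorName(OO_Plus, CurScope,
//                                  LHS->getType(), RHS->getType(), Fns);
//   ExprResult R = S.CreateOverloadedBinOp(OpLoc, BO_Add, Fns, LHS, RHS);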
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria are specified via the LookupNameKind enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Lookup of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Lookup of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol.
LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non-error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
  };

  TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind,
                             Scope *S, CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr,
                             bool RecordFailure = true);

  TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                               Sema::LookupNameKind LookupKind, Scope *S,
                               CXXScopeSpec *SS,
                               std::unique_ptr<CorrectionCandidateCallback> CCC,
                               TypoDiagnosticGenerator TDG,
                               TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                               DeclContext *MemberContext = nullptr,
                               bool EnteringContext = false,
                               const ObjCObjectPointerType *OPT = nullptr);

  /// \brief Process any TypoExprs in the given Expr and its children,
  /// generating diagnostics as appropriate and returning a new Expr if there
  /// were typos that were all successfully corrected and ExprError if one or
  /// more typos could not be corrected.
  ///
  /// \param E The Expr to check for TypoExprs.
  ///
  /// \param InitDecl A VarDecl to avoid because the Expr being corrected is
  /// its initializer.
  ///
  /// \param Filter A function applied to a newly rebuilt Expr to determine if
  /// it is an acceptable/usable result from a single combination of typo
  /// corrections. As long as the filter returns ExprError, different
  /// combinations of corrections will be tried until all are exhausted.
  ExprResult
  CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; });

  ExprResult
  CorrectDelayedTyposInExpr(Expr *E,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(E, nullptr, Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                            llvm::function_ref<ExprResult(Expr *)> Filter =
                                [](Expr *E) -> ExprResult { return E; }) {
    return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
  }

  ExprResult
  CorrectDelayedTyposInExpr(ExprResult ER,
                            llvm::function_ref<ExprResult(Expr *)> Filter) {
    return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
  }

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    bool ErrorRecovery = true);

  void diagnoseTypo(const TypoCorrection &Correction,
                    const PartialDiagnostic &TypoDiag,
                    const PartialDiagnostic &PrevNote,
                    bool ErrorRecovery = true);

  void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                          ArrayRef<Expr *> Args,
                                 AssociatedNamespaceSet &AssociatedNamespaces,
                                 AssociatedClassSet &AssociatedClasses);

  void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                            bool ConsiderLinkage, bool AllowInlineNamespace);

  void DiagnoseAmbiguousLookup(LookupResult &Result);
  //@}

  ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                          SourceLocation IdLoc,
                                          bool TypoCorrection = false);
  NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
                                 Scope *S, bool ForRedeclaration,
                                 SourceLocation Loc);
  NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                      Scope *S);
  void AddKnownFunctionAttributes(FunctionDecl *FD);

  // More parsing and symbol table subroutines.

  void ProcessPragmaWeak(Scope *S, Decl *D);
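
  // For reference, ProcessPragmaWeak handles the standard GCC/Clang pragma
  // forms below; the names are illustrative only:
  //
  //   #pragma weak fn              // declare 'fn' as a weak symbol
  //   #pragma weak fn = impl_fn    // make 'fn' a weak alias of 'impl_fn'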
  // Decl attributes - this routine is the top level dispatcher.
  void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
  void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
                                bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                      const AttributeList *AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for the nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
  bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
                            const FunctionDecl *FD = nullptr);
  bool CheckNoReturnAttr(const AttributeList &attr);
  bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
                                      unsigned ArgNum, StringRef &Str,
                                      SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(
      CXXRecordDecl *RD, SourceRange Range, bool BestCase,
      MSInheritanceAttr::Spelling SemanticSpelling);

  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType &T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Check whether a nullability type specifier can be added to the given
  /// type.
  ///
  /// \param type The type to which the nullability specifier will be
  /// added. On success, this type will be updated appropriately.
  ///
  /// \param nullability The nullability specifier to add.
  ///
  /// \param nullabilityLoc The location of the nullability specifier.
  ///
  /// \param isContextSensitive Whether this nullability specifier was
  /// written as a context-sensitive keyword (in an Objective-C
  /// method) or an Objective-C property attribute, rather than as an
  /// underscored type specifier.
  ///
  /// \returns true if nullability cannot be applied, false otherwise.
  bool checkNullabilityTypeSpecifier(QualType &type,
                                     NullabilityKind nullability,
                                     SourceLocation nullabilityLoc,
                                     bool isContextSensitive);
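
  // The two spellings distinguished by 'isContextSensitive' above look like
  // this in source (illustrative only): the underscored type specifiers
  //
  //   void take(int * _Nonnull p, int * _Nullable q);
  //
  // versus the context-sensitive keywords ('nonnull', 'nullable'), which are
  // only recognized in Objective-C method result/parameter types and in
  // property attribute lists.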
  /// \brief Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs,
                                   SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                   ObjCMethodDecl *MethodDecl,
                                   bool IsProtocolMethodDecl);

  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                        ObjCMethodDecl *Overridden,
                                        bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method,
                             ObjCMethodDecl *MethodDecl,
                             bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
  typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                ObjCIvarDecl **Fields, unsigned nIvars,
                                SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any
  /// method remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                                 ObjCContainerDecl* IDecl,
                                 bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                   ObjCInterfaceDecl *IDecl);
  void DefaultSynthesizeProperties(Scope *S, Decl *D);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method, ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs the property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property
  /// setter/getter and its property has a backing ivar, returns this ivar;
  /// otherwise, returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                     const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                      SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD,
                      Selector GetterSel,
                      Selector SetterSel,
                      const bool isAssign,
                      const bool isReadWrite,
                      const unsigned Attributes,
                      const unsigned AttributesAsWritten,
                      bool *isOverridingProperty,
                      QualType T,
                      TypeSourceInfo *TSI,
                      tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       Selector SetterSel,
                                       const bool isAssign,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
                                          const ObjCImplementationDecl *ImplD,
                                          const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// \brief Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// \brief - Returns instance or factory methods in the global method pool
  /// for the given selector. If no such method, or only one method, is found,
  /// the function returns false; otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                  SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                          bool instance);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R,
                                      bool receiverIdOrClass);

  void DiagnoseMultipleMethodInGlobalPool(
                                  SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// \brief - Returns the method which best matches the given argument list,
  /// or nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance);

  /// \brief Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                        bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg);
  StmtResult ActOnExprStmtError();
  StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// \brief A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S): S(S) {
      S.ActOnStartOfCompoundStmt();
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope 
*S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
  bool CanUseDecl(NamedDecl *D);
  bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
                         const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
                         bool ObjCPropertyAccess=false);
  void NoteDeletedFunction(FunctionDecl *FD);
  std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
  bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                        ObjCMethodDecl *Getter,
                                        SourceLocation Loc);
  void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                             ArrayRef<Expr *> Args);

  void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       Decl *LambdaContextDecl = nullptr,
                                       bool IsDecltype = false);
  enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
  void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                       ReuseLambdaContextDecl_t,
                                       bool IsDecltype = false);
  void PopExpressionEvaluationContext();

  void DiscardCleanupsInEvaluationContext();

  ExprResult TransformToPotentiallyEvaluated(Expr *E);
  ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

  ExprResult ActOnConstantExpression(ExprResult Res);

  // Functions for marking a declaration referenced.  These functions also
  // contain the relevant logic for marking if a reference to a function or
  // variable is an odr-use (in the C++11 sense).  There are separate variants
  // for expressions referring to a decl; these exist because odr-use marking
  // needs to be delayed for some constant variables when we build one of the
  // named expressions.
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool OdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E);
  void MarkMemberReferenced(MemberExpr *E);

  void UpdateMarkingForLValueToRValue(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// \brief Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks
  /// whether the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                          SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                          QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// \brief Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());
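
  // The TryCaptureKind values above correspond to these source-level lambda
  // capture forms (illustrative only):
  //
  //   int x = 0;
  //   auto byVal = [x]  { return x; };  // TryCapture_ExplicitByVal
  //   auto byRef = [&x] { return x; };  // TryCapture_ExplicitByRef
  //   auto impl  = [=]  { return x; };  // TryCapture_Implicit (via default)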
  /// \brief Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// \brief Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);

  /// \brief Try to recover by turning the given expression into a
  /// call.  Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);

  /// \brief Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);

  /// \brief Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);

  // Primary Expressions.
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult 
ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); // HLSL Change Begins bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc, UnaryExprOrTypeTrait ExprKind); // HLSL Change Ends bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation Loc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnConditionalOp - Parse a ?: operation.  Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  void ActOnStmtExprError();

  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets;  // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };

  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  OffsetOfComponent *CompPtr,
                                  unsigned NumComponents,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  OffsetOfComponent *CompPtr,
                                  unsigned NumComponents,
                                  SourceLocation RParenLoc);

  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                             Expr *CondExpr, Expr *LHSExpr,
                             Expr *RHSExpr, SourceLocation RPLoc);

  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);

  // __null
  ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

  bool CheckCaseExpression(Expr *E);

  /// \brief Describes the result of an "if-exists" condition check.
  enum IfExistsResult {
    /// \brief The symbol exists.
    IER_Exists,

    /// \brief The symbol does not exist.
    IER_DoesNotExist,

    /// \brief The name is a dependent name, so the results will differ
    /// from one instantiation to the next.
    IER_Dependent,

    /// \brief An error occurred.
    IER_Error
  };
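
  // The checks below implement the Microsoft __if_exists / __if_not_exists
  // extension; for orientation (illustrative only):
  //
  //   struct S { void f(); };
  //   void g(S &s) {
  //     __if_exists(S::f)     { s.f(); }  // entered: S::f exists
  //     __if_not_exists(S::g) { }         // entered: S::g does not exist
  //   }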
  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                               const DeclarationNameInfo &TargetNameInfo);

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                               bool IsIfExists, CXXScopeSpec &SS,
                               UnqualifiedId &Name);

  StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        NestedNameSpecifierLoc QualifierLoc,
                                        DeclarationNameInfo NameInfo,
                                        Stmt *Nested);
  StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        CXXScopeSpec &SS, UnqualifiedId &Name,
                                        Stmt *Nested);

  //===------------------------- "Block" Extension ------------------------===//

  /// ActOnBlockStart - This callback is invoked when a block literal is
  /// started.
  void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockArguments - This callback allows processing of block arguments.
  /// If there are no arguments, this is still invoked.
  void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                           Scope *CurScope);

  /// ActOnBlockError - If there is an error parsing a block, this callback
  /// is invoked to pop the information about the block from the action impl.
  void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockStmtExpr - This is called when the body of a block statement
  /// literal was successfully completed.  ^(int x){...}
  ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                                Scope *CurScope);

  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  // HLSL Change Starts
  //===---------------------------- HLSL Features -------------------------===//

  /// cbuffer/tbuffer
  llvm::SmallVector<Decl*, 1> HLSLBuffers;
  Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer,
                             SourceLocation KwLoc, IdentifierInfo *Ident,
                             SourceLocation IdentLoc,
                             std::vector<hlsl::UnusualAnnotation *>&
                                 BufferAttributes,
                             SourceLocation LBrace);
  void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace);
  Decl* getActiveHLSLBuffer() const;
  void ActOnStartHLSLBufferView();
  bool IsOnHLSLBufferView();
  Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc,
                            DeclGroupPtrTy &dcl, bool iscbuf);
  // HLSL Change Ends

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               AttributeList *AttrList);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  CXXRecordDecl *getStdBadAlloc() const;

  /// \brief Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// \brief Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. 
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// \brief Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(ComputedEST != EST_ComputedNoexcept &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// \brief The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// \brief The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// \brief Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// \brief Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// \brief Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any"
        ESI.Type = EST_ComputedNoexcept;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// \brief Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. 
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy constructor will be added.
  ///
  /// \returns The implicitly-declared copy constructor.
  CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitCopyConstructor - Checks for feasibility of
  /// defining this constructor as the copy constructor.
  void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit move constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// move constructor will be added.
  ///
  /// \returns The implicitly-declared move constructor, or NULL if it wasn't
  /// declared.
  CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitMoveConstructor - Checks for feasibility of
  /// defining this constructor as the move constructor.
  void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit copy assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy assignment operator will be added.
  ///
  /// \returns The implicitly-declared copy assignment operator.
  CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

  /// \brief Defines an implicitly-declared copy assignment operator.
  void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// \brief Declare the implicit move assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// move assignment operator will be added.
  ///
  /// \returns The implicitly-declared move assignment operator, or NULL if it
  /// wasn't declared.
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// \brief Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// \brief Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// \brief Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);
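
  // For orientation, a special member becomes implicitly deleted in cases
  // like the following (standard C++11 behavior; illustrative only):
  //
  //   struct Handle {
  //     std::unique_ptr<int> p;
  //     // Copy constructor and copy assignment are implicitly deleted here,
  //     // because unique_ptr's copy operations are deleted.
  //   };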
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class), along with the qualifiers placed on '*this'.
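/// /// A usage sketch (illustrative only; \c SemaRef and \c RD are hypothetical /// names for a Sema instance and a class declaration): /// \code /// Sema::CXXThisScopeRAII ThisScope(SemaRef, RD, Qualifiers::Const); /// // within this scope, 'this' has the type of a pointer to const RD /// \endcode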
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns \c true if the capture failed, \c false otherwise. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression.
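/// /// A sketch of the new-expression forms this covers (illustrative only; /// 'Widget', 'buf' and 'n' are hypothetical): /// \code /// new int(5); // direct-initialized /// new (buf) Widget(); // placement form /// ::new double[n]; // global operator new, array form /// \endcode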
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions.
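/// /// For example (assuming the Embarcadero expression traits; illustrative /// only): /// \code /// int x = 0; /// bool B = __is_lvalue_expr(x); // true: 'x' is an lvalue /// \endcode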
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surrounds it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
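/// /// For example (a Microsoft extension; illustrative only): /// \code /// struct B { void f(); }; /// struct D : B { void f() { __super::f(); } }; // '__super::' handled here /// \endcode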
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// a nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, no error message is emitted. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise.
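/// /// For example (illustrative only): /// \code /// template<typename T> struct Outer { struct Inner {}; }; /// Outer<int>::Inner I; // the 'Outer<int>::' specifier is parsed here /// \endcode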
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference.
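/// /// For example (a C++14 init-capture; illustrative only): /// \code /// int n = 0; /// auto f = [m = n + 1] { return m; }; // 'm = n + 1' is analyzed here /// \endcode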
QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const 
DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType 
objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg 
TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise.
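/// /// A sketch of the conversion performed (illustrative only; \c Buf is /// hypothetical): /// \code /// template<typename T, int N = 4> struct Buf {}; /// Buf<char> B; // TemplateArgs = {char}; Converted = {char, 4} /// \endcode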
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. 
/// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. 
/// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. 
This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. 
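/// For example, given
/// \code
/// template<typename T, typename U> void f(T);
/// \endcode
/// the call \c f(42) deduces \c T as \c int but never deduces \c U.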
TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call.
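/// For instance, for the call \c f(42) against
/// \code
/// template<typename T> void f(T);
/// \endcode
/// one would expect the original parameter type \c T, argument index 0,
/// and original argument type \c int (an illustration of the fields below).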
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. 
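/// For example, deducing \c auto from the initializer in
/// \code
/// auto x = 42; // deduces int
/// \endcode
/// yields \c DAR_Succeeded.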
enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. 
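/// For example, matching \c A<int*> against the partial specialization
/// \code
/// template<typename T> struct A<T*> { };
/// \endcode
/// deduces \c T as \c int, and the deduced argument is then substituted
/// back into the partial specialization.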
DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup.
A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief The stack of call expressions undergoing template instantiation. /// /// The top of this stack is used by a fixit instantiating unresolved /// function calls to fix the AST to match the textual change it prints. SmallVector<CallExpr *, 8> CallsUndergoingInstantiation; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and \c isInvalid() will return \c true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, or a member thereof.
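/// For example, the variable declaration in
/// \code
/// template<typename T> struct X { T value; };
/// X<int> x; // requires the implicit instantiation of X<int>
/// \endcode
/// pushes an instantiation record for \c X<int> while its definition is
/// being built.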
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, ActiveTemplateInstantiation::InstantiationKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. 
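/// A minimal sketch of the usual calling pattern (with placeholder
/// \c PointOfInstantiation and \c Entity values):
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return true; // too deep; the error has already been diagnosed
/// \endcode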
bool isInvalid() const { return Invalid; } private: Sema &SemaRef; bool Invalid; bool SavedInNonInstantiationSFINAEContext; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(), sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void PrintInstantiationStack(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation). /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. std::deque<PendingImplicitInstantiation> PendingInstantiations; class SavePendingInstantiationsAndVTableUsesRAII { public: SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } ~SavePendingInstantiationsAndVTableUsesRAII() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc.
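/// For example:
/// \code
/// template<typename T> void f() {
///   enum E { e };
///   struct Local { E get() const { return e; } };
/// }
/// \endcode
/// Local::get must be instantiated within f's own scope so that it can
/// still refer to the function-local type E.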
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl 
*PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl 
*ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. 
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); enum PragmaPackKind { PPK_Default, // #pragma pack([n]) PPK_Show, // #pragma pack(show), only supported by MSVC. PPK_Push, // #pragma pack(push, [identifier], [n]) PPK_Pop // #pragma pack(pop, [identifier], [n]) }; enum PragmaMSStructKind { PMSST_OFF, // #pragma ms_struct off PMSST_ON // #pragma ms_struct on }; enum PragmaMSCommentKind { PCK_Unknown, PCK_Linker, // #pragma comment(linker, ...) PCK_Lib, // #pragma comment(lib, ...) PCK_Compiler, // #pragma comment(compiler, ...) PCK_ExeStr, // #pragma comment(exestr, ...) PCK_User // #pragma comment(user, ...) }; /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name, Expr *Alignment, SourceLocation PragmaLoc, SourceLocation LParenLoc, SourceLocation RParenLoc); /// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...). void ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// \brief Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// \brief Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// \brief Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch. void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT. void ActOnPragmaFPContract(tok::OnOffSwitch OOS); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// \brief Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// \brief Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// \brief Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// \brief Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration.
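/// For example, in CUDA code the attribute is written as
/// \code
/// __global__ void __launch_bounds__(256, 4) kernel();
/// \endcode
/// where \c MaxThreads is 256 and \c MinBlocks is 4.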
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex); // OpenMP directives and clauses. private: void *VarDataSharingAttributesStack; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind); public: /// \brief Checks if the specified variable is used in one of the private /// clauses in OpenMP constructs. bool IsOpenMPCapturedVar(VarDecl *VD); /// \brief Check if the specified variable is used in a private clause in /// OpenMP constructs. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level); ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// \brief End analysis of clauses. void EndOpenMPClause(); /// \brief Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// \brief Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// \brief End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement.
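/// For example (with \c N and \c process standing in for surrounding code):
/// \code
/// #pragma omp for
/// for (int i = 0; i < N; ++i)
///   process(i);
/// \endcode
/// where the for-loop is the associated statement.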
StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. 
OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. 
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function.
enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types which /// point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension.
IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and prepare for a conversion of the /// RHS to the LHS type. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind); // CheckSingleAssignmentConstraints - Currently used by // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking, // this routine performs the default function/array conversions. AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false); // \brief If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). 
Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged }; /// \brief Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds. ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. 
/// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D); bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); // HLSL Change Starts - checking array subscript access to vector or matrix member void CheckHLSLArrayAccess(const Expr *expr); // HLSL Change ends void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr *TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinCpuSupports(CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; // HLSL Change Starts bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter); bool DiagnoseHLSLLookup(const LookupResult &R); void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl); // HLSL Change Ends /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ?
OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// \brief To be used for checking whether the arguments being passed to /// a function exceed the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
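/* A minimal usage sketch of the EnterExpressionEvaluationContext RAII helper
   declared above; this is not part of Sema.h. The function name is
   hypothetical and the snippet assumes the surrounding clang headers are
   available. Pushing in the constructor and popping in the destructor keeps
   the evaluation-context stack balanced on every exit path: */
static void HandleConstantExprSketch(clang::Sema &Actions) {
  // Enter Sema::ConstantEvaluated for the duration of this scope.
  clang::EnterExpressionEvaluationContext ConstantContext(
      Actions, clang::Sema::ConstantEvaluated);
  // ... build and semantically check the expression through Actions ...
}  // destructor runs here and calls Actions.PopExpressionEvaluationContext()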
GB_unaryop__ainv_bool_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_uint64 // op(A') function: GB_tran__ainv_bool_uint64 // C type: bool // A type: uint64_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_uint64 ( bool *Cx, // Cx and Ax may be aliased uint64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
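/* A standalone sketch, not part of GraphBLAS, of what the macro expansion
   above computes inside GB_unop__ainv_bool_uint64: GB_GETA loads the entry,
   GB_CASTING typecasts uint64_t to bool, and GB_OP (AINV applied to bool)
   reduces to the identity, so each output entry is simply the typecast input.
   The function name is illustrative. */
#include <stdbool.h>
#include <stdint.h>

static void unop_ainv_bool_uint64_sketch (bool *Cx, const uint64_t *Ax,
    int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;     // GB_GETA: aij = Ax [pA]
        bool z = (bool) aij ;       // GB_CASTING: bool z = (bool) aij
        Cx [p] = z ;                // GB_OP: z = x (identity for bool AINV)
    }
}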
solver.h
#pragma once #include <fgpl/src/broadcast.h> #include <fgpl/src/dist_hash_map.h> #include <fgpl/src/dist_hash_set.h> #include <fgpl/src/dist_range.h> #include <fgpl/src/hash_set.h> #include <fgpl/src/reducer.h> #include <hps/src/hps.h> #include <omp_hash_map/src/omp_hash_map.h> #include <omp_hash_map/src/omp_hash_set.h> #include <cmath> #include <cstdlib> #include <functional> #include <numeric> #include <unordered_set> #include <queue> #include "../config.h" #include "../det/det.h" #include "../math_vector.h" #include "../parallel.h" #include "../result.h" #include "../timer.h" #include "../util.h" #include "davidson.h" #include "green.h" #include "hamiltonian.h" #include "hc_server.h" #include "uncert_result.h" template <class S> class Solver { public: void run(); private: S system; Hamiltonian<S> hamiltonian; std::vector<double> eps_tried_prev; fgpl::HashSet<Det, DetHasher> var_dets; size_t pt_mem_avail; size_t var_iteration_global; double eps_var_min; double eps_pt; double eps_pt_dtm; double eps_pt_psto; double target_error; size_t bytes_per_det; void run_all_variations(); void run_variation(const double eps_var, const bool until_converged = true); void run_all_perturbations(); void run_perturbation(const double eps_var); double get_energy_pt_dtm(const double eps_var); UncertResult get_energy_pt_psto(const double eps_var, const double energy_pt_dtm); UncertResult get_energy_pt_sto(const double eps_var, const UncertResult& get_energy_pt_sto); bool load_variation_result(const std::string& filename); void save_variation_result(const std::string& filename); void save_pair_contrib(const double eps_var); void print_dets_info() const; std::string get_wf_filename(const double eps_var) const; template <class C> std::array<double, 2> mapreduce_sum( const fgpl::DistHashMap<Det, C, DetHasher>& map, const std::function<double(const Det& det, const C& hc_sum)>& mapper) const; }; template <class S> void Solver<S>::run() { Timer::start("setup"); std::setlocale(LC_ALL, "en_US.UTF-8"); system.setup(); target_error = Config::get<double>("target_error", 5.0e-5); Result::put("energy_hf", system.energy_hf); Timer::end(); std::vector<std::vector<size_t>> connections; if (!Config::get<bool>("skip_var", false)) { Timer::start("variation"); run_all_variations(); if (Config::get<bool>("2rdm", false) || Config::get<bool>("get_2rdm_csv", false)) { connections = hamiltonian.matrix.get_connections(); } hamiltonian.clear(); Timer::end(); } Timer::start("post variation"); if (Config::get<bool>("hc_server_mode", false)) { if (system.time_sym) throw std::invalid_argument("time sym hc server not implemented"); const auto& wf_filename = get_wf_filename(eps_var_min); if (!load_variation_result(wf_filename)) throw std::runtime_error("failed to load wf"); hamiltonian.update(system); HcServer<S> server(system, hamiltonian); server.run(); return; } if (Config::get<bool>("get_green", false)) { if (system.time_sym) throw std::invalid_argument("time sym green not implemented"); Timer::start("green"); Green<S> green(system, hamiltonian); green.run(); Timer::end(); } system.post_variation(connections); connections.clear(); connections.shrink_to_fit(); hamiltonian.clear(); eps_tried_prev.clear(); var_dets.clear_and_shrink(); Timer::end(); if (Config::get<bool>("var_only", false)) return; Timer::start("perturbation"); run_all_perturbations(); system.post_perturbation(); Timer::end(); } template <class S> void Solver<S>::run_all_variations() { if (Parallel::is_master()) { printf("Final iteration 0 HF ndets= 1 energy= %.8f\n", 
system.energy_hf); } const auto& eps_vars = Config::get<std::vector<double>>("eps_vars"); const auto& eps_vars_schedule = Config::get<std::vector<double>>("eps_vars_schedule"); double eps_var_prev = Util::INF; for (const auto& det : system.dets) var_dets.set(det); auto it_schedule = eps_vars_schedule.begin(); var_iteration_global = 0; eps_var_min = eps_vars.back(); const bool get_pair_contrib = Config::get<bool>("get_pair_contrib", false); for (const double eps_var : eps_vars) { Timer::start(Util::str_printf("eps_var %#.2e", eps_var)); const auto& filename = get_wf_filename(eps_var); if (Config::get<bool>("force_var", false) || !load_variation_result(filename)) { // Perform extra scheduled eps. while (it_schedule != eps_vars_schedule.end() && *it_schedule >= eps_var_prev) it_schedule++; while (it_schedule != eps_vars_schedule.end() && *it_schedule > eps_var) { const double eps_var_extra = *it_schedule; Timer::start(Util::str_printf("extra %#.2e", eps_var_extra)); run_variation(eps_var_extra, false); Timer::end(); it_schedule++; } Timer::start("main"); run_variation(eps_var); Result::put<double>(Util::str_printf("energy_var/%#.2e", eps_var), system.energy_var); Timer::end(); save_variation_result(filename); } else { eps_tried_prev.clear(); var_dets.clear(); for (const auto& det : system.dets) var_dets.set(det); // hamiltonian.clear(); Result::put<double>(Util::str_printf("energy_var/%#.2e", eps_var), system.energy_var); } if (Parallel::is_master() && get_pair_contrib) { save_pair_contrib(eps_var); } eps_var_prev = eps_var; Timer::end(); } // hamiltonian.clear(); eps_tried_prev.clear(); eps_tried_prev.shrink_to_fit(); var_dets.clear_and_shrink(); } template <class S> void Solver<S>::run_all_perturbations() { const auto& eps_vars = Config::get<std::vector<double>>("eps_vars"); bytes_per_det = N_CHUNKS * 16; #ifdef INF_ORBS bytes_per_det += 128; #endif for (const double eps_var : eps_vars) { Timer::start(Util::str_printf("eps_var %#.2e", eps_var)); run_perturbation(eps_var); Timer::end(); } } template <class S> void Solver<S>::run_variation(const double eps_var, const bool until_converged) { Davidson davidson; fgpl::DistHashSet<Det, DetHasher> dist_new_dets; size_t n_dets = system.get_n_dets(); size_t n_dets_new = n_dets; double energy_var_prev = 0.0; bool converged = false; size_t iteration = 0; bool dets_converged = false; const bool get_pair_contrib = Config::get<bool>("get_pair_contrib", false); bool var_sd = Config::get<bool>("var_sd", get_pair_contrib); while (!converged) { eps_tried_prev.resize(n_dets, Util::INF); if (until_converged) Timer::start(Util::str_printf("#%zu", iteration + 1)); // Random execution and broadcast. if (!dets_converged) { n_dets_new = n_dets; for (size_t j = 0; j < 5; j++) { fgpl::DistRange<size_t>(j, n_dets, 5).for_each([&](const size_t i) { const auto& det = system.dets[i]; const double coef = system.coefs[i]; double eps_min = eps_var / std::abs(coef); if (i == 0 && var_sd) eps_min = 0.0; if (system.time_sym && det.up != det.dn) eps_min *= Util::SQRT2; if (eps_min >= eps_tried_prev[i] * 0.999) return; Det connected_det_reg; const auto& connected_det_handler = [&](const Det& connected_det, const int n_excite) { connected_det_reg = connected_det; if (system.time_sym && connected_det.up > connected_det.dn) { connected_det_reg.reverse_spin(); } if (var_dets.has(connected_det_reg)) return; if (n_excite == 1) { const double h_ai = system.get_hamiltonian_elem(det, connected_det, 1); if (std::abs(h_ai) < eps_min) return; // Filter out small single excitation. 
            }
            dist_new_dets.async_set(connected_det_reg);
          };
          system.find_connected_dets(det, eps_tried_prev[i], eps_min, connected_det_handler);
          eps_tried_prev[i] = eps_min;
        });
        dist_new_dets.sync();
        n_dets_new += dist_new_dets.get_n_keys();
        system.dets.reserve(n_dets_new);
        system.coefs.reserve(n_dets_new);
        dist_new_dets.for_each_serial([&](const Det& connected_det, const size_t) {
          var_dets.set(connected_det);
          system.dets.push_back(connected_det);
          system.coefs.push_back(1.0e-16);
        });
        dist_new_dets.clear();
        if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20);
      }
      if (Parallel::is_master()) {
        printf("\nNumber of dets / new dets: %'zu / %'zu\n", n_dets_new, n_dets_new - n_dets);
      }
      Timer::checkpoint("get next det list");
      hamiltonian.update(system);
    }
    const double davidson_target_error =
        until_converged ? target_error * target_error * 1e-4 : target_error * target_error;
    davidson.diagonalize(
        hamiltonian.matrix, system.coefs, davidson_target_error, Parallel::is_master());
    const double energy_var_new = davidson.get_lowest_eigenvalue();
    system.coefs = davidson.get_lowest_eigenvector();
    Timer::checkpoint("diagonalize sparse hamiltonian");
    var_iteration_global++;
    if (Parallel::is_master()) {
      printf("Iteration %zu ", var_iteration_global);
      printf("eps1= %#.2e ndets= %'zu energy= %.8f\n", eps_var, n_dets_new, energy_var_new);
    }
    if (std::abs(energy_var_new - energy_var_prev) < target_error * target_error * 1e-2) {
      converged = true;
    }
    if (n_dets_new < n_dets * 1.001) {
      dets_converged = true;
    }
    if (dets_converged && davidson.converged) {
      converged = true;
    }
    n_dets = n_dets_new;
    energy_var_prev = energy_var_new;
    if (!until_converged) break;
    Timer::end();
    iteration++;
  }
  system.energy_var = energy_var_prev;
  if (Parallel::is_master() && until_converged) {
    printf("Final iteration %zu ", var_iteration_global);
    printf("eps1= %#.2e ndets= %'zu energy= %.8f\n", eps_var, n_dets, system.energy_var);
    print_dets_info();
  }
}

template <class S>
void Solver<S>::run_perturbation(const double eps_var) {
  double default_eps_pt_dtm = 2.0e-6;
  double default_eps_pt_psto = 1.0e-7;
  double default_eps_pt = 1.0e-20;
  if (system.type == SystemType::HEG) {
    default_eps_pt_psto = default_eps_pt_dtm;
    default_eps_pt = eps_var * 1.0e-20;
  }
  eps_pt_dtm = Config::get<double>("eps_pt_dtm", default_eps_pt_dtm);
  eps_pt_psto = Config::get<double>("eps_pt_psto", default_eps_pt_psto);
  eps_pt = Config::get<double>("eps_pt", default_eps_pt);
  if (eps_pt_psto < eps_pt) eps_pt_psto = eps_pt;
  if (eps_pt_dtm < eps_pt_psto) eps_pt_dtm = eps_pt_psto;

  // If result already exists, return.
  const auto& value_entry = Util::str_printf("energy_total/%#.2e/%#.2e/value", eps_var, eps_pt);
  const auto& uncert_entry = Util::str_printf("energy_total/%#.2e/%#.2e/uncert", eps_var, eps_pt);
  UncertResult res(Result::get<double>(value_entry, 0.0));
  if (res.value != 0.0) {
    if (Parallel::is_master()) {
      res.uncert = Result::get<double>(uncert_entry, 0.0);
      printf("Total energy: %s (loaded from result file)\n", res.to_string().c_str());
    }
    if (!Config::get<bool>("force_pt", false)) return;
  }

  // Load var wf.
  const auto& var_filename = get_wf_filename(eps_var);
  if (!load_variation_result(var_filename)) {
    throw std::runtime_error("cannot load variation results");
  }
  system.update_diag_helper();
  if (system.time_sym) system.unpack_time_sym();

  // Perform multi-stage PT.
  system.dets.shrink_to_fit();
  system.coefs.shrink_to_fit();
  var_dets.clear_and_shrink();
  var_dets.reserve(system.get_n_dets());
  for (const auto& det : system.dets) var_dets.set(det);
  size_t mem_total = Config::get<double>("mem_total", Util::get_mem_total());
#ifdef INF_ORBS
  mem_total *= 0.8;
#endif
  const size_t mem_var = system.get_n_dets() * (bytes_per_det * 3 + 8);
  const double mem_left = mem_total * 0.7 - mem_var - system.helper_size;
  assert(mem_left > 0);
  pt_mem_avail = mem_left;
  const size_t n_procs = Parallel::get_n_procs();
  if (n_procs >= 2) {
    pt_mem_avail = static_cast<size_t>(pt_mem_avail * 0.7 * n_procs);
  }
  if (Parallel::is_master()) {
    printf("Memory total: %.1fGB\n", mem_total * 1.0e-9);
    printf("Helper size: %.1fGB\n", system.helper_size * 1.0e-9);
    printf("Bytes per det: %zu\n", bytes_per_det);
    printf("Memory var: %.1fGB\n", mem_var * 1.0e-9);
    printf("Memory PT limit: %.1fGB\n", pt_mem_avail * 1.0e-9);
  }
  const double energy_pt_dtm = get_energy_pt_dtm(eps_var);
  const UncertResult energy_pt_psto = get_energy_pt_psto(eps_var, energy_pt_dtm);
  const UncertResult energy_pt = get_energy_pt_sto(eps_var, energy_pt_psto);
  if (Parallel::is_master()) {
    printf("Total energy: %s Ha\n", energy_pt.to_string().c_str());
  }
  Result::put(value_entry, energy_pt.value);
  Result::put(uncert_entry, energy_pt.uncert);
}

template <class S>
double Solver<S>::get_energy_pt_dtm(const double eps_var) {
  if (eps_pt_dtm >= 1.0) return system.energy_var;
  Timer::start(Util::str_printf("dtm %#.2e", eps_pt_dtm));
  const size_t n_var_dets = system.get_n_dets();
  size_t n_batches = Config::get<size_t>("n_batches_pt_dtm", 0);
  fgpl::DistHashMap<Det, MathVector<double, 1>, DetHasher> hc_sums;
  size_t bytes_per_entry = bytes_per_det + 8;
  const DetHasher det_hasher;

  // Estimate best n batches.
  if (n_batches == 0) {
    fgpl::DistRange<size_t>(50, n_var_dets, 100).for_each([&](const size_t i) {
      const Det& det = system.dets[i];
      const double coef = system.coefs[i];
      const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) {
        if (var_dets.has(det_a)) return;
        const size_t det_a_hash = det_hasher(det_a);
        const size_t batch_hash = Util::rehash(det_a_hash);
        if ((batch_hash & 127) != 0) return;  // use 1st of 128 batches.
        if (n_excite == 1) {
          const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite);
          const double hc = h_ai * coef;
          if (std::abs(hc) < eps_pt_dtm) return;  // Filter out small single excitation.
} MathVector<double, 1> contrib; hc_sums.async_set(det_a, contrib); }; system.find_connected_dets(det, Util::INF, eps_pt_dtm / std::abs(coef), pt_det_handler); }); hc_sums.sync(); const size_t n_pt_dets = hc_sums.get_n_keys(); n_batches = static_cast<size_t>(ceil(2.5 * 128 * 100 * n_pt_dets * bytes_per_entry / pt_mem_avail)); if (n_batches == 0) n_batches = 1; fgpl::broadcast(n_batches); if (Parallel::is_master()) { printf("Number of dtm batches: %zu\n", n_batches); } Timer::checkpoint("determine number of dtm batches"); hc_sums.clear(); } double energy_sum = 0.0; double energy_sq_sum = 0.0; size_t n_pt_dets_sum = 0; UncertResult energy_pt_dtm; for (size_t batch_id = 0; batch_id < n_batches; batch_id++) { Timer::start(Util::str_printf("#%zu/%zu", batch_id + 1, n_batches)); for (size_t j = 0; j < 5; j++) { fgpl::DistRange<size_t>(j, n_var_dets, 5).for_each([&](const size_t i) { const Det& det = system.dets[i]; const double coef = system.coefs[i]; const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) { const size_t det_a_hash = det_hasher(det_a); const size_t batch_hash = Util::rehash(det_a_hash); if (batch_hash % n_batches != batch_id) return; if (var_dets.has(det_a)) return; const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite); const double hc = h_ai * coef; if (std::abs(hc) < eps_pt_dtm) return; // Filter out small single excitation. const MathVector<double, 1> contrib(hc); hc_sums.async_set(det_a, contrib, fgpl::Reducer<MathVector<double, 1>>::sum); }; system.find_connected_dets(det, Util::INF, eps_pt_dtm / std::abs(coef), pt_det_handler); }); hc_sums.sync(fgpl::Reducer<MathVector<double, 1>>::sum); if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20); } const size_t n_pt_dets = hc_sums.get_n_keys(); if (Parallel::is_master()) { printf("\nNumber of dtm pt dets: %'zu\n", n_pt_dets); } n_pt_dets_sum += n_pt_dets; Timer::checkpoint("create hc sums"); const auto& energy_pt_dtm_batch = mapreduce_sum<MathVector<double, 1>>( hc_sums, [&](const Det& det_a, const MathVector<double, 1>& hc_sum) { const double H_aa = system.get_hamiltonian_elem(det_a, det_a, 0); const double contrib = hc_sum[0] * hc_sum[0] / (system.energy_var - H_aa); return contrib; }); energy_sum += energy_pt_dtm_batch[0]; energy_sq_sum += energy_pt_dtm_batch[1]; energy_pt_dtm.value = energy_sum / (batch_id + 1) * n_batches; if (batch_id == n_batches - 1) { energy_pt_dtm.uncert = 0.0; } else { const double energy_avg = energy_sum / n_pt_dets_sum; const double sample_stdev = sqrt(energy_sq_sum / n_pt_dets_sum - energy_avg * energy_avg); energy_pt_dtm.uncert = sample_stdev * sqrt(n_pt_dets_sum) / (batch_id + 1) * (n_batches - batch_id - 1); } if (Parallel::is_master()) { printf("PT dtm batch correction: " ENERGY_FORMAT "\n", energy_pt_dtm_batch[0]); printf("PT dtm correction (eps1= %.2e):", eps_var); printf(" %s Ha\n", energy_pt_dtm.to_string().c_str()); printf("PT dtm total energy (eps1= %.2e):", eps_var); printf(" %s Ha\n", (energy_pt_dtm + system.energy_var).to_string().c_str()); printf("Correlation energy (eps1= %.2e):", eps_var); printf( " %s Ha\n", (energy_pt_dtm + system.energy_var - system.energy_hf).to_string().c_str()); } hc_sums.clear(); Timer::end(); // batch } hc_sums.clear_and_shrink(); Timer::end(); // dtm return energy_pt_dtm.value + system.energy_var; } template <class S> UncertResult Solver<S>::get_energy_pt_psto(const double eps_var, const double energy_pt_dtm) { if (eps_pt_psto >= eps_pt_dtm) return UncertResult(energy_pt_dtm, 0.0); Timer::start(Util::str_printf("psto 
%#.2e", eps_pt_psto)); const size_t n_var_dets = system.get_n_dets(); size_t n_batches = Config::get<size_t>("n_batches_pt_psto", 0); fgpl::DistHashMap<Det, MathVector<double, 2>, DetHasher> hc_sums; const size_t bytes_per_entry = bytes_per_det + 16; const DetHasher det_hasher; // Estimate best n batches. if (n_batches == 0) { fgpl::DistRange<size_t>(50, n_var_dets, 100).for_each([&](const size_t i) { const Det& det = system.dets[i]; const double coef = system.coefs[i]; const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) { if (var_dets.has(det_a)) return; const size_t det_a_hash = det_hasher(det_a); const size_t batch_hash = Util::rehash(det_a_hash); if ((batch_hash & 127) != 0) return; // use 1st of 16 batches. if (n_excite == 1) { const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite); const double hc = h_ai * coef; if (std::abs(hc) < eps_pt_psto) return; // Filter out small single excitation. } MathVector<double, 2> contrib; hc_sums.async_set(det_a, contrib); }; system.find_connected_dets(det, Util::INF, eps_pt_psto / std::abs(coef), pt_det_handler); }); hc_sums.sync(); const size_t n_pt_dets = hc_sums.get_n_keys(); const double mem_usage = Config::get<double>("pt_psto_mem_usage", 1.0); n_batches = static_cast<size_t>( ceil(2.5 * 128 * 100 * n_pt_dets * bytes_per_entry / (pt_mem_avail * mem_usage))); if (n_batches < 16) n_batches = 16; fgpl::broadcast(n_batches); if (Parallel::is_master()) { printf("Number of psto batches: %zu\n", n_batches); } Timer::checkpoint("determine number of psto batches"); hc_sums.clear(); } double energy_sum = 0.0; double energy_sq_sum = 0.0; size_t n_pt_dets_sum = 0; UncertResult energy_pt_psto; for (size_t batch_id = 0; batch_id < n_batches; batch_id++) { Timer::start(Util::str_printf("#%zu/%zu", batch_id + 1, n_batches)); for (size_t j = 0; j < 5; j++) { fgpl::DistRange<size_t>(j, n_var_dets, 5).for_each([&](const size_t i) { const Det& det = system.dets[i]; const double coef = system.coefs[i]; const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) { const size_t det_a_hash = det_hasher(det_a); const size_t batch_hash = Util::rehash(det_a_hash); if (batch_hash % n_batches != batch_id) return; if (var_dets.has(det_a)) return; const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite); const double hc = h_ai * coef; if (std::abs(hc) < eps_pt_psto) return; // Filter out small single excitation. 
MathVector<double, 2> contrib; contrib[0] = hc; if (std::abs(hc) >= eps_pt_dtm) contrib[1] = hc; hc_sums.async_set(det_a, contrib, fgpl::Reducer<MathVector<double, 2>>::sum); }; system.find_connected_dets(det, Util::INF, eps_pt_psto / std::abs(coef), pt_det_handler); }); hc_sums.sync(fgpl::Reducer<MathVector<double, 2>>::sum); if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20); } const size_t n_pt_dets = hc_sums.get_n_keys(); if (Parallel::is_master()) { printf("\nNumber of psto pt dets: %'zu\n", n_pt_dets); } n_pt_dets_sum += n_pt_dets; Timer::checkpoint("create hc sums"); const auto& energy_pt_psto_batch = mapreduce_sum<MathVector<double, 2>>( hc_sums, [&](const Det& det_a, const MathVector<double, 2>& hc_sum) { const double hc_sum_sq_diff = hc_sum[0] * hc_sum[0] - hc_sum[1] * hc_sum[1]; const double H_aa = system.get_hamiltonian_elem(det_a, det_a, 0); const double contrib = hc_sum_sq_diff / (system.energy_var - H_aa); return contrib; }); energy_sum += energy_pt_psto_batch[0]; energy_sq_sum += energy_pt_psto_batch[1]; energy_pt_psto.value = energy_sum / (batch_id + 1) * n_batches; if (batch_id == n_batches - 1) { energy_pt_psto.uncert = 0.0; } else { const double energy_avg = energy_sum / n_pt_dets_sum; const double sample_stdev = sqrt(energy_sq_sum / n_pt_dets_sum - energy_avg * energy_avg); const double mean_stdev = sample_stdev / sqrt(n_pt_dets_sum); energy_pt_psto.uncert = mean_stdev * n_pt_dets_sum / (batch_id + 1) * (n_batches - batch_id - 1); // energy_pt_psto.uncert = sample_stdev * sqrt(n_pt_dets_sum) / (batch_id + 1) * n_batches; } if (Parallel::is_master()) { printf("PT psto batch correction: " ENERGY_FORMAT "\n", energy_pt_psto_batch[0]); printf("PT psto correction (eps1= %.2e):", eps_var); printf(" %s Ha\n", energy_pt_psto.to_string().c_str()); printf("PT psto total energy (eps1= %.2e):", eps_var); printf(" %s Ha\n", (energy_pt_psto + energy_pt_dtm).to_string().c_str()); printf("Correlation energy (eps1= %.2e):", eps_var); printf(" %s Ha\n", (energy_pt_psto + energy_pt_dtm - system.energy_hf).to_string().c_str()); } hc_sums.clear(); Timer::end(); // batch if (energy_pt_psto.uncert <= target_error * 0.7) break; if (eps_pt_psto <= eps_pt && energy_pt_psto.uncert <= target_error) break; } Timer::end(); // psto return energy_pt_psto + energy_pt_dtm; } template <class S> UncertResult Solver<S>::get_energy_pt_sto( const double eps_var, const UncertResult& energy_pt_psto) { if (eps_pt >= eps_pt_psto) return energy_pt_psto; const size_t max_pt_iterations = Config::get<size_t>("max_pt_iterations", 100); fgpl::DistHashMap<Det, MathVector<double, 3>, DetHasher> hc_sums; const size_t bytes_per_entry = bytes_per_det + 24; const size_t n_var_dets = system.get_n_dets(); size_t n_batches = Config::get<size_t>("n_batches_pt_sto", 0); if (n_batches == 0) n_batches = 64; size_t n_samples = Config::get<size_t>("n_samples_pt_sto", 0); std::vector<double> probs(n_var_dets); std::vector<double> cum_probs(n_var_dets); // For sampling. std::unordered_map<size_t, unsigned> sample_dets; std::vector<size_t> sample_dets_list; size_t iteration = 0; const DetHasher det_hasher; UncertResult energy_pt_sto; std::vector<double> energy_pt_sto_loops; // Contruct probs. 
double sum_weights = 0.0; for (size_t i = 0; i < n_var_dets; i++) { sum_weights += std::abs(system.coefs[i]); } for (size_t i = 0; i < n_var_dets; i++) { probs[i] = std::abs(system.coefs[i]) / sum_weights; cum_probs[i] = probs[i]; if (i > 0) cum_probs[i] += cum_probs[i - 1]; } const unsigned random_seed = Config::get<unsigned>("random_seed", time(nullptr)); srand(random_seed); Timer::start(Util::str_printf("sto %#.2e", eps_pt)); // Estimate best n sample. if (n_samples == 0) { for (size_t i = 0; i < 1000; i++) { const double rand_01 = (static_cast<double>(rand()) / (RAND_MAX)); const int sample_det_id = std::lower_bound(cum_probs.begin(), cum_probs.end(), rand_01) - cum_probs.begin(); if (sample_dets.count(sample_det_id) == 0) sample_dets_list.push_back(sample_det_id); sample_dets[sample_det_id]++; } fgpl::broadcast(sample_dets); fgpl::broadcast(sample_dets_list); size_t n_unique_samples = sample_dets_list.size(); fgpl::DistRange<size_t>(0, n_unique_samples).for_each([&](const size_t sample_id) { const size_t i = sample_dets_list[sample_id]; const Det& det = system.dets[i]; const double coef = system.coefs[i]; const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) { if (var_dets.has(det_a)) return; const size_t det_a_hash = det_hasher(det_a); const size_t batch_hash = Util::rehash(det_a_hash); if ((batch_hash & 127) != 0) return; if (n_excite == 1) { const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite); const double hc = h_ai * coef; if (std::abs(hc) < eps_pt) return; // Filter out small single excitation. } MathVector<double, 3> contrib; hc_sums.async_set(det_a, contrib); }; system.find_connected_dets(det, Util::INF, eps_pt / std::abs(coef), pt_det_handler); }); hc_sums.sync(); const size_t n_pt_dets = hc_sums.get_n_keys(); hc_sums.clear(); const size_t n_pt_dets_batch = n_pt_dets * 128 / n_batches; double default_mem_usage = 0.4; if (system.type == SystemType::HEG) default_mem_usage = 1.0; const double mem_usage = Config::get<double>("pt_sto_mem_usage", default_mem_usage); size_t n_unique_target = pt_mem_avail * mem_usage * n_unique_samples / bytes_per_entry / 5.0 / n_pt_dets_batch; const size_t max_unique_targets = n_var_dets / 8 + 1; if (n_unique_target >= max_unique_targets) n_unique_target = max_unique_targets; sample_dets.clear(); sample_dets_list.clear(); n_samples = 0; n_unique_samples = 0; while (n_unique_samples < n_unique_target) { const double rand_01 = (static_cast<double>(rand()) / (RAND_MAX)); const int sample_det_id = std::lower_bound(cum_probs.begin(), cum_probs.end(), rand_01) - cum_probs.begin(); if (sample_dets.count(sample_det_id) == 0) { n_unique_samples++; } n_samples++; sample_dets[sample_det_id]++; } sample_dets.clear(); fgpl::broadcast(n_samples); if (Parallel::is_master()) { printf("Number of samples chosen: %'zu\n", n_samples); } Timer::checkpoint("determine n samples"); } while (iteration < max_pt_iterations) { Timer::start(Util::str_printf("#%zu", iteration + 1)); // Generate random sample for (size_t i = 0; i < n_samples; i++) { const double rand_01 = (static_cast<double>(rand()) / (RAND_MAX)); const int sample_det_id = std::lower_bound(cum_probs.begin(), cum_probs.end(), rand_01) - cum_probs.begin(); if (sample_dets.count(sample_det_id) == 0) sample_dets_list.push_back(sample_det_id); sample_dets[sample_det_id]++; } fgpl::broadcast(sample_dets); fgpl::broadcast(sample_dets_list); if (Parallel::is_master()) { printf( "Number of unique variational determinants in sample: %'zu\n", sample_dets_list.size()); } // Select random 
batch. size_t batch_id = rand() % n_batches; fgpl::broadcast(batch_id); const size_t n_unique_samples = sample_dets_list.size(); if (Parallel::is_master()) printf("Batch id: %zu / %zu\n", batch_id, n_batches); for (size_t j = 0; j < 5; j++) { fgpl::DistRange<size_t>(j, n_unique_samples, 5).for_each([&](const size_t sample_id) { const size_t i = sample_dets_list[sample_id]; const double count = static_cast<double>(sample_dets[i]); const Det& det = system.dets[i]; const double coef = system.coefs[i]; const double prob = probs[i]; const auto& pt_det_handler = [&](const Det& det_a, const int n_excite) { const size_t det_a_hash = det_hasher(det_a); const size_t batch_hash = Util::rehash(det_a_hash); if (batch_hash % n_batches != batch_id) return; if (var_dets.has(det_a)) return; const double h_ai = system.get_hamiltonian_elem(det, det_a, n_excite); const double hc = h_ai * coef; if (std::abs(hc) < eps_pt) return; // Filter out small single excitation. const double factor = static_cast<double>(n_batches) / (n_samples * (n_samples - 1)); MathVector<double, 3> contrib; contrib[0] = count * hc / prob * sqrt(factor); if (std::abs(hc) < eps_pt_psto) { contrib[2] = (n_samples - 1 - count / prob) * hc * hc * factor * count / prob; } else { contrib[1] = contrib[0]; } hc_sums.async_set(det_a, contrib, fgpl::Reducer<MathVector<double, 3>>::sum); }; system.find_connected_dets(det, Util::INF, eps_pt / std::abs(coef), pt_det_handler); }); hc_sums.sync(fgpl::Reducer<MathVector<double, 3>>::sum); if (Parallel::is_master()) printf("%zu%% ", (j + 1) * 20); } const size_t n_pt_dets = hc_sums.get_n_keys(); if (Parallel::is_master()) printf("\nNumber of sto pt dets: %'zu\n", n_pt_dets); sample_dets.clear(); sample_dets_list.clear(); Timer::checkpoint("create hc sums"); const double energy_pt_sto_loop = mapreduce_sum<MathVector<double, 3>>( hc_sums, [&](const Det& det_a, const MathVector<double, 3>& hc_sum) { const double h_aa = system.get_hamiltonian_elem(det_a, det_a, 0); const double factor = 1.0 / (system.energy_var - h_aa); return (hc_sum[0] * hc_sum[0] - hc_sum[1] * hc_sum[1] + hc_sum[2]) * factor; })[0]; energy_pt_sto_loops.push_back(energy_pt_sto_loop); energy_pt_sto.value = Util::avg(energy_pt_sto_loops); energy_pt_sto.uncert = Util::stdev(energy_pt_sto_loops) / sqrt(iteration + 1.0); if (Parallel::is_master()) { printf("PT sto loop correction: " ENERGY_FORMAT "\n", energy_pt_sto_loop); printf("PT sto correction (eps1= %.2e):", eps_var); printf(" %s Ha\n", energy_pt_sto.to_string().c_str()); printf("PT sto total energy (eps1= %.2e):", eps_var); printf(" %s Ha\n", (energy_pt_sto + energy_pt_psto).to_string().c_str()); printf("Correlation energy (eps1= %.2e):", eps_var); printf(" %s Ha\n", (energy_pt_sto + energy_pt_psto - system.energy_hf).to_string().c_str()); } hc_sums.clear(); Timer::end(); iteration++; if (iteration >= 6 && energy_pt_sto.uncert <= target_error * 0.7) { break; } if (iteration >= 10 && (energy_pt_sto + energy_pt_psto).uncert <= target_error) { break; } } hc_sums.clear_and_shrink(); Timer::end(); return energy_pt_sto + energy_pt_psto; } template <class S> template <class C> std::array<double, 2> Solver<S>::mapreduce_sum( const fgpl::DistHashMap<Det, C, DetHasher>& map, const std::function<double(const Det& det, const C& hc_sum)>& mapper) const { const int n_threads = omp_get_max_threads(); std::vector<double> res_sq_thread(n_threads, 0.0); std::vector<double> res_thread(n_threads, 0.0); map.for_each([&](const Det& key, const size_t, const C& value) { const int thread_id = 
        omp_get_thread_num();
    const double mapped = mapper(key, value);
    res_thread[thread_id] += mapped;
    res_sq_thread[thread_id] += mapped * mapped;
  });
  std::array<double, 2> res_local = {0.0, 0.0};
  std::array<double, 2> res;
  for (int i = 0; i < n_threads; i++) {
    res_local[0] += res_thread[i];
    res_local[1] += res_sq_thread[i];
  }
  MPI_Allreduce(&res_local, &res, 2, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  return res;
}

template <class S>
bool Solver<S>::load_variation_result(const std::string& filename) {
  std::string serialized;
  const int TRUNK_SIZE = 1 << 20;
  char buffer[TRUNK_SIZE];
  MPI_File file;
  int error;
  error = MPI_File_open(
      MPI_COMM_WORLD, filename.c_str(), MPI_MODE_RDONLY, MPI_INFO_NULL, &file);
  if (error) return false;
  MPI_Offset size;
  MPI_File_get_size(file, &size);
  MPI_Status status;
  while (size > TRUNK_SIZE) {
    MPI_File_read_all(file, buffer, TRUNK_SIZE, MPI_CHAR, &status);
    serialized.append(buffer, TRUNK_SIZE);
    size -= TRUNK_SIZE;
  }
  MPI_File_read_all(file, buffer, size, MPI_CHAR, &status);
  serialized.append(buffer, size);
  MPI_File_close(&file);
  hps::from_string(serialized, system);
  if (Parallel::is_master()) {
    printf("Loaded %'zu dets from: %s\n", system.get_n_dets(), filename.c_str());
    print_dets_info();
    printf("HF energy: " ENERGY_FORMAT "\n", system.energy_hf);
    printf("Variational energy: " ENERGY_FORMAT "\n", system.energy_var);
  }
  return true;
}

template <class S>
void Solver<S>::save_variation_result(const std::string& filename) {
  if (Parallel::is_master()) {
    std::ofstream file(filename, std::ofstream::binary);
    hps::to_stream(system, file);
    printf("Variational results saved to: %s\n", filename.c_str());
  }
}

template <class S>
void Solver<S>::save_pair_contrib(const double eps_var) {
  const auto& det_hf = system.dets[0];
  // const size_t n_elecs = system.n_elecs;
  const size_t n_up = system.n_up;
  // const size_t n_dn = system.n_dn;
  if (det_hf.up != det_hf.dn) {
    throw std::invalid_argument("non sym det_hf not implemented");
  }
  std::vector<std::vector<double>> contribs(n_up);
  std::string contrib_filename = Util::str_printf("pair_contrib_%#.2e.csv", eps_var);
  const auto& contrib_entry = Util::str_printf("pair_contrib/%#.2e", eps_var);
  Result::put<std::string>(contrib_entry, contrib_filename);
  std::ofstream contrib_file(contrib_filename);
  contrib_file << "i,j,pair_contrib" << std::endl;
  for (size_t i = 0; i < n_up; i++) {
    contribs[i].assign(n_up, 0.0);
  }
  const double c0 = system.coefs[0];
  for (size_t det_id = 1; det_id < system.dets.size(); det_id++) {
    const auto& det = system.dets[det_id];
    const double coef = system.coefs[det_id];
    const auto& diff_up = det_hf.up.diff(det.up);
    const auto& diff_dn = det_hf.dn.diff(det.dn);
    const unsigned n_excite = diff_up.n_diffs + diff_dn.n_diffs;
    if (n_excite > 2) continue;
    size_t i = 0;
    size_t j = 0;
    const auto& H = system.get_hamiltonian_elem(det_hf, det, -1);
    if (diff_up.n_diffs == 2) {
      i = diff_up.left_only[0];
      j = diff_up.left_only[1];
    } else if (diff_up.n_diffs == 1) {
      i = diff_up.left_only[0];
      if (diff_dn.n_diffs == 1) {
        j = diff_dn.left_only[0];
        if (j < i) {
          std::swap(i, j);
        }
      } else {
        j = i;
      }
    } else {
      i = diff_dn.left_only[0];
      if (diff_dn.n_diffs == 2) {
        j = diff_dn.left_only[1];
        if (j < i) {
          std::swap(i, j);
        }
      } else {
        j = i;
      }
    }
    if (det.up == det.dn) {
      contribs[i][j] += H * coef / c0;
    } else if (system.time_sym) {
      contribs[i][j] += H * coef / c0 * Util::SQRT2;
    } else {
      contribs[i][j] += H * coef / c0;
    }
  }
  contrib_file.precision(15);
  for (size_t i = 0; i < n_up; i++) {
    for (size_t j = i; j < n_up; j++) {
      if (i != j) {
        contribs[i][j] /= 2;
      }
      contrib_file << i << "," << j << "," << contribs[i][j] << std::endl;
    }
  }
  contrib_file.close();
}

template <class S>
void Solver<S>::print_dets_info() const {
  if (system.time_sym) {
    // Print effective dets for unpacked time sym.
    size_t n_eff_dets = 0;
    for (const auto& det : system.dets) {
      if (det.up == det.dn) {
        n_eff_dets += 1;
      } else if (det.up < det.dn) {
        n_eff_dets += 2;
      } else {
        throw std::runtime_error("wf has invalid det for time sym");
      }
    }
    printf("Effective dets (without time sym): %'zu\n", n_eff_dets);
  }

  // Print excitations.
  std::unordered_map<unsigned, size_t> excitations;
  std::unordered_map<unsigned, double> weights;
  unsigned highest_excitation = 0;
  const auto& det_hf = system.dets[0];
  for (size_t i = 0; i < system.dets.size(); i++) {
    const auto& det = system.dets[i];
    const double coef = system.coefs[i];
    const unsigned n_excite = det_hf.up.n_diffs(det.up) + det_hf.dn.n_diffs(det.dn);
    if (det.up != det.dn && system.time_sym) {
      excitations[n_excite] += 2;
    } else {
      excitations[n_excite] += 1;
    }
    weights[n_excite] += coef * coef;
    if (highest_excitation < n_excite) highest_excitation = n_excite;
  }
  printf("----------------------------------------\n");
  printf("%-10s%12s%16s\n", "Excite Lv", "# dets", "Sum c^2");
  for (unsigned i = 0; i <= highest_excitation; i++) {
    if (excitations.count(i) == 0) {
      excitations[i] = 0;
      weights[i] = 0.0;
    }
    printf("%-10u%12zu%16.8f\n", i, excitations[i], weights[i]);
  }

  // Print orb occupations.
  std::vector<double> orb_occupations(system.n_orbs, 0.0);
#pragma omp parallel for schedule(static, 1)
  for (unsigned j = 0; j < system.n_orbs; j++) {
    for (size_t i = 0; i < system.dets.size(); i++) {
      const auto& det = system.dets[i];
      const double coef = system.coefs[i];
      if (det.up.has(j)) {
        orb_occupations[j] += coef * coef;
      }
      if (det.dn.has(j)) {
        orb_occupations[j] += coef * coef;
      }
    }
  }
  printf("----------------------------------------\n");
  printf("%-10s%12s%16s\n", "Orbital", "", "Sum c^2");
  for (unsigned j = 0; j < system.n_orbs && j < 50; j++) {
    printf("%-10u%12s%16.8f\n", j, "", orb_occupations[j]);
  }
  double sum_orb_occupation = std::accumulate(orb_occupations.begin(), orb_occupations.end(), 0.0);
  printf("Sum orbitals c^2: %.8f\n", sum_orb_occupation);

  // Print most important dets.
printf("----------------------------------------\n"); printf("Most important dets:\n"); std::vector<size_t> det_order(system.dets.size()); for (size_t i = 0; i < system.dets.size(); i++) { det_order[i] = i; } const auto& comp = [&](const size_t a, const size_t b) { if (std::abs(system.coefs[a]) != std::abs(system.coefs[b])) { return std::abs(system.coefs[a]) < std::abs(system.coefs[b]); } return a > b; }; std::priority_queue<size_t, std::vector<size_t>, decltype(comp)> det_ordered(comp, det_order); printf("%-10s%12s %-12s\n", "Excite Lv", "Coef", "Det (Reordered orb)"); for (size_t i = 0; i < std::min((size_t)20, system.dets.size()); i++) { size_t ordered_i = det_ordered.top(); det_ordered.pop(); const double coef = system.coefs[ordered_i]; const auto& det = system.dets[ordered_i]; const auto& occs_up = det.up.get_occupied_orbs(); const auto& occs_dn = det.dn.get_occupied_orbs(); const unsigned n_excite = det_hf.up.n_diffs(det.up) + det_hf.dn.n_diffs(det.dn); printf("%-10u%12.8f", n_excite, coef); printf(" | "); for (unsigned j = 0; j < system.n_up; j++) { printf("%2u ", occs_up[j]); } printf("| "); for (unsigned j = 0; j < system.n_dn; j++) { printf("%2u ", occs_dn[j]); } printf("|\n"); } printf("----------------------------------------\n"); } template <class S> std::string Solver<S>::get_wf_filename(const double eps_var) const { return Util::str_printf("wf_eps1_%#.2e.dat", eps_var); }
parallel_matrix.c
// Matrix multiplication by a scalar using OpenMP parallelization.
#include <stdio.h>
#include <omp.h>

#define NUM_THREADS 5
#define N 1000

// Static storage: an N x N float matrix (~4 MB) would risk overflowing the
// stack if declared as a local variable in main.
static float mat[N][N];

// Scale one row of the matrix by a constant factor.
void evaluacion(int i, float m[N][N]) {
  for (int j = 0; j < N; j++) {
    m[i][j] *= 65.0f;  // the scalar factor
  }
}

int main(void) {
  omp_set_num_threads(NUM_THREADS);

  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      mat[i][j] = (float)j;
    }
  }

  double t1 = omp_get_wtime();
  // One row per iteration; rows are independent, so no synchronization is
  // needed. Printing is kept out of the timed loop so t measures compute.
  #pragma omp parallel for
  for (int i = 0; i < N; i++) {
    evaluacion(i, mat);
  }
  double t2 = omp_get_wtime();

  printf("t = %f\n", t2 - t1);
  printf("mat[0][9] = %f\n", mat[0][9]);  // spot-check: 9 * 65 = 585
  return 0;
}
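// ---------------------------------------------------------------------------
// A minimal companion sketch (not part of the original file): one way to check
// that the parallel region above actually pays off is to time the same
// row-by-row scaling serially and with "#pragma omp parallel for", keeping all
// printf I/O out of the timed regions so the measurement reflects compute
// rather than console output. Names and sizes here are illustrative.
#include <cstdio>
#include <vector>
#include <omp.h>

int main(void) {
  const int n = 1000;
  const float scalar = 65.0f;
  std::vector<float> mat(static_cast<size_t>(n) * n, 1.0f);  // heap, not stack

  double t1 = omp_get_wtime();
  for (int i = 0; i < n; i++)  // serial baseline
    for (int j = 0; j < n; j++) mat[static_cast<size_t>(i) * n + j] *= scalar;
  const double t_serial = omp_get_wtime() - t1;

  t1 = omp_get_wtime();
#pragma omp parallel for
  for (int i = 0; i < n; i++)  // rows are independent, so no synchronization needed
    for (int j = 0; j < n; j++) mat[static_cast<size_t>(i) * n + j] *= scalar;
  const double t_parallel = omp_get_wtime() - t1;

  // Build with, e.g.: g++ -O2 -fopenmp <this file>
  printf("serial: %f s, parallel: %f s, speedup: %.2fx\n",
         t_serial, t_parallel, t_serial / t_parallel);
  return 0;
}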
dicts.h
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal
 *
 * This file is part of SPAMS.
 *
 * SPAMS is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * SPAMS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
 */

/*!
 * \file
 * toolbox dictLearn
 *
 * by Julien Mairal
 * julien.mairal@inria.fr
 *
 * File dicts.h
 * \brief Contains dictionary learning algorithms
 * It requires the toolbox decomp
 */

#ifndef DICTS_H
#define DICTS_H

#include <decomp.h>
#include <fista.h>

char buffer_string[50];

enum constraint_type_D { L2, L1L2, L1L2FL, L1L2MU};
enum mode_compute { AUTO, PARAM1, PARAM2, PARAM3};

struct regul_def {
  const char *name;
  FISTA::regul_t regul;
} regul_table[] = {
    {"l0", FISTA::L0},
    {"l1", FISTA::L1},
    {"l2", FISTA::L2},
    {"linf", FISTA::LINF},
    {"none", FISTA::NONE},
    {"elastic-net", FISTA::ELASTICNET},
    {"fused-lasso", FISTA::FUSEDLASSO},
    {"graph", FISTA::GRAPH},
    {"graph-ridge", FISTA::GRAPH_RIDGE},
    {"tree-l0", FISTA::TREE_L0},
    {"tree-l2", FISTA::TREE_L2},
    {"tree-linf", FISTA::TREE_LINF},
};
#define NBREGUL sizeof(regul_table)/sizeof(struct regul_def)

FISTA::regul_t regul_from_string(const char* regul) {
  for(unsigned int i = 0;i < NBREGUL;i++)
    if (strcmp(regul,regul_table[i].name)==0) return regul_table[i].regul;
  return FISTA::INCORRECT_REG;
}

void regul_error(char *buffer, int bufsize,const char *message) {
  int n1 = strlen(message);
  int size = n1;
  if(n1 < bufsize) {
    // calculate size
    for(unsigned int i = 0;i < NBREGUL;i++)
      size += strlen(regul_table[i].name) + 1;
  }
  if (size >= bufsize) {
    n1 = bufsize - 1;
    strncpy(buffer,"Invalid regularization\n",n1);
  } else {
    strncpy(buffer,message,n1);
    for(unsigned int i = 0;i < NBREGUL;i++) {
      int k = strlen(regul_table[i].name);
      strncpy(&buffer[n1],regul_table[i].name,k);
      buffer[n1+k] = ' ';
      n1 += k + 1;
    }
    buffer[n1 - 1] = '\n';
  }
  buffer[n1] = 0;
  return;
}

template <typename T> struct ParamDictLearn {
 public:
  ParamDictLearn() :
      mode(PENALTY),
      regul(FISTA::RIDGE),
      tol(1.e-4),
      ista(false),
      fixed_step(true),
      posAlpha(false),
      modeD(L2),
      posD(false),
      modeParam(AUTO),
      t0(1e-5),
      rho(5),
      gamma1(0),
      mu(0),
      lambda3(0),
      lambda4(0),
      lambda2(0),
      gamma2(0),
      approx(0.0),
      p(1.0),
      whiten(false),
      expand(false),
      isConstant(false),
      updateConstant(true),
      ThetaDiag(false),
      ThetaDiagPlus(false),
      ThetaId(false),
      DequalsW(false),
      weightClasses(false),
      balanceClasses(false),
      extend(false),
      pattern(false),
      stochastic(false),
      scaleW(false),
      batch(false),
      verbose(true),
      clean(true),
      log(false),
      updateD(true),
      updateW(true),
      updateTheta(true),
      logName(NULL),
      iter_updateD(1) { };
  ~ParamDictLearn() { delete[](logName); };
  int iter;
  T lambda;
  constraint_type mode;
  FISTA::regul_t regul;
  T tol;
  bool ista;
  bool fixed_step;
  bool posAlpha;
  constraint_type_D modeD;
  bool posD;
  mode_compute modeParam;
  T t0;
  T rho;
  T gamma1;
  T mu;
  T lambda3;
  T lambda4;
  T lambda2;
  T gamma2;
  T approx;
  T p;
  bool whiten;
  bool expand;
  bool isConstant;
  bool updateConstant;
  bool ThetaDiag;
  bool ThetaDiagPlus;
  bool ThetaId;
  bool DequalsW;
  bool weightClasses;
  bool balanceClasses;
  bool extend;
  bool pattern;
  bool stochastic;
  bool scaleW;
bool batch; bool verbose; bool clean; bool log; bool updateD; bool updateW; bool updateTheta; char* logName; int iter_updateD; }; template <typename T> class Trainer { public: /// Empty constructor Trainer(); /// Constructor with data Trainer(const int k, const int batchsize = 256, const int NUM_THREADS=-1); /// Constructor with initial dictionary Trainer(const Matrix<T>& D, const int batchsize = 256, const int NUM_THREADS=-1); /// Constructor with existing structure Trainer(const Matrix<T>& A, const Matrix<T>& B, const Matrix<T>& D, const int itercount, const int batchsize, const int NUM_THREADS); /// train or retrain using the matrix X /* train with graph or tree */ void train(const Data<T>& X, const ParamDictLearn<T>& param); void train_fista(const Data<T>& X, const ParamDictLearn<T>& param, const GraphStruct<T>* graph_st = NULL, const TreeStruct<T>* tree_st = NULL); void trainOffline(const Data<T>& X, const ParamDictLearn<T>& param); /// Accessors void getA(Matrix<T>& A) const { A.copy(_A);}; void getB(Matrix<T>& B) const { B.copy(_B);}; void getD(Matrix<T>& D) const { D.copy(_D);}; int getIter() const { return _itercount; }; private: /// Forbid lazy copies explicit Trainer<T>(const Trainer<T>& trainer); /// Forbid lazy copies Trainer<T>& operator=(const Trainer<T>& trainer); /// clean the dictionary void cleanDict(const Data<T>& X, Matrix<T>& G, const bool posD = false, const constraint_type_D modeD = L2, const T gamma1 = 0, const T gamma2 = 0, const T maxCorrel = 0.999999); /// clean the dictionary void cleanDict(Matrix<T>& G); Matrix<T> _A; Matrix<T> _B; Matrix<T> _D; int _k; bool _initialDict; int _itercount; int _batchsize; int _NUM_THREADS; }; /// Empty constructor template <typename T> Trainer<T>::Trainer() : _k(0), _initialDict(false), _itercount(0), _batchsize(256) { _NUM_THREADS=1; #ifdef _OPENMP _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs()); #endif _batchsize=floor(_batchsize*(_NUM_THREADS+1)/2); }; /// Constructor with data template <typename T> Trainer<T>::Trainer(const int k, const int batchsize, const int NUM_THREADS) : _k(k), _initialDict(false), _itercount(0),_batchsize(batchsize), _NUM_THREADS(NUM_THREADS) { if (_NUM_THREADS == -1) { _NUM_THREADS=1; #ifdef _OPENMP _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs()); #endif } }; /// Constructor with initial dictionary template <typename T> Trainer<T>::Trainer(const Matrix<T>& D, const int batchsize, const int NUM_THREADS) : _k(D.n()), _initialDict(true),_itercount(0),_batchsize(batchsize), _NUM_THREADS(NUM_THREADS) { _D.copy(D); _A.resize(D.n(),D.n()); _B.resize(D.m(),D.n()); if (_NUM_THREADS == -1) { _NUM_THREADS=1; #ifdef _OPENMP _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs()); #endif } } /// Constructor with existing structure template <typename T> Trainer<T>::Trainer(const Matrix<T>& A, const Matrix<T>& B, const Matrix<T>& D, const int itercount, const int batchsize, const int NUM_THREADS) : _k(D.n()),_initialDict(true),_itercount(itercount), _batchsize(batchsize), _NUM_THREADS(NUM_THREADS) { _D.copy(D); _A.copy(A); _B.copy(B); if (_NUM_THREADS == -1) { _NUM_THREADS=1; #ifdef _OPENMP _NUM_THREADS = MIN(MAX_THREADS,omp_get_num_procs()); #endif } }; template <typename T> void Trainer<T>::cleanDict(const Data<T>& X, Matrix<T>& G, const bool posD, const constraint_type_D modeD, const T gamma1, const T gamma2, const T maxCorrel) { int sparseD = modeD == L1L2 ? 
2 : 6; const int k = _D.n(); const int n = _D.m(); const int M = X.n(); T* const pr_G=G.rawX(); Vector<T> aleat(n); Vector<T> col(n); for (int i = 0; i<k; ++i) { //pr_G[i*k+i] += 1e-10; for (int j = i; j<k; ++j) { if ((j > i && abs(pr_G[i*k+j])/sqrt(pr_G[i*k+i]*pr_G[j*k+j]) > maxCorrel) || (j == i && abs(pr_G[i*k+j]) < 1e-4)) { /// remove element j and replace it by a random element of X const int ind = random() % M; Vector<T> d, g; _D.refCol(j,d); X.getData(col,ind); d.copy(col); if (modeD != L2) { aleat.copy(d); aleat.sparseProject(d,T(1.0),sparseD,gamma1,gamma2,T(2.0),posD); } else { if (posD) d.thrsPos(); d.normalize(); } G.refCol(j,g); _D.multTrans(d,g); for (int l = 0; l<_D.n(); ++l) pr_G[l*k+j] = pr_G[j*k+l]; } } } } template <typename T> void Trainer<T>::cleanDict(Matrix<T>& G) { const int k = _D.n(); T* const pr_G=G.rawX(); for (int i = 0; i<k; ++i) { pr_G[i*k+i] += 1e-10; } } template <typename T> void Trainer<T>::train(const Data<T>& X, const ParamDictLearn<T>& param) { T rho = param.rho; T t0 = param.t0; int sparseD = param.modeD == L1L2 ? 2 : param.modeD == L1L2MU ? 7 : 6; int NUM_THREADS=init_omp(_NUM_THREADS); if (param.verbose) { cout << "num param iterD: " << param.iter_updateD << endl; if (param.batch) { cout << "Batch Mode" << endl; } else if (param.stochastic) { cout << "Stochastic Gradient. rho : " << rho << ", t0 : " << t0 << endl; } else { if (param.modeParam == AUTO) { cout << "Online Dictionary Learning with no parameter " << endl; } else if (param.modeParam == PARAM1) { cout << "Online Dictionary Learning with parameters: " << t0 << " rho: " << rho << endl; } else { cout << "Online Dictionary Learning with exponential decay t0: " << t0 << " rho: " << rho << endl; } } if (param.posD) cout << "Positivity constraints on D activated" << endl; if (param.posAlpha) cout << "Positivity constraints on alpha activated" << endl; if (param.modeD != L2) cout << "Sparse dictionaries, mode: " << param.modeD << ", gamma1: " << param.gamma1 << ", gamma2: " << param.gamma2 << endl; cout << "mode Alpha " << param.mode << endl; if (param.clean) cout << "Cleaning activated " << endl; if (param.log && param.logName) { cout << "log activated " << endl; cerr << param.logName << endl; } if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha) cout << "L2 solver is used" << endl; if (_itercount > 0) cout << "Retraining from iteration " << _itercount << endl; flush(cout); } const int M = X.n(); const int K = _k; const int n = X.m(); const int L = param.mode == SPARSITY ? static_cast<int>(param.lambda) : param.mode == PENALTY && param.lambda == 0 && param.lambda2 > 0 && !param.posAlpha ? K : MIN(n,K); const int batchsize= param.batch ? 
M : MIN(_batchsize,M); if (param.verbose) { cout << "batch size: " << batchsize << endl; cout << "L: " << L << endl; cout << "lambda: " << param.lambda << endl; cout << "mode: " << param.mode << endl; flush(cout); } if (_D.m() != n || _D.n() != K) _initialDict=false; srandom(0); Vector<T> col(n); if (!_initialDict) { _D.resize(n,K); for (int i = 0; i<K; ++i) { const int ind = random() % M; Vector<T> d; _D.refCol(i,d); X.getData(col,ind); d.copy(col); } _initialDict=true; } if (param.verbose) { cout << "*****Online Dictionary Learning*****" << endl; flush(cout); } Vector<T> tmp(n); if (param.modeD != L2) { for (int i = 0; i<K; ++i) { Vector<T> d; _D.refCol(i,d); tmp.copy(d); tmp.sparseProject(d,T(1.0),sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } } else { if (param.posD) _D.thrsPos(); _D.normalize(); } int count=0; int countPrev=0; T scalt0 = abs<T>(t0); if (_itercount == 0) { _A.resize(K,K); _A.setZeros(); _B.resize(n,K); _B.setZeros(); if (!param.batch) { _A.setDiag(scalt0); _B.copy(_D); _B.scal(scalt0); } } //Matrix<T> G(K,K); Matrix<T> Borig(n,K); Matrix<T> Aorig(K,K); Matrix<T> Bodd(n,K); Matrix<T> Aodd(K,K); Matrix<T> Beven(n,K); Matrix<T> Aeven(K,K); SpVector<T>* spcoeffT=new SpVector<T>[NUM_THREADS]; Vector<T>* DtRT=new Vector<T>[NUM_THREADS]; Vector<T>* XT=new Vector<T>[NUM_THREADS]; Matrix<T>* BT=new Matrix<T>[NUM_THREADS]; Matrix<T>* AT=new Matrix<T>[NUM_THREADS]; Matrix<T>* GsT=new Matrix<T>[NUM_THREADS]; Matrix<T>* GaT=new Matrix<T>[NUM_THREADS]; Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS]; Matrix<T>* workT=new Matrix<T>[NUM_THREADS]; Vector<T>* uT=new Vector<T>[NUM_THREADS]; for (int i = 0; i<NUM_THREADS; ++i) { spcoeffT[i].resize(K); DtRT[i].resize(K); XT[i].resize(n); BT[i].resize(n,K); BT[i].setZeros(); AT[i].resize(K,K); AT[i].setZeros(); GsT[i].resize(L,L); GsT[i].setZeros(); invGsT[i].resize(L,L); invGsT[i].setZeros(); GaT[i].resize(K,L); GaT[i].setZeros(); workT[i].resize(K,3); workT[i].setZeros(); uT[i].resize(L); uT[i].setZeros(); } Timer time, time2; time.start(); srandom(0); Vector<int> perm; perm.randperm(M); Aodd.setZeros(); Bodd.setZeros(); Aeven.setZeros(); Beven.setZeros(); Aorig.copy(_A); Borig.copy(_B); int JJ = param.iter < 0 ? 
100000000 : param.iter; bool even=true; int last_written=-40; int i; for (i = 0; i<JJ; ++i) { if (param.verbose) { cout << "Iteration: " << i << endl; flush(cout); } time.stop(); if (param.iter < 0 && time.getElapsed() > T(-param.iter)) break; if (param.log) { int seconds=static_cast<int>(floor(log(time.getElapsed())*5)); if (seconds > last_written) { last_written++; sprintf(buffer_string,"%s_%d.log",param.logName, last_written+40); writeLog(_D,T(time.getElapsed()),i,buffer_string); fprintf(stderr,"\r%d",i); } } time.start(); Matrix<T> G; _D.XtX(G); if (param.clean) this->cleanDict(X,G,param.posD, param.modeD,param.gamma1,param.gamma2); G.addDiag(MAX(param.lambda2,1e-10)); int j; for (j = 0; j<NUM_THREADS; ++j) { AT[j].setZeros(); BT[j].setZeros(); } #pragma omp parallel for private(j) for (j = 0; j<batchsize; ++j) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif const int index=perm[(j+i*batchsize) % M]; Vector<T>& Xj = XT[numT]; SpVector<T>& spcoeffj = spcoeffT[numT]; Vector<T>& DtRj = DtRT[numT]; //X.refCol(index,Xj); X.getData(Xj,index); if (param.whiten) { if (param.pattern) { Vector<T> mean(4); Xj.whiten(mean,param.pattern); } else { Xj.whiten(X.V()); } } _D.multTrans(Xj,DtRj); Matrix<T>& Gs = GsT[numT]; Matrix<T>& Ga = GaT[numT]; Matrix<T>& invGs = invGsT[numT]; Matrix<T>& work= workT[numT]; Vector<T>& u = uT[numT]; Vector<INTM> ind; Vector<T> coeffs_sparse; spcoeffj.setL(L); spcoeffj.refIndices(ind); spcoeffj.refVal(coeffs_sparse); T normX=Xj.nrm2sq(); coeffs_sparse.setZeros(); if (param.mode < SPARSITY) { if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha) { Matrix<T>& GG = G; u.set(0); GG.conjugateGradient(DtRj,u,1e-4,2*K); for (int k = 0; k<K; ++k) { ind[k]=k; coeffs_sparse[k]=u[k]; } } else { coreLARS2(DtRj,G,Gs,Ga,invGs,u,coeffs_sparse,ind,work,normX,param.mode,param.lambda,param.posAlpha); } } else { if (param.mode == SPARSITY) { coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),T(0.0)); } else if (param.mode==L2ERROR2) { coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,param.lambda,T(0.0)); } else { coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),param.lambda); } } int count2=0; for (int k = 0; k<L; ++k) if (ind[k] == -1) { break; } else { ++count2; } sort(ind.rawX(),coeffs_sparse.rawX(),(INTM)0,(INTM)(count2-1)); spcoeffj.setL(count2); AT[numT].rank1Update(spcoeffj); BT[numT].rank1Update(Xj,spcoeffj); } if (param.batch) { _A.setZeros(); _B.setZeros(); for (j = 0; j<NUM_THREADS; ++j) { _A.add(AT[j]); _B.add(BT[j]); } Vector<T> di, ai,bi; Vector<T> newd(n); for (j = 0; j<param.iter_updateD; ++j) { for (int k = 0; k<K; ++k) { if (_A[k*K+k] > 1e-6) { _D.refCol(k,di); _A.refCol(k,ai); _B.refCol(k,bi); _D.mult(ai,newd,T(-1.0)); newd.add(bi); newd.scal(T(1.0)/_A[k*K+k]); newd.add(di); if (param.modeD != L2) { newd.sparseProject(di,T(1.0), sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } else { if (param.posD) newd.thrsPos(); newd.normalize2(); di.copy(newd); } } else if (param.clean) { _D.refCol(k,di); di.setZeros(); } } } } else if (param.stochastic) { _A.setZeros(); _B.setZeros(); for (j = 0; j<NUM_THREADS; ++j) { _A.add(AT[j]); _B.add(BT[j]); } _D.mult(_A,_B,false,false,T(-1.0),T(1.0)); T step_grad=rho/T(t0+batchsize*(i+1)); _D.add(_B,step_grad); Vector<T> dj; Vector<T> dnew(n); if (param.modeD != L2) { for (j = 0; j<K; ++j) { _D.refCol(j,dj); dnew.copy(dj); dnew.sparseProject(dj,T(1.0),sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } } else { for (j = 0; j<K; ++j) { _D.refCol(j,dj); if (param.posD) 
dj.thrsPos(); dj.normalize2(); } } } else { /// Dictionary Update /// Check the epoch parity int epoch = (((i+1) % M)*batchsize) / M; if ((even && ((epoch % 2) == 1)) || (!even && ((epoch % 2) == 0))) { Aodd.copy(Aeven); Bodd.copy(Beven); Aeven.setZeros(); Beven.setZeros(); count=countPrev; countPrev=0; even=!even; } int ii=_itercount+i; int num_elem=MIN(2*M, ii < batchsize ? ii*batchsize : batchsize*batchsize+ii-batchsize); T scal2=T(T(1.0)/batchsize); T scal; int totaliter=_itercount+count; if (param.modeParam == PARAM2) { scal=param.rho; } else if (param.modeParam == PARAM1) { scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-rho)); } else { scal = T(_itercount+num_elem+1- batchsize)/T(_itercount+num_elem+1); } Aeven.scal(scal); Beven.scal(scal); Aodd.scal(scal); Bodd.scal(scal); if ((_itercount > 0 && i*batchsize < M) || (_itercount == 0 && t0 != 0 && i*batchsize < 10000)) { Aorig.scal(scal); Borig.scal(scal); _A.copy(Aorig); _B.copy(Borig); } else { _A.setZeros(); _B.setZeros(); } for (j = 0; j<NUM_THREADS; ++j) { Aeven.add(AT[j],scal2); Beven.add(BT[j],scal2); } _A.add(Aodd); _A.add(Aeven); _B.add(Bodd); _B.add(Beven); ++count; ++countPrev; Vector<T> di, ai,bi; Vector<T> newd(n); for (j = 0; j<param.iter_updateD; ++j) { for (int k = 0; k<K; ++k) { if (_A[k*K+k] > 1e-6) { _D.refCol(k,di); _A.refCol(k,ai); _B.refCol(k,bi); _D.mult(ai,newd,T(-1.0)); newd.add(bi); newd.scal(T(1.0)/_A[k*K+k]); newd.add(di); if (param.modeD != L2) { newd.sparseProject(di,T(1.0),sparseD, param.gamma1,param.gamma2,T(2.0),param.posD); } else { if (param.posD) newd.thrsPos(); newd.normalize2(); di.copy(newd); } } else if (param.clean && ((_itercount+i)*batchsize) > 10000) { _D.refCol(k,di); di.setZeros(); } } } } } _itercount += i; if (param.verbose) time.printElapsed(); delete[](spcoeffT); delete[](DtRT); delete[](AT); delete[](BT); delete[](GsT); delete[](invGsT); delete[](GaT); delete[](uT); delete[](XT); delete[](workT); }; template <typename T> void Trainer<T>::train_fista(const Data<T>& X, const ParamDictLearn<T>& param, const GraphStruct<T>* graph_st, const TreeStruct<T>* tree_st) { T rho = param.rho; T t0 = param.t0; int sparseD = param.modeD == L1L2 ? 2 : param.modeD == L1L2MU ? 7 : 6; int NUM_THREADS=init_omp(_NUM_THREADS); if (param.verbose) { cout << "num param iterD: " << param.iter_updateD << endl; if (param.batch) { cout << "Batch Mode" << endl; } else if (param.stochastic) { cout << "Stochastic Gradient. 
rho : " << rho << ", t0 : " << t0 << endl; } else { if (param.modeParam == AUTO) { cout << "Online Dictionary Learning with no parameter " << endl; } else if (param.modeParam == PARAM1) { cout << "Online Dictionary Learning with parameters: " << t0 << " rho: " << rho << endl; } else { cout << "Online Dictionary Learning with exponential decay t0: " << t0 << " rho: " << rho << endl; } } if (param.posD) cout << "Positivity constraints on D activated" << endl; if (param.posAlpha) cout << "Positivity constraints on alpha activated" << endl; if (param.modeD != L2) cout << "Sparse dictionaries, mode: " << param.modeD << ", gamma1: " << param.gamma1 << ", gamma2: " << param.gamma2 << endl; cout << "mode Alpha " << param.mode << endl; if (param.clean) cout << "Cleaning activated " << endl; if (param.log && param.logName) { cout << "log activated " << endl; cerr << param.logName << endl; } if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha) cout << "L2 solver is used" << endl; if (_itercount > 0) cout << "Retraining from iteration " << _itercount << endl; flush(cout); } const int M = X.n(); const int K = _k; const int n = X.m(); const int L = param.mode == SPARSITY ? static_cast<int>(param.lambda) : param.mode == PENALTY && param.lambda == 0 && param.lambda2 > 0 && !param.posAlpha ? K : MIN(n,K); const int batchsize= param.batch ? M : MIN(_batchsize,M); if (param.verbose) { cout << "batch size: " << batchsize << endl; cout << "L: " << L << endl; cout << "lambda: " << param.lambda << endl; cout << "mode: " << param.mode << endl; flush(cout); } if (_D.m() != n || _D.n() != K) _initialDict=false; srandom(0); Vector<T> col(n); if (!_initialDict) { _D.resize(n,K); for (int i = 0; i<K; ++i) { const int ind = random() % M; Vector<T> d; _D.refCol(i,d); X.getData(col,ind); d.copy(col); } _initialDict=true; } if (param.verbose) { cout << "*****Online Dictionary Learning*****" << endl; flush(cout); } Vector<T> tmp(n); if (param.modeD != L2) { for (int i = 0; i<K; ++i) { Vector<T> d; _D.refCol(i,d); tmp.copy(d); tmp.sparseProject(d,T(1.0),sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } } else { if (param.posD) _D.thrsPos(); _D.normalize(); } int count=0; int countPrev=0; T scalt0 = abs<T>(t0); if (_itercount == 0) { _A.resize(K,K); _A.setZeros(); _B.resize(n,K); _B.setZeros(); if (!param.batch) { _A.setDiag(scalt0); _B.copy(_D); _B.scal(scalt0); } } //Matrix<T> G(K,K); Matrix<T> Borig(n,K); Matrix<T> Aorig(K,K); Matrix<T> Bodd(n,K); Matrix<T> Aodd(K,K); Matrix<T> Beven(n,K); Matrix<T> Aeven(K,K); SpVector<T>* spcoeffT=new SpVector<T>[NUM_THREADS]; Vector<T>* DtRT=new Vector<T>[NUM_THREADS]; Vector<T>* XT=new Vector<T>[NUM_THREADS]; Matrix<T>* BT=new Matrix<T>[NUM_THREADS]; Matrix<T>* AT=new Matrix<T>[NUM_THREADS]; Matrix<T>* GsT=new Matrix<T>[NUM_THREADS]; Matrix<T>* GaT=new Matrix<T>[NUM_THREADS]; Matrix<T>* invGsT=new Matrix<T>[NUM_THREADS]; Matrix<T>* workT=new Matrix<T>[NUM_THREADS]; Vector<T>* uT=new Vector<T>[NUM_THREADS]; FISTA::Loss<T>** losses = new FISTA::Loss<T>*[NUM_THREADS]; FISTA::Regularizer<T>** regularizers= new FISTA::Regularizer<T>*[NUM_THREADS]; Vector<T>* alphaT = new Vector<T>[_NUM_THREADS]; Matrix<T> G; FISTA::ParamFISTA<T> param_fista; if(param.mode == FISTAMODE) { param_fista.regul = param.regul; param_fista.pos = param.posAlpha; param_fista.lambda = param.lambda; param_fista.lambda2 = param.lambda2; param_fista.lambda3 = param.lambda3; // param_fista.verbose = param.verbose; param_fista.tol = param.tol; param_fista.ista = param.ista; 
param_fista.fixed_step = param.fixed_step; if(param.fixed_step) param_fista.L0 = (T)K; else param_fista.L0 = 1.; } if(FISTA::regul_for_matrices(param.regul)) { cerr << "dicts.h : regul_for_matrices not implemented\n"; exit(1); } for (int i = 0; i<NUM_THREADS; ++i) { spcoeffT[i].resize(K); alphaT[i].resize(K); DtRT[i].resize(K); XT[i].resize(n); BT[i].resize(n,K); BT[i].setZeros(); AT[i].resize(K,K); AT[i].setZeros(); GsT[i].resize(L,L); GsT[i].setZeros(); invGsT[i].resize(L,L); invGsT[i].setZeros(); GaT[i].resize(K,L); GaT[i].setZeros(); workT[i].resize(K,3); workT[i].setZeros(); uT[i].resize(L); uT[i].setZeros(); regularizers[i]=FISTA::setRegularizerVectors(param_fista,graph_st,tree_st); losses[i]=new FISTA::SqLoss<T>(_D,G); } Timer time, time2; time.start(); srandom(0); Vector<int> perm; perm.randperm(M); Aodd.setZeros(); Bodd.setZeros(); Aeven.setZeros(); Beven.setZeros(); Aorig.copy(_A); Borig.copy(_B); int JJ = param.iter < 0 ? 100000000 : param.iter; bool even=true; int last_written=-40; int i; for (i = 0; i<JJ; ++i) { if (param.verbose) { cout << "Iteration: " << i << endl; flush(cout); } time.stop(); if (param.iter < 0 && time.getElapsed() > T(-param.iter)) break; if (param.log) { int seconds=static_cast<int>(floor(log(time.getElapsed())*5)); if (seconds > last_written) { last_written++; sprintf(buffer_string,"%s_%d.log",param.logName, last_written+40); writeLog(_D,T(time.getElapsed()),i,buffer_string); fprintf(stderr,"\r%d",i); } } time.start(); // !! Matrix<T> G; _D.XtX(G); if (param.clean) this->cleanDict(X,G,param.posD, param.modeD,param.gamma1,param.gamma2); G.addDiag(MAX(param.lambda2,1e-10)); int j; for (j = 0; j<NUM_THREADS; ++j) { AT[j].setZeros(); BT[j].setZeros(); } #pragma omp parallel for private(j) for (j = 0; j<batchsize; ++j) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif const int index=perm[(j+i*batchsize) % M]; Vector<T>& Xj = XT[numT]; SpVector<T>& spcoeffj = spcoeffT[numT]; Vector<T>& DtRj = DtRT[numT]; //X.refCol(index,Xj); X.getData(Xj,index); if (param.whiten) { if (param.pattern) { Vector<T> mean(4); Xj.whiten(mean,param.pattern); } else { Xj.whiten(X.V()); } } _D.multTrans(Xj,DtRj); Matrix<T>& Gs = GsT[numT]; Matrix<T>& Ga = GaT[numT]; Matrix<T>& invGs = invGsT[numT]; Matrix<T>& work= workT[numT]; Vector<T>& u = uT[numT]; Vector<INTM> ind; Vector<T> coeffs_sparse; spcoeffj.setL(L); spcoeffj.refIndices(ind); spcoeffj.refVal(coeffs_sparse); T normX=Xj.nrm2sq(); coeffs_sparse.setZeros(); if (param.mode < SPARSITY) { if (param.mode == PENALTY && param.lambda==0 && param.lambda2 > 0 && !param.posAlpha) { Matrix<T>& GG = G; u.set(0); GG.conjugateGradient(DtRj,u,1e-4,2*K); for (int k = 0; k<K; ++k) { ind[k]=k; coeffs_sparse[k]=u[k]; } } else { coreLARS2(DtRj,G,Gs,Ga,invGs,u,coeffs_sparse,ind,work,normX,param.mode,param.lambda,param.posAlpha); } } else if(param.mode == FISTAMODE) { Vector<T> optim_infoi(4); Vector<T> alpha0i(_D.n()); alpha0i.setZeros(); Vector<T>& alphai = alphaT[numT]; losses[numT]->init(Xj); regularizers[numT]->reset(); // alpha.refCol(j,alphai); FISTA::FISTA_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param_fista); alphai.toSparse(spcoeffj); } else { if (param.mode == SPARSITY) { coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),T(0.0)); } else if (param.mode==L2ERROR2) { coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,param.lambda,T(0.0)); } else { coreORMPB(DtRj,G,ind,coeffs_sparse,normX,L,T(0.0),param.lambda); } } if(param.mode != FISTAMODE) { INTM count2=0; for (int k = 0; k<L; ++k) if 
(ind[k] == -1) { break; } else { ++count2; } sort(ind.rawX(),coeffs_sparse.rawX(),(INTM)0,count2-1); spcoeffj.setL(count2); } AT[numT].rank1Update(spcoeffj); BT[numT].rank1Update(Xj,spcoeffj); } if (param.batch) { _A.setZeros(); _B.setZeros(); for (j = 0; j<NUM_THREADS; ++j) { _A.add(AT[j]); _B.add(BT[j]); } Vector<T> di, ai,bi; Vector<T> newd(n); for (j = 0; j<param.iter_updateD; ++j) { for (int k = 0; k<K; ++k) { if (_A[k*K+k] > 1e-6) { _D.refCol(k,di); _A.refCol(k,ai); _B.refCol(k,bi); _D.mult(ai,newd,T(-1.0)); newd.add(bi); newd.scal(T(1.0)/_A[k*K+k]); newd.add(di); if (param.modeD != L2) { newd.sparseProject(di,T(1.0), sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } else { if (param.posD) newd.thrsPos(); newd.normalize2(); di.copy(newd); } } else if (param.clean) { _D.refCol(k,di); di.setZeros(); } } } } else if (param.stochastic) { _A.setZeros(); _B.setZeros(); for (j = 0; j<NUM_THREADS; ++j) { _A.add(AT[j]); _B.add(BT[j]); } _D.mult(_A,_B,false,false,T(-1.0),T(1.0)); T step_grad=rho/T(t0+batchsize*(i+1)); _D.add(_B,step_grad); Vector<T> dj; Vector<T> dnew(n); if (param.modeD != L2) { for (j = 0; j<K; ++j) { _D.refCol(j,dj); dnew.copy(dj); dnew.sparseProject(dj,T(1.0),sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } } else { for (j = 0; j<K; ++j) { _D.refCol(j,dj); if (param.posD) dj.thrsPos(); dj.normalize2(); } } } else { /// Dictionary Update /// Check the epoch parity int epoch = (((i+1) % M)*batchsize) / M; if ((even && ((epoch % 2) == 1)) || (!even && ((epoch % 2) == 0))) { Aodd.copy(Aeven); Bodd.copy(Beven); Aeven.setZeros(); Beven.setZeros(); count=countPrev; countPrev=0; even=!even; } int ii=_itercount+i; int num_elem=MIN(2*M, ii < batchsize ? ii*batchsize : batchsize*batchsize+ii-batchsize); T scal2=T(T(1.0)/batchsize); T scal; int totaliter=_itercount+count; if (param.modeParam == PARAM2) { scal=param.rho; } else if (param.modeParam == PARAM1) { scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-rho)); } else { scal = T(_itercount+num_elem+1- batchsize)/T(_itercount+num_elem+1); } Aeven.scal(scal); Beven.scal(scal); Aodd.scal(scal); Bodd.scal(scal); if ((_itercount > 0 && i*batchsize < M) || (_itercount == 0 && t0 != 0 && i*batchsize < 10000)) { Aorig.scal(scal); Borig.scal(scal); _A.copy(Aorig); _B.copy(Borig); } else { _A.setZeros(); _B.setZeros(); } for (j = 0; j<NUM_THREADS; ++j) { Aeven.add(AT[j],scal2); Beven.add(BT[j],scal2); } _A.add(Aodd); _A.add(Aeven); _B.add(Bodd); _B.add(Beven); ++count; ++countPrev; Vector<T> di, ai,bi; Vector<T> newd(n); for (j = 0; j<param.iter_updateD; ++j) { for (int k = 0; k<K; ++k) { if (_A[k*K+k] > 1e-6) { _D.refCol(k,di); _A.refCol(k,ai); _B.refCol(k,bi); _D.mult(ai,newd,T(-1.0)); newd.add(bi); newd.scal(T(1.0)/_A[k*K+k]); newd.add(di); if (param.modeD != L2) { newd.sparseProject(di,T(1.0),sparseD, param.gamma1,param.gamma2,T(2.0),param.posD); } else { if (param.posD) newd.thrsPos(); newd.normalize2(); di.copy(newd); } } else if (param.clean && ((_itercount+i)*batchsize) > 10000) { _D.refCol(k,di); di.setZeros(); } } } } } _itercount += i; if (param.verbose) time.printElapsed(); delete[](spcoeffT); delete[](DtRT); delete[](AT); delete[](BT); delete[](GsT); delete[](invGsT); delete[](GaT); delete[](uT); delete[](XT); delete[](workT); for (int i = 0; i<NUM_THREADS; ++i) { delete (losses[i]); losses[i] = NULL; delete (regularizers[i]); regularizers[i]=NULL; } delete[] (losses); delete[] (regularizers); }; template <typename T> void writeLog(const Matrix<T>& D, const T time, int iter, char* name) { std::ofstream f; 
f.precision(12); f.flags(std::ios_base::scientific); f.open(name, ofstream::trunc); f << time << " " << iter << std::endl; for (int i = 0; i<D.n(); ++i) { for (int j = 0; j<D.m(); ++j) { f << D[i*D.m()+j] << " "; } f << std::endl; } f << std::endl; f.close(); }; template <typename T> void Trainer<T>::trainOffline(const Data<T>& X, const ParamDictLearn<T>& param) { int sparseD = param.modeD == L1L2 ? 2 : 6; int J = param.iter; int batch_size= _batchsize; int batchsize= _batchsize; int NUM_THREADS=init_omp(_NUM_THREADS); const int n = X.m(); const int K = _k; const int M = X.n(); cout << "*****Offline Dictionary Learning*****" << endl; fprintf(stderr,"num param iterD: %d\n",param.iter_updateD); cout << "batch size: " << _batchsize << endl; cout << "lambda: " << param.lambda << endl; cout << "X: " << n << " x " << M << endl; cout << "D: " << n << " x " << K << endl; flush(cout); srandom(0); Vector<T> col(n); if (!_initialDict) { _D.resize(n,K); for (int i = 0; i<K; ++i) { const int ind = random() % M; Vector<T> d; _D.refCol(i,d); X.getData(col,ind); d.copy(col); } _initialDict=true; } Vector<T> tmp(n); if (param.modeD != L2) { for (int i = 0; i<K; ++i) { Vector<T> d; _D.refCol(i,d); tmp.copy(d); tmp.sparseProject(d,T(1.0),sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } } else { if (param.posD) _D.thrsPos(); _D.normalize(); } Matrix<T> G(K,K); Matrix<T> coeffs(K,M); coeffs.setZeros(); Matrix<T> B(n,K); Matrix<T> A(K,K); SpVector<T>* spcoeffT=new SpVector<T>[NUM_THREADS]; Vector<T>* DtRT=new Vector<T>[NUM_THREADS]; Vector<T>* coeffsoldT=new Vector<T>[NUM_THREADS]; Matrix<T>* BT=new Matrix<T>[NUM_THREADS]; Matrix<T>* AT=new Matrix<T>[NUM_THREADS]; for (int i = 0; i<NUM_THREADS; ++i) { spcoeffT[i].resize(K); DtRT[i].resize(K); coeffsoldT[i].resize(K); BT[i].resize(n,K); BT[i].setZeros(); AT[i].resize(K,K); AT[i].setZeros(); } Timer time; time.start(); srandom(0); Vector<int> perm; perm.randperm(M); int JJ = J < 0 ? 100000000 : J; Vector<T> weights(M); weights.setZeros(); for (int i = 0; i<JJ; ++i) { if (J < 0 && time.getElapsed() > T(-J)) break; _D.XtX(G); if (param.clean) this->cleanDict(X,G,param.posD, param.modeD,param.gamma1,param.gamma2); int j; #pragma omp parallel for private(j) for (j = 0; j<batch_size; ++j) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif const int ind=perm[(j+i*batch_size) % M]; Vector<T> Xj, coeffj; SpVector<T>& spcoeffj = spcoeffT[numT]; Vector<T>& DtRj = DtRT[numT]; Vector<T>& oldcoeffj = coeffsoldT[numT]; X.getData(Xj,ind); if (param.whiten) { if (param.pattern) { Vector<T> mean(4); Xj.whiten(mean,param.pattern); } else { Xj.whiten(X.V()); } } coeffs.refCol(ind,coeffj); oldcoeffj.copy(coeffj); _D.multTrans(Xj,DtRj); coeffj.toSparse(spcoeffj); G.mult(spcoeffj,DtRj,T(-1.0),T(1.0)); if (param.mode == PENALTY) { coreIST(G,DtRj,coeffj,param.lambda,200,T(1e-3)); } else { T normX = Xj.nrm2sq(); coreISTconstrained(G,DtRj,coeffj,normX,param.lambda,200,T(1e-3)); } oldcoeffj.toSparse(spcoeffj); AT[numT].rank1Update(spcoeffj,-weights[ind]); coeffj.toSparse(spcoeffj); AT[numT].rank1Update(spcoeffj); weights[ind]++; oldcoeffj.scal(weights[ind]); oldcoeffj.sub(coeffj); oldcoeffj.toSparse(spcoeffj); BT[numT].rank1Update(Xj,spcoeffj,T(-1.0)); } A.setZeros(); B.setZeros(); T scal; int totaliter=i; int ii = i; int num_elem=MIN(2*M, ii < batchsize ? 
ii*batchsize : batchsize*batchsize+ii-batchsize); if (param.modeParam == PARAM2) { scal=param.rho; } else if (param.modeParam == PARAM1) { scal=MAX(0.95,pow(T(totaliter)/T(totaliter+1),-param.rho)); } else { scal = T(num_elem+1- batchsize)/T(num_elem+1); } for (j = 0; j<NUM_THREADS; ++j) { A.add(AT[j]); B.add(BT[j]); AT[j].scal(scal); BT[j].scal(scal); } weights.scal(scal); Vector<T> di, ai,bi; Vector<T> newd(n); for (j = 0; j<param.iter_updateD; ++j) { for (int k = 0; k<K; ++k) { if (A[k*K+k] > 1e-6) { _D.refCol(k,di); A.refCol(k,ai); B.refCol(k,bi); _D.mult(ai,newd,T(-1.0)); newd.add(bi); newd.scal(T(1.0)/A[k*K+k]); newd.add(di); if (param.modeD != L2) { newd.sparseProject(di,T(1.0), sparseD,param.gamma1, param.gamma2,T(2.0),param.posD); } else { if (param.posD) newd.thrsPos(); newd.normalize2(); di.copy(newd); } } else if (param.clean) { _D.refCol(k,di); di.setZeros(); } } } } _D.XtX(G); if (param.clean) this->cleanDict(X,G,param.posD,param.modeD, param.gamma1,param.gamma2); time.printElapsed(); delete[](spcoeffT); delete[](DtRT); delete[](AT); delete[](BT); delete[](coeffsoldT); } #endif
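Editorial note: both trainers above refresh each atom k with enough accumulated weight (A[k][k] > 1e-6) by the same block-coordinate step, newd = d_k + (b_k - D a_k) / A[k][k], followed by a projection back onto the constraint set (a sparse projection when modeD != L2, otherwise normalize2). Below is a minimal sketch of the L2 branch using plain column-major arrays instead of the library's Matrix/Vector classes; update_dictionary and eps are illustrative names, and the unit-ball projection is this editor's reading of normalize2(), not a guaranteed match.

#include <math.h>
#include <stddef.h>

/* One pass of the block-coordinate atom refresh used by both trainers:
 *   newd = d_k + (b_k - D * a_k) / A[k][k],  then project newd back onto
 * the unit l2 ball and store it in column k of D. D is n x K, A is K x K,
 * B is n x K, all column-major; newd is an n-vector of scratch space. */
static void update_dictionary(double *D, const double *A, const double *B,
                              double *newd, size_t n, size_t K, double eps)
{
    for (size_t k = 0; k < K; ++k) {
        double akk = A[k * K + k];
        if (akk <= eps)                       /* unused atom: leave it alone */
            continue;                         /* (the trainers use eps = 1e-6) */
        const double *ak = A + k * K;         /* k-th column of A */
        const double *bk = B + k * n;         /* k-th column of B */
        double *dk = D + k * n;               /* k-th column of D */
        for (size_t i = 0; i < n; ++i) {
            double Dai = 0.0;                 /* (D * a_k)_i */
            for (size_t j = 0; j < K; ++j)
                Dai += D[j * n + i] * ak[j];
            newd[i] = dk[i] + (bk[i] - Dai) / akk;
        }
        double nrm = 0.0;                     /* ||newd||_2 */
        for (size_t i = 0; i < n; ++i)
            nrm += newd[i] * newd[i];
        nrm = sqrt(nrm);
        /* project onto the unit l2 ball: rescale only if the norm exceeds 1 */
        double s = (nrm > 1.0) ? 1.0 / nrm : 1.0;
        for (size_t i = 0; i < n; ++i)
            dk[i] = s * newd[i];
    }
}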
GB_binop__islt_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__islt_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_uint32) // A*D function (colscale): GB (_AxD__islt_uint32) // D*A function (rowscale): GB (_DxB__islt_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint32) // C=scalar+B GB (_bind1st__islt_uint32) // C=scalar+B' GB (_bind1st_tran__islt_uint32) // C=A+scalar GB (_bind2nd__islt_uint32) // C=A'+scalar GB (_bind2nd_tran__islt_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__islt_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__islt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
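Editorial note: underneath the GBB/GBX macros, the bind2nd kernel above is a flat elementwise compare against a scalar. The sketch below shows what GB (_bind2nd__islt_uint32) reduces to when A is full (no bitmap, so every entry is present) and not iso; islt_bind2nd_full is an illustrative name, not a GraphBLAS entry point.

#include <stdint.h>

/* What the generated bind2nd kernel boils down to for ISLT_UINT32 when A is
 * full and not iso: Cx [p] = (Ax [p] < y) for all p. ISLT returns its
 * boolean result in the operand type, hence uint32_t. */
static void islt_bind2nd_full (uint32_t *Cx, const uint32_t *Ax, uint32_t y,
                               int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] < y) ;   /* GB_BINOP (z, aij, y, i, j): z = (x < y) */
    }
}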
hashOMP.h
#include <iostream>
#include <fstream>
#include <string>
#include <omp.h>
#include "e2r.h"

using namespace std;

/* Builds an element -> rank lookup table from a CSV file whose rows have the
   form "rank,element,element,...". Rows are processed in parallel and the
   finished table is handed to the elementToprocessor object. */
void hash_table(elementToprocessor &obj)
{
    omp_set_num_threads(8);   /* hard-coded thread count kept from the original */

    ifstream input("65536r2e.csv");    /* first pass: count rows and columns */
    ifstream input1("65536r2e.csv");   /* second pass: parse the values */

    int line_count = 0;
    int max = 0;   /* widest row, i.e. largest number of comma-separated fields */
    for (string line; getline(input, line);) {
        line_count++;
        int col_count = 1;
        for (size_t i = 0; i < line.size(); i++) {
            if (line[i] == ',') col_count++;
        }
        if (col_count > max) max = col_count;
    }

    /* Parse into a dense table, padding short rows with 0 and tracking the
       largest element value (assumes a well-formed, purely numeric CSV). */
    int arr[line_count][max];
    int max_el = 0;
    int row = 0, column = 0;
    string str;
    for (string line; getline(input1, line);) {
        for (size_t z = 0; z < line.size(); z++) {
            if (line[z] == ',') {
                arr[row][column] = stoi(str);
                if (stoi(str) > max_el) max_el = stoi(str);
                str = "";
                column++;
            } else {
                str += line[z];
            }
        }
        arr[row][column] = stoi(str);
        if (stoi(str) > max_el) max_el = stoi(str);
        for (int j = column + 1; j < max; j++) arr[row][j] = 0;
        str = "";
        row++;
        column = 0;
    }

    /* Column 0 holds the row's rank; every other nonzero entry is an element
       mapped to that rank. RowRank is declared inside the loop so that each
       iteration (and hence each thread) has its own copy; the shared RowRank
       of the original was a data race. count1 counts the iterations executed
       by thread 1 and is combined safely through the reduction clause. */
    int count1 = 0;
    int RankArrayarr[max_el + 1];
    #pragma omp parallel for reduction(+:count1)
    for (int i = 0; i < line_count; i++) {
        if (omp_get_thread_num() == 1) count1++;
        int RowRank = arr[i][0];
        for (int j = 1; j < max; j++) {
            if (arr[i][j] != 0) RankArrayarr[arr[i][j]] = RowRank;
        }
    }
    cout << "Thread 1 in my rank " << count1 << endl;

    obj.mem_allocate(max_el + 1);
    obj.hashArray(RankArrayarr);
}
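Editorial note: the reduction(+:count1) clause used above is the standard cure for the shared-counter race in the original code; a bare count1++ on a shared variable inside #pragma omp parallel for is undefined behavior. A standalone demo of the idiom:

#include <stdio.h>

/* With reduction(+:count) each thread increments a private copy and OpenMP
 * sums the copies after the loop, so the result is deterministic; without
 * the clause, concurrent count++ on the shared variable is a data race. */
int main(void)
{
    long count = 0;
    #pragma omp parallel for reduction(+:count)
    for (int i = 0; i < 1000000; i++) {
        count++;
    }
    printf("count = %ld (expected 1000000)\n", count);
    return 0;
}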
restrict_mex.c
#include <inttypes.h> #include <omp.h> #include "mex.h" #include "restrict_mex.h" void restrictf(float *x2, const float *x, const uint8_t *G2, const size_t *sz2, const size_t *sz); void restrictd(double *x2, const double *x, const uint8_t *G2, const size_t *sz2, const size_t *sz); #ifdef RESTRICT_MEX void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if ((nrhs != 3) || (nlhs > 1)) { mexErrMsgTxt("Usage: restrict_mex(x2, x, G2);"); } const uint8_t *G2 = (const uint8_t *)mxGetData(prhs[2]); const size_t *sz2 = (const size_t *)mxGetDimensions(prhs[0]); const size_t *sz = (const size_t *)mxGetDimensions(prhs[1]); if (mxIsSingle(prhs[0])) { float *x2 = (float *)mxGetData(prhs[0]); const float *x = (const float *)mxGetData(prhs[1]); restrictf(x2, x, G2, sz2, sz); } else { double *x2 = (double *)mxGetData(prhs[0]); const double *x = (const double *)mxGetData(prhs[1]); restrictd(x2, x, G2, sz2, sz); } if (nlhs == 1) { plhs[0] = mxCreateDoubleScalar(1.0); } return; } #endif void mx_restrict(mxArray *mxx2, const mxArray *mxx, const mxArray *mxG2) { const uint8_t *G2 = (const uint8_t *)mxGetData(mxG2); const size_t *sz2 = (const size_t *)mxGetDimensions(mxx2); const size_t *sz = (const size_t *)mxGetDimensions(mxx); if (mxIsSingle(mxx2)) { float *x2 = (float *)mxGetData(mxx2); const float *x = (const float *)mxGetData(mxx); restrictf(x2, x, G2, sz2, sz); } else { double *x2 = (double *)mxGetData(mxx2); const double *x = (const double *)mxGetData(mxx); restrictd(x2, x, G2, sz2, sz); } return; } void restrictf(float *x2, const float *x, const uint8_t *G2, const size_t *sz2, const size_t *sz) { size_t i2, j2, k2; size_t l2, lk2; size_t l, lk; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nx2 = sz2[0]; const size_t ny2 = sz2[1]; const size_t nz2 = sz2[2]; const size_t nxny2 = nx2*ny2; const size_t NX2 = nx2-1; const size_t NY2 = ny2-1; const size_t NZ2 = nz2-1; /* 1/64 * [:, :, 1] = 1 2 1 2 4 2 1 2 1 [:, :, 2] = 2 4 2 4 8 4 2 4 2 [:, :, 3] = 1 2 1 2 4 2 1 2 1 */ const size_t o111 = -1 -nx -nxny; /* 1 */ const size_t o211 = 0 -nx -nxny; /* 2 */ const size_t o311 = 1 -nx -nxny; /* 1 */ const size_t o121 = -1 +0 -nxny; /* 2 */ const size_t o221 = 0 +0 -nxny; /* 4 */ const size_t o321 = 1 +0 -nxny; /* 2 */ const size_t o131 = -1 +nx -nxny; /* 1 */ const size_t o231 = 0 +nx -nxny; /* 2 */ const size_t o331 = 1 +nx -nxny; /* 1 */ const size_t o112 = -1 -nx +0; /* 2 */ const size_t o212 = 0 -nx +0; /* 4 */ const size_t o312 = 1 -nx +0; /* 2 */ const size_t o122 = -1 +0 +0; /* 4 */ const size_t o322 = 1 +0 +0; /* 4 */ const size_t o132 = -1 +nx +0; /* 2 */ const size_t o232 = 0 +nx +0; /* 4 */ const size_t o332 = 1 +nx +0; /* 2 */ const size_t o113 = -1 -nx +nxny; /* 1 */ const size_t o213 = 0 -nx +nxny; /* 2 */ const size_t o313 = 1 -nx +nxny; /* 1 */ const size_t o123 = -1 +0 +nxny; /* 2 */ const size_t o223 = 0 +0 +nxny; /* 4 */ const size_t o323 = 1 +0 +nxny; /* 2 */ const size_t o133 = -1 +nx +nxny; /* 1 */ const size_t o233 = 0 +nx +nxny; /* 2 */ const size_t o333 = 1 +nx +nxny; /* 1 */ #pragma omp parallel for private(i2,j2,k2,l2,lk2,l,lk) schedule(static) \ if (nxny*nz > 32*32*32) for(k2 = 1; k2 < NZ2; ++k2) { lk2 = nxny2*k2; lk = nxny*((k2<<1)-1); for(j2 = 1; j2 < NY2; ++j2) { l2 = 1 + nx2*j2 + lk2; l = 1 + nx*((j2<<1)-1) + lk; for(i2 = 1; i2 < NX2; ++i2, ++l2, l += 2) { x2[l2] = G2[l2] ? 
0.015625f * ( x[l+o111] + x[l+o311] + x[l+o131] + x[l+o331] + x[l+o113] + x[l+o313] + x[l+o133] + x[l+o333] ) + 0.03125f * ( x[l+o211] + x[l+o121] + x[l+o321] + x[l+o231] + x[l+o312] + x[l+o112] + x[l+o132] + x[l+o332] + x[l+o213] + x[l+o123] + x[l+o323] + x[l+o233] ) + 0.0625f * ( x[l+o221] + x[l+o212] + x[l+o122] + x[l+o322] + x[l+o232] + x[l+o223] ) + 0.125f * x[l] : 0.0f; } } } return; } void restrictd(double *x2, const double *x, const uint8_t *G2, const size_t *sz2, const size_t *sz) { size_t i2, j2, k2; size_t l2, lk2; size_t l, lk; const size_t nx = sz[0]; const size_t ny = sz[1]; const size_t nz = sz[2]; const size_t nxny = nx*ny; const size_t nx2 = sz2[0]; const size_t ny2 = sz2[1]; const size_t nz2 = sz2[2]; const size_t nxny2 = nx2*ny2; const size_t NX2 = nx2-1; const size_t NY2 = ny2-1; const size_t NZ2 = nz2-1; /* 1/64 * [:, :, 1] = 1 2 1 2 4 2 1 2 1 [:, :, 2] = 2 4 2 4 8 4 2 4 2 [:, :, 3] = 1 2 1 2 4 2 1 2 1 */ const size_t o111 = -1 -nx -nxny; /* 1 */ const size_t o211 = 0 -nx -nxny; /* 2 */ const size_t o311 = 1 -nx -nxny; /* 1 */ const size_t o121 = -1 +0 -nxny; /* 2 */ const size_t o221 = 0 +0 -nxny; /* 4 */ const size_t o321 = 1 +0 -nxny; /* 2 */ const size_t o131 = -1 +nx -nxny; /* 1 */ const size_t o231 = 0 +nx -nxny; /* 2 */ const size_t o331 = 1 +nx -nxny; /* 1 */ const size_t o112 = -1 -nx +0; /* 2 */ const size_t o212 = 0 -nx +0; /* 4 */ const size_t o312 = 1 -nx +0; /* 2 */ const size_t o122 = -1 +0 +0; /* 4 */ const size_t o322 = 1 +0 +0; /* 4 */ const size_t o132 = -1 +nx +0; /* 2 */ const size_t o232 = 0 +nx +0; /* 4 */ const size_t o332 = 1 +nx +0; /* 2 */ const size_t o113 = -1 -nx +nxny; /* 1 */ const size_t o213 = 0 -nx +nxny; /* 2 */ const size_t o313 = 1 -nx +nxny; /* 1 */ const size_t o123 = -1 +0 +nxny; /* 2 */ const size_t o223 = 0 +0 +nxny; /* 4 */ const size_t o323 = 1 +0 +nxny; /* 2 */ const size_t o133 = -1 +nx +nxny; /* 1 */ const size_t o233 = 0 +nx +nxny; /* 2 */ const size_t o333 = 1 +nx +nxny; /* 1 */ #pragma omp parallel for private(i2,j2,k2,l2,lk2,l,lk) schedule(static) \ if (nxny*nz > 32*32*32) for(k2 = 1; k2 < NZ2; ++k2) { lk2 = nxny2*k2; lk = nxny*((k2<<1)-1); for(j2 = 1; j2 < NY2; ++j2) { l2 = 1 + nx2*j2 + lk2; l = 1 + nx*((j2<<1)-1) + lk; for(i2 = 1; i2 < NX2; ++i2, ++l2, l += 2) { x2[l2] = G2[l2] ? 0.015625 * ( x[l+o111] + x[l+o311] + x[l+o131] + x[l+o331] + x[l+o113] + x[l+o313] + x[l+o133] + x[l+o333] ) + 0.03125 * ( x[l+o211] + x[l+o121] + x[l+o321] + x[l+o231] + x[l+o312] + x[l+o112] + x[l+o132] + x[l+o332] + x[l+o213] + x[l+o123] + x[l+o323] + x[l+o233] ) + 0.0625 * ( x[l+o221] + x[l+o212] + x[l+o122] + x[l+o322] + x[l+o232] + x[l+o223] ) + 0.125 * x[l] : 0.0; } } } return; }
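Editorial note: restrictf and restrictd both apply the 27-point full-weighting stencil whose weights (1, 2, 4, 8)/64 over corners, edges, faces, and the center sum to exactly one, so constant fields restrict to themselves. A scalar sketch of a single coarse-point evaluation follows; restrict_point is an illustrative helper, not part of the MEX interface, and the G2 mask test of the originals is omitted.

#include <stddef.h>

/* One coarse-grid value by trilinear full weighting at an interior fine
 * index l (so all 27 neighbors exist): corners get 1/64, edges 2/64,
 * faces 4/64, and the center 8/64. nx is the fine x-dimension and
 * nxny = nx*ny, exactly as in restrictf/restrictd above. */
static double restrict_point(const double *x, size_t l, size_t nx, size_t nxny)
{
    double corners =
        x[l-1-nx-nxny] + x[l+1-nx-nxny] + x[l-1+nx-nxny] + x[l+1+nx-nxny] +
        x[l-1-nx+nxny] + x[l+1-nx+nxny] + x[l-1+nx+nxny] + x[l+1+nx+nxny];
    double edges =
        x[l-nx-nxny] + x[l-1-nxny] + x[l+1-nxny] + x[l+nx-nxny] +
        x[l-1-nx]    + x[l+1-nx]   + x[l-1+nx]   + x[l+1+nx]    +
        x[l-nx+nxny] + x[l-1+nxny] + x[l+1+nxny] + x[l+nx+nxny];
    double faces =
        x[l-nxny] + x[l-nx] + x[l-1] + x[l+1] + x[l+nx] + x[l+nxny];
    return (corners + 2.0*edges + 4.0*faces + 8.0*x[l]) / 64.0;
}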
par_strength.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix". This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric. This is because of the directional nature of the
  "strength of dependence" notion (see below). Since we are using
  nonsymmetric storage for A right now, this is not a problem. If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix. We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A. To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $-a_{ij} >= \theta \max_{l != j} \{-a_{il}\}$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength". We also retain the "natural" graph
  notation for representing the directed graph of a matrix. That is,
  the nonzero entry $a_{ij}$ is represented as: i --> j. In the strength
  matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
  and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSHost(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; HYPRE_Int *prefix_sum_workspace; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
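 *
 * Editor's worked example (not in the original source): take an interior
 * row of the 2-D Laplacian, diag = 4 with four off-diagonal entries of -1.
 * Since diag >= 0, the scale is min_{k != i} aik = -1, and an entry is
 * strong exactly when aij < strength_threshold * (-1). With
 * strength_threshold = 0.25 each -1 neighbor satisfies -1 < -0.25 and is
 * kept in S, while any entry >= -0.25 is dropped as weak.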
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_diag, memory_location); HYPRE_Int *S_temp_offd_j = NULL; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } S_offd_j = hypre_TAlloc(HYPRE_Int, num_nonzeros_offd, memory_location); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] 
== dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } /* diag >= 0*/ } /* num_functions <= 1 */ jS_diag += A_diag_i[i + 1] - A_diag_i[i] - 1; jS_offd += A_offd_i[i + 1] - A_offd_i[i]; /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } jS_diag -= A_diag_i[i + 1] - (A_diag_i[i] + 1); for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } jS_offd -= A_offd_i[i + 1] - A_offd_i[i]; } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; --jS_diag; } else { S_temp_diag_j[jA] = A_diag_j[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; --jS_offd; } else { S_temp_offd_j[jA] = A_offd_j[jA]; } } } /* diag >= 0 */ } /* num_functions <= 1 */ } /* !((row_sum > 
max_row_sum) && (max_row_sum < 1.0)) */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ for (i = start; i < stop; i++) { S_diag_i[i] += jS_diag; S_offd_i[i] += jS_offd; jS = S_diag_i[i]; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_temp_diag_j[jA] > -1) { S_diag_j[jS] = S_temp_diag_j[jA]; jS++; } } jS = S_offd_i[i]; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_temp_offd_j[jA] > -1) { S_offd_j[jS] = S_temp_offd_j[jA]; jS++; } } } /* for each variable */ } /* omp parallel */ hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables]; hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables]; hypre_CSRMatrixJ(S_diag) = S_diag_j; hypre_CSRMatrixJ(S_offd) = S_offd_j; hypre_CSRMatrixMemoryLocation(S_diag) = memory_location; hypre_CSRMatrixMemoryLocation(S_offd) = memory_location; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST); hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime(); #endif return (ierr); } /* ----------------------------------------------------------------------- */ HYPRE_Int hypre_BoomerAMGCreateS(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("CreateS"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGCreateSDevice(A,0,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr); } else #endif { ierr = hypre_BoomerAMGCreateSHost(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; } /* ----------------------------------------------------------------------- */ /* Create Strength matrix from CF marker array data. Provides a more general form to build S for specific nodes of the 'global' matrix (for example, F points or A_FF part), given the entire matrix. These nodes have the SMRK tag. Could possibly be merged with BoomerAMGCreateS() to yield a more general function. 
*/ HYPRE_Int hypre_BoomerAMGCreateSFromCFMarker(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int *CF_marker, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int SMRK, hypre_ParCSRMatrix **S_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATES] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Int *dof_func_offd = NULL; HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jj, jA, jS; HYPRE_Int num_sends, start, j, index; HYPRE_Int *int_buf_data; HYPRE_Int ierr = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *prefix_sum_workspace; HYPRE_Int my_id; /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ hypre_MPI_Comm_rank(comm, &my_id); num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_temp_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); S_diag_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST); HYPRE_Int *S_temp_offd_j = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); S_temp_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = hypre_TAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } S_offd_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd; i++) { col_map_offd_S[i] = col_map_offd_A[i]; } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); 
hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); /* give S same nonzero structure as A */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,diag,row_scale,row_sum,jA,jS) #endif { HYPRE_Int start, stop; hypre_GetSimpleThreadPartition(&start, &stop, num_variables); HYPRE_Int jS_diag = 0, jS_offd = 0; for (i = start; i < stop; i++) { if (CF_marker[i] == SMRK) { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = diag; if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[jj])) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if ((CF_marker[jj] == SMRK) && (dof_func[i] == dof_func[jj])) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if ((CF_marker_offd[jj] == SMRK) && (dof_func[i] == dof_func_offd[A_offd_j[jA]])) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_max(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_max(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { row_scale = hypre_min(row_scale, A_diag_data[jA]); row_sum += A_diag_data[jA]; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { row_scale = hypre_min(row_scale, A_offd_data[jA]); row_sum += A_offd_data[jA]; } } } /* diag >= 0*/ } /* num_functions <=1 */ /* compute row entries of S */ S_temp_diag_j[A_diag_i[i]] = -1; if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0)) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } } else { if (num_functions > 1) { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] <= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { 
S_temp_offd_j[jA] = -1; } } } /* end diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if ((A_diag_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func[jj])) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if ((A_offd_data[jA] >= strength_threshold * row_scale) || (dof_func[i] != dof_func_offd[jj])) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions > 1 */ else { if (diag < 0) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] <= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] <= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag < 0 */ else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { jj = A_diag_j[jA]; if (CF_marker[jj] == SMRK) { if (A_diag_data[jA] >= strength_threshold * row_scale) { S_temp_diag_j[jA] = -1; } else { S_temp_diag_j[jA] = jj; ++jS_diag; } } else { S_temp_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { jj = A_offd_j[jA]; if (CF_marker_offd[jj] == SMRK) { if (A_offd_data[jA] >= strength_threshold * row_scale) { S_temp_offd_j[jA] = -1; } else { S_temp_offd_j[jA] = jj; ++jS_offd; } } else { S_temp_offd_j[jA] = -1; } } } /* diag >= 0 */ } /* num_functions <=1 */ } /* !((row_sum > max_row_sum) && (max_row_sum < 1.0)) */ } /* CF_marker == SMRK */ else { S_diag_i[i] = jS_diag; if (num_cols_offd) { S_offd_i[i] = jS_offd; } for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { S_temp_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_temp_offd_j[jA] = -1; } } /* CF_marker != SMRK */ } /* for each variable */ hypre_prefix_sum_pair(&jS_diag, S_diag_i + num_variables, &jS_offd, S_offd_i + num_variables, prefix_sum_workspace); /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. 
 *----------------------------------------------------------------*/

      for (i = start; i < stop; i++)
      {
         S_diag_i[i] += jS_diag;
         S_offd_i[i] += jS_offd;

         jS = S_diag_i[i];
         for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
         {
            if (S_temp_diag_j[jA] > -1)
            {
               S_diag_j[jS] = S_temp_diag_j[jA];
               jS++;
            }
         }

         jS = S_offd_i[i];
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            if (S_temp_offd_j[jA] > -1)
            {
               S_offd_j[jS] = S_temp_offd_j[jA];
               jS++;
            }
         }
      } /* for each variable */
   } /* omp parallel */

   hypre_CSRMatrixNumNonzeros(S_diag) = S_diag_i[num_variables];
   hypre_CSRMatrixNumNonzeros(S_offd) = S_offd_i[num_variables];
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   hypre_CSRMatrixJ(S_offd) = S_offd_j;
   hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;

   hypre_ParCSRMatrixCommPkg(S) = NULL;

   *S_ptr = S;

   hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(S_temp_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_CREATES] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/*==========================================================================*/
/*==========================================================================*/
/**
  Generates strength matrix

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix". This is
  used in the coarsening and interpolation routines.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric. This is because of the directional nature of the
  "strength of dependence" notion (see below). Since we are using
  nonsymmetric storage for A right now, this is not a problem. If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix. We
  should consider the possibility of defining this matrix to have the
  same "nonzero structure" as A. To do this, we could use the same
  A\_i and A\_j arrays, and would need only define the S\_data array.
  There are several pros and cons to discuss.
  \end{itemize}

  Terminology:
  \begin{itemize}
  \item Ruge's terminology: A point is "strongly connected to" $j$, or
  "strongly depends on" $j$, if $|a_{ij}| >= \theta \max_{l != j} |a_{il}|$.
  \item Here, we retain some of this terminology, but with a more
  generalized notion of "strength". We also retain the "natural" graph
  notation for representing the directed graph of a matrix. That is,
  the nonzero entry $a_{ij}$ is represented as: i --> j. In the strength
  matrix, S, the entry $s_{ij}$ is also graphically denoted as above,
  and means both of the following:
  \begin{itemize}
  \item $i$ "depends on" $j$ with "strength" $s_{ij}$
  \item $j$ "influences" $i$ with "strength" $s_{ij}$
  \end{itemize}
  \end{itemize}

  {\bf Input files:}
  _hypre_parcsr_ls.h

  @return Error code.
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param max_row_sum [IN] parameter used to modify definition of strength for diagonal dominant matrices @param S_ptr [OUT] strength matrix @see */ /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSabsHost(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int num_cols_offd = 0; hypre_ParCSRMatrix *S; hypre_CSRMatrix *S_diag; HYPRE_Int *S_diag_i; HYPRE_Int *S_diag_j; /* HYPRE_Real *S_diag_data; */ hypre_CSRMatrix *S_offd; HYPRE_Int *S_offd_i = NULL; HYPRE_Int *S_offd_j = NULL; /* HYPRE_Real *S_offd_data; */ HYPRE_Real diag, row_scale, row_sum; HYPRE_Int i, jA, jS; HYPRE_Int ierr = 0; HYPRE_Int *dof_func_offd; HYPRE_Int num_sends; HYPRE_Int *int_buf_data; HYPRE_Int index, start, j; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); /*-------------------------------------------------------------- * Compute a ParCSR strength matrix, S. * * Absolute "strength" of dependence/influence is defined in * the following way: i depends on j if * abs(aij) > hypre_max (k != i) abs(aik) * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
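 *
 * Editor's worked example (not in the original source): for a row with
 * entries [ 4, -1, -1, 0.05 ] (diag = 4), the scale is
 * max_{k != i} |aik| = 1 over the off-diagonals. With strength_threshold
 * = 0.25, the two -1 entries are strong (|-1| > 0.25) while the 0.05
 * entry is weak (0.05 <= 0.25), so only the -1 couplings survive in S.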
*----------------------------------------------------------------*/ num_nonzeros_diag = A_diag_i[num_variables]; num_cols_offd = hypre_CSRMatrixNumCols(A_offd); A_offd_i = hypre_CSRMatrixI(A_offd); num_nonzeros_offd = A_offd_i[num_variables]; S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars, row_starts, row_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location); hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, memory_location); S_offd = hypre_ParCSRMatrixOffd(S); hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, memory_location); S_diag_i = hypre_CSRMatrixI(S_diag); S_diag_j = hypre_CSRMatrixJ(S_diag); S_offd_i = hypre_CSRMatrixI(S_offd); hypre_CSRMatrixMemoryLocation(S_diag) = memory_location; hypre_CSRMatrixMemoryLocation(S_offd) = memory_location; dof_func_offd = NULL; if (num_cols_offd) { A_offd_data = hypre_CSRMatrixData(A_offd); hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, memory_location); S_offd_j = hypre_CSRMatrixJ(S_offd); hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } } /*------------------------------------------------------------------- * Get the dof_func data for the off-processor columns *-------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_functions > 1) { int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); } /* give S same nonzero structure as A */ hypre_ParCSRMatrixCopy(A,S,0); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_variables; i++) { diag = A_diag_data[A_diag_i[i]]; /* compute scaling factor and row sum */ row_scale = 0.0; row_sum = fabs(diag); if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (dof_func[i] == dof_func[A_diag_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (dof_func[i] == dof_func_offd[A_offd_j[jA]]) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_diag_data[jA])); row_sum += fabs(A_diag_data[jA]); } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { row_scale = hypre_max(row_scale, fabs(A_offd_data[jA])); row_sum += fabs(A_offd_data[jA]); } } /* compute row entries of S */ S_diag_j[A_diag_i[i]] = -1; /* reject diag entry */ if ( fabs(row_sum) < fabs(diag)*(2.0-max_row_sum) && max_row_sum < 1.0 ) { /* make all dependencies weak */ for (jA = A_diag_i[i]+1; jA < 
A_diag_i[i+1]; jA++) { S_diag_j[jA] = -1; } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { S_offd_j[jA] = -1; } } else { if (num_functions > 1) { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func[A_diag_j[jA]]) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale || dof_func[i] != dof_func_offd[A_offd_j[jA]]) { S_offd_j[jA] = -1; } } } else { for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++) { if (fabs(A_diag_data[jA]) <= strength_threshold * row_scale) { S_diag_j[jA] = -1; } } for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (fabs(A_offd_data[jA]) <= strength_threshold * row_scale) { S_offd_j[jA] = -1; } } } } } /*-------------------------------------------------------------- * "Compress" the strength matrix. * * NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor! * * NOTE: This "compression" section of code may be removed, and * coarsening will still be done correctly. However, the routine * that builds interpolation would have to be modified first. *----------------------------------------------------------------*/ /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_diag_i[i] = jS; for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++) { if (S_diag_j[jA] > -1) { S_diag_j[jS] = S_diag_j[jA]; jS++; } } } S_diag_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_diag) = jS; /* RDF: not sure if able to thread this loop */ jS = 0; for (i = 0; i < num_variables; i++) { S_offd_i[i] = jS; for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++) { if (S_offd_j[jA] > -1) { S_offd_j[jS] = S_offd_j[jA]; jS++; } } } S_offd_i[num_variables] = jS; hypre_CSRMatrixNumNonzeros(S_offd) = jS; hypre_ParCSRMatrixCommPkg(S) = NULL; *S_ptr = S; hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); return (ierr); } HYPRE_Int hypre_BoomerAMGCreateSabs(hypre_ParCSRMatrix *A, HYPRE_Real strength_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int *dof_func, hypre_ParCSRMatrix **S_ptr) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("CreateSabs"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGCreateSDevice(A,1,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr); } else #endif { ierr = hypre_BoomerAMGCreateSabsHost(A,strength_threshold,max_row_sum,num_functions,dof_func,S_ptr); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; } /*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreateSCommPkg(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S, HYPRE_Int **col_offd_S_to_A_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Status *status; hypre_MPI_Request *requests; hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommPkg *comm_pkg_S; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_BigInt *col_map_offd_S = 
hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *recv_procs_A = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts_A = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int *send_procs_A = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int *recv_procs_S; HYPRE_Int *recv_vec_starts_S; HYPRE_Int *send_procs_S; HYPRE_Int *send_map_starts_S; HYPRE_Int *send_map_elmts_S = NULL; HYPRE_BigInt *big_send_map_elmts_S = NULL; HYPRE_Int *col_offd_S_to_A; HYPRE_Int *S_marker; HYPRE_Int *send_change; HYPRE_Int *recv_change; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_S; HYPRE_Int i, j, jcol; HYPRE_Int proc, cnt, proc_cnt, total_nz; HYPRE_BigInt first_row; HYPRE_Int ierr = 0; HYPRE_Int num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int num_recvs_A = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int num_sends_S; HYPRE_Int num_recvs_S; HYPRE_Int num_nonzeros; num_nonzeros = S_offd_i[num_variables]; S_marker = NULL; if (num_cols_offd_A) S_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_A; i++) S_marker[i] = -1; for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_marker[jcol] = 0; } proc = 0; proc_cnt = 0; cnt = 0; num_recvs_S = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (!S_marker[j]) { S_marker[j] = cnt; cnt++; proc = 1; } } if (proc) {num_recvs_S++; proc = 0;} } num_cols_offd_S = cnt; recv_change = NULL; recv_procs_S = NULL; send_change = NULL; if (col_map_offd_S) hypre_TFree(col_map_offd_S, HYPRE_MEMORY_HOST); col_map_offd_S = NULL; col_offd_S_to_A = NULL; if (num_recvs_A) recv_change = hypre_CTAlloc(HYPRE_Int, num_recvs_A, HYPRE_MEMORY_HOST); if (num_sends_A) send_change = hypre_CTAlloc(HYPRE_Int, num_sends_A, HYPRE_MEMORY_HOST); if (num_recvs_S) recv_procs_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S, HYPRE_MEMORY_HOST); recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int, num_recvs_S+1, HYPRE_MEMORY_HOST); if (num_cols_offd_S) { col_map_offd_S = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); } if (num_cols_offd_S < num_cols_offd_A) { for (i=0; i < num_nonzeros; i++) { jcol = S_offd_j[i]; S_offd_j[i] = S_marker[jcol]; } proc = 0; proc_cnt = 0; cnt = 0; recv_vec_starts_S[0] = 0; for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { if (S_marker[j] != -1) { col_map_offd_S[cnt] = col_map_offd_A[j]; col_offd_S_to_A[cnt++] = j; proc = 1; } } recv_change[i] = j-cnt-recv_vec_starts_A[i]+recv_vec_starts_S[proc_cnt]; if (proc) { recv_procs_S[proc_cnt++] = recv_procs_A[i]; recv_vec_starts_S[proc_cnt] = cnt; proc = 0; } } } else { for (i=0; i < num_recvs_A; i++) { for (j=recv_vec_starts_A[i]; j < recv_vec_starts_A[i+1]; j++) { col_map_offd_S[j] = col_map_offd_A[j]; col_offd_S_to_A[j] = j; } recv_procs_S[i] = recv_procs_A[i]; recv_vec_starts_S[i] = recv_vec_starts_A[i]; } recv_vec_starts_S[num_recvs_A] = recv_vec_starts_A[num_recvs_A]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_sends_A+num_recvs_A, HYPRE_MEMORY_HOST); j=0; for (i=0; i < num_sends_A; i++) hypre_MPI_Irecv(&send_change[i],1,HYPRE_MPI_INT,send_procs_A[i], 0,comm,&requests[j++]); for (i=0; i < num_recvs_A; i++) hypre_MPI_Isend(&recv_change[i],1,HYPRE_MPI_INT,recv_procs_A[i], 0,comm,&requests[j++]); 
status = hypre_CTAlloc(hypre_MPI_Status, j, HYPRE_MEMORY_HOST); hypre_MPI_Waitall(j,requests,status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); num_sends_S = 0; total_nz = send_map_starts_A[num_sends_A]; for (i=0; i < num_sends_A; i++) { if (send_change[i]) { if ((send_map_starts_A[i+1]-send_map_starts_A[i]) > send_change[i]) num_sends_S++; } else num_sends_S++; total_nz -= send_change[i]; } send_procs_S = NULL; if (num_sends_S) send_procs_S = hypre_CTAlloc(HYPRE_Int, num_sends_S, HYPRE_MEMORY_HOST); send_map_starts_S = hypre_CTAlloc(HYPRE_Int, num_sends_S+1, HYPRE_MEMORY_HOST); send_map_elmts_S = NULL; if (total_nz) { send_map_elmts_S = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_HOST); big_send_map_elmts_S = hypre_CTAlloc(HYPRE_BigInt, total_nz, HYPRE_MEMORY_HOST); } proc = 0; proc_cnt = 0; for (i=0; i < num_sends_A; i++) { cnt = send_map_starts_A[i+1]-send_map_starts_A[i]-send_change[i]; if (cnt) { send_procs_S[proc_cnt++] = send_procs_A[i]; send_map_starts_S[proc_cnt] = send_map_starts_S[proc_cnt-1]+cnt; } } comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_S) = comm; hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs_S; hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S; hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S; hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends_S; hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S; comm_handle = hypre_ParCSRCommHandleCreate(22, comm_pkg_S, col_map_offd_S, big_send_map_elmts_S); hypre_ParCSRCommHandleDestroy(comm_handle); first_row = hypre_ParCSRMatrixFirstRowIndex(A); if (first_row) for (i=0; i < send_map_starts_S[num_sends_S]; i++) send_map_elmts_S[i] = (HYPRE_Int)(big_send_map_elmts_S[i]-first_row); hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S; hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S; hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S; hypre_CSRMatrixNumCols(S_offd) = num_cols_offd_S; hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(send_change, HYPRE_MEMORY_HOST); hypre_TFree(recv_change, HYPRE_MEMORY_HOST); *col_offd_S_to_A_ptr = col_offd_S_to_A; return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCreate2ndS : creates strength matrix on coarse points * for second coarsening pass in aggressive coarsening (S*S+2S) *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCreate2ndSHost( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_diag_S = hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); hypre_ParCSRMatrix *S2; HYPRE_BigInt *col_map_offd_C = NULL; hypre_CSRMatrix *C_diag; /*HYPRE_Int *C_diag_data = NULL;*/ HYPRE_Int 
*C_diag_i; HYPRE_Int *C_diag_j = NULL; hypre_CSRMatrix *C_offd; /*HYPRE_Int *C_offd_data=NULL;*/ HYPRE_Int *C_offd_i; HYPRE_Int *C_offd_j=NULL; HYPRE_Int num_cols_offd_C = 0; HYPRE_Int *S_ext_diag_i = NULL; HYPRE_Int *S_ext_diag_j = NULL; HYPRE_Int S_ext_diag_size = 0; HYPRE_Int *S_ext_offd_i = NULL; HYPRE_Int *S_ext_offd_j = NULL; HYPRE_Int S_ext_offd_size = 0; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *S_marker = NULL; HYPRE_Int *S_marker_offd = NULL; //HYPRE_Int *temp = NULL; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *map_S_to_C = NULL; HYPRE_Int num_sends = 0; HYPRE_Int num_recvs = 0; HYPRE_Int *send_map_starts; HYPRE_Int *tmp_send_map_starts = NULL; HYPRE_Int *send_map_elmts; HYPRE_Int *recv_vec_starts; HYPRE_Int *tmp_recv_vec_starts = NULL; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_int_buf_data = NULL; HYPRE_BigInt *temp = NULL; HYPRE_Int i, j, k; HYPRE_Int i1, i2, i3; HYPRE_BigInt big_i1; HYPRE_Int jj1, jj2, jrow, j_cnt; /*HYPRE_Int cnt, cnt_offd, cnt_diag;*/ HYPRE_Int num_procs, my_id; HYPRE_Int index; /*HYPRE_Int value;*/ HYPRE_Int num_coarse; HYPRE_Int num_nonzeros; HYPRE_BigInt global_num_coarse; HYPRE_BigInt my_first_cpt, my_last_cpt; HYPRE_Int *S_int_i = NULL; HYPRE_BigInt *S_int_j = NULL; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ HYPRE_Int *prefix_sum_workspace; HYPRE_Int *num_coarse_prefix_sum; prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); num_coarse_prefix_sum = hypre_TAlloc(HYPRE_Int, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Extract S_ext, i.e. portion of B that is stored on neighbor procs * and needed locally for matrix matrix product *-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); my_first_cpt = coarse_row_starts[0]; my_last_cpt = coarse_row_starts[1]-1; if (my_id == (num_procs -1)) global_num_coarse = coarse_row_starts[1]; hypre_MPI_Bcast(&global_num_coarse, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); if (num_cols_offd_S) { CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_S, HYPRE_MEMORY_HOST); } HYPRE_Int *coarse_to_fine = NULL; if (num_cols_diag_S) { fine_to_coarse = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); coarse_to_fine = hypre_TAlloc(HYPRE_Int, num_cols_diag_S, HYPRE_MEMORY_HOST); } /*HYPRE_Int num_coarse_prefix_sum[hypre_NumThreads() + 1];*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int num_coarse_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_diag_S); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) num_coarse_private++; } hypre_prefix_sum(&num_coarse_private, &num_coarse, num_coarse_prefix_sum); for (i = i_begin; i < i_end; i++) { if (CF_marker[i] > 0) { fine_to_coarse[i] = num_coarse_private; coarse_to_fine[num_coarse_private] = i; num_coarse_private++; } else { fine_to_coarse[i] = -1; } } } /* omp parallel */ if (num_procs > 1) { if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = 
hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); HYPRE_Int begin = send_map_starts[0]; HYPRE_Int end = send_map_starts[num_sends]; big_int_buf_data = hypre_TAlloc(HYPRE_BigInt, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { big_int_buf_data[index - begin] = (HYPRE_BigInt)fine_to_coarse[send_map_elmts[index]] + my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); int_buf_data = hypre_TAlloc(HYPRE_Int, end, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (index = begin; index < end; index++) { int_buf_data[index - begin] = CF_marker[send_map_elmts[index]]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_int_buf_data, HYPRE_MEMORY_HOST); S_int_i = hypre_TAlloc(HYPRE_Int, end+1, HYPRE_MEMORY_HOST); S_ext_i = hypre_CTAlloc(HYPRE_Int, recv_vec_starts[num_recvs]+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * generate S_int_i by adding the number of coarse row elements of offd and diag * for the corresponding rows. S_int_i[j+1] contains the number of coarse elements * of row j (where row j is determined through send_map_elmts) *--------------------------------------------------------------------------*/ S_int_i[0] = 0; num_nonzeros = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,k) reduction(+:num_nonzeros) HYPRE_SMP_SCHEDULE #endif for (j = begin; j < end; j++) { HYPRE_Int jrow = send_map_elmts[j]; HYPRE_Int index = 0; for (k = S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) index++; } for (k = S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) index++; } S_int_i[j - begin + 1] = index; num_nonzeros += S_int_i[j - begin + 1]; } /*-------------------------------------------------------------------------- * initialize communication *--------------------------------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,&S_int_i[1],&S_ext_i[1]); if (num_nonzeros) S_int_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); tmp_send_map_starts[0] = 0; j_cnt = 0; for (i=0; i < num_sends; i++) { for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++) { jrow = send_map_elmts[j]; for (k=S_diag_i[jrow]; k < S_diag_i[jrow+1]; k++) { if (CF_marker[S_diag_j[k]] > 0) S_int_j[j_cnt++] = (HYPRE_BigInt)fine_to_coarse[S_diag_j[k]]+my_first_cpt; } for (k=S_offd_i[jrow]; k < S_offd_i[jrow+1]; k++) { if (CF_marker_offd[S_offd_j[k]] > 0) S_int_j[j_cnt++] = fine_to_coarse_offd[S_offd_j[k]]; } } tmp_send_map_starts[i+1] = j_cnt; } tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts; hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; /*-------------------------------------------------------------------------- * after communication exchange S_ext_i[j+1] contains the number of coarse elements * of a row j ! * evaluate S_ext_i and compute num_nonzeros for S_ext *--------------------------------------------------------------------------*/ for (i=0; i < recv_vec_starts[num_recvs]; i++) S_ext_i[i+1] += S_ext_i[i]; num_nonzeros = S_ext_i[recv_vec_starts[num_recvs]]; if (num_nonzeros) S_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST); tmp_recv_vec_starts[0] = 0; for (i=0; i < num_recvs; i++) tmp_recv_vec_starts[i+1] = S_ext_i[recv_vec_starts[i+1]]; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts; comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,S_int_j,S_ext_j); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); hypre_TFree(S_int_i, HYPRE_MEMORY_HOST); hypre_TFree(S_int_j, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime(); #endif #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_BigInt *S_big_offd_j = NULL; S_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_diag_i[0] = 0; S_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i[0] = 0; hypre_UnorderedBigIntSet found_set; hypre_UnorderedBigIntSetCreate(&found_set, S_ext_i[num_cols_offd_S] + num_cols_offd_S, 16*hypre_NumThreads()); #pragma omp parallel private(i,j, big_i1) { HYPRE_Int S_ext_offd_size_private = 0; HYPRE_Int S_ext_diag_size_private = 0; HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { hypre_UnorderedBigIntSetPut(&found_set, fine_to_coarse_offd[i]); } for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) { S_ext_offd_size_private++; hypre_UnorderedBigIntSetPut(&found_set, big_i1); } else S_ext_diag_size_private++; } } hypre_prefix_sum_pair( &S_ext_diag_size_private, &S_ext_diag_size, &S_ext_offd_size_private, &S_ext_offd_size, prefix_sum_workspace); #pragma omp master { if (S_ext_diag_size) S_ext_diag_j = hypre_TAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { S_ext_offd_j = hypre_TAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); S_big_offd_j = hypre_TAlloc(HYPRE_BigInt, S_ext_offd_size, HYPRE_MEMORY_HOST); } } #pragma omp barrier for (i = i_begin; i < i_end; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_big_offd_j[S_ext_offd_size_private++] = big_i1; //S_ext_offd_j[S_ext_offd_size_private++] = big_i1; else S_ext_diag_j[S_ext_diag_size_private++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[i + 1] = S_ext_diag_size_private; S_ext_offd_i[i + 1] = S_ext_offd_size_private; } } // omp parallel temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_C); hypre_UnorderedBigIntSetDestroy(&found_set); hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); hypre_UnorderedBigIntMap col_map_offd_C_inverse; hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_C, &col_map_offd_C, 
&col_map_offd_C_inverse); #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, S_big_offd_j[i]); //S_ext_offd_j[i] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, S_ext_offd_j[i]); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); hypre_TFree(S_big_offd_j, HYPRE_MEMORY_HOST); if (num_cols_offd_C) hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse); #else /* !HYPRE_CONCURRENT_HOPSCOTCH */ HYPRE_Int cnt_offd, cnt_diag, cnt, value; S_ext_diag_size = 0; S_ext_offd_size = 0; for (i=0; i < num_cols_offd_S; i++) { for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { if (S_ext_j[j] < my_first_cpt || S_ext_j[j] > my_last_cpt) S_ext_offd_size++; else S_ext_diag_size++; } } S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S+1, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST); } if (S_ext_offd_size) { S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST); } cnt_offd = 0; cnt_diag = 0; cnt = 0; HYPRE_Int num_coarse_offd = 0; for (i=0; i < num_cols_offd_S; i++) { if (CF_marker_offd[i] > 0) num_coarse_offd++; for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++) { big_i1 = S_ext_j[j]; if (big_i1 < my_first_cpt || big_i1 > my_last_cpt) S_ext_j[cnt_offd++] = big_i1; else S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(big_i1 - my_first_cpt); } S_ext_diag_i[++cnt] = cnt_diag; S_ext_offd_i[cnt] = cnt_offd; } hypre_TFree(S_ext_i, HYPRE_MEMORY_HOST); cnt = 0; if (S_ext_offd_size || num_coarse_offd) { temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_coarse_offd, HYPRE_MEMORY_HOST); for (i=0; i < S_ext_offd_size; i++) temp[i] = S_ext_j[i]; cnt = S_ext_offd_size; for (i=0; i < num_cols_offd_S; i++) if (CF_marker_offd[i] > 0) temp[cnt++] = fine_to_coarse_offd[i]; } if (cnt) { hypre_BigQsort0(temp, 0, cnt-1); num_cols_offd_C = 1; value = temp[0]; for (i=1; i < cnt; i++) { if (temp[i] > value) { value = temp[i]; temp[num_cols_offd_C++] = value; } } } if (num_cols_offd_C) col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd_C; i++) col_map_offd_C[i] = temp[i]; if (S_ext_offd_size || num_coarse_offd) hypre_TFree(temp, HYPRE_MEMORY_HOST); for (i=0 ; i < S_ext_offd_size; i++) S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_C, S_ext_j[i], num_cols_offd_C); hypre_TFree(S_ext_j, HYPRE_MEMORY_HOST); #endif /* !HYPRE_CONCURRENT_HOPSCOTCH */ if (num_cols_offd_S) { map_S_to_C = hypre_TAlloc(HYPRE_Int, num_cols_offd_S, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i) #endif { HYPRE_Int i_begin, i_end; hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_S); HYPRE_BigInt cnt = 0; for (i = i_begin; i < i_end; i++) { if (CF_marker_offd[i] > 0) { cnt = hypre_BigLowerBound(col_map_offd_C + cnt, col_map_offd_C + num_cols_offd_C, fine_to_coarse_offd[i]) - col_map_offd_C; map_S_to_C[i] = cnt++; } else map_S_to_C[i] = -1; } } /* omp parallel */ } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime(); #endif } /* num_procs > 1 */ /*----------------------------------------------------------------------- * Allocate and initialize some stuff. 
*-----------------------------------------------------------------------*/ HYPRE_Int *S_marker_array = NULL, *S_marker_offd_array = NULL; if (num_coarse) S_marker_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); if (num_cols_offd_C) S_marker_offd_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); HYPRE_Int *C_temp_offd_j_array = NULL; HYPRE_Int *C_temp_diag_j_array = NULL; HYPRE_Int *C_temp_offd_data_array = NULL; HYPRE_Int *C_temp_diag_data_array = NULL; if (num_paths > 1) { C_temp_diag_j_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_diag_data_array = hypre_TAlloc(HYPRE_Int, num_coarse*hypre_NumThreads(), HYPRE_MEMORY_HOST); C_temp_offd_data_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_C*hypre_NumThreads(), HYPRE_MEMORY_HOST); } C_diag_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); C_offd_i = hypre_CTAlloc(HYPRE_Int, num_coarse+1, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Loop over rows of S *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i1,i2,i3,jj1,jj2,index) #endif { HYPRE_Int my_thread_num = hypre_GetThreadNum(); HYPRE_Int i1_begin, i1_end; hypre_GetSimpleThreadPartition(&i1_begin, &i1_end, num_cols_diag_S); HYPRE_Int *C_temp_diag_j = NULL, *C_temp_offd_j = NULL; HYPRE_Int *C_temp_diag_data = NULL, *C_temp_offd_data = NULL; if (num_paths > 1) { C_temp_diag_j = C_temp_diag_j_array + num_coarse*my_thread_num; C_temp_offd_j = C_temp_offd_j_array + num_cols_offd_C*my_thread_num; C_temp_diag_data = C_temp_diag_data_array + num_coarse*my_thread_num; C_temp_offd_data = C_temp_offd_data_array + num_cols_offd_C*my_thread_num; } HYPRE_Int *S_marker = NULL, *S_marker_offd = NULL; if (num_coarse) S_marker = S_marker_array + num_coarse*my_thread_num; if (num_cols_offd_C) S_marker_offd = S_marker_offd_array + num_cols_offd_C*my_thread_num; for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } // These two counters are for before filtering by num_paths HYPRE_Int jj_count_diag = 0; HYPRE_Int jj_count_offd = 0; // These two counters are for after filtering by num_paths HYPRE_Int num_nonzeros_diag = 0; HYPRE_Int num_nonzeros_offd = 0; HYPRE_Int ic_begin = num_coarse_prefix_sum[my_thread_num]; HYPRE_Int ic_end = num_coarse_prefix_sum[my_thread_num + 1]; HYPRE_Int ic; if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; C_diag_i[ic] = num_nonzeros_diag; if (num_cols_offd_C) { C_offd_i[ic] = num_nonzeros_offd; } for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { ++num_nonzeros_diag; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { ++num_nonzeros_offd; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ hypre_prefix_sum_pair( &num_nonzeros_diag, &C_diag_i[num_coarse], &num_nonzeros_offd, &C_offd_i[num_coarse], prefix_sum_workspace); for (i1 = 0; i1 < num_coarse; i1++) { S_marker[i1] = -1; } for (i1 = 0; i1 < num_cols_offd_C; i1++) { S_marker_offd[i1] = -1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #pragma omp master #endif { if (C_diag_i[num_coarse]) { C_diag_j = hypre_TAlloc(HYPRE_Int, C_diag_i[num_coarse], HYPRE_MEMORY_HOST); } if (C_offd_i[num_coarse]) { C_offd_j = hypre_TAlloc(HYPRE_Int, C_offd_i[num_coarse], HYPRE_MEMORY_HOST); } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (ic = ic_begin; ic < ic_end - 1; ic++) { if (C_diag_i[ic+1] == C_diag_i[ic] && C_offd_i[ic+1] == C_offd_i[ic]) 
CF_marker[coarse_to_fine[ic]] = 2; C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; } if (ic_begin < ic_end) { C_diag_i[ic] += num_nonzeros_diag; C_offd_i[ic] += num_nonzeros_offd; HYPRE_Int next_C_diag_i = prefix_sum_workspace[2*(my_thread_num + 1)]; HYPRE_Int next_C_offd_i = prefix_sum_workspace[2*(my_thread_num + 1) + 1]; if (next_C_diag_i == C_diag_i[ic] && next_C_offd_i == C_offd_i[ic]) CF_marker[coarse_to_fine[ic]] = 2; } if (num_paths == 1) { for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). *--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = num_nonzeros_diag; HYPRE_Int jj_row_begin_offd = num_nonzeros_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0) { index = fine_to_coarse[i3]; if (index != ic && S_marker[index] < jj_row_begin_diag) { S_marker[index] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = index; num_nonzeros_diag++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = index; num_nonzeros_offd++; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic && S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = num_nonzeros_diag; C_diag_j[num_nonzeros_diag] = i3; num_nonzeros_diag++; } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = num_nonzeros_offd; C_offd_j[num_nonzeros_offd] = i3; num_nonzeros_offd++; } } } } /* for each row */ } /* num_paths == 1 */ else { jj_count_diag = num_nonzeros_diag; jj_count_offd = num_nonzeros_offd; for (ic = ic_begin; ic < ic_end; ic++) { /*-------------------------------------------------------------------- * Set marker for diagonal entry, C_{i1,i1} (for square matrices). 
*--------------------------------------------------------------------*/ i1 = coarse_to_fine[ic]; HYPRE_Int jj_row_begin_diag = jj_count_diag; HYPRE_Int jj_row_begin_offd = jj_count_offd; for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (CF_marker[i2] > 0) { index = fine_to_coarse[i2]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 2; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag] += 2; } } for (jj2 = S_diag_i[i2]; jj2 < S_diag_i[i2+1]; jj2++) { i3 = S_diag_j[jj2]; if (CF_marker[i3] > 0 && fine_to_coarse[i3] != ic) { index = fine_to_coarse[i3]; if (S_marker[index] < jj_row_begin_diag) { S_marker[index] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = index; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[index] - jj_row_begin_diag]++; } } } for (jj2 = S_offd_i[i2]; jj2 < S_offd_i[i2+1]; jj2++) { i3 = S_offd_j[jj2]; if (CF_marker_offd[i3] > 0) { index = map_S_to_C[i3]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd]++; } } } } for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (CF_marker_offd[i2] > 0) { index = map_S_to_C[i2]; if (S_marker_offd[index] < jj_row_begin_offd) { S_marker_offd[index] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = index; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 2; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[index] - jj_row_begin_offd] += 2; } } for (jj2 = S_ext_diag_i[i2]; jj2 < S_ext_diag_i[i2+1]; jj2++) { i3 = S_ext_diag_j[jj2]; if (i3 != ic) { if (S_marker[i3] < jj_row_begin_diag) { S_marker[i3] = jj_count_diag; C_temp_diag_j[jj_count_diag - jj_row_begin_diag] = i3; C_temp_diag_data[jj_count_diag - jj_row_begin_diag] = 1; jj_count_diag++; } else { C_temp_diag_data[S_marker[i3] - jj_row_begin_diag]++; } } } for (jj2 = S_ext_offd_i[i2]; jj2 < S_ext_offd_i[i2+1]; jj2++) { i3 = S_ext_offd_j[jj2]; if (S_marker_offd[i3] < jj_row_begin_offd) { S_marker_offd[i3] = jj_count_offd; C_temp_offd_j[jj_count_offd - jj_row_begin_offd] = i3; C_temp_offd_data[jj_count_offd - jj_row_begin_offd] = 1; jj_count_offd++; } else { C_temp_offd_data[S_marker_offd[i3] - jj_row_begin_offd]++; } } } for (jj1 = jj_row_begin_diag; jj1 < jj_count_diag; jj1++) { if (C_temp_diag_data[jj1 - jj_row_begin_diag] >= num_paths) { C_diag_j[num_nonzeros_diag++] = C_temp_diag_j[jj1 - jj_row_begin_diag]; } C_temp_diag_data[jj1 - jj_row_begin_diag] = 0; } for (jj1 = jj_row_begin_offd; jj1 < jj_count_offd; jj1++) { if (C_temp_offd_data[jj1 - jj_row_begin_offd] >= num_paths) { C_offd_j[num_nonzeros_offd++] = C_temp_offd_j[jj1 - jj_row_begin_offd]; } C_temp_offd_data[jj1 - jj_row_begin_offd] = 0; } } /* for each row */ } /* num_paths > 1 */ } /* omp parallel */ S2 = hypre_ParCSRMatrixCreate(comm, global_num_coarse, global_num_coarse, coarse_row_starts, coarse_row_starts, num_cols_offd_C, C_diag_i[num_coarse], C_offd_i[num_coarse]); C_diag = hypre_ParCSRMatrixDiag(S2); hypre_CSRMatrixI(C_diag) = C_diag_i; if (C_diag_i[num_coarse]) hypre_CSRMatrixJ(C_diag) = C_diag_j; C_offd = hypre_ParCSRMatrixOffd(S2); 
hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_ParCSRMatrixOffd(S2) = C_offd; if (num_cols_offd_C) { if (C_offd_i[num_coarse]) hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_ParCSRMatrixColMapOffd(S2) = col_map_offd_C; } /*----------------------------------------------------------------------- * Free various arrays *-----------------------------------------------------------------------*/ hypre_TFree(C_temp_diag_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_diag_data_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_j_array, HYPRE_MEMORY_HOST); hypre_TFree(C_temp_offd_data_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd_array, HYPRE_MEMORY_HOST); hypre_TFree(S_marker, HYPRE_MEMORY_HOST); hypre_TFree(S_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_to_fine, HYPRE_MEMORY_HOST); if (S_ext_diag_size) { hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST); } hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST); if (S_ext_offd_size) { hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST); } if (num_cols_offd_S) { hypre_TFree(map_S_to_C, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); } hypre_CSRMatrixMemoryLocation(C_diag) = HYPRE_MEMORY_HOST; hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_HOST; *C_ptr = S2; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_CREATE_2NDS] += hypre_MPI_Wtime(); #endif hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST); hypre_TFree(num_coarse_prefix_sum, HYPRE_MEMORY_HOST); return 0; } //----------------------------------------------------------------------- HYPRE_Int hypre_BoomerAMGCreate2ndS( hypre_ParCSRMatrix *S, HYPRE_Int *CF_marker, HYPRE_Int num_paths, HYPRE_BigInt *coarse_row_starts, hypre_ParCSRMatrix **C_ptr) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("Create2ndS"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(S) ); if (exec == HYPRE_EXEC_DEVICE) { ierr = hypre_BoomerAMGCreate2ndSDevice( S, CF_marker, num_paths, coarse_row_starts, C_ptr ); } else #endif { ierr = hypre_BoomerAMGCreate2ndSHost( S, CF_marker, num_paths, coarse_row_starts, C_ptr ); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarkerHost(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker) { HYPRE_Int i, cnt; cnt = 0; for (i=0; i < hypre_IntArraySize(CF_marker); i++) { if (hypre_IntArrayData(CF_marker)[i] > 0 ) { if (hypre_IntArrayData(CF_marker)[i] == 1) { hypre_IntArrayData(CF_marker)[i] = hypre_IntArrayData(new_CF_marker)[cnt++]; } else { hypre_IntArrayData(CF_marker)[i] = 1; cnt++; } } } return 0; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. 
coarsening, * but marks new F-points (previous C-points) as -2 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarker2Host(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker) { HYPRE_Int i, cnt; cnt = 0; for (i=0; i < hypre_IntArraySize(CF_marker); i++) { if (hypre_IntArrayData(CF_marker)[i] > 0 ) { if (hypre_IntArrayData(new_CF_marker)[cnt] == -1) { hypre_IntArrayData(CF_marker)[i] = -2; } else { hypre_IntArrayData(CF_marker)[i] = 1; } cnt++; } } return 0; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker : corrects CF_marker after aggr. coarsening *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarker(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("CorrectCFMarker"); #endif #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_IntArrayMemoryLocation(CF_marker), hypre_IntArrayMemoryLocation(new_CF_marker)); if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGCorrectCFMarkerDevice(CF_marker, new_CF_marker); } else #endif { hypre_BoomerAMGCorrectCFMarkerHost(CF_marker, new_CF_marker); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_BoomerAMGCorrectCFMarker2 : corrects CF_marker after aggr. coarsening, * but marks new F-points (previous C-points) as -2 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGCorrectCFMarker2(hypre_IntArray *CF_marker, hypre_IntArray *new_CF_marker) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("CorrectCFMarker2"); #endif #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_IntArrayMemoryLocation(CF_marker), hypre_IntArrayMemoryLocation(new_CF_marker)); if (exec == HYPRE_EXEC_DEVICE) { hypre_BoomerAMGCorrectCFMarker2Device(CF_marker, new_CF_marker); } else #endif { hypre_BoomerAMGCorrectCFMarker2Host(CF_marker, new_CF_marker); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return hypre_error_flag; }
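/*--------------------------------------------------------------------------
 * Illustrative sketch, not part of hypre: the strength test described in
 * the doc comment above, |a_ij| >= theta * max_{l != j} |a_il|, applied to
 * a single dense row in isolation. The names row_strength_demo, vals, diag
 * and strong are hypothetical; the routines above operate on the diag/offd
 * split of a hypre_ParCSRMatrix and mark weak entries by overwriting their
 * column indices with -1 instead of filling a flag array.
 *--------------------------------------------------------------------------*/
#include <math.h>

static void row_strength_demo(const double *vals, int n, int diag,
                              double theta, int *strong)
{
   /* row_scale = max |a_il| over the off-diagonal entries of the row */
   double row_scale = 0.0;
   int j;
   for (j = 0; j < n; j++)
   {
      if (j != diag)
      {
         row_scale = fmax(row_scale, fabs(vals[j]));
      }
   }
   /* i depends on j iff |a_ij| > theta * row_scale (theta plays the role
      of strength_threshold); the diagonal is never a dependency, mirroring
      S_diag_j[A_diag_i[i]] = -1 above */
   for (j = 0; j < n; j++)
   {
      strong[j] = (j != diag) && (fabs(vals[j]) > theta * row_scale);
   }
}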
ideal_gas_kernel_c.c
/*Crown Copyright 2012 AWE. * * This file is part of CloverLeaf. * * CloverLeaf is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your option) * any later version. * * CloverLeaf is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * CloverLeaf. If not, see http://www.gnu.org/licenses/. */ /** * @brief C ideal gas kernel. * @author Wayne Gaudin * @details Calculates the pressure and sound speed for the mesh chunk using * the ideal gas equation of state, with a fixed gamma of 1.4. */ #include <stdio.h> #include <stdlib.h> #include "ftocmacros.h" #include <math.h> void ideal_gas_kernel_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *density, double *energy, double *pressure, double *soundspeed) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int j,k; double sound_speed_squared,v,pressurebyenergy,pressurebyvolume; #pragma omp parallel private(j) { #pragma omp for private(v,pressurebyenergy,pressurebyvolume,sound_speed_squared) for (k=y_min;k<=y_max;k++) { #pragma ivdep for (j=x_min;j<=x_max;j++) { v=1.0/density[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]; pressure[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]=(1.4-1.0)*density[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)] *energy[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]; pressurebyenergy=(1.4-1.0)*density[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]; pressurebyvolume=-density[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]*pressure[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]; sound_speed_squared=v*v*(pressure[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]*pressurebyenergy-pressurebyvolume); soundspeed[FTNREF2D(j ,k ,x_max+4,x_min-2,y_min-2)]=sqrt(sound_speed_squared); } } } }
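/*
 * Illustrative sketch, not part of CloverLeaf: the same update for a single
 * cell, written with scalar variables. The helper name ideal_gas_cell and
 * its signature are hypothetical. With p = (gamma-1)*rho*e one can check
 * that the kernel's expression v*v*(p*dp/de - dp/dv) reduces to the
 * textbook sound speed c^2 = gamma*p/rho.
 */
static void ideal_gas_cell(double rho, double e, double gamma,
                           double *p, double *soundspeed)
{
   double v = 1.0 / rho;                /* specific volume */
   double dpde, dpdv, c2;
   *p   = (gamma - 1.0) * rho * e;      /* ideal gas equation of state */
   dpde = (gamma - 1.0) * rho;          /* dp/de at fixed volume */
   dpdv = -rho * (*p);                  /* dp/dv along the EOS */
   c2   = v * v * ((*p) * dpde - dpdv); /* equals gamma * (*p) / rho */
   *soundspeed = sqrt(c2);
}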
golOpenMP.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <mpi.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> #define DIM 960 #define TESTFILE "../TestFiles/960.txt" #define BUFFER_SIZE 1024 #define NUMBER_OF_LOOPS 150 int main(int argc, char* argv[]) { int comm_sz; /* Num of procs */ int my_rank; /* Rank of current proc */ int block_dim; /* char buffer[BUFFER_SIZE];*/ int i; MPI_Init(NULL, NULL); MPI_Comm_size(MPI_COMM_WORLD, &comm_sz); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); /*Create cartesian topology*/ MPI_Comm new_comm; int dim_size[2],periods[2]; dim_size[0] = sqrt(comm_sz); dim_size[1] = sqrt(comm_sz); periods[0] = 1; //1 for periodic rows periods[1] = 1; //1 for periodic columns MPI_Cart_create(MPI_COMM_WORLD,2,dim_size,periods,1,&new_comm); //Create the cartesian (grid) topology /*Dimension of the process arrays including extra rows and columns*/ block_dim = DIM/sqrt(comm_sz) + 2; char **a, **b; /* Allocate arrays */ a = malloc(block_dim*sizeof(char*)); b = malloc(block_dim*sizeof(char*)); a[0] = malloc(block_dim*block_dim*sizeof(char)); b[0] = malloc(block_dim*block_dim*sizeof(char)); for(i=1; i<block_dim; i++) { a[i] = &(a[0][i*block_dim]); b[i] = &(b[0][i*block_dim]); } int t; for(i = 0; i < block_dim;i++){ for(t = 0; t < block_dim;t++){ b[i][t] = '2'; } } /*Find the corresponding cartesian coordinates of the process*/ int coords[2]; MPI_Cart_coords(new_comm,my_rank,2,coords); MPI_Datatype mysubarray; int array_of_sizes[2]; int array_of_subsizes[2]; int starts[2]; array_of_sizes[0] = DIM; array_of_sizes[1] = DIM; array_of_subsizes[0] = block_dim-2; array_of_subsizes[1] = block_dim-2; starts[0] = coords[0] * (block_dim-2); starts[1] = coords[1] * (block_dim-2); MPI_Type_create_subarray(2,array_of_sizes,array_of_subsizes,starts,MPI_ORDER_C,MPI_CHAR,&mysubarray); MPI_Type_commit(&mysubarray); MPI_File fh; MPI_File_open(new_comm, TESTFILE, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh); MPI_File_set_view(fh,0,MPI_CHAR,mysubarray,"native",MPI_INFO_NULL); MPI_Request* requ; requ = malloc((block_dim-2)*sizeof(MPI_Request)); /* Read the file row by row (only the rows this process owns, i.e. the rows it actually sees) */ for(i = 1; i < block_dim - 1; i++) MPI_File_iread(fh, &a[i][1], block_dim-2, MPI_CHAR, &requ[i-1]); MPI_File_close(&fh); /* Find the neighbours */ int N,S,W,E,NW,NE,SW,SE; int neigh_coords[2]; neigh_coords[0] = coords[0] - 1; neigh_coords[1] = coords[1]; MPI_Cart_rank(new_comm,neigh_coords,&N); neigh_coords[0] = coords[0] + 1; neigh_coords[1] = coords[1]; MPI_Cart_rank(new_comm,neigh_coords,&S); neigh_coords[0] = coords[0]; neigh_coords[1] = coords[1] + 1; MPI_Cart_rank(new_comm,neigh_coords,&E); neigh_coords[0] = coords[0]; neigh_coords[1] = coords[1] - 1; MPI_Cart_rank(new_comm,neigh_coords,&W); neigh_coords[0] = coords[0] - 1; neigh_coords[1] = coords[1] + 1; MPI_Cart_rank(new_comm,neigh_coords,&NE); neigh_coords[0] = coords[0] - 1; neigh_coords[1] = coords[1] - 1; MPI_Cart_rank(new_comm,neigh_coords,&NW); neigh_coords[0] = coords[0] + 1; neigh_coords[1] = coords[1] + 1; MPI_Cart_rank(new_comm,neigh_coords,&SE); neigh_coords[0] = coords[0] + 1; neigh_coords[1] = coords[1] - 1; MPI_Cart_rank(new_comm,neigh_coords,&SW); /*Data types for rows and columns to send*/ MPI_Datatype row,col; MPI_Type_contiguous(block_dim-2,MPI_CHAR,&row); MPI_Type_commit(&row); MPI_Type_vector(block_dim-2,1,block_dim,MPI_CHAR,&col); MPI_Type_commit(&col); MPI_Request r[16]; MPI_Status stats[16]; MPI_Send_init(&a[1][1],1,row,N,0,new_comm,&r[0]);
MPI_Send_init(&a[block_dim-2][1],1,row,S,1,new_comm,&r[1]); MPI_Send_init(&a[1][block_dim-2],1,col,E,2,new_comm,&r[2]); MPI_Send_init(&a[1][1],1,col,W,3,new_comm,&r[3]); MPI_Send_init(&a[1][block_dim-2],1,MPI_CHAR,NE,4,new_comm,&r[4]); MPI_Send_init(&a[1][1],1,MPI_CHAR,NW,5,new_comm,&r[5]); MPI_Send_init(&a[block_dim-2][block_dim-2],1,MPI_CHAR,SE,6,new_comm,&r[6]); MPI_Send_init(&a[block_dim-2][1],1,MPI_CHAR,SW,7,new_comm,&r[7]); MPI_Recv_init(&a[block_dim-1][1],1,row,S,0,new_comm,&r[8]); MPI_Recv_init(&a[0][1],1,row,N,1,new_comm,&r[9]); MPI_Recv_init(&a[1][0],1,col,W,2,new_comm,&r[10]); MPI_Recv_init(&a[1][block_dim-1],1,col,E,3,new_comm,&r[11]); MPI_Recv_init(&a[block_dim-1][0],1,MPI_CHAR,SW,4,new_comm,&r[12]); MPI_Recv_init(&a[block_dim-1][block_dim-1],1,MPI_CHAR,SE,5,new_comm,&r[13]); MPI_Recv_init(&a[0][0],1,MPI_CHAR,NW,6,new_comm,&r[14]); MPI_Recv_init(&a[0][block_dim-1],1,MPI_CHAR,NE,7,new_comm,&r[15]); MPI_Request r2[16]; MPI_Status stats2[16]; MPI_Send_init(&b[1][1],1,row,N,0,new_comm,&r2[0]); MPI_Send_init(&b[block_dim-2][1],1,row,S,1,new_comm,&r2[1]); MPI_Send_init(&b[1][block_dim-2],1,col,E,2,new_comm,&r2[2]); MPI_Send_init(&b[1][1],1,col,W,3,new_comm,&r2[3]); MPI_Send_init(&b[1][block_dim-2],1,MPI_CHAR,NE,4,new_comm,&r2[4]); MPI_Send_init(&b[1][1],1,MPI_CHAR,NW,5,new_comm,&r2[5]); MPI_Send_init(&b[block_dim-2][block_dim-2],1,MPI_CHAR,SE,6,new_comm,&r2[6]); MPI_Send_init(&b[block_dim-2][1],1,MPI_CHAR,SW,7,new_comm,&r2[7]); MPI_Recv_init(&b[block_dim-1][1],1,row,S,0,new_comm,&r2[8]); MPI_Recv_init(&b[0][1],1,row,N,1,new_comm,&r2[9]); MPI_Recv_init(&b[1][0],1,col,W,2,new_comm,&r2[10]); MPI_Recv_init(&b[1][block_dim-1],1,col,E,3,new_comm,&r2[11]); MPI_Recv_init(&b[block_dim-1][0],1,MPI_CHAR,SW,4,new_comm,&r2[12]); MPI_Recv_init(&b[block_dim-1][block_dim-1],1,MPI_CHAR,SE,5,new_comm,&r2[13]); MPI_Recv_init(&b[0][0],1,MPI_CHAR,NW,6,new_comm,&r2[14]); MPI_Recv_init(&b[0][block_dim-1],1,MPI_CHAR,NE,7,new_comm,&r2[15]); /*if(my_rank == 0){ struct stat st = {0}; if (stat("./generations", &st) == -1) mkdir("./generations", 0700); }*/ MPI_Waitall(block_dim-2,requ,MPI_STATUSES_IGNORE); /*Wait for all file reads to complete*/ free(requ); int j,z/*,n*/; int neighbors; /*int local_sum[2]; int global_sum[2];*/ char **c; double local_start,local_elapsed,elapsed; /*local_sum[0] = 0; local_sum[1] = 0; int local_sum1,local_sum0;*/ MPI_Barrier(new_comm); local_start = MPI_Wtime(); /*Start the iterations*/ for(i=0; i<NUMBER_OF_LOOPS;i++){ /*char file_name[100]; sprintf(file_name, "./generations/%dGeneration.txt",i+1);*/ /*Create a file with the output of each iteration*/ /*if(my_rank == 0){ MPI_File gen; MPI_File_open(MPI_COMM_SELF, file_name, MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &gen); MPI_File_close(&gen); }*/ if((i%2) == 0) MPI_Startall(16,r); else MPI_Startall(16,r2); /*local_sum[0] = 0;*/ /*Used to check whether the state of any cell changed*/ /* local_sum[1] = 0;*/ /*Number of cells in the next state*/ /*local_sum1 = 0; local_sum0 = 0; int local_sum00; int local_sum11;*/ /*Compute internal elements*/ #pragma omp parallel for schedule(static,1) num_threads(8) private(z/*,local_sum11,local_sum00,*/,neighbors) /*reduction(+:local_sum1) reduction(+:local_sum0)*/ for(j = 2; j<block_dim-2;j++) { /*local_sum11 = 0; local_sum00 = 0;*/ for(z = 2; z<block_dim-2;z++) { neighbors = 0; neighbors += (a[j-1][z-1]-'0') + (a[j-1][z]-'0') + (a[j-1][z+1]-'0') + (a[j][z+1]-'0') + (a[j+1][z+1]-'0') + (a[j+1][z]-'0') + (a[j+1][z-1]-'0') + (a[j][z-1]-'0'); if((a[j][z]
== '0') && (neighbors == 3)) { b[j][z] = '1'; /*local_sum00 = 1; local_sum11++;*/ } else if(a[j][z] == '1') { if(neighbors < 2){ b[j][z] = '0'; /*local_sum00 = 1;*/ } else if(neighbors < 4) { b[j][z] = '1'; /*local_sum11++;*/ } else{ b[j][z] = '0'; /*local_sum00 = 1;*/ } } else b[j][z] = '0'; } /*local_sum1 += local_sum11; local_sum0 += local_sum00;*/ } /*Wait for the sends/receives to end*/ if((i%2) == 0) MPI_Waitall(16,r,stats); else MPI_Waitall(16,r2,stats2); /*Compute the external elements*/ #pragma omp parallel num_threads(8) private(/*local_sum11,local_sum00,*/neighbors) /*reduction(+:local_sum1) reduction(+:local_sum0)*/ { /*local_sum11 = 0; local_sum00 = 0;*/ /*West column*/ #pragma omp for schedule(static,1) for(j = 1;j < block_dim-1;j++){ neighbors = 0; neighbors += (a[j-1][0]-'0') + (a[j-1][1]-'0') + (a[j-1][2]-'0') + (a[j][2]-'0') + (a[j+1][2]-'0') + (a[j+1][1]-'0') + (a[j+1][0]-'0') + (a[j][0]-'0'); if((a[j][1] == '0') && (neighbors == 3)){ b[j][1] = '1'; /*local_sum00 = 1; local_sum11++;*/ } else if(a[j][1] == '1'){ if(neighbors < 2){ b[j][1] = '0'; /*local_sum00 = 1;*/ } else if(neighbors < 4){ b[j][1] = '1'; /*local_sum11++;*/ } else{ b[j][1] = '0'; /*local_sum00 = 1;*/ } } else b[j][1] = '0'; } /*East column*/ #pragma omp for schedule(static,1) for(j = 1;j < block_dim-1;j++){ neighbors = 0; neighbors += (a[j-1][block_dim-3]-'0') + (a[j-1][block_dim-2]-'0') + (a[j-1][block_dim-1]-'0') + (a[j][block_dim-1]-'0') + (a[j+1][block_dim-1]-'0') + (a[j+1][block_dim-2]-'0') + (a[j+1][block_dim-3]-'0') + (a[j][block_dim-3]-'0'); if((a[j][block_dim-2] == '0') && (neighbors == 3)){ b[j][block_dim-2] = '1'; /*local_sum00 = 1; local_sum11++;*/ } else if(a[j][block_dim-2] == '1'){ if(neighbors < 2){ b[j][block_dim-2] = '0'; /*local_sum00 = 1;*/ } else if(neighbors < 4){ b[j][block_dim-2] = '1'; /*local_sum11++;*/ } else{ b[j][block_dim-2] = '0'; /*local_sum00 = 1;*/ } } else b[j][block_dim-2] = '0'; } /*North row*/ #pragma omp for schedule(static,1) for(j = 2;j < block_dim-2;j++){ neighbors = 0; neighbors += (a[0][j-1]-'0') + (a[1][j-1]-'0') + (a[2][j-1]-'0') + (a[2][j]-'0') + (a[2][j+1]-'0') + (a[1][j+1]-'0') + (a[0][j+1]-'0') + (a[0][j]-'0'); if((a[1][j] == '0') && (neighbors == 3)){ b[1][j] = '1'; /*local_sum00 = 1; local_sum11++;*/ } else if(a[1][j] == '1'){ if(neighbors < 2){ b[1][j] = '0'; /*local_sum00 = 1;*/ } else if(neighbors < 4){ b[1][j] = '1'; /*local_sum11++;*/ } else{ b[1][j] = '0'; /*local_sum00 = 1;*/ } } else b[1][j] = '0'; } /*South row*/ #pragma omp for schedule(static,1) for(j = 2;j < block_dim-2;j++){ neighbors = 0; neighbors += (a[block_dim-3][j-1]-'0') + (a[block_dim-2][j-1]-'0') + (a[block_dim-1][j-1]-'0') + (a[block_dim-1][j]-'0') + (a[block_dim-1][j+1]-'0') + (a[block_dim-2][j+1]-'0') + (a[block_dim-3][j+1]-'0') + (a[block_dim-3][j]-'0'); if((a[block_dim-2][j] == '0') && (neighbors == 3)){ b[block_dim-2][j] = '1'; /*local_sum00 = 1; local_sum11++;*/ } else if(a[block_dim-2][j] == '1'){ if(neighbors < 2){ b[block_dim-2][j] = '0'; /*local_sum00 = 1;*/ } else if(neighbors < 4){ b[block_dim-2][j] = '1'; /*local_sum11++;*/ } else{ b[block_dim-2][j] = '0'; /*local_sum00 = 1;*/ } } else b[block_dim-2][j] = '0'; } /*local_sum1 += local_sum11; local_sum0 += local_sum00;*/ } /*local_sum[0] = local_sum0; local_sum[1] = local_sum1;*/ /*Write the next generation array to a file*/ /*MPI_File ge; MPI_Status ge_status; MPI_File_open(new_comm,file_name, MPI_MODE_WRONLY, MPI_INFO_NULL, &ge); MPI_File_set_view(ge,0,MPI_CHAR,mysubarray,"native",MPI_INFO_NULL); for(j = 1; j <
block_dim - 1; j++) MPI_File_write_all(ge, &b[j][1], block_dim-2, MPI_CHAR, &ge_status); MPI_File_close(&ge);*/ /*Check if there is no change in the grid or if there are no cells left alive*/ /*if((i%10) == 0){ MPI_Allreduce(local_sum,global_sum,2,MPI_INT,MPI_SUM,new_comm); if((global_sum[0] == 0) || (global_sum[1] == 0)) break; }*/ c = a; a = b; b = c; } local_elapsed = MPI_Wtime() - local_start; MPI_Reduce(&local_elapsed,&elapsed,1,MPI_DOUBLE,MPI_MAX,0,new_comm); if(i < NUMBER_OF_LOOPS) i++; /*If loop was stopped by "break"*/ if (my_rank == 0) { printf( "Elapsed time: %f seconds\n",elapsed); /*for(n = 0; n < i; n++) { sprintf(buffer, "./generations/%dGeneration.txt",n+1); char buf[100]; sprintf(buf, "./generations/%dGen.txt",n+1); char command[150]; sprintf(command,"../SerialToVertical/ConvertVert %d < %s > %s",DIM,buffer,buf); system(command); unlink(buffer); }*/ } MPI_Finalize(); free(a[0]); free(b[0]); free(a); free(b); return 0; }
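/*
 * Build/run sketch (assumed toolchain, not part of the original source).
 * The program above mixes MPI, OpenMP and sqrt(), so something along these
 * lines should work with MPICH or Open MPI:
 *
 *     mpicc -fopenmp -O2 -o life life.c -lm
 *     mpiexec -n 4 ./life
 *
 * comm_sz must be a perfect square whose root divides DIM, so that every
 * process receives an equal (block_dim-2) x (block_dim-2) tile of the grid.
 */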
bad3ff_so8_adv_icc.c
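/*
 * Overview (not part of the original source; inferred from the code below):
 * a time-blocked 3-D TTI forward propagator in the style of Devito-generated
 * code. section0 precomputes the anisotropy rotation fields r49..r53 from
 * delta/theta/phi; a wavefront loop over (t_blk, xb, yb) tiles then calls the
 * OpenMP stencil kernel bf0, which updates u/v on a rotating three-level time
 * buffer (t0/t1/t2) and injects sources through the sparse source masks.
 */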
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include "omp.h" #include <stdio.h> #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; double section2; }; void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r108_vec, float **restrict r109_vec, const int time, const int tw); int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine) { int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data; float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; 
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float(*r49)[y_size + 2 + 2][z_size + 2 + 2]; posix_memalign((void **)&r49, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2])); float(*r50)[y_size + 2 + 2][z_size + 2 + 2]; posix_memalign((void **)&r50, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2])); float(*r51)[y_size + 2 + 2][z_size + 2 + 2]; posix_memalign((void **)&r51, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2])); float(*r52)[y_size + 2 + 2][z_size + 2 + 2]; posix_memalign((void **)&r52, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2])); float(*r53)[y_size + 2 + 2][z_size + 2 + 2]; posix_memalign((void **)&r53, 64, sizeof(float[x_size + 2 + 2][y_size + 2 + 2][z_size + 2 + 2])); float **r94; posix_memalign((void **)&r94, 64, sizeof(float *) * nthreads); float **r95; posix_memalign((void **)&r95, 64, sizeof(float *) * nthreads); int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; int xb_size = block_sizes[0]; int sf = 4; int t_blk_size = 2 * sf * (time_M - time_m); #pragma omp parallel num_threads(nthreads) { const int tid = omp_get_thread_num(); posix_memalign((void **)&r94[tid], 64, sizeof(float[x0_blk0_size + 2 + 2][y0_blk0_size + 2 + 2][z_size + 2 + 2])); posix_memalign((void **)&r95[tid], 64, sizeof(float[x0_blk0_size + 2 + 2][y0_blk0_size + 2 + 2][z_size + 2 + 2])); } /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(static, 1) for (int x = x_m - 2; x <= x_M + 2; x += 1) { for (int y = y_m - 2; y <= y_M + 2; y += 1) { #pragma omp simd aligned(delta, phi, theta : 32) for (int z = z_m - 2; z <= z_M + 2; z += 1) { r49[x + 2][y + 2][z + 2] = sqrt(2 * delta[x + 8][y + 8][z + 8] + 1); r50[x + 2][y + 2][z + 2] = cos(theta[x + 8][y + 8][z + 8]); r51[x + 2][y + 2][z + 2] = cos(phi[x + 8][y + 8][z + 8]); r52[x + 2][y + 2][z + 2] = sin(theta[x + 8][y + 8][z + 8]); r53[x + 2][y + 2][z + 2] = sin(phi[x + 8][y + 8][z + 8]); } } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size) { //printf(" Change of outer xblock %d \n", xb); for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size) { for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 
1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3)) { int tw = ((time / sf) % (time_M - time_m + 1)); struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ bf0(damp_vec, dt, epsilon_vec, (float *)r49, (float *)r50, (float *)r51, (float *)r52, (float *)r53, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M, x_m, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r94, (float **)r95, time, tw); // x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m, /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000; } } } } #pragma omp parallel num_threads(nthreads) { const int tid = omp_get_thread_num(); free(r94[tid]); free(r95[tid]); } free(r49); free(r50); free(r51); free(r52); free(r53); free(r94); free(r95); return 0; } void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r49_vec, float *restrict r50_vec, float *restrict r51_vec, float *restrict r52_vec, float *restrict r53_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r94_vec, float **restrict r95_vec, const int time, const int tw) { float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data; float(*restrict r49)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r49_vec; float(*restrict r50)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r50_vec; float(*restrict r51)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r51_vec; float(*restrict r52)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r52_vec; float(*restrict r53)[y_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y_size + 2 + 2][z_size + 2 + 2]) r53_vec; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = 
(float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; float **r94 = (float **)r94_vec; float **r95 = (float **)r95_vec; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; #pragma omp parallel num_threads(nthreads) { const int tid = omp_get_thread_num(); float(*restrict r82)[y0_blk0_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 2 + 2][z_size + 2 + 2]) r94[tid]; float(*restrict r83)[y0_blk0_size + 2 + 2][z_size + 2 + 2] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 2 + 2][z_size + 2 + 2]) r95[tid]; #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0 - 2, xs = 0; x <= min(min((x_M + time), (xb + xb_size + 1)), (x0_blk0 + x0_blk0_size + 1)); x++, xs++) { for (int y = y0_blk0 - 2, ys = 0; y <= min(min((y_M + time), (yb + yb_size + 1)), (y0_blk0 + y0_blk0_size + 1)); y++, ys++) { //printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 8, y - time + 8, xs, ys); #pragma omp simd aligned(u, v : 64) for (int z = z_m - 2; z <= z_M + 2; z += 1) { r82[xs][ys][z + 2] = -(8.33333346e-3F * (u[t0][x - time + 6][y - time + 8][z + 8] - u[t0][x - time + 10][y - time + 8][z + 8]) + 6.66666677e-2F * (-u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8])) * r51[x - time + 2][y - time + 2][z + 2] * r52[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (u[t0][x - time + 8][y - time + 6][z + 8] - u[t0][x - time + 8][y - time + 10][z + 8]) + 6.66666677e-2F * (-u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 9][z + 8])) * r52[x - time + 2][y - time + 2][z + 2] * r53[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (u[t0][x - time + 8][y - time + 8][z + 6] - u[t0][x - time + 8][y - time + 8][z + 10]) + 6.66666677e-2F * (-u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9])) * r50[x - time + 2][y - time + 2][z + 2]; r83[xs][ys][z + 2] = -(8.33333346e-3F * (v[t0][x - time + 6][y - time + 8][z + 8] - v[t0][x - time + 10][y - time + 8][z + 8]) + 6.66666677e-2F * (-v[t0][x - time + 7][y - time + 8][z + 8] + v[t0][x - time + 9][y - time + 8][z + 8])) * r51[x - time + 
2][y - time + 2][z + 2] * r52[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (v[t0][x - time + 8][y - time + 6][z + 8] - v[t0][x - time + 8][y - time + 10][z + 8]) + 6.66666677e-2F * (-v[t0][x - time + 8][y - time + 7][z + 8] + v[t0][x - time + 8][y - time + 9][z + 8])) * r52[x - time + 2][y - time + 2][z + 2] * r53[x - time + 2][y - time + 2][z + 2] - (8.33333346e-3F * (v[t0][x - time + 8][y - time + 8][z + 6] - v[t0][x - time + 8][y - time + 8][z + 10]) + 6.66666677e-2F * (-v[t0][x - time + 8][y - time + 8][z + 7] + v[t0][x - time + 8][y - time + 8][z + 9])) * r50[x - time + 2][y - time + 2][z + 2]; } } } for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++) { for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++) { //printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 8, y - time + 8, xs, ys); #pragma omp simd aligned(damp, epsilon, u, v, vp : 64) for (int z = z_m; z <= z_M; z += 1) { float r93 = 1.0 / dt; float r92 = 1.0 / (dt * dt); float r91 = 6.66666677e-2F * (r50[x - time + 2][y - time + 2][z + 1] * r83[xs + 2][ys + 2][z + 1] - r50[x - time + 2][y - time + 2][z + 3] * r83[xs + 2][ys + 2][z + 3] + r51[x - time + 1][y - time + 2][z + 2] * r52[x - time + 1][y - time + 2][z + 2] * r83[xs + 1][ys + 2][z + 2] - r51[x - time + 3][y - time + 2][z + 2] * r52[x - time + 3][y - time + 2][z + 2] * r83[xs + 3][ys + 2][z + 2] + r52[x - time + 2][y - time + 1][z + 2] * r53[x - time + 2][y - time + 1][z + 2] * r83[xs + 2][ys + 1][z + 2] - r52[x - time + 2][y - time + 3][z + 2] * r53[x - time + 2][y - time + 3][z + 2] * r83[xs + 2][ys + 3][z + 2]); float r90 = 8.33333346e-3F * (-r50[x - time + 2][y - time + 2][z] * r83[xs + 2][ys + 2][z] + r50[x - time + 2][y - time + 2][z + 4] * r83[xs + 2][ys + 2][z + 4] - r51[x-time][y - time + 2][z + 2] * r52[x-time][y - time + 2][z + 2] * r83[xs][ys + 2][z + 2] + r51[x - time + 4][y - time + 2][z + 2] * r52[x - time + 4][y - time + 2][z + 2] * r83[xs + 4][ys + 2][z + 2] - r52[x - time + 2][y-time][z + 2] * r53[x - time + 2][y-time][z + 2] * r83[xs + 2][ys][z + 2] + r52[x - time + 2][y - time + 4][z + 2] * r53[x - time + 2][y - time + 4][z + 2] * r83[xs + 2][ys + 4][z + 2]); float r89 = 1.0 / (vp[x - time + 8][y - time + 8][z + 8] * vp[x - time + 8][y - time + 8][z + 8]); float r88 = 1.0 / (r89 * r92 + r93 * damp[x - time + 1][y - time + 1][z + 1]); float r87 = 8.33333346e-3F * (r50[x - time + 2][y - time + 2][z] * r82[xs + 2][ys + 2][z] - r50[x - time + 2][y - time + 2][z + 4] * r82[xs + 2][ys + 2][z + 4] + r51[x-time][y - time + 2][z + 2] * r52[x-time][y - time + 2][z + 2] * r82[xs][ys + 2][z + 2] - r51[x - time + 4][y - time + 2][z + 2] * r52[x - time + 4][y - time + 2][z + 2] * r82[xs + 4][ys + 2][z + 2] + r52[x - time + 2][y-time][z + 2] * r53[x - time + 2][y-time][z + 2] * r82[xs + 2][ys][z + 2] - r52[x - time + 2][y - time + 4][z + 2] * r53[x - time + 2][y - time + 4][z + 2] * r82[xs + 2][ys + 4][z + 2]) + 6.66666677e-2F * (-r50[x - time + 2][y - time + 2][z + 1] * r82[xs + 2][ys + 2][z + 1] + r50[x - time + 2][y - time + 2][z + 3] * r82[xs + 2][ys + 2][z + 3] - r51[x - time + 1][y - time + 2][z + 2] * r52[x - time + 1][y - time + 2][z + 2] * r82[xs + 1][ys + 2][z + 2] + r51[x - time + 3][y - time + 2][z + 2] * r52[x - time + 3][y - time + 2][z + 2] * r82[xs + 3][ys + 2][z + 2] - r52[x - time + 2][y - time + 1][z + 2] * r53[x - time + 2][y - time + 1][z + 2] * r82[xs + 2][ys 
+ 1][z + 2] + r52[x - time + 2][y - time + 3][z + 2] * r53[x - time + 2][y - time + 3][z + 2] * r82[xs + 2][ys + 3][z + 2]) - 1.78571425e-5F * (u[t0][x - time + 4][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 4][z + 8] + u[t0][x - time + 8][y - time + 8][z + 4] + u[t0][x - time + 8][y - time + 8][z + 12] + u[t0][x - time + 8][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 8][z + 8]) + 2.53968248e-4F * (u[t0][x - time + 5][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 5][z + 8] + u[t0][x - time + 8][y - time + 8][z + 5] + u[t0][x - time + 8][y - time + 8][z + 11] + u[t0][x - time + 8][y - time + 11][z + 8] + u[t0][x - time + 11][y - time + 8][z + 8]) - 1.99999996e-3F * (u[t0][x - time + 6][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 6][z + 8] + u[t0][x - time + 8][y - time + 8][z + 6] + u[t0][x - time + 8][y - time + 8][z + 10] + u[t0][x - time + 8][y - time + 10][z + 8] + u[t0][x - time + 10][y - time + 8][z + 8]) + 1.59999996e-2F * (u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9] + u[t0][x - time + 8][y - time + 9][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]) - 8.54166647e-2F * u[t0][x - time + 8][y - time + 8][z + 8]; float r80 = r92 * (-2.0F * u[t0][x - time + 8][y - time + 8][z + 8] + u[t1][x - time + 8][y - time + 8][z + 8]); float r81 = r92 * (-2.0F * v[t0][x - time + 8][y - time + 8][z + 8] + v[t1][x - time + 8][y - time + 8][z + 8]); u[t2][x - time + 8][y - time + 8][z + 8] = r88 * ((-r80) * r89 + r87 * (2 * epsilon[x - time + 8][y - time + 8][z + 8] + 1) + r93 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 8][y - time + 8][z + 8]) + (r90 + r91) * r49[x - time + 2][y - time + 2][z + 2]); v[t2][x - time + 8][y - time + 8][z + 8] = r88 * ((-r81) * r89 + r87 * r49[x - time + 2][y - time + 2][z + 2] + r90 + r91 + r93 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 8][y - time + 8][z + 8])); } int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1; for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; u[t2][x - time + 8][y - time + 8][zind + 8] += r22; float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; v[t2][x - time + 8][y - time + 8][zind + 8] += r23; //printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23); } } } } } } }
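/*
 * Minimal standalone sketch (added illustration, not from the original
 * source): the t0/t1/t2 indices computed in the time loop above implement a
 * rotating triple buffer, so only three time levels of u and v are stored.
 */
#include <stdio.h>

int main(void)
{
    for (int step = 0; step < 6; step += 1) {
        int t0 = step % 3;       /* level holding u(t): read        */
        int t1 = (step + 2) % 3; /* level holding u(t-1): read      */
        int t2 = (step + 1) % 3; /* level receiving u(t+1): written */
        printf("step %d: read t0=%d, t1=%d; write t2=%d\n", step, t0, t1, t2);
    }
    return 0;
}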
integrateFullOrbit.c
/* Wrappers around the C integration code for Full Orbits */ #ifdef _WIN32 #include <Python.h> #endif #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_spline.h> #include <bovy_coords.h> #include <bovy_symplecticode.h> #include <leung_dop853.h> #include <bovy_rk.h> #include <integrateFullOrbit.h> //Potentials #include <galpy_potentials.h> #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #ifndef ORBITS_CHUNKSIZE #define ORBITS_CHUNKSIZE 1 #endif //Macros to export functions in DLL on different OS #if defined(_WIN32) #define EXPORT __declspec(dllexport) #elif defined(__GNUC__) #define EXPORT __attribute__((visibility("default"))) #else // Just do nothing? #define EXPORT #endif #ifdef _WIN32 // On Windows, *need* to define this function to allow the package to be imported #if PY_MAJOR_VERSION >= 3 PyMODINIT_FUNC PyInit_libgalpy(void) { // Python 3 return NULL; } #else PyMODINIT_FUNC initlibgalpy(void) {} // Python 2 #endif #endif /* Function Declarations */ void evalRectForce(double, double *, double *, int, struct potentialArg *); void evalRectDeriv(double, double *, double *, int, struct potentialArg *); void evalRectDeriv_dxdv(double,double *, double *, int, struct potentialArg *); void initMovingObjectSplines(struct potentialArg *, double ** pot_args); void initChandrasekharDynamicalFrictionSplines(struct potentialArg *, double ** pot_args); /* Actual functions */ void parse_leapFuncArgs_Full(int npot, struct potentialArg * potentialArgs, int ** pot_type, double ** pot_args){ int ii,jj,kk; int nR, nz, nr; double * Rgrid, * zgrid, * potGrid_splinecoeffs; init_potentialArgs(npot,potentialArgs); for (ii=0; ii < npot; ii++){ switch ( *(*pot_type)++ ) { case 0: //LogarithmicHaloPotential, 4 arguments potentialArgs->potentialEval= &LogarithmicHaloPotentialEval; potentialArgs->Rforce= &LogarithmicHaloPotentialRforce; potentialArgs->zforce= &LogarithmicHaloPotentialzforce; potentialArgs->phiforce= &LogarithmicHaloPotentialphiforce; potentialArgs->dens= &LogarithmicHaloPotentialDens; //potentialArgs->R2deriv= &LogarithmicHaloPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 4; potentialArgs->requiresVelocity= false; break; case 1: //DehnenBarPotential, 6 arguments potentialArgs->Rforce= &DehnenBarPotentialRforce; potentialArgs->phiforce= &DehnenBarPotentialphiforce; potentialArgs->zforce= &DehnenBarPotentialzforce; potentialArgs->nargs= 6; potentialArgs->requiresVelocity= false; break; case 5: //MiyamotoNagaiPotential, 3 arguments potentialArgs->potentialEval= &MiyamotoNagaiPotentialEval; potentialArgs->Rforce= &MiyamotoNagaiPotentialRforce; potentialArgs->zforce= &MiyamotoNagaiPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &MiyamotoNagaiPotentialDens; //potentialArgs->R2deriv= &MiyamotoNagaiPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 3; potentialArgs->requiresVelocity= false; break; case 7: //PowerSphericalPotential, 2 arguments potentialArgs->potentialEval= &PowerSphericalPotentialEval; potentialArgs->Rforce= &PowerSphericalPotentialRforce; potentialArgs->zforce= &PowerSphericalPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &PowerSphericalPotentialDens; //potentialArgs->R2deriv= &PowerSphericalPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= 
&ZeroForce; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 8: //HernquistPotential, 2 arguments potentialArgs->potentialEval= &HernquistPotentialEval; potentialArgs->Rforce= &HernquistPotentialRforce; potentialArgs->zforce= &HernquistPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &HernquistPotentialDens; //potentialArgs->R2deriv= &HernquistPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 9: //NFWPotential, 2 arguments potentialArgs->potentialEval= &NFWPotentialEval; potentialArgs->Rforce= &NFWPotentialRforce; potentialArgs->zforce= &NFWPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &NFWPotentialDens; //potentialArgs->R2deriv= &NFWPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 10: //JaffePotential, 2 arguments potentialArgs->potentialEval= &JaffePotentialEval; potentialArgs->Rforce= &JaffePotentialRforce; potentialArgs->zforce= &JaffePotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &JaffePotentialDens; //potentialArgs->R2deriv= &JaffePotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 11: //DoubleExponentialDiskPotential, XX arguments potentialArgs->potentialEval= &DoubleExponentialDiskPotentialEval; potentialArgs->Rforce= &DoubleExponentialDiskPotentialRforce; potentialArgs->zforce= &DoubleExponentialDiskPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &DoubleExponentialDiskPotentialDens; //Look at pot_args to figure out the number of arguments potentialArgs->nargs= (int) (5 + 4 * *(*pot_args+4) ); potentialArgs->requiresVelocity= false; break; case 12: //FlattenedPowerPotential, 4 arguments potentialArgs->potentialEval= &FlattenedPowerPotentialEval; potentialArgs->Rforce= &FlattenedPowerPotentialRforce; potentialArgs->zforce= &FlattenedPowerPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &FlattenedPowerPotentialDens; potentialArgs->nargs= 4; potentialArgs->requiresVelocity= false; break; case 13: //interpRZPotential, XX arguments //Grab the grids and the coefficients nR= (int) *(*pot_args)++; nz= (int) *(*pot_args)++; Rgrid= (double *) malloc ( nR * sizeof ( double ) ); zgrid= (double *) malloc ( nz * sizeof ( double ) ); potGrid_splinecoeffs= (double *) malloc ( nR * nz * sizeof ( double ) ); for (kk=0; kk < nR; kk++) *(Rgrid+kk)= *(*pot_args)++; for (kk=0; kk < nz; kk++) *(zgrid+kk)= *(*pot_args)++; for (kk=0; kk < nR; kk++) put_row(potGrid_splinecoeffs,kk,*pot_args+kk*nz,nz); *pot_args+= nR*nz; potentialArgs->i2d= interp_2d_alloc(nR,nz); interp_2d_init(potentialArgs->i2d,Rgrid,zgrid,potGrid_splinecoeffs, INTERP_2D_LINEAR); //latter bc we already calculated the coeffs potentialArgs->accx= gsl_interp_accel_alloc (); potentialArgs->accy= gsl_interp_accel_alloc (); for (kk=0; kk < nR; kk++) put_row(potGrid_splinecoeffs,kk,*pot_args+kk*nz,nz); *pot_args+= nR*nz; potentialArgs->i2drforce= interp_2d_alloc(nR,nz); interp_2d_init(potentialArgs->i2drforce,Rgrid,zgrid,potGrid_splinecoeffs, INTERP_2D_LINEAR); //latter bc we already calculated the coeffs potentialArgs->accxrforce= gsl_interp_accel_alloc (); 
potentialArgs->accyrforce= gsl_interp_accel_alloc (); for (kk=0; kk < nR; kk++) put_row(potGrid_splinecoeffs,kk,*pot_args+kk*nz,nz); *pot_args+= nR*nz; potentialArgs->i2dzforce= interp_2d_alloc(nR,nz); interp_2d_init(potentialArgs->i2dzforce,Rgrid,zgrid,potGrid_splinecoeffs, INTERP_2D_LINEAR); //latter bc we already calculated the coeffs potentialArgs->accxzforce= gsl_interp_accel_alloc (); potentialArgs->accyzforce= gsl_interp_accel_alloc (); potentialArgs->potentialEval= &interpRZPotentialEval; potentialArgs->Rforce= &interpRZPotentialRforce; potentialArgs->zforce= &interpRZPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->nargs= 2; //clean up free(Rgrid); free(zgrid); free(potGrid_splinecoeffs); potentialArgs->requiresVelocity= false; break; case 14: //IsochronePotential, 2 arguments potentialArgs->potentialEval= &IsochronePotentialEval; potentialArgs->Rforce= &IsochronePotentialRforce; potentialArgs->zforce= &IsochronePotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &IsochronePotentialDens; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 15: //PowerSphericalwCutoffPotential, 3 arguments potentialArgs->potentialEval= &PowerSphericalPotentialwCutoffEval; potentialArgs->Rforce= &PowerSphericalPotentialwCutoffRforce; potentialArgs->zforce= &PowerSphericalPotentialwCutoffzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &PowerSphericalPotentialwCutoffDens; //potentialArgs->R2deriv= &PowerSphericalPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 3; potentialArgs->requiresVelocity= false; break; case 16: //KuzminKutuzovStaeckelPotential, 3 arguments potentialArgs->potentialEval= &KuzminKutuzovStaeckelPotentialEval; potentialArgs->Rforce= &KuzminKutuzovStaeckelPotentialRforce; potentialArgs->zforce= &KuzminKutuzovStaeckelPotentialzforce; potentialArgs->phiforce= &ZeroForce; //potentialArgs->R2deriv= &KuzminKutuzovStaeckelPotentialR2deriv; potentialArgs->nargs= 3; potentialArgs->requiresVelocity= false; break; case 17: //PlummerPotential, 2 arguments potentialArgs->potentialEval= &PlummerPotentialEval; potentialArgs->Rforce= &PlummerPotentialRforce; potentialArgs->zforce= &PlummerPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &PlummerPotentialDens; //potentialArgs->R2deriv= &PlummerPotentialR2deriv; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 18: //PseudoIsothermalPotential, 2 arguments potentialArgs->potentialEval= &PseudoIsothermalPotentialEval; potentialArgs->Rforce= &PseudoIsothermalPotentialRforce; potentialArgs->zforce= &PseudoIsothermalPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &PseudoIsothermalPotentialDens; //potentialArgs->R2deriv= &PseudoIsothermalPotentialR2deriv; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 19: //KuzminDiskPotential, 2 arguments potentialArgs->potentialEval= &KuzminDiskPotentialEval; potentialArgs->Rforce= &KuzminDiskPotentialRforce; potentialArgs->zforce= &KuzminDiskPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 20: //BurkertPotential, 2 arguments potentialArgs->potentialEval= &BurkertPotentialEval; potentialArgs->Rforce= &BurkertPotentialRforce; potentialArgs->zforce= &BurkertPotentialzforce; potentialArgs->dens= &BurkertPotentialDens; potentialArgs->phiforce= &ZeroForce; 
potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 21: //TriaxialHernquistPotential, lots of arguments potentialArgs->potentialEval= &EllipsoidalPotentialEval; potentialArgs->Rforce = &EllipsoidalPotentialRforce; potentialArgs->zforce = &EllipsoidalPotentialzforce; potentialArgs->phiforce = &EllipsoidalPotentialphiforce; potentialArgs->dens= &EllipsoidalPotentialDens; // Also assign functions specific to EllipsoidalPotential potentialArgs->psi= &TriaxialHernquistPotentialpsi; potentialArgs->mdens= &TriaxialHernquistPotentialmdens; potentialArgs->mdensDeriv= &TriaxialHernquistPotentialmdensDeriv; potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args + (int) (*(*pot_args+7) + 20))); potentialArgs->requiresVelocity= false; break; case 22: //TriaxialNFWPotential, lots of arguments potentialArgs->potentialEval= &EllipsoidalPotentialEval; potentialArgs->Rforce = &EllipsoidalPotentialRforce; potentialArgs->zforce = &EllipsoidalPotentialzforce; potentialArgs->phiforce = &EllipsoidalPotentialphiforce; potentialArgs->dens= &EllipsoidalPotentialDens; // Also assign functions specific to EllipsoidalPotential potentialArgs->psi= &TriaxialNFWPotentialpsi; potentialArgs->mdens= &TriaxialNFWPotentialmdens; potentialArgs->mdensDeriv= &TriaxialNFWPotentialmdensDeriv; potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args + (int) (*(*pot_args+7) + 20))); potentialArgs->requiresVelocity= false; break; case 23: //TriaxialJaffePotential, lots of arguments potentialArgs->potentialEval= &EllipsoidalPotentialEval; potentialArgs->Rforce = &EllipsoidalPotentialRforce; potentialArgs->zforce = &EllipsoidalPotentialzforce; potentialArgs->phiforce = &EllipsoidalPotentialphiforce; potentialArgs->dens= &EllipsoidalPotentialDens; // Also assign functions specific to EllipsoidalPotential potentialArgs->psi= &TriaxialJaffePotentialpsi; potentialArgs->mdens= &TriaxialJaffePotentialmdens; potentialArgs->mdensDeriv= &TriaxialJaffePotentialmdensDeriv; potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args + (int) (*(*pot_args+7) + 20))); potentialArgs->requiresVelocity= false; break; case 24: //SCFPotential, many arguments potentialArgs->potentialEval= &SCFPotentialEval; potentialArgs->Rforce= &SCFPotentialRforce; potentialArgs->zforce= &SCFPotentialzforce; potentialArgs->phiforce= &SCFPotentialphiforce; potentialArgs->dens= &SCFPotentialDens; potentialArgs->nargs= (int) (5 + (1 + *(*pot_args + 1)) * *(*pot_args+2) * *(*pot_args+3)* *(*pot_args+4) + 7); potentialArgs->requiresVelocity= false; break; case 25: //SoftenedNeedleBarPotential, 13 arguments potentialArgs->potentialEval= &SoftenedNeedleBarPotentialEval; potentialArgs->Rforce= &SoftenedNeedleBarPotentialRforce; potentialArgs->zforce= &SoftenedNeedleBarPotentialzforce; potentialArgs->phiforce= &SoftenedNeedleBarPotentialphiforce; potentialArgs->nargs= (int) 13; potentialArgs->requiresVelocity= false; break; case 26: //DiskSCFPotential, nsigma+3 arguments potentialArgs->potentialEval= &DiskSCFPotentialEval; potentialArgs->Rforce= &DiskSCFPotentialRforce; potentialArgs->zforce= &DiskSCFPotentialzforce; potentialArgs->dens= &DiskSCFPotentialDens; potentialArgs->phiforce= &ZeroForce; potentialArgs->nargs= (int) **pot_args + 3; potentialArgs->requiresVelocity= false; break; case 27: // SpiralArmsPotential, 10 arguments + array of Cs potentialArgs->Rforce = &SpiralArmsPotentialRforce; potentialArgs->zforce = &SpiralArmsPotentialzforce; potentialArgs->phiforce = &SpiralArmsPotentialphiforce; 
//potentialArgs->R2deriv = &SpiralArmsPotentialR2deriv; //potentialArgs->z2deriv = &SpiralArmsPotentialz2deriv; potentialArgs->phi2deriv = &SpiralArmsPotentialphi2deriv; //potentialArgs->Rzderiv = &SpiralArmsPotentialRzderiv; potentialArgs->Rphideriv = &SpiralArmsPotentialRphideriv; potentialArgs->nargs = (int) 10 + **pot_args; potentialArgs->requiresVelocity= false; break; case 30: // PerfectEllipsoidPotential, lots of arguments potentialArgs->potentialEval= &EllipsoidalPotentialEval; potentialArgs->Rforce = &EllipsoidalPotentialRforce; potentialArgs->zforce = &EllipsoidalPotentialzforce; potentialArgs->phiforce = &EllipsoidalPotentialphiforce; potentialArgs->dens= &EllipsoidalPotentialDens; //potentialArgs->R2deriv = &EllipsoidalPotentialR2deriv; //potentialArgs->z2deriv = &EllipsoidalPotentialz2deriv; //potentialArgs->phi2deriv = &EllipsoidalPotentialphi2deriv; //potentialArgs->Rzderiv = &EllipsoidalPotentialRzderiv; //potentialArgs->Rphideriv = &EllipsoidalPotentialRphideriv; // Also assign functions specific to EllipsoidalPotential potentialArgs->psi= &PerfectEllipsoidPotentialpsi; potentialArgs->mdens= &PerfectEllipsoidPotentialmdens; potentialArgs->mdensDeriv= &PerfectEllipsoidPotentialmdensDeriv; potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args + (int) (*(*pot_args+7) + 20))); potentialArgs->requiresVelocity= false; break; // 31: KGPotential // 32: IsothermalDiskPotential case 33: //DehnenCoreSphericalPotential, 2 arguments potentialArgs->potentialEval= &DehnenCoreSphericalPotentialEval; potentialArgs->Rforce= &DehnenCoreSphericalPotentialRforce; potentialArgs->zforce= &DehnenCoreSphericalPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &DehnenCoreSphericalPotentialDens; //potentialArgs->R2deriv= &DehnenCoreSphericalPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 2; potentialArgs->requiresVelocity= false; break; case 34: //DehnenSphericalPotential, 3 arguments potentialArgs->potentialEval= &DehnenSphericalPotentialEval; potentialArgs->Rforce= &DehnenSphericalPotentialRforce; potentialArgs->zforce= &DehnenSphericalPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &DehnenSphericalPotentialDens; //potentialArgs->R2deriv= &DehnenSphericalPotentialR2deriv; //potentialArgs->planarphi2deriv= &ZeroForce; //potentialArgs->planarRphideriv= &ZeroForce; potentialArgs->nargs= 3; potentialArgs->requiresVelocity= false; break; case 35: //HomogeneousSpherePotential, 3 arguments potentialArgs->potentialEval= &HomogeneousSpherePotentialEval; potentialArgs->Rforce= &HomogeneousSpherePotentialRforce; potentialArgs->zforce= &HomogeneousSpherePotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &HomogeneousSpherePotentialDens; potentialArgs->nargs= 3; potentialArgs->requiresVelocity= false; break; case 36: //interpSphericalPotential, XX arguments // Set up 1 spline in potentialArgs potentialArgs->nspline1d= 1; potentialArgs->spline1d= (gsl_spline **) \ malloc ( potentialArgs->nspline1d*sizeof ( gsl_spline *) ); potentialArgs->acc1d= (gsl_interp_accel **) \ malloc ( potentialArgs->nspline1d * sizeof ( gsl_interp_accel * ) ); // allocate accelerator *potentialArgs->acc1d= gsl_interp_accel_alloc(); // Set up interpolater nr= (int) **pot_args; *potentialArgs->spline1d= gsl_spline_alloc(gsl_interp_cspline,nr); gsl_spline_init(*potentialArgs->spline1d,*pot_args+1,*pot_args+1+nr,nr); *pot_args+= 2*nr+1; // Bind forces 
potentialArgs->potentialEval= &SphericalPotentialEval; potentialArgs->Rforce = &SphericalPotentialRforce; potentialArgs->zforce = &SphericalPotentialzforce; potentialArgs->phiforce= &ZeroForce; potentialArgs->dens= &SphericalPotentialDens; // Also assign functions specific to SphericalPotential potentialArgs->revaluate= &interpSphericalPotentialrevaluate; potentialArgs->rforce= &interpSphericalPotentialrforce; potentialArgs->r2deriv= &interpSphericalPotentialr2deriv; potentialArgs->rdens= &interpSphericalPotentialrdens; potentialArgs->nargs = (int) 6; potentialArgs->requiresVelocity= false; break; case 37: // TriaxialGaussianPotential, lots of arguments potentialArgs->potentialEval= &EllipsoidalPotentialEval; potentialArgs->Rforce = &EllipsoidalPotentialRforce; potentialArgs->zforce = &EllipsoidalPotentialzforce; potentialArgs->phiforce = &EllipsoidalPotentialphiforce; potentialArgs->dens= &EllipsoidalPotentialDens; //potentialArgs->R2deriv = &EllipsoidalPotentialR2deriv; //potentialArgs->z2deriv = &EllipsoidalPotentialz2deriv; //potentialArgs->phi2deriv = &EllipsoidalPotentialphi2deriv; //potentialArgs->Rzderiv = &EllipsoidalPotentialRzderiv; //potentialArgs->Rphideriv = &EllipsoidalPotentialRphideriv; // Also assign functions specific to EllipsoidalPotential potentialArgs->psi= &TriaxialGaussianPotentialpsi; potentialArgs->mdens= &TriaxialGaussianPotentialmdens; potentialArgs->mdensDeriv= &TriaxialGaussianPotentialmdensDeriv; potentialArgs->nargs = (int) (21 + *(*pot_args+7) + 2 * *(*pot_args + (int) (*(*pot_args+7) + 20))); potentialArgs->requiresVelocity= false; break; //////////////////////////////// WRAPPERS ///////////////////////////////////// case -1: //DehnenSmoothWrapperPotential potentialArgs->potentialEval= &DehnenSmoothWrapperPotentialEval; potentialArgs->Rforce= &DehnenSmoothWrapperPotentialRforce; potentialArgs->zforce= &DehnenSmoothWrapperPotentialzforce; potentialArgs->phiforce= &DehnenSmoothWrapperPotentialphiforce; potentialArgs->nargs= (int) 4; potentialArgs->requiresVelocity= false; break; case -2: //SolidBodyRotationWrapperPotential potentialArgs->Rforce= &SolidBodyRotationWrapperPotentialRforce; potentialArgs->zforce= &SolidBodyRotationWrapperPotentialzforce; potentialArgs->phiforce= &SolidBodyRotationWrapperPotentialphiforce; potentialArgs->nargs= (int) 3; potentialArgs->requiresVelocity= false; break; case -4: //CorotatingRotationWrapperPotential potentialArgs->Rforce= &CorotatingRotationWrapperPotentialRforce; potentialArgs->zforce= &CorotatingRotationWrapperPotentialzforce; potentialArgs->phiforce= &CorotatingRotationWrapperPotentialphiforce; potentialArgs->nargs= (int) 5; potentialArgs->requiresVelocity= false; break; case -5: //GaussianAmplitudeWrapperPotential potentialArgs->potentialEval= &GaussianAmplitudeWrapperPotentialEval; potentialArgs->Rforce= &GaussianAmplitudeWrapperPotentialRforce; potentialArgs->zforce= &GaussianAmplitudeWrapperPotentialzforce; potentialArgs->phiforce= &GaussianAmplitudeWrapperPotentialphiforce; potentialArgs->nargs= (int) 3; potentialArgs->requiresVelocity= false; break; case -6: //MovingObjectPotential potentialArgs->Rforce= &MovingObjectPotentialRforce; potentialArgs->zforce= &MovingObjectPotentialzforce; potentialArgs->phiforce= &MovingObjectPotentialphiforce; potentialArgs->nargs= (int) 3; potentialArgs->requiresVelocity= false; break; case -7: //ChandrasekharDynamicalFrictionForce potentialArgs->RforceVelocity= &ChandrasekharDynamicalFrictionForceRforce; potentialArgs->zforceVelocity= 
&ChandrasekharDynamicalFrictionForcezforce; potentialArgs->phiforceVelocity= &ChandrasekharDynamicalFrictionForcephiforce; potentialArgs->nargs= (int) 16; potentialArgs->requiresVelocity= true; break; } int setupMovingObjectSplines = *(*pot_type-1) == -6 ? 1 : 0; int setupChandrasekharDynamicalFrictionSplines = *(*pot_type-1) == -7 ? 1 : 0; if ( *(*pot_type-1) < 0 ) { // Parse wrapped potential for wrappers potentialArgs->nwrapped= (int) *(*pot_args)++; potentialArgs->wrappedPotentialArg= \ (struct potentialArg *) malloc ( potentialArgs->nwrapped \ * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(potentialArgs->nwrapped, potentialArgs->wrappedPotentialArg, pot_type,pot_args); } if (setupMovingObjectSplines) initMovingObjectSplines(potentialArgs, pot_args); if (setupChandrasekharDynamicalFrictionSplines) initChandrasekharDynamicalFrictionSplines(potentialArgs,pot_args); potentialArgs->args= (double *) malloc( potentialArgs->nargs * sizeof(double)); for (jj=0; jj < potentialArgs->nargs; jj++){ *(potentialArgs->args)= *(*pot_args)++; potentialArgs->args++; } potentialArgs->args-= potentialArgs->nargs; potentialArgs++; } potentialArgs-= npot; } EXPORT void integrateFullOrbit(int nobj, double *yo, int nt, double *t, int npot, int * pot_type, double * pot_args, double dt, double rtol, double atol, double *result, int * err, int odeint_type){ //Set up the forces, first count int ii,jj; int dim; int max_threads; int * thread_pot_type; double * thread_pot_args; max_threads= ( nobj < omp_get_max_threads() ) ? nobj : omp_get_max_threads(); // Because potentialArgs may cache, safest to have one / thread struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( max_threads * npot * sizeof (struct potentialArg) ); #pragma omp parallel for schedule(static,1) private(ii,thread_pot_type,thread_pot_args) num_threads(max_threads) for (ii=0; ii < max_threads; ii++) { thread_pot_type= pot_type; // need to make thread-private pointers, bc thread_pot_args= pot_args; // these pointers are changed in parse_... 
parse_leapFuncArgs_Full(npot,potentialArgs+ii*npot, &thread_pot_type,&thread_pot_args); } //Integrate void (*odeint_func)(void (*func)(double, double *, double *, int, struct potentialArg *), int, double *, int, double, double *, int, struct potentialArg *, double, double, double *,int *); void (*odeint_deriv_func)(double, double *, double *, int,struct potentialArg *); switch ( odeint_type ) { case 0: //leapfrog odeint_func= &leapfrog; odeint_deriv_func= &evalRectForce; dim= 3; break; case 1: //RK4 odeint_func= &bovy_rk4; odeint_deriv_func= &evalRectDeriv; dim= 6; break; case 2: //RK6 odeint_func= &bovy_rk6; odeint_deriv_func= &evalRectDeriv; dim= 6; break; case 3: //symplec4 odeint_func= &symplec4; odeint_deriv_func= &evalRectForce; dim= 3; break; case 4: //symplec6 odeint_func= &symplec6; odeint_deriv_func= &evalRectForce; dim= 3; break; case 5: //DOPR54 odeint_func= &bovy_dopr54; odeint_deriv_func= &evalRectDeriv; dim= 6; break; case 6: //DOP853 odeint_func= &dop853; odeint_deriv_func= &evalRectDeriv; dim= 6; break; } #pragma omp parallel for schedule(dynamic,ORBITS_CHUNKSIZE) private(ii,jj) num_threads(max_threads) for (ii=0; ii < nobj; ii++) { cyl_to_rect_galpy(yo+6*ii); odeint_func(odeint_deriv_func,dim,yo+6*ii,nt,dt,t, npot,potentialArgs+omp_get_thread_num()*npot,rtol,atol, result+6*nt*ii,err+ii); for (jj=0; jj < nt; jj++) rect_to_cyl_galpy(result+6*jj+6*nt*ii); } //Free allocated memory #pragma omp parallel for schedule(static,1) private(ii) num_threads(max_threads) for (ii=0; ii < max_threads; ii++) free_potentialArgs(npot,potentialArgs+ii*npot); free(potentialArgs); //Done! } // LCOV_EXCL_START void integrateOrbit_dxdv(double *yo, int nt, double *t, int npot, int * pot_type, double * pot_args, double rtol, double atol, double *result, int * err, int odeint_type){ //Set up the forces, first count int dim; struct potentialArg * potentialArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,potentialArgs,&pot_type,&pot_args); //Integrate void (*odeint_func)(void (*func)(double, double *, double *, int, struct potentialArg *), int, double *, int, double, double *, int, struct potentialArg *, double, double, double *,int *); void (*odeint_deriv_func)(double, double *, double *, int,struct potentialArg *); switch ( odeint_type ) { case 0: //leapfrog odeint_func= &leapfrog; odeint_deriv_func= &evalRectForce; dim= 6; break; case 1: //RK4 odeint_func= &bovy_rk4; odeint_deriv_func= &evalRectDeriv_dxdv; dim= 12; break; case 2: //RK6 odeint_func= &bovy_rk6; odeint_deriv_func= &evalRectDeriv_dxdv; dim= 12; break; case 3: //symplec4 odeint_func= &symplec4; odeint_deriv_func= &evalRectForce; dim= 6; break; case 4: //symplec6 odeint_func= &symplec6; odeint_deriv_func= &evalRectForce; dim= 6; break; case 5: //DOPR54 odeint_func= &bovy_dopr54; odeint_deriv_func= &evalRectDeriv_dxdv; dim= 12; break; case 6: //DOP853 odeint_func= &dop853; odeint_deriv_func= &evalRectDeriv_dxdv; dim= 12; break; } odeint_func(odeint_deriv_func,dim,yo,nt,-9999.99,t,npot,potentialArgs, rtol,atol,result,err); //Free allocated memory free_potentialArgs(npot,potentialArgs); free(potentialArgs); //Done! } // LCOV_EXCL_STOP void evalRectForce(double t, double *q, double *a, int nargs, struct potentialArg * potentialArgs){ double sinphi, cosphi, x, y, phi,R,Rforce,phiforce, z, zforce; //q is rectangular so calculate R and phi x= *q; y= *(q+1); z= *(q+2); R= sqrt(x*x+y*y); phi= acos(x/R); sinphi= y/R; cosphi= x/R; if ( y < 0. 
) phi= 2.*M_PI-phi; //Calculate the forces Rforce= calcRforce(R,z,phi,t,nargs,potentialArgs); zforce= calczforce(R,z,phi,t,nargs,potentialArgs); phiforce= calcPhiforce(R,z,phi,t,nargs,potentialArgs); *a++= cosphi*Rforce-1./R*sinphi*phiforce; *a++= sinphi*Rforce+1./R*cosphi*phiforce; *a= zforce; } void evalRectDeriv(double t, double *q, double *a, int nargs, struct potentialArg * potentialArgs){ double sinphi, cosphi, x, y, phi,R,Rforce,phiforce,z,zforce,vR,vT; //first three derivatives are just the velocities *a++= *(q+3); *a++= *(q+4); *a++= *(q+5); //Rest is force //q is rectangular so calculate R and phi, vR and vT (for dissipative) x= *q; y= *(q+1); z= *(q+2); R= sqrt(x*x+y*y); phi= acos(x/R); sinphi= y/R; cosphi= x/R; if ( y < 0. ) phi= 2.*M_PI-phi; vR= *(q+3) * cosphi + *(q+4) * sinphi; vT= -*(q+3) * sinphi + *(q+4) * cosphi; //Calculate the forces Rforce= calcRforce(R,z,phi,t,nargs,potentialArgs,vR,vT,*(q+5)); zforce= calczforce(R,z,phi,t,nargs,potentialArgs,vR,vT,*(q+5)); phiforce= calcPhiforce(R,z,phi,t,nargs,potentialArgs,vR,vT,*(q+5)); *a++= cosphi*Rforce-1./R*sinphi*phiforce; *a++= sinphi*Rforce+1./R*cosphi*phiforce; *a= zforce; } void initMovingObjectSplines(struct potentialArg * potentialArgs, double ** pot_args){ gsl_interp_accel *x_accel_ptr = gsl_interp_accel_alloc(); gsl_interp_accel *y_accel_ptr = gsl_interp_accel_alloc(); gsl_interp_accel *z_accel_ptr = gsl_interp_accel_alloc(); int nPts = (int) **pot_args; gsl_spline *x_spline = gsl_spline_alloc(gsl_interp_cspline, nPts); gsl_spline *y_spline = gsl_spline_alloc(gsl_interp_cspline, nPts); gsl_spline *z_spline = gsl_spline_alloc(gsl_interp_cspline, nPts); double * t_arr = *pot_args+1; double * x_arr = t_arr+1*nPts; double * y_arr = t_arr+2*nPts; double * z_arr = t_arr+3*nPts; double * t= (double *) malloc ( nPts * sizeof (double) ); double tf = *(t_arr+4*nPts+2); double to = *(t_arr+4*nPts+1); int ii; for (ii=0; ii < nPts; ii++) *(t+ii) = (t_arr[ii]-to)/(tf-to); gsl_spline_init(x_spline, t, x_arr, nPts); gsl_spline_init(y_spline, t, y_arr, nPts); gsl_spline_init(z_spline, t, z_arr, nPts); potentialArgs->nspline1d= 3; potentialArgs->spline1d= (gsl_spline **) malloc ( 3*sizeof ( gsl_spline *) ); potentialArgs->acc1d= (gsl_interp_accel **) \ malloc ( 3 * sizeof ( gsl_interp_accel * ) ); *potentialArgs->spline1d = x_spline; *potentialArgs->acc1d = x_accel_ptr; *(potentialArgs->spline1d+1)= y_spline; *(potentialArgs->acc1d+1)= y_accel_ptr; *(potentialArgs->spline1d+2)= z_spline; *(potentialArgs->acc1d+2)= z_accel_ptr; *pot_args = *pot_args + (int) (1+4*nPts); free(t); } void initChandrasekharDynamicalFrictionSplines(struct potentialArg * potentialArgs, double ** pot_args){ gsl_interp_accel *sr_accel_ptr = gsl_interp_accel_alloc(); int nPts = (int) **pot_args; gsl_spline *sr_spline = gsl_spline_alloc(gsl_interp_cspline,nPts); double * r_arr = *pot_args+1; double * sr_arr = r_arr+1*nPts; double * r= (double *) malloc ( nPts * sizeof (double) ); double ro = *(r_arr+2*nPts+14); double rf = *(r_arr+2*nPts+15); int ii; for (ii=0; ii < nPts; ii++) *(r+ii) = (r_arr[ii]-ro)/(rf-ro); gsl_spline_init(sr_spline,r,sr_arr,nPts); potentialArgs->nspline1d= 1; potentialArgs->spline1d= (gsl_spline **) \ malloc ( potentialArgs->nspline1d*sizeof ( gsl_spline *) ); potentialArgs->acc1d= (gsl_interp_accel **) \ malloc ( potentialArgs->nspline1d * sizeof ( gsl_interp_accel * ) ); *potentialArgs->spline1d = sr_spline; *potentialArgs->acc1d = sr_accel_ptr; *pot_args = *pot_args + (int) (1+(1+potentialArgs->nspline1d)*nPts); free(r); } // 
LCOV_EXCL_START void evalRectDeriv_dxdv(double t, double *q, double *a, int nargs, struct potentialArg * potentialArgs){ double sinphi, cosphi, x, y, phi,R,Rforce,phiforce,z,zforce; double R2deriv, phi2deriv, Rphideriv, dFxdx, dFxdy, dFydx, dFydy; //first three derivatives are just the velocities *a++= *(q+3); *a++= *(q+4); *a++= *(q+5); //Rest is force //q is rectangular so calculate R and phi x= *q; y= *(q+1); z= *(q+2); R= sqrt(x*x+y*y); phi= acos(x/R); sinphi= y/R; cosphi= x/R; if ( y < 0. ) phi= 2.*M_PI-phi; //Calculate the forces Rforce= calcRforce(R,z,phi,t,nargs,potentialArgs); zforce= calczforce(R,z,phi,t,nargs,potentialArgs); phiforce= calcPhiforce(R,z,phi,t,nargs,potentialArgs); *a++= cosphi*Rforce-1./R*sinphi*phiforce; *a++= sinphi*Rforce+1./R*cosphi*phiforce; *a++= zforce; //dx derivatives are just dv *a++= *(q+9); *a++= *(q+10); *a++= *(q+11); //for the dv derivatives we need also R2deriv, phi2deriv, and Rphideriv R2deriv= calcR2deriv(R,z,phi,t,nargs,potentialArgs); phi2deriv= calcphi2deriv(R,z,phi,t,nargs,potentialArgs); Rphideriv= calcRphideriv(R,z,phi,t,nargs,potentialArgs); //..and dFxdx, dFxdy, dFydx, dFydy dFxdx= -cosphi*cosphi*R2deriv +2.*cosphi*sinphi/R/R*phiforce +sinphi*sinphi/R*Rforce +2.*sinphi*cosphi/R*Rphideriv -sinphi*sinphi/R/R*phi2deriv; dFxdy= -sinphi*cosphi*R2deriv +(sinphi*sinphi-cosphi*cosphi)/R/R*phiforce -cosphi*sinphi/R*Rforce -(cosphi*cosphi-sinphi*sinphi)/R*Rphideriv +cosphi*sinphi/R/R*phi2deriv; dFydx= -cosphi*sinphi*R2deriv +(sinphi*sinphi-cosphi*cosphi)/R/R*phiforce +(sinphi*sinphi-cosphi*cosphi)/R*Rphideriv -sinphi*cosphi/R*Rforce +sinphi*cosphi/R/R*phi2deriv; dFydy= -sinphi*sinphi*R2deriv -2.*sinphi*cosphi/R/R*phiforce -2.*sinphi*cosphi/R*Rphideriv +cosphi*cosphi/R*Rforce -cosphi*cosphi/R/R*phi2deriv; *a++= dFxdx * *(q+4) + dFxdy * *(q+5); *a++= dFydx * *(q+4) + dFydy * *(q+5); *a= 0; //BOVY: PUT IN Z2DERIVS } // LCOV_EXCL_STOP
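/*
 * Minimal standalone sketch (added illustration, not from the original
 * source): evalRectForce/evalRectDeriv above rotate cylindrical force
 * components (F_R, F_phi) into Cartesian ones via
 *     F_x = cos(phi)*F_R - (1/R)*sin(phi)*F_phi
 *     F_y = sin(phi)*F_R + (1/R)*cos(phi)*F_phi
 * For a purely radial force this reduces to F_x = F_R*x/R, F_y = F_R*y/R.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 3., y = 4.;
    double R = sqrt(x * x + y * y);
    double phi = atan2(y, x);
    double FR = -1. / (R * R); /* e.g. a point-mass radial pull */
    double Fphi = 0.;          /* no azimuthal force            */
    double Fx = cos(phi) * FR - sin(phi) * Fphi / R;
    double Fy = sin(phi) * FR + cos(phi) * Fphi / R;
    printf("Fx=%g Fy=%g (expected %g %g)\n", Fx, Fy, FR * x / R, FR * y / R);
    return 0;
}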
dfwavelet.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <time.h> #ifdef USE_OPENMP #include <omp.h> #endif #include "dfwavelet.h" #ifdef USE_CUDA #include "dfwavelet_kernels.h" #endif #define str_eq(s1,s2) (!strcmp ((s1),(s2))) /******** Header *********/ static void dffwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn, data_t* in_vx,data_t* in_vy,data_t* in_vz); static void dfiwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn); static void dfsoftthresh_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh, data_t* out_wcdf1,data_t* out_wcdf2,data_t* out_wcn); static void dfwavthresh3_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz); static void dfwavthresh3_spin_cpu(struct dfwavelet_plan_s* plan,scalar_t dfthresh, scalar_t nthresh,int spins,int isRand,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz); static void dfwavthresh3_SURE_cpu(struct dfwavelet_plan_s* plan,scalar_t sigma,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz); static void dfwavthresh3_SURE_spin_cpu(struct dfwavelet_plan_s* plan,scalar_t sigma,int spins, int isRand,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz); static void dfwavthresh3_SURE_MAD_cpu(struct dfwavelet_plan_s* plan,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz); static void dfwavthresh3_SURE_MAD_spin_cpu(struct dfwavelet_plan_s* plan,int spins, int isRand,data_t* out_vx,data_t* out_vy,data_t* out_vz,data_t* in_vx,data_t* in_vy,data_t* in_vz); void dflincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3); void dfunlincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3); static void fwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out, data_t* in,int dir); static void iwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out, data_t* in,int dir); static void softthresh_cpu(struct dfwavelet_plan_s* plan, int length, scalar_t thresh, data_t* in); static void circshift_cpu(struct dfwavelet_plan_s* plan, data_t *data); static void circunshift_cpu(struct dfwavelet_plan_s* plan, data_t *data); static void conv_down_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen); static void conv_up_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen); static void add(data_t* out,data_t *in,int maxInd); static void mult(data_t* in,scalar_t scalar,int maxInd); static void create_numLevels(struct dfwavelet_plan_s* plan); static void create_wavelet_sizes(struct dfwavelet_plan_s* plan); static void create_wavelet_filters(struct dfwavelet_plan_s* plan); static void count_zeros_cpu(struct dfwavelet_plan_s* plan, data_t* vx, data_t* vy, data_t* vz); static void get_noise_amp (struct dfwavelet_plan_s* plan); struct dfwavelet_plan_s* prepare_dfwavelet_plan(int numdims, int* imSize, int* minSize, scalar_t* res,int use_gpu) { #ifdef USE_OPENMP omp_set_num_threads( omp_get_max_threads() ); #endif struct dfwavelet_plan_s* plan = (struct dfwavelet_plan_s*) malloc(sizeof(struct dfwavelet_plan_s)); plan->use_gpu = use_gpu; plan->numdims = numdims; 
plan->imSize = (int*) malloc(sizeof(int)*numdims);
  plan->minSize = (int*) malloc(sizeof(int)*numdims);
  plan->res = (scalar_t*) malloc(sizeof(scalar_t)*numdims);
  plan->percentZero = -1;
  plan->noiseAmp = NULL;
  // Get imSize, numPixel, numdims
  plan->numPixel = 1;
  int i;
  for (i = 0; i < numdims; i++) {
    plan->imSize[i] = imSize[i];
    plan->numPixel *= imSize[i];
    plan->minSize[i] = minSize[i];
    plan->res[i] = res[i];
  }
  create_wavelet_filters(plan);
  create_numLevels(plan);
  create_wavelet_sizes(plan);
  plan->randShift = (int*) malloc(sizeof(int)*plan->numdims);
  memset(plan->randShift,0,sizeof(int)*plan->numdims);
  return plan;
}

void dfwavelet_forward(struct dfwavelet_plan_s* plan, data_t* out_wcdf1, data_t* out_wcdf2, data_t* out_wcn, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  if (plan->use_gpu==0)
    dffwt3_cpu(plan,out_wcdf1,out_wcdf2,out_wcn,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dffwt3_gpu(plan,out_wcdf1,out_wcdf2,out_wcn,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dffwt3_gpuHost(plan,out_wcdf1,out_wcdf2,out_wcn,in_vx,in_vy,in_vz);
#endif
}

void dfwavelet_inverse(struct dfwavelet_plan_s* plan, data_t* out_vx,data_t* out_vy,data_t* out_vz, data_t* in_wcdf1,data_t* in_wcdf2,data_t* in_wcn)
{
  if (plan->use_gpu==0)
    dfiwt3_cpu(plan,out_vx,out_vy,out_vz,in_wcdf1,in_wcdf2,in_wcn);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfiwt3_gpu(plan,out_vx,out_vy,out_vz,in_wcdf1,in_wcdf2,in_wcn);
  if (plan->use_gpu==2)
    dfiwt3_gpuHost(plan,out_vx,out_vy,out_vz,in_wcdf1,in_wcdf2,in_wcn);
#endif
}

void dfwavelet_thresh(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfwavthresh3_cpu(plan,dfthresh,nthresh,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfwavthresh3_gpu(plan,dfthresh,nthresh,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dfwavthresh3_gpuHost(plan,dfthresh,nthresh,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#endif
}

void dfsoft_thresh(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, data_t* wcdf1, data_t* wcdf2, data_t* wcn)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfsoftthresh_cpu(plan,dfthresh,nthresh,wcdf1,wcdf2,wcn);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfsoftthresh_gpu(plan,dfthresh,nthresh,wcdf1,wcdf2,wcn);
  if (plan->use_gpu==2)
    dfsoftthresh_gpuHost(plan,dfthresh,nthresh,wcdf1,wcdf2,wcn);
#endif
}

void dfwavelet_thresh_spin(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, int spins, int isRand, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfwavthresh3_spin_cpu(plan,dfthresh,nthresh,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfwavthresh3_spin_gpu(plan,dfthresh,nthresh,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dfwavthresh3_spin_gpuHost(plan,dfthresh,nthresh,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#endif
}

void dfwavelet_thresh_SURE(struct dfwavelet_plan_s* plan, scalar_t sigma, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfwavthresh3_SURE_cpu(plan,sigma,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfwavthresh3_SURE_gpu(plan,sigma,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dfwavthresh3_SURE_gpuHost(plan,sigma,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#endif
}

void dfwavelet_thresh_SURE_spin(struct dfwavelet_plan_s* plan, scalar_t sigma, int spins, int isRand, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfwavthresh3_SURE_spin_cpu(plan,sigma,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfwavthresh3_SURE_spin_gpu(plan,sigma,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dfwavthresh3_SURE_spin_gpuHost(plan,sigma,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#endif
}

void dfwavelet_thresh_SURE_MAD(struct dfwavelet_plan_s* plan, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfwavthresh3_SURE_MAD_cpu(plan,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfwavthresh3_SURE_MAD_gpu(plan,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dfwavthresh3_SURE_MAD_gpuHost(plan,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#endif
}

void dfwavelet_thresh_SURE_MAD_spin(struct dfwavelet_plan_s* plan, int spins, int isRand, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  get_noise_amp(plan);
  if (plan->use_gpu==0)
    dfwavthresh3_SURE_MAD_spin_cpu(plan,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#ifdef USE_CUDA
  if (plan->use_gpu==1)
    dfwavthresh3_SURE_MAD_spin_gpu(plan,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
  if (plan->use_gpu==2)
    dfwavthresh3_SURE_MAD_spin_gpuHost(plan,spins,isRand,out_vx,out_vy,out_vz,in_vx,in_vy,in_vz);
#endif
}

int rand_lim(int limit)
{
  int divisor = RAND_MAX/(limit+1);
  int retval;
  do {
    retval = rand() / divisor;
  } while (retval > limit);
  return retval;
}

void dfwavelet_new_randshift(struct dfwavelet_plan_s* plan)
{
  int i;
  for (i = 0; i < plan->numdims; i++) {
    // Determine maximum shift value for this dimension
    int log2dim = 1;
    while ((1<<log2dim) < plan->imSize[i]) {
      log2dim++;
    }
    int maxShift = 1 << (log2dim-plan->numLevels);
    if (maxShift > 8) {
      maxShift = 8;
    }
    // Generate random shift value between 0 and maxShift
    plan->randShift[i] = rand_lim(maxShift);
  }
}

void dfwavelet_clear_randshift(struct dfwavelet_plan_s* plan)
{
  memset(plan->randShift, 0, plan->numdims*sizeof(int));
}

void dfwavelet_free(struct dfwavelet_plan_s* plan)
{
  free(plan->imSize);
  free(plan->minSize);
  free(plan->lod0);
  free(plan->lod1);
  free(plan->res);
  free(plan->waveSizes);
  free(plan->randShift);
  if (plan->noiseAmp != NULL)
    free(plan->noiseAmp);
  free(plan);
}

////////////// Private Functions //////////////

void dffwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_wcdf1, data_t* out_wcdf2, data_t* out_wcn, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  fwt3_cpu(plan,out_wcdf1,in_vx,0);
  fwt3_cpu(plan,out_wcdf2,in_vy,1);
  fwt3_cpu(plan,out_wcn,in_vz,2);
  mult(out_wcdf1,1/plan->res[0],plan->numCoeff);
  mult(out_wcdf2,1/plan->res[1],plan->numCoeff);
  mult(out_wcn,1/plan->res[2],plan->numCoeff);
  dflincomb_cpu(plan,out_wcdf1,out_wcdf2,out_wcn);
}

void dfiwt3_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_wcdf1, data_t* in_wcdf2, data_t* in_wcn)
{
  dfunlincomb_cpu(plan,in_wcdf1,in_wcdf2,in_wcn);
  mult(in_wcdf1,plan->res[0],plan->numCoeff);
  mult(in_wcdf2,plan->res[1],plan->numCoeff);
  mult(in_wcn,plan->res[2],plan->numCoeff);
  iwt3_cpu(plan,out_vx,in_wcdf1,0);
  iwt3_cpu(plan,out_vy,in_wcdf2,1);
  iwt3_cpu(plan,out_vz,in_wcn,2);
}

void dfsoftthresh_cpu(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, data_t* wcdf1, data_t* wcdf2, data_t* wcn)
{
  data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  int l;
  for (l = 1; l <= plan->numLevels; ++l) {
    HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
    HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
    HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
  }
  int dxNext, dyNext, dzNext, blockSize;
  int naInd = 0;
  for (l = plan->numLevels; l >= 1; --l) {
    dxNext = plan->waveSizes[0 + 3*l];
    dyNext = plan->waveSizes[1 + 3*l];
    dzNext = plan->waveSizes[2 + 3*l];
    blockSize = dxNext*dyNext*dzNext;
    HxLyLz1 = HxLyLz1 - 7*blockSize;
    HxLyLz2 = HxLyLz2 - 7*blockSize;
    HxLyLz3 = HxLyLz3 - 7*blockSize;
    int bandInd;
    for (bandInd=0; bandInd<7*3; bandInd++) {
      data_t *subband;
      scalar_t lambda;
      if (bandInd<7) {
        subband = HxLyLz1 + bandInd*blockSize;
        lambda = dfthresh * plan->noiseAmp[naInd];
      } else if (bandInd<14) {
        subband = HxLyLz2 + (bandInd-7)*blockSize;
        lambda = dfthresh * plan->noiseAmp[naInd];
      } else {
        subband = HxLyLz3 + (bandInd-14)*blockSize;
        lambda = nthresh * plan->noiseAmp[naInd];
      }
      // SoftThresh
      softthresh_cpu(plan, blockSize, lambda, subband);
      naInd++;
    }
  }
}

void dfwavthresh3_cpu(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  data_t *wcdf1,*wcdf2,*wcn;
  wcdf1 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  wcdf2 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  wcn = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  dffwt3_cpu(plan,wcdf1,wcdf2,wcn,in_vx,in_vy,in_vz);
  dfsoftthresh_cpu(plan,dfthresh,nthresh,wcdf1,wcdf2,wcn);
  dfiwt3_cpu(plan,out_vx,out_vy,out_vz,wcdf1,wcdf2,wcn);
  free(wcdf1);
  free(wcdf2);
  free(wcn);
}

void dfwavthresh3_spin_cpu(struct dfwavelet_plan_s* plan, scalar_t dfthresh, scalar_t nthresh, int spins, int isRand, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  // Zero the accumulators first (as the SURE spin variants below do);
  // the spin loop only ever adds into out_vx/out_vy/out_vz.
  memset(out_vx,0,sizeof(data_t)*plan->numPixel);
  memset(out_vy,0,sizeof(data_t)*plan->numPixel);
  memset(out_vz,0,sizeof(data_t)*plan->numPixel);
  data_t *temp_vx,*temp_vy,*temp_vz;
  temp_vx = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  temp_vy = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  temp_vz = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  int s1,s2,s3;
  for (s1=0;s1<spins;s1++)
    for (s2=0;s2<spins;s2++)
      for (s3=0;s3<spins;s3++) {
        if (isRand) {
          dfwavelet_new_randshift(plan);
        } else {
          plan->randShift[0] = s1;
          plan->randShift[1] = s2;
          plan->randShift[2] = s3;
        }
        dfwavthresh3_cpu(plan,dfthresh,nthresh,temp_vx,temp_vy,temp_vz,in_vx,in_vy,in_vz);
        add(out_vx,temp_vx,plan->numPixel);
        add(out_vy,temp_vy,plan->numPixel);
        add(out_vz,temp_vz,plan->numPixel);
      }
  mult(out_vx,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  mult(out_vy,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  mult(out_vz,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  free(temp_vx);
  free(temp_vy);
  free(temp_vz);
}

void dfwavthresh3_SURE_cpu(struct dfwavelet_plan_s* plan, scalar_t sigma, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  data_t *wcdf1,*wcdf2,*wcn;
  wcdf1 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  wcdf2 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  wcn = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  count_zeros_cpu(plan,in_vx,in_vy,in_vz);
  dffwt3_cpu(plan,wcdf1,wcdf2,wcn,in_vx,in_vy,in_vz);
  dfSUREshrink_cpu(plan,sigma,wcdf1,wcdf2,wcn);
  dfiwt3_cpu(plan,out_vx,out_vy,out_vz,wcdf1,wcdf2,wcn);
  free(wcdf1);
  free(wcdf2);
  free(wcn);
}

void dfwavthresh3_SURE_spin_cpu(struct dfwavelet_plan_s* plan, scalar_t sigma, int spins, int isRand, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  memset(out_vx,0,sizeof(data_t)*plan->numPixel);
  memset(out_vy,0,sizeof(data_t)*plan->numPixel);
  memset(out_vz,0,sizeof(data_t)*plan->numPixel);
  data_t *temp_vx,*temp_vy,*temp_vz;
  temp_vx = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  temp_vy = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  temp_vz = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  int s1,s2,s3;
  for (s1=0;s1<spins;s1++)
    for (s2=0;s2<spins;s2++)
      for (s3=0;s3<spins;s3++) {
        plan->randShift[0] = s1;
        plan->randShift[1] = s2;
        plan->randShift[2] = s3;
        dfwavthresh3_SURE_cpu(plan,sigma,temp_vx,temp_vy,temp_vz,in_vx,in_vy,in_vz);
        add(out_vx,temp_vx,plan->numPixel);
        add(out_vy,temp_vy,plan->numPixel);
        add(out_vz,temp_vz,plan->numPixel);
      }
  mult(out_vx,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  mult(out_vy,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  mult(out_vz,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  free(temp_vx);
  free(temp_vy);
  free(temp_vz);
}

void dfwavthresh3_SURE_MAD_cpu(struct dfwavelet_plan_s* plan, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  data_t *wcdf1,*wcdf2,*wcn;
  wcdf1 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  wcdf2 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  wcn = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
  count_zeros_cpu(plan,in_vx,in_vy,in_vz);
  dffwt3_cpu(plan,wcdf1,wcdf2,wcn,in_vx,in_vy,in_vz);
  scalar_t sigma = getMADsigma_cpu(plan,wcn);
  dfSUREshrink_cpu(plan,sigma,wcdf1,wcdf2,wcn);
  dfiwt3_cpu(plan,out_vx,out_vy,out_vz,wcdf1,wcdf2,wcn);
  free(wcdf1);
  free(wcdf2);
  free(wcn);
}

void dfwavthresh3_SURE_MAD_spin_cpu(struct dfwavelet_plan_s* plan, int spins, int isRand, data_t* out_vx, data_t* out_vy, data_t* out_vz, data_t* in_vx, data_t* in_vy, data_t* in_vz)
{
  memset(out_vx,0,sizeof(data_t)*plan->numPixel);
  memset(out_vy,0,sizeof(data_t)*plan->numPixel);
  memset(out_vz,0,sizeof(data_t)*plan->numPixel);
  data_t *temp_vx,*temp_vy,*temp_vz;
  temp_vx = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  temp_vy = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  temp_vz = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
  int s1,s2,s3;
  for (s1=0;s1<spins;s1++)
    for (s2=0;s2<spins;s2++)
      for (s3=0;s3<spins;s3++) {
        plan->randShift[0] = s1;
        plan->randShift[1] = s2;
        plan->randShift[2] = s3;
        dfwavthresh3_SURE_MAD_cpu(plan,temp_vx,temp_vy,temp_vz,in_vx,in_vy,in_vz);
        add(out_vx,temp_vx,plan->numPixel);
        add(out_vy,temp_vy,plan->numPixel);
        add(out_vz,temp_vz,plan->numPixel);
      }
  mult(out_vx,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  mult(out_vy,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  mult(out_vz,1.0/((scalar_t) spins*spins*spins),plan->numPixel);
  free(temp_vx);
  free(temp_vy);
  free(temp_vz);
}

void dflincomb_cpu(struct dfwavelet_plan_s* plan, data_t* wc1, data_t* wc2, data_t* wc3)
{
  data_t* HxLyLz1 = wc1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  data_t* HxLyLz2 = wc2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  data_t* HxLyLz3 = wc3 +
plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]; int l; for (l = 1; l <= plan->numLevels; ++l){ HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l]; HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l]; HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l]; } int dx = plan->imSize[0]; int dy = plan->imSize[1]; int dz = plan->imSize[2]; int dxNext = plan->waveSizes[0 + 3*plan->numLevels]; int dyNext = plan->waveSizes[1 + 3*plan->numLevels]; int dzNext = plan->waveSizes[2 + 3*plan->numLevels]; int blockSize = dxNext*dyNext*dzNext; int i,j,k; for (l = plan->numLevels; l >= 1; --l) { dxNext = plan->waveSizes[0 + 3*l]; dyNext = plan->waveSizes[1 + 3*l]; dzNext = plan->waveSizes[2 + 3*l]; blockSize = dxNext*dyNext*dzNext; HxLyLz1 = HxLyLz1 - 7*blockSize; HxLyLz2 = HxLyLz2 - 7*blockSize; HxLyLz3 = HxLyLz3 - 7*blockSize; data_t* LxHyLz1 = HxLyLz1 + blockSize; data_t* HxHyLz1 = LxHyLz1 + blockSize; data_t* LxLyHz1 = HxHyLz1 + blockSize; data_t* HxLyHz1 = LxLyHz1 + blockSize; data_t* LxHyHz1 = HxLyHz1 + blockSize; data_t* HxHyHz1 = LxHyHz1 + blockSize; data_t* LxHyLz2 = HxLyLz2 + blockSize; data_t* HxHyLz2 = LxHyLz2 + blockSize; data_t* LxLyHz2 = HxHyLz2 + blockSize; data_t* HxLyHz2 = LxLyHz2 + blockSize; data_t* LxHyHz2 = HxLyHz2 + blockSize; data_t* HxHyHz2 = LxHyHz2 + blockSize; data_t* LxHyLz3 = HxLyLz3 + blockSize; data_t* HxHyLz3 = LxHyLz3 + blockSize; data_t* LxLyHz3 = HxHyLz3 + blockSize; data_t* HxLyHz3 = LxLyHz3 + blockSize; data_t* LxHyHz3 = HxLyHz3 + blockSize; data_t* HxHyHz3 = LxHyHz3 + blockSize; #ifdef USE_OPENMP #pragma omp parallel for private(i,j,k) #endif for (k=0;k<dzNext;k++) for (j=0;j<dyNext;j++) for (i=0;i<dxNext;i++) { int ind = i+j*dxNext+k*dxNext*dyNext; data_t wcx100 = HxLyLz1[ind]; data_t wcy100 = HxLyLz2[ind]; data_t wcz100 = HxLyLz3[ind]; data_t wcx010 = LxHyLz1[ind]; data_t wcy010 = LxHyLz2[ind]; data_t wcz010 = LxHyLz3[ind]; data_t wcx001 = LxLyHz1[ind]; data_t wcy001 = LxLyHz2[ind]; data_t wcz001 = LxLyHz3[ind]; data_t wcx110 = HxHyLz1[ind]; data_t wcy110 = HxHyLz2[ind]; data_t wcz110 = HxHyLz3[ind]; data_t wcx101 = HxLyHz1[ind]; data_t wcy101 = HxLyHz2[ind]; data_t wcz101 = HxLyHz3[ind]; data_t wcx011 = LxHyHz1[ind]; data_t wcy011 = LxHyHz2[ind]; data_t wcz011 = LxHyHz3[ind]; data_t wcx111 = HxHyHz1[ind]; data_t wcy111 = HxHyHz2[ind]; data_t wcz111 = HxHyHz3[ind]; HxLyLz1[ind] = wcy100; LxHyLz1[ind] = wcx010; LxLyHz1[ind] = wcy001; HxLyLz2[ind] = wcz100; LxHyLz2[ind] = wcz010; LxLyHz2[ind] = wcx001; HxLyLz3[ind] = wcx100; LxHyLz3[ind] = wcy010; LxLyHz3[ind] = wcz001; HxHyLz1[ind] = 0.5*(wcx110-wcy110); HxLyHz1[ind] = 0.5*(wcz101-wcx101); LxHyHz1[ind] = 0.5*(wcy011-wcz011); HxHyLz2[ind] = wcz110; HxLyHz2[ind] = wcy101; LxHyHz2[ind] = wcx011; HxHyLz3[ind] = 0.5*(wcx110+wcy110); HxLyHz3[ind] = 0.5*(wcz101+wcx101); LxHyHz3[ind] = 0.5*(wcy011+wcz011); HxHyHz1[ind] = 1/3.*(-2*wcx111+wcy111+wcz111); HxHyHz2[ind] = 1/3.*(-wcx111+2*wcy111-wcz111); HxHyHz3[ind] = 1/3.*(wcx111+wcy111+wcz111); } #ifdef USE_OPENMP #pragma omp barrier #pragma omp parallel for private(i,j,k) #endif for (k=0;k<dzNext;k++) for (j=0;j<dyNext;j++) for (i=0;i<dxNext;i++) { int ind = i+j*dxNext+k*dxNext*dyNext; int indxs = ind-1; int indys = ind-dxNext; int indzs = ind-dxNext*dyNext; if (i==0) indxs = 0; if (j==0) indys = 0; if (k==0) indzs = 0; data_t wcy100 = HxLyLz1[ind]; data_t wcy100s = HxLyLz1[indys]; data_t wcz100 = HxLyLz2[ind]; data_t wcz100s = HxLyLz2[indzs]; data_t wcx010 = 
LxHyLz1[ind]; data_t wcx010s = LxHyLz1[indxs]; data_t wcz010 = LxHyLz2[ind]; data_t wcz010s = LxHyLz2[indzs]; data_t wcx001 = LxLyHz2[ind]; data_t wcx001s = LxLyHz2[indxs]; data_t wcy001 = LxLyHz1[ind]; data_t wcy001s = LxLyHz1[indys]; data_t wcz110 = HxHyLz2[ind]; data_t wcz110s = HxHyLz2[indzs]; data_t wcy101 = HxLyHz2[ind]; data_t wcy101s = HxLyHz2[indys]; data_t wcx011 = LxHyHz2[ind]; data_t wcx011s = LxHyHz2[indxs]; HxLyLz3[ind] = HxLyLz3[ind]+0.25*(wcy100-wcy100s+wcz100-wcz100s); LxHyLz3[ind] = LxHyLz3[ind]+0.25*(wcx010-wcx010s+wcz010-wcz010s); LxLyHz3[ind] = LxLyHz3[ind]+0.25*(wcx001-wcx001s+wcy001-wcy001s); HxHyLz3[ind] = HxHyLz3[ind] + 0.125*(wcz110-wcz110s); HxLyHz3[ind] = HxLyHz3[ind] + 0.125*(wcy101-wcy101s); LxHyHz3[ind] = LxHyHz3[ind] + 0.125*(wcx011-wcx011s); } dx = dxNext; dy = dyNext; dz = dzNext; } } void dfunlincomb_cpu(struct dfwavelet_plan_s* plan,data_t* wc1,data_t* wc2,data_t* wc3) { data_t* HxLyLz1 = wc1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]; data_t* HxLyLz2 = wc2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]; data_t* HxLyLz3 = wc3 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]; int l; for (l = 1; l <= plan->numLevels; ++l){ HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l]; HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l]; HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l]; } int dx = plan->imSize[0]; int dy = plan->imSize[1]; int dz = plan->imSize[2]; int dxNext = plan->waveSizes[0 + 3*plan->numLevels]; int dyNext = plan->waveSizes[1 + 3*plan->numLevels]; int dzNext = plan->waveSizes[2 + 3*plan->numLevels]; int blockSize = dxNext*dyNext*dzNext; int i,j,k; for (l = plan->numLevels; l >= 1; --l) { dxNext = plan->waveSizes[0 + 3*l]; dyNext = plan->waveSizes[1 + 3*l]; dzNext = plan->waveSizes[2 + 3*l]; blockSize = dxNext*dyNext*dzNext; HxLyLz1 = HxLyLz1 - 7*blockSize; HxLyLz2 = HxLyLz2 - 7*blockSize; HxLyLz3 = HxLyLz3 - 7*blockSize; data_t* LxHyLz1 = HxLyLz1 + blockSize; data_t* HxHyLz1 = LxHyLz1 + blockSize; data_t* LxLyHz1 = HxHyLz1 + blockSize; data_t* HxLyHz1 = LxLyHz1 + blockSize; data_t* LxHyHz1 = HxLyHz1 + blockSize; data_t* HxHyHz1 = LxHyHz1 + blockSize; data_t* LxHyLz2 = HxLyLz2 + blockSize; data_t* HxHyLz2 = LxHyLz2 + blockSize; data_t* LxLyHz2 = HxHyLz2 + blockSize; data_t* HxLyHz2 = LxLyHz2 + blockSize; data_t* LxHyHz2 = HxLyHz2 + blockSize; data_t* HxHyHz2 = LxHyHz2 + blockSize; data_t* LxHyLz3 = HxLyLz3 + blockSize; data_t* HxHyLz3 = LxHyLz3 + blockSize; data_t* LxLyHz3 = HxHyLz3 + blockSize; data_t* HxLyHz3 = LxLyHz3 + blockSize; data_t* LxHyHz3 = HxLyHz3 + blockSize; data_t* HxHyHz3 = LxHyHz3 + blockSize; #ifdef USE_OPENMP #pragma omp parallel for private(i,j,k) #endif for (k=0;k<dzNext;k++) for (j=0;j<dyNext;j++) for (i=0;i<dxNext;i++) { int ind = i+j*dxNext+k*dxNext*dyNext; data_t df1_100 = HxLyLz1[ind]; data_t df2_100 = HxLyLz2[ind]; data_t n_100 = HxLyLz3[ind]; data_t df1_010 = LxHyLz1[ind]; data_t df2_010 = LxHyLz2[ind]; data_t n_010 = LxHyLz3[ind]; data_t df1_001 = LxLyHz1[ind]; data_t df2_001 = LxLyHz2[ind]; data_t n_001 = LxLyHz3[ind]; data_t df1_110 = HxHyLz1[ind]; data_t df2_110 = HxHyLz2[ind]; data_t n_110 = HxHyLz3[ind]; data_t df1_101 = HxLyHz1[ind]; data_t df2_101 = HxLyHz2[ind]; data_t n_101 = HxLyHz3[ind]; data_t df1_011 = LxHyHz1[ind]; data_t df2_011 = LxHyHz2[ind]; data_t n_011 = LxHyHz3[ind]; data_t df1_111 = HxHyHz1[ind]; data_t df2_111 = HxHyHz2[ind]; data_t n_111 = 
HxHyHz3[ind]; HxLyLz2[ind] = df1_100; LxHyLz1[ind] = df1_010; LxLyHz2[ind] = df1_001; HxLyLz3[ind] = df2_100; LxHyLz3[ind] = df2_010; LxLyHz1[ind] = df2_001; HxLyLz1[ind] = n_100; LxHyLz2[ind] = n_010; LxLyHz3[ind] = n_001; HxHyLz3[ind] = df2_110; HxLyHz2[ind] = df2_101; LxHyHz1[ind] = df2_011; HxHyLz1[ind] = (df1_110+n_110); HxLyHz3[ind] = (df1_101+n_101); LxHyHz2[ind] = (df1_011+n_011); HxHyLz2[ind] = (-df1_110+n_110); HxLyHz1[ind] = (-df1_101+n_101); LxHyHz3[ind] = (-df1_011+n_011); HxHyHz1[ind] = (-df1_111+n_111); HxHyHz2[ind] = (df2_111+n_111); HxHyHz3[ind] = df1_111-df2_111+n_111; } #ifdef USE_OPENMP #pragma omp barrier #pragma omp parallel for private(i,j,k) #endif for (k=0;k<dzNext;k++) for (j=0;j<dyNext;j++) for (i=0;i<dxNext;i++) { int ind = i+j*dxNext+k*dxNext*dyNext; int indxs = ind-1; int indys = ind-dxNext; int indzs = ind-dxNext*dyNext; if (i==0) indxs = 0; if (j==0) indys = 0; if (k==0) indzs = 0; data_t df1_100 = HxLyLz2[ind]; data_t df1_100s = HxLyLz2[indys]; data_t df2_100 = HxLyLz3[ind]; data_t df2_100s = HxLyLz3[indzs]; data_t df1_010 = LxHyLz1[ind]; data_t df1_010s = LxHyLz1[indxs]; data_t df2_010 = LxHyLz3[ind]; data_t df2_010s = LxHyLz3[indzs]; data_t df2_001 = LxLyHz1[ind]; data_t df2_001s = LxLyHz1[indxs]; data_t df1_001 = LxLyHz2[ind]; data_t df1_001s = LxLyHz2[indys]; data_t df2_110 = HxHyLz3[ind]; data_t df2_110s = HxHyLz3[indzs]; data_t df2_101 = HxLyHz2[ind]; data_t df2_101s = HxLyHz2[indys]; data_t df2_011 = LxHyHz1[ind]; data_t df2_011s = LxHyHz1[indxs]; HxLyLz1[ind] = HxLyLz1[ind]-0.25*(df1_100-df1_100s+df2_100-df2_100s); LxHyLz2[ind] = LxHyLz2[ind]-0.25*(df1_010-df1_010s+df2_010-df2_010s); LxLyHz3[ind] = LxLyHz3[ind]-0.25*(df2_001-df2_001s+df1_001-df1_001s); HxHyLz1[ind] = HxHyLz1[ind] - 0.125*(df2_110-df2_110s); HxLyHz3[ind] = HxLyHz3[ind] - 0.125*(df2_101-df2_101s); LxHyHz2[ind] = LxHyHz2[ind] - 0.125*(df2_011-df2_011s); HxHyLz2[ind] = HxHyLz2[ind] - 0.125*(df2_110-df2_110s); HxLyHz1[ind] = HxLyHz1[ind] - 0.125*(df2_101-df2_101s); LxHyHz3[ind] = LxHyHz3[ind] - 0.125*(df2_011-df2_011s); } dx = dxNext; dy = dyNext; dz = dzNext; } } int compare (const void * a, const void * b) { if ( *(scalar_t*)a < *(scalar_t*)b ) return -1; if ( *(scalar_t*)a == *(scalar_t*)b ) return 0; if ( *(scalar_t*)a > *(scalar_t*)b ) return 1; return 0; } // Use Median Absolute Deviation on the last subband to estimate sigma // MAD = median(abs(x_i-median(x_i))). 
// Assume median(x_i) = 0, so MAD = median(abs(x_i));
// sigma = 1.4826*MAD
scalar_t getMADsigma_cpu(struct dfwavelet_plan_s* plan, data_t* wcn)
{
  get_noise_amp(plan);
  data_t* subband = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  int l;
  for (l = 1; l <= plan->numLevels; ++l) {
    subband += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
  }
  int dx = plan->waveSizes[0 + 3*plan->numLevels];
  int dy = plan->waveSizes[1 + 3*plan->numLevels];
  int dz = plan->waveSizes[2 + 3*plan->numLevels];
  int blockSize = dx*dy*dz;
  int numZeros = (int) (plan->percentZero*blockSize);
  int num = blockSize-numZeros;
  subband = subband - blockSize;
  scalar_t *subband2 = (scalar_t *) malloc(sizeof(scalar_t)*blockSize);
  int i;
  for (i=0;i<blockSize;i++) {
    scalar_t t = fabs(subband[i]);
    subband2[i] = t*t;
  }
  qsort(subband2,blockSize,sizeof(scalar_t),compare);
  scalar_t sigma = 1.4826*sqrt(subband2[numZeros+num/2]);
  // Scale by 1/noiseAmp for that subband
  sigma = sigma/plan->noiseAmp[20];
  free(subband2);
  return sigma;
}

void dfSUREshrink_cpu(struct dfwavelet_plan_s* plan, scalar_t sigma, data_t* wcdf1, data_t* wcdf2, data_t* wcn)
{
  get_noise_amp(plan);
  scalar_t percentZero = plan->percentZero;
  data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  int l;
  for (l = 1; l <= plan->numLevels; ++l) {
    HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
    HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
    HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
  }
  int dxNext, dyNext, dzNext, blockSize;
  int naInd = 0;
  for (l = plan->numLevels; l >= 1; --l) {
    dxNext = plan->waveSizes[0 + 3*l];
    dyNext = plan->waveSizes[1 + 3*l];
    dzNext = plan->waveSizes[2 + 3*l];
    blockSize = dxNext*dyNext*dzNext;
    HxLyLz1 = HxLyLz1 - 7*blockSize;
    HxLyLz2 = HxLyLz2 - 7*blockSize;
    HxLyLz3 = HxLyLz3 - 7*blockSize;
    int bandInd;
#ifdef USE_OPENMP
#pragma omp parallel for private(bandInd)
#endif
    for (bandInd=0; bandInd<7*3; bandInd++) {
      // Each band gets its own scratch buffer and an explicit noiseAmp
      // index, so iterations stay independent when OpenMP is enabled.
      scalar_t *subband2 = (scalar_t*) malloc(sizeof(scalar_t)*blockSize);
      data_t *subband;
      if (bandInd<7) {
        subband = HxLyLz1 + bandInd*blockSize;
      } else if (bandInd<14) {
        subband = HxLyLz2 + (bandInd-7)*blockSize;
      } else {
        subband = HxLyLz3 + (bandInd-14)*blockSize;
      }
      // Scale sigma by noise amplification in each subband
      scalar_t sigma_band = sigma*plan->noiseAmp[naInd + bandInd];
      scalar_t sigma2 = sigma_band*sigma_band;
      // Get Energy and Density
      int i;
      scalar_t ener=0;
      for (i=0;i<blockSize;i++) {
        scalar_t t = fabs(subband[i]);
        subband2[i] = t*t;
        ener += t*t;
      }
      qsort(subband2,blockSize,sizeof(scalar_t),compare);
      // N = blockSize*(1-%zero)
      int numZeros = (int) (percentZero*blockSize);
      int num = blockSize-numZeros;
      scalar_t thresh=0;
      // Choose visuShrink or sureShrink
      /*scalar_t density = (ener/num/sigma2-1);
      scalar_t critical = (scalar_t) pow(log2 ((double) num),1.5)/sqrt(num);
      if (density < critical) {
        thresh = sqrt(sigma2*2*log(num)); //visuShrink
      } else*/ {
        // Get optimal SURE threshold
        scalar_t min_risk = 3e38;
        scalar_t cum_sum = 0;
        for (i=numZeros;i<blockSize;i++) {
          scalar_t t2 = subband2[i];
          scalar_t risk = -(2*sigma2*(i-numZeros+1))+cum_sum+t2*(blockSize-i-1);
          cum_sum = cum_sum + t2;
          if (risk < min_risk) {
            min_risk = risk;
            thresh = sqrt(t2);
          }
        }
        if (thresh > (scalar_t) sqrt(sigma2 * 2.0 * log((scalar_t) num)))
          thresh = (scalar_t) sqrt(sigma2 * 2.0 * log((scalar_t) num));
      }
      // SoftThresh
      for (i=0;i<blockSize;i++) {
        scalar_t norm = fabs(subband[i]);
        scalar_t red = norm - thresh;
        subband[i] = (red > 0.) ? ((red / norm) * (subband[i])) : 0.;
      }
      free(subband2);
    }
    naInd += 7*3;
  }
}

void fwt3_cpu(struct dfwavelet_plan_s* plan, data_t* coeff, data_t* inImage, int dir)
{
  circshift_cpu(plan,inImage);
  data_t* origInImage = inImage;
  data_t* HxLyLz = coeff + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
  int l;
  for (l = 1; l <= plan->numLevels; ++l) {
    HxLyLz += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
  }
  int dx = plan->imSize[0];
  int dy = plan->imSize[1];
  int dz = plan->imSize[2];
  int dxNext = plan->waveSizes[0 + 3*plan->numLevels];
  int dyNext = plan->waveSizes[1 + 3*plan->numLevels];
  int dzNext = plan->waveSizes[2 + 3*plan->numLevels];
  int blockSize = dxNext*dyNext*dzNext;
  data_t* LxLyLz = (data_t*) malloc(sizeof(data_t)*blockSize);
  data_t* tempz = (data_t*) malloc(sizeof(data_t)*dx*dy*dzNext);
  data_t* tempyz = (data_t*) malloc(sizeof(data_t)*dx*dyNext*dzNext);
  data_t* tempxyz = (data_t*) malloc(sizeof(data_t)*blockSize);
  // Assign Filters
  scalar_t *lodx,*lody,*lodz,*hidx,*hidy,*hidz;
  lodx = plan->lod0;
  lody = plan->lod0;
  lodz = plan->lod0;
  hidx = plan->hid0;
  hidy = plan->hid0;
  hidz = plan->hid0;
  if (dir==0) {
    lodx = plan->lod1;
    hidx = plan->hid1;
  }
  if (dir==1) {
    lody = plan->lod1;
    hidy = plan->hid1;
  }
  if (dir==2) {
    lodz = plan->lod1;
    hidz = plan->hid1;
  }
  for (l = plan->numLevels; l >= 1; --l) {
    dxNext = plan->waveSizes[0 + 3*l];
    dyNext = plan->waveSizes[1 + 3*l];
    dzNext = plan->waveSizes[2 + 3*l];
    blockSize = dxNext*dyNext*dzNext;
    HxLyLz = HxLyLz - 7*blockSize;
    data_t* LxHyLz = HxLyLz + blockSize;
    data_t* HxHyLz = LxHyLz + blockSize;
    data_t* LxLyHz = HxHyLz + blockSize;
    data_t* HxLyHz = LxLyHz + blockSize;
    data_t* LxHyHz = HxLyHz + blockSize;
    data_t* HxHyHz = LxHyHz + blockSize;
    int dxy = dx*dy;
    int newdz = (dz + plan->filterLen-1) / 2;
    int newdy = (dy + plan->filterLen-1) / 2;
    int newdxy = dx*newdy;
    // Lz
    conv_down_3d(tempz, inImage, dz, dxy, dx, 1, dy, dx, lodz,plan->filterLen);
    // LyLz
    conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, lody,plan->filterLen);
    conv_down_3d(LxLyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
    conv_down_3d(HxLyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
    // HyLz
    conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, hidy,plan->filterLen);
    conv_down_3d(LxHyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
    conv_down_3d(HxHyLz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
    // Hz
    conv_down_3d(tempz, inImage, dz, dxy, dx, 1, dy, dx, hidz,plan->filterLen);
    // LyHz
    conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, lody,plan->filterLen);
    conv_down_3d(LxLyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
    conv_down_3d(HxLyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
    // HyHz
    conv_down_3d(tempyz, tempz, dy, dx, dx, 1, newdz, dxy, hidy,plan->filterLen);
    conv_down_3d(LxHyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, lodx,plan->filterLen);
    conv_down_3d(HxHyHz, tempyz, dx, 1, newdy, dx, newdz, newdxy, hidx,plan->filterLen);
memcpy(tempxyz, LxLyLz, blockSize*sizeof(data_t)); inImage = tempxyz; dx = dxNext; dy = dyNext; dz = dzNext; } // Final LxLyLz memcpy(coeff, inImage, plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2]*sizeof(data_t)); free(LxLyLz); free(tempz); free(tempyz); free(tempxyz); circunshift_cpu(plan,origInImage); } void iwt3_cpu(struct dfwavelet_plan_s* plan, data_t* outImage, data_t* coeff,int dir) { // Workspace dimensions int dxWork = plan->waveSizes[0 + 3*plan->numLevels]*2-1 + plan->filterLen-1; int dyWork = plan->waveSizes[1 + 3*plan->numLevels]*2-1 + plan->filterLen-1; int dzWork = plan->waveSizes[2 + 3*plan->numLevels]*2-1 + plan->filterLen-1; int dyWork2 = plan->waveSizes[1 + 3*(plan->numLevels-1)]*2-1 + plan->filterLen-1; int dzWork2 = plan->waveSizes[2 + 3*(plan->numLevels-1)]*2-1 + plan->filterLen-1; // Workspace data_t* tempyz = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork2*dzWork2); data_t* tempz = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork*dzWork2); data_t* tempFull = (data_t*) malloc(sizeof(data_t)*dxWork*dyWork*dzWork); int dx = plan->waveSizes[0]; int dy = plan->waveSizes[1]; int dz = plan->waveSizes[2]; // Assign Filters scalar_t *lorx,*lory,*lorz,*hirx,*hiry,*hirz; lorx = plan->lor0; lory = plan->lor0; lorz = plan->lor0; hirx = plan->hir0; hiry = plan->hir0; hirz = plan->hir0; if (dir==0) { lorx = plan->lor1; hirx = plan->hir1; } if (dir==1) { lory = plan->lor1; hiry = plan->hir1; } if (dir==2) { lorz = plan->lor1; hirz = plan->hir1; } memcpy(outImage, coeff, dx*dy*dz*sizeof(data_t)); data_t* HxLyLz = coeff + dx*dy*dz; int level; for (level = 1; level < plan->numLevels+1; ++level) { dx = plan->waveSizes[0 + 3*level]; dy = plan->waveSizes[1 + 3*level]; dz = plan->waveSizes[2 + 3*level]; int blockSize = dx*dy*dz; data_t* LxHyLz = HxLyLz + blockSize; data_t* HxHyLz = LxHyLz + blockSize; data_t* LxLyHz = HxHyLz + blockSize; data_t* HxLyHz = LxLyHz + blockSize; data_t* LxHyHz = HxLyHz + blockSize; data_t* HxHyHz = LxHyHz + blockSize; data_t* LxLyLz = outImage; int newdx = 2*dx-1 + plan->filterLen-1; int newdy = 2*dy-1 + plan->filterLen-1; int newdz = 2*dz-1 + plan->filterLen-1; int dxy = dx*dy; int newdxy = newdx*dy; int newnewdxy = newdx*newdy; memset(tempFull, 0, newnewdxy*newdz*sizeof(data_t)); memset(tempz, 0, newnewdxy*dz*sizeof(data_t)); memset(tempyz, 0, newdxy*dz*sizeof(data_t)); conv_up_3d(tempyz, LxLyLz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen); conv_up_3d(tempyz, HxLyLz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen); conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, lory,plan->filterLen); memset(tempyz, 0, newdxy*dz*sizeof(data_t)); conv_up_3d(tempyz, LxHyLz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen); conv_up_3d(tempyz, HxHyLz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen); conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, hiry,plan->filterLen); conv_up_3d(tempFull, tempz, dz, newnewdxy, newdx, 1, newdy, newdx, lorz,plan->filterLen); memset(tempz, 0, newnewdxy*dz*sizeof(data_t)); memset(tempyz, 0, newdxy*dz*sizeof(data_t)); conv_up_3d(tempyz, LxLyHz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen); conv_up_3d(tempyz, HxLyHz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen); conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, lory,plan->filterLen); memset(tempyz, 0, newdxy*dz*sizeof(data_t)); conv_up_3d(tempyz, LxHyHz, dx, 1, dy, dx, dz, dxy, lorx,plan->filterLen); conv_up_3d(tempyz, HxHyHz, dx, 1, dy, dx, dz, dxy, hirx,plan->filterLen); conv_up_3d(tempz, tempyz, dy, newdx, newdx, 1, dz, newdxy, hiry,plan->filterLen); 
conv_up_3d(tempFull, tempz, dz, newnewdxy, newdx, 1, newdy, newdx, hirz,plan->filterLen); // Crop center of workspace int dxNext = plan->waveSizes[0+3*(level+1)]; int dyNext = plan->waveSizes[1+3*(level+1)]; int dzNext = plan->waveSizes[2+3*(level+1)]; int dxyNext = dxNext*dyNext; dxWork = (2*dx-1 + plan->filterLen-1); dyWork = (2*dy-1 + plan->filterLen-1); dzWork = (2*dz-1 + plan->filterLen-1); int dxyWork = dxWork*dyWork; int xOffset = (int) ((dxWork - dxNext) / 2.0); int yOffset = (int) ((dyWork - dyNext) / 2.0); int zOffset = (int) ((dzWork - dzNext) / 2.0); int k,j; for (k = 0; k < dzNext; ++k){ for (j = 0; j < dyNext; ++j){ memcpy(outImage+j*dxNext + k*dxyNext, tempFull+xOffset + (yOffset+j)*dxWork + (zOffset+k)*dxyWork, dxNext*sizeof(data_t)); } } HxLyLz += 7*blockSize; } free(tempyz); free(tempz); free(tempFull); circunshift_cpu(plan,outImage); } void softthresh_cpu(struct dfwavelet_plan_s* plan, int length, scalar_t thresh, data_t* coeff) { int i; #ifdef USE_OPENMP #pragma omp parallel for #endif for(i = 0; i < length; i++) { scalar_t norm = fabs(coeff[i]); scalar_t red = norm - thresh; coeff[i] = (red > 0.) ? ((red / norm) * (coeff[i])) : 0.; } } void circshift_cpu(struct dfwavelet_plan_s* plan, data_t *data) { // Return if no shifts int zeroShift = 1; int i; for (i = 0; i< plan->numdims; i++) { zeroShift &= (plan->randShift[i]==0); } if(zeroShift) { return; } // Copy data data_t* dataCopy = (data_t*) malloc(sizeof(data_t)*plan->numPixel); memcpy(dataCopy, data, plan->numPixel*sizeof(data_t)); if (plan->numdims==2) { int dx,dy,r0,r1,j,i,index,indexShifted; dx = plan->imSize[0]; dy = plan->imSize[1]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; #ifdef USE_OPENMP #pragma omp parallel for private(index, j, i,indexShifted) #endif for(j = 0; j < dy; j++) { for(i = 0; i < dx; i++) { index = i+j*dx; indexShifted = (((i+r0) + (j+r1)*dx)%(dx*dy)+dx*dy)%(dx*dy); data[indexShifted] = dataCopy[index]; } } } if (plan->numdims==3) { int dx,dy,dz,r0,r1,r2,k,j,i,index,indexShifted; dx = plan->imSize[0]; dy = plan->imSize[1]; dz = plan->imSize[2]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; r2 = plan->randShift[2]; #ifdef USE_OPENMP #pragma omp parallel for private(index, k, j, i,indexShifted) #endif for (k = 0; k < dz; k++) { for(j = 0; j < dy; j++) { for(i = 0; i < dx; i++) { index = i+j*dx+k*dx*dy; indexShifted = ((i+r0 + (j+r1)*dx + (k+r2)*dx*dy)%(dx*dy*dz)+(dx*dy*dz))%(dx*dy*dz); data[indexShifted] = dataCopy[index]; } } } } #ifdef USE_OPENMP #pragma omp barrier #endif free(dataCopy); } void circunshift_cpu(struct dfwavelet_plan_s* plan, data_t *data) { // Return if no shifts int zeroShift = 1; int i; for (i = 0; i< plan->numdims; i++) { zeroShift &= (plan->randShift[i]==0); } if(zeroShift) { return; } // Copy data data_t* dataCopy = (data_t*) malloc(sizeof(data_t)*plan->numPixel); memcpy(dataCopy, data, plan->numPixel*sizeof(data_t)); if (plan->numdims==2) { int dx,dy,r0,r1,j,i,index,indexShifted; dx = plan->imSize[0]; dy = plan->imSize[1]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; #ifdef USE_OPENMP #pragma omp parallel for private(index, j, i,indexShifted) #endif for(j = 0; j < dy; j++) { for(i = 0; i < dx; i++) { index = i+j*dx; indexShifted = (((i+r0) + (j+r1)*dx)%(dx*dy)+dx*dy)%(dx*dy); data[index] = dataCopy[indexShifted]; } } } if (plan->numdims==3) { int dx,dy,dz,r0,r1,r2,k,j,i,index,indexShifted; dx = plan->imSize[0]; dy = plan->imSize[1]; dz = plan->imSize[2]; r0 = plan->randShift[0]; r1 = plan->randShift[1]; r2 = plan->randShift[2]; #ifdef USE_OPENMP #pragma omp 
parallel for private(index, k, j, i,indexShifted) #endif for (k = 0; k < dz; k++) { for(j = 0; j < dy; j++) { for(i = 0; i < dx; i++) { index = i+j*dx+k*dx*dy; indexShifted = ((i+r0 + (j+r1)*dx + (k+r2)*dx*dy)%(dx*dy*dz)+(dx*dy*dz))%(dx*dy*dz); data[index] = dataCopy[indexShifted]; } } } } free(dataCopy); } /********** Helper Function *********/ void conv_down_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen) { int outSize1 = (size1 + filterLen-1) / 2; // Adjust out skip 2 and 3 if needed int outSkip2; if(skip2 > skip1) { outSkip2 = outSize1*skip2/size1; } else { outSkip2 = skip2; } int outSkip3; if(skip3 > skip1) { outSkip3 = outSize1*skip3/size1; } else { outSkip3 = skip3; } int i32; #ifdef USE_OPENMP #pragma omp parallel for #endif for (i32 = 0; i32 < size2*size3; ++i32) { int i2 = i32 % size2; int i3 = i32 / size2; int i1; for (i1 = 0; i1 < outSize1; ++i1) { out[i3*outSkip3 + i2*outSkip2 + i1*skip1] = 0.0f; int k; for (k = 0; k < filterLen; ++k) { int out_i1 = 2*i1+1 - (filterLen-1) + k; if (out_i1 < 0) out_i1 = -out_i1-1; if (out_i1 >= size1) out_i1 = size1-1 - (out_i1-size1); out[i3*outSkip3 + i2*outSkip2 + i1*skip1] += in[i3*skip3 + i2*skip2 + out_i1*skip1] * filter[filterLen-1-k]; } } } } void conv_up_3d(data_t *out, data_t *in, int size1, int skip1, int size2, int skip2, int size3, int skip3, scalar_t *filter, int filterLen) { int outSize1 = 2*size1-1 + filterLen-1; // Adjust out skip 2 and 3 if needed int outSkip2; if(skip2 > skip1) { outSkip2 = outSize1*skip2/size1; } else { outSkip2 = skip2; } int outSkip3; if(skip3 > skip1) { outSkip3 = outSize1*skip3/size1; } else { outSkip3 = skip3; } int i32; #ifdef USE_OPENMP #pragma omp parallel for #endif for (i32 = 0; i32 < size2*size3; ++i32) { int i2 = i32 % size2; int i3 = i32 / size2; int i1; for (i1 = 0; i1 < outSize1; ++i1) { int k; for (k = (i1 - (filterLen-1)) & 1; k < filterLen; k += 2){ int in_i1 = (i1 - (filterLen-1) + k) >> 1; if (in_i1 >= 0 && in_i1 < size1) out[i3*outSkip3 + i2*outSkip2 + i1*skip1] += in[i3*skip3 + i2*skip2 + in_i1*skip1] * filter[filterLen-1-k]; } } } } void add(data_t* out,data_t* in,int numMax) { int i; for(i=0; i<numMax;i++) out[i]+=in[i]; } void mult(data_t* in,scalar_t scale,int numMax) { int i; for(i=0; i<numMax;i++) in[i]*=scale; } void create_numLevels(struct dfwavelet_plan_s* plan) { int numdims = plan->numdims; int filterLen = plan->filterLen; int bandSize, l, minSize; plan->numLevels = 10000000; int d; for (d = 0; d < numdims; d++) { bandSize = plan->imSize[d]; minSize = plan->minSize[d]; l = 0; while (bandSize > minSize) { ++l; bandSize = (bandSize + filterLen - 1) / 2; } l--; plan->numLevels = (l < plan->numLevels) ? 
l : plan->numLevels; } } void create_wavelet_sizes(struct dfwavelet_plan_s* plan) { int numdims = plan->numdims; int filterLen = plan->filterLen; int numLevels = plan->numLevels; int numSubCoef; plan->waveSizes = (int*) malloc(sizeof(int)*numdims*(numLevels+2)); // Get number of subband per level, (3 for 2d, 7 for 3d) // Set the last bandSize to be imSize int d,l; int numSubband = 1; for (d = 0; d<numdims; d++) { plan->waveSizes[d + numdims*(numLevels+1)] = plan->imSize[d]; numSubband <<= 1; } numSubband--; // Get numCoeff and waveSizes // Each bandSize[l] is (bandSize[l+1] + filterLen - 1)/2 plan->numCoeff = 0; for (l = plan->numLevels; l >= 1; --l) { numSubCoef = 1; for (d = 0; d < numdims; d++) { plan->waveSizes[d + numdims*l] = (plan->waveSizes[d + numdims*(l+1)] + filterLen - 1) / 2; numSubCoef *= plan->waveSizes[d + numdims*l]; } plan->numCoeff += numSubband*numSubCoef; if (l==1) plan->numCoarse = numSubCoef; } numSubCoef = 1; for (d = 0; d < numdims; d++) { plan->waveSizes[d] = plan->waveSizes[numdims+d]; numSubCoef *= plan->waveSizes[d]; } plan->numCoeff += numSubCoef; } /* All filter coefficients are obtained from http://wavelets.pybytes.com/ */ void create_wavelet_filters(struct dfwavelet_plan_s* plan) { int filterLen = 0; scalar_t* filter1, *filter2; filterLen = 6; // CDF 2.2 and CDF 3.1 Wavelet scalar_t cdf22[] = { 0.0,-0.17677669529663689,0.35355339059327379,1.0606601717798214,0.35355339059327379,-0.17677669529663689, 0.0,0.35355339059327379,-0.70710678118654757,0.35355339059327379,0.0,0.0, 0.0,0.35355339059327379,0.70710678118654757,0.35355339059327379,0.0,0.0, 0.0,0.17677669529663689,0.35355339059327379,-1.0606601717798214,0.35355339059327379,0.17677669529663689 }; scalar_t cdf31[] = { 0.0,-0.35355339059327379,1.0606601717798214,1.0606601717798214,-0.35355339059327379,0.0 , 0.0,-0.17677669529663689,0.53033008588991071,-0.53033008588991071,0.17677669529663689,0.0, 0.0,0.17677669529663689,0.53033008588991071,0.53033008588991071,0.17677669529663689,0.0, 0.0,-0.35355339059327379,-1.0606601717798214,1.0606601717798214,0.35355339059327379,0.0 }; filter1 = cdf22; filter2 = cdf31; // Allocate filters contiguously (for convenience) plan->filterLen = filterLen; plan->lod0 = (scalar_t*) malloc(sizeof(scalar_t) * 4 * filterLen); memcpy(plan->lod0, filter1, 4*filterLen*sizeof(scalar_t)); plan->lod1 = (scalar_t*) malloc(sizeof(scalar_t) * 4 * filterLen); memcpy(plan->lod1, filter2, 4*filterLen*sizeof(scalar_t)); plan->hid0 = plan->lod0 + 1*filterLen; plan->lor0 = plan->lod0 + 2*filterLen; plan->hir0 = plan->lod0 + 3*filterLen; plan->hid1 = plan->lod1 + 1*filterLen; plan->lor1 = plan->lod1 + 2*filterLen; plan->hir1 = plan->lod1 + 3*filterLen; } void print_array(char* prefix,int* array,int length) { int i; printf("\t%s:\t",prefix); for (i=0; i<length; i++) { if (i==length-1) printf("%d\n",array[i]); else printf("%d, ",array[i]); } } void print_filter(char* prefix,scalar_t* array,int length) { int i; printf("\t%s:\t",prefix); for (i=0; i<length; i++) { if (i==length-1) printf("%.3f\n",array[i]); else printf("%.3f, ",array[i]); } } void print_plan(struct dfwavelet_plan_s* plan) { printf("Parameters for Input/Output:\n"); print_array("use_gpu",&(plan->use_gpu),1); print_array("numdims",&(plan->numdims),1); print_array("imSize ",plan->imSize,plan->numdims); printf("\tres :\t%.1f, %.1f, %.1f\n",plan->res[0],plan->res[1],plan->res[2]); print_array("numPixel",&(plan->numPixel),1); print_array("numCoeff",&(plan->numCoeff),1); printf("\tmemory (float):\t%.1f 
MegaBytes\n",plan->numCoeff*3.*4./1024./1024.);
  printf("Parameters for each wavelet transform:\n");
  print_array("minSize",plan->minSize,plan->numdims);
  print_array("numCoarse",&(plan->numCoarse),1);
  print_array("waveSizes",plan->waveSizes,plan->numdims*(plan->numLevels+2));
  print_array("numLevels",&(plan->numLevels),1);
  print_array("randShift",plan->randShift,plan->numdims);
  printf("Parameters for filters:\n");
  print_array("filter_Len",&(plan->filterLen),1);
  print_filter("lod0\t",plan->lod0,plan->filterLen);
  print_filter("hid0\t",plan->hid0,plan->filterLen);
  print_filter("lor0\t",plan->lor0,plan->filterLen);
  print_filter("hir0\t",plan->hir0,plan->filterLen);
  print_filter("lod1\t",plan->lod1,plan->filterLen);
  print_filter("hid1\t",plan->hid1,plan->filterLen);
  print_filter("lor1\t",plan->lor1,plan->filterLen);
  print_filter("hir1\t",plan->hir1,plan->filterLen);
}

void count_zeros_cpu(struct dfwavelet_plan_s* plan, data_t* vx, data_t* vy, data_t* vz)
{
  if (plan->percentZero==-1) {
    int i;
    scalar_t percentZero = 0;
    for (i=0;i<plan->numPixel;i++)
      if ((vx[i]==0.) && (vy[i]==0.) && (vz[i]==0.))
        percentZero += 1.;
    plan->percentZero = percentZero/plan->numPixel;
  }
}

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

data_t drand()  /* uniform distribution, (0..1] */
{
  return (rand()+1.0)/(RAND_MAX+1.0);
}

void random_normal(data_t* in, int length)  /* normal distribution, centered on 0, std dev 1 */
{
  int i;
  for (i=0;i<length;i++)
    in[i] = sqrt(-2*log(drand())) * cos(2*M_PI*drand());
}

void get_noise_amp(struct dfwavelet_plan_s* plan)
{
  if (plan->noiseAmp==NULL) {
    // Generate Gaussian w/ mean=0, std=1 data
    data_t *vx,*vy,*vz;
    data_t *wcdf1,*wcdf2,*wcn;
    vx = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
    vy = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
    vz = (data_t*) malloc(sizeof(data_t)*plan->numPixel);
    random_normal(vx,plan->numPixel);
    random_normal(vy,plan->numPixel);
    random_normal(vz,plan->numPixel);
    wcdf1 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
    wcdf2 = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
    wcn = (data_t*) malloc(sizeof(data_t)*plan->numCoeff);
    // Get Wavelet Coefficients
    int temp_use_gpu = plan->use_gpu;
    if (plan->use_gpu==1)
      plan->use_gpu = 2;
    dfwavelet_forward(plan,wcdf1,wcdf2,wcn,vx,vy,vz);
    plan->use_gpu = temp_use_gpu;
    // Get Noise Amp for each subband
    data_t* HxLyLz1 = wcdf1 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
    data_t* HxLyLz2 = wcdf2 + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
    data_t* HxLyLz3 = wcn + plan->waveSizes[0]*plan->waveSizes[1]*plan->waveSizes[2];
    int l;
    for (l = 1; l <= plan->numLevels; ++l) {
      HxLyLz1 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
      HxLyLz2 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
      HxLyLz3 += 7*plan->waveSizes[0 + 3*l]*plan->waveSizes[1 + 3*l]*plan->waveSizes[2 + 3*l];
    }
    int numBand = 7*plan->numLevels*3;
    plan->noiseAmp = (scalar_t*) malloc(sizeof(scalar_t)*numBand);
    int naInd = 0;
    for (l = plan->numLevels; l >= 1; --l) {
      int dxNext = plan->waveSizes[0 + 3*l];
      int dyNext = plan->waveSizes[1 + 3*l];
      int dzNext = plan->waveSizes[2 + 3*l];
      int blockSize = dxNext*dyNext*dzNext;
      HxLyLz1 = HxLyLz1 - 7*blockSize;
      HxLyLz2 = HxLyLz2 - 7*blockSize;
      HxLyLz3 = HxLyLz3 - 7*blockSize;
      int bandInd;
#ifdef USE_OPENMP
#pragma omp parallel for private(bandInd)
#endif
      for (bandInd=0; bandInd<7*3; bandInd++) {
        data_t *subband;
        if (bandInd<7) {
          subband = HxLyLz1 + bandInd*blockSize;
        } else if (bandInd<14) {
          subband = HxLyLz2 + (bandInd-7)*blockSize;
        } else {
          subband = HxLyLz3 + (bandInd-14)*blockSize;
        }
        // Running (Welford) mean/variance over the subband
        data_t sig = 0;
        data_t mean = 0;
        data_t mean_old;
        int i;
        for (i=0; i<blockSize; i++) {
          scalar_t x = subband[i];
          mean_old = mean;
          mean = mean_old + (x-mean_old)/(i+1);
          sig = sig + (x - mean_old)*(x-mean);
        }
        sig = sqrt(sig/(blockSize-1));
        // Index by naInd + bandInd so iterations stay independent under OpenMP
        plan->noiseAmp[naInd + bandInd] = sig;
      }
      naInd += 7*3;
    }
    free(vx);
    free(vy);
    free(vz);
    free(wcdf1);
    free(wcdf2);
    free(wcn);
  }
}
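A hedged usage sketch of the public API defined in this file (illustrative only: the volume dimensions, threshold values, and the assumption that dfwavelet.h declares these entry points and the data_t/scalar_t typedefs are example choices, not taken from the original source):

/* Usage sketch (assumptions flagged above): build a plan for a 64^3
   velocity field, soft-threshold it on the CPU path, and clean up. */
#include <stdlib.h>
#include "dfwavelet.h"

void denoise_example(data_t* vx, data_t* vy, data_t* vz,
                     data_t* out_vx, data_t* out_vy, data_t* out_vz)
{
  int imSize[3] = { 64, 64, 64 };      /* assumed volume size */
  int minSize[3] = { 8, 8, 8 };        /* coarsest-level size */
  scalar_t res[3] = { 1., 1., 1. };    /* isotropic voxel resolution */
  struct dfwavelet_plan_s* plan =
    prepare_dfwavelet_plan(3, imSize, minSize, res, 0 /* use_gpu=0: CPU */);
  /* dfthresh scales the divergence-free bands, nthresh the noise bands;
     0.1 is an arbitrary example value, not a recommendation. */
  dfwavelet_thresh(plan, 0.1, 0.1, out_vx, out_vy, out_vz, vx, vy, vz);
  dfwavelet_free(plan);
}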
concattest3.c
#include <stdlib.h>
#include "concattest3.h"

void concattest3(float* l, int m, int n, float* output)
{
  // Generated concatenation kernel: the first inner loop copies column 0,
  // the second copies columns 1..n-1 (the original index (H8 - 1) + 1
  // reduces to H8), so together they copy the whole m-by-n column-major
  // array into output.
  #pragma omp parallel for
  for (int H3 = 0; H3 < m; H3++) {
    for (int H7 = 0; H7 < 1; H7++) {
      output[m * H7 + H3] = l[m * H7 + H3];
    }
    for (int H8 = 1; H8 < n; H8++) {
      output[m * H8 + H3] = l[m * H8 + H3];
    }
  }
}
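Since the generated loops amount to a straight copy, a small illustrative driver makes that easy to check (the main function and sample values below are not part of the generated file):

/* Illustrative driver: concattest3() should reproduce its input. */
#include <stdio.h>
#include "concattest3.h"

int main(void)
{
  float in[6] = { 1, 2, 3, 4, 5, 6 };  /* m = 2 rows, n = 3 columns */
  float out[6] = { 0 };
  concattest3(in, 2, 3, out);
  for (int i = 0; i < 6; i++)
    printf("%g ", out[i]);             /* prints: 1 2 3 4 5 6 */
  printf("\n");
  return 0;
}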
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/color-private.h" #include "MagickCore/cache.h" #include "MagickCore/draw.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/resample.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/signature-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/option.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/
struct _ResampleFilter
{
  CacheView
    *view;

  Image
    *image;

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;

  PixelInterpolateMethod
    interpolate;

  VirtualPixelMethod
    virtual_pixel;

  FilterType
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,
    do_interpolate,
    average_defined;

  PixelInfo
    average_pixel;

  /* current elliptical area being resampled around center point */
  double
    A, B, C,
    Vlimit, Ulimit, Uwidth,
    slope;

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResampleFilter() initializes the information resample needs to do a
%  scaled lookup of a color from an image, using area sampling.
%
%  The algorithm is based on an Elliptical Weighted Average, where the pixels
%  found in a large elliptical area are averaged together according to a
%  weighting (filter) function.  For more details see "Fundamentals of Texture
%  Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
%  1989.  Available for free from http://www.cs.cmu.edu/~ph/
%
%  As EWA resampling (or any sort of resampling) can require a lot of
%  calculations to produce a distorted scaling of the source image for each
%  output pixel, the ResampleFilter structure generated holds that information
%  between individual image resampling.
%
%  This function will make the appropriate AcquireCacheView() calls to view
%  the image; calling functions do not need to open a cache view.
%
%  Usage Example...
%      resample_filter=AcquireResampleFilter(image,exception);
%      SetResampleFilter(resample_filter, GaussianFilter);
%      for (y=0; y < (ssize_t) image->rows; y++) {
%        for (x=0; x < (ssize_t) image->columns; x++) {
%          u= ....;   v= ....;
%          ScaleResampleFilter(resample_filter, ... scaling vectors ...);
%          (void) ResamplePixelColor(resample_filter,u,v,&pixel);
%          ... assign resampled pixel value ...
%        }
%      }
%      DestroyResampleFilter(resample_filter);
%
%  The format of the AcquireResampleFilter method is:
%
%     ResampleFilter *AcquireResampleFilter(const Image *image,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  ResampleFilter
    *resample_filter;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  resample_filter=(ResampleFilter *) AcquireCriticalMemory(sizeof(
    *resample_filter));
  (void) memset(resample_filter,0,sizeof(*resample_filter));

  resample_filter->exception=exception;
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
    exception);
  resample_filter->debug=IsEventLogging();
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined=MagickFalse;
  resample_filter->signature=MagickCoreSignature;

  SetResampleFilter(resample_filter,image->filter);
  (void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));

  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y R e s a m p l e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResampleFilter() finalizes and cleans up the resampling
%  resample_filter as returned by AcquireResampleFilter(), freeing any memory
%  or other information as needed.
%
%  The format of the DestroyResampleFilter method is:
%
%      ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e P i x e l C o l o r                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResamplePixelColor() samples the pixel values surrounding the location
%  given using an elliptical weighted average, at the scale previously
%  calculated, and in the most efficient manner possible for the
%  VirtualPixelMethod setting.
%
%  The format of the ResamplePixelColor method is:
%
%     MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
%       const double u0,const double v0,PixelInfo *pixel,
%       ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter.
%
%    o u0,v0: A double representing the center of the area to resample,
%        the distortion-transformed x,y coordinate.
%
%    o pixel: the resampled pixel is returned here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
  ResampleFilter *resample_filter,const double u0,const double v0,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    u,v,
    v1, v2, uw, hit;

  double
    u1;

  double
    U,V,Q,DQ,DDQ;

  double
    divisor_c,divisor_m;

  double
    weight;

  const Quantum
    *pixels;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  status=MagickTrue;
  /* GetPixelInfo(resample_filter->image,pixel); */

  if ( resample_filter->do_interpolate ) {
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif

  /*
    Does the resample area miss the image proper?  If so, and that area is
    a simple solid color, then simply return that color!  This saves a lot
    of calculation when resampling outside the bounds of the source image.

    However it probably should be expanded to the image bounds plus the
    filter's scaled support size.
  */
  hit = 0;
  switch ( resample_filter->virtual_pixel ) {
    case BackgroundVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case MaskVirtualPixelMethod:
      if ( resample_filter->limit_reached
           || u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           || v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;
      break;

    case UndefinedVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
      if (    ( u0 + resample_filter->Ulimit < 0.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 + resample_filter->Ulimit < 0.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 + resample_filter->Vlimit < 0.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
           )
        hit++;
      break;
    case HorizontalTileVirtualPixelMethod:
      if (    v0 + resample_filter->Vlimit < 0.0
           || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
           )
        hit++;  /* outside the horizontally tiled images. */
      break;
    case VerticalTileVirtualPixelMethod:
      if (    u0 + resample_filter->Ulimit < 0.0
           || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
           )
        hit++;  /* outside the vertically tiled images.
*/
      break;
    case DitherVirtualPixelMethod:
      if (    ( u0 + resample_filter->Ulimit < -32.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 + resample_filter->Ulimit < -32.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 + resample_filter->Vlimit < -32.0 )
           || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
                && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
           )
        hit++;
      break;
    case TileVirtualPixelMethod:
    case MirrorVirtualPixelMethod:
    case RandomVirtualPixelMethod:
    case HorizontalTileEdgeVirtualPixelMethod:
    case VerticalTileEdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
      /* resampling of the area is always needed - no VP limits */
      break;
  }
  if ( hit ) {
    /* The area being resampled is simply a solid color, so just return a
     * single lookup color.
     *
     * Should this return the user's requested interpolated color?
     */
    status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
      IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
    return(status);
  }

  /*
    When scaling limits are reached, return an 'averaged' result.
  */
  if ( resample_filter->limit_reached ) {
    switch ( resample_filter->virtual_pixel ) {
      /*  This is always handled by the above, so no need.
        case BackgroundVirtualPixelMethod:
        case ConstantVirtualPixelMethod:
        case TransparentVirtualPixelMethod:
        case GrayVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        case MaskVirtualPixelMethod:
      */
      case UndefinedVirtualPixelMethod:
      case EdgeVirtualPixelMethod:
      case DitherVirtualPixelMethod:
      case HorizontalTileEdgeVirtualPixelMethod:
      case VerticalTileEdgeVirtualPixelMethod:
        /* We need an average edge pixel, from the correct edge!
           How should I calculate an average edge color?
           Just returning an averaged neighbourhood works well in general,
           but falls down for TileEdge methods.
           This needs to be done properly!!!!!!
        */
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
          resample_filter->exception);
        break;
      case HorizontalTileVirtualPixelMethod:
      case VerticalTileVirtualPixelMethod:
        /* just return the background pixel - Is there a more direct way?
*/
        status=InterpolatePixelInfo(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate an average color of the WHOLE image */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;

          CacheView
            *average_view;

          GetPixelInfo(resample_filter->image,(PixelInfo *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;

          /* Try to get an averaged pixel color of the whole image */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireVirtualCacheView(average_image,exception);
          pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const Quantum *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          GetPixelInfoPixel(resample_filter->image,pixels,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is an alpha blend of the image's average pixel
                 color and the current background color */

              /* image's average pixel color */
              weight = QuantumScale*((double)
                resample_filter->average_pixel.alpha);
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              /* background color */
              weight = QuantumScale*((double)
                resample_filter->image->background_color.alpha);
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.alpha +=
                resample_filter->image->background_color.alpha;
              divisor_c += weight;

              /* alpha blend */
              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.alpha /= 2; /* 50% blend */
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize the weighted average data collection.
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = 0.0;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounded by the lines...
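
    Spelled out (an illustrative restatement, using the quantities
    computed just below):

        v = v1   and   v = v2            the scan lines just inside the
                                         ellipse's vertical extent v0 +/- Vlimit
        u = u0 + (v-v0)*slope - Uwidth   the left edge of each scan line
        u = u0 + (v-v0)*slope + Uwidth   the right edge of each scan line

    so each scan line covers uw = 2*Uwidth+1 pixels, sheared by 'slope'.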
  */
  /* range of scan lines */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels, within the scaled ellipse,
    bounded by a parallelogram fitted to the ellipse.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n",
      (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in the scan line */
    u1 += resample_filter->slope; /* start of the next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient (if Q < F then the pixel is inside the ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U
          + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scan line of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const Quantum *) NULL)
      return(MagickFalse);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q));    /* a SquareRoot!  Arrggghhhhh...
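
          (Aside, true of both the LUT and the direct-call branch: Q is a
          quadratic in U, so the loop below steps it by forward differencing
          instead of re-evaluating it.  For Q(U) = A*U^2 + B*U*V + C*V^2 the
          first difference is Q(U+1)-Q(U) = A*(2U+1) + B*V -- the initial DQ
          above -- and the second difference is the constant 2A, i.e. DDQ.)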
  */
#endif
        pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels);
        divisor_m += weight;
        if (pixel->alpha_trait != UndefinedPixelTrait)
          weight *= QuantumScale*((double)
            GetPixelAlpha(resample_filter->image,pixels));
        pixel->red   += weight*GetPixelRed(resample_filter->image,pixels);
        pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
        pixel->blue  += weight*GetPixelBlue(resample_filter->image,pixels);
        if (pixel->colorspace == CMYKColorspace)
          pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
        divisor_c += weight;
        hit++;
#if DEBUG_HIT_MISS
        /* mark the pixel according to hit/miss of the ellipse */
        (void) FormatLocaleFile(stderr,
          "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr,
          "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      } else {
        (void) FormatLocaleFile(stderr,
          "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
        (void) FormatLocaleFile(stderr,
          "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
          (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
      }
      uu++;
#else
      }
#endif
      pixels+=GetPixelChannels(resample_filter->image);
      Q += DQ;
      DQ += DDQ;
    }
  }
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n",
    (long)hit, (long)uw*(v2-v1) );
#endif

  /*
    Result sanity check -- this should NOT happen.
  */
  if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
    /* not enough pixels, or bad weighting in resampling;
       resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
    pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
    pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
    status=InterpolatePixelInfo(resample_filter->image,
      resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
      resample_filter->exception);
#endif
    return(status);
  }

  /*
    Finalize the results of the resampling.
  */
  divisor_m = 1.0/divisor_m;
  if (pixel->alpha_trait != UndefinedPixelTrait)
    pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
  divisor_c = 1.0/divisor_c;
  pixel->red   = (double) ClampToQuantum(divisor_c*pixel->red);
  pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
  pixel->blue  = (double) ClampToQuantum(divisor_c*pixel->blue);
  if (pixel->colorspace == CMYKColorspace)
    pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);
  return(MagickTrue);
}

#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
-   C l a m p U p A x e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The ClampUpAxes() function converts the input vectors into major and
%  minor axis unit vectors, and their magnitudes.  This allows us to
%  ensure that the ellipse generated is never smaller than the unit
%  circle and thus never too small for use in EWA resampling.
%
%  This purely mathematical 'magic' was provided by Professor Nicolas
%  Robidoux and his Masters student Chantal Racette.
%
%  Reference: "We Recommend Singular Value Decomposition", David Austin
%    http://www.ams.org/samplings/feature-column/fcarc-svd
%
%  By generating major and minor axis vectors, we can actually use the
%  ellipse in its "canonical form", by remapping the dx,dy of the
%  sampled point into distances along the major and minor axis unit
%  vectors.
%
%  Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux, const double dvx,
  const double duy, const double dvy, double *major_mag, double *minor_mag,
  double *major_unit_x, double *major_unit_y, double *minor_unit_x,
  double *minor_unit_y)
{
  /*
   * ClampUpAxes takes an input 2x2 matrix
   *
   * [ a b ] = [ dux duy ]
   * [ c d ] = [ dvx dvy ]
   *
   * and computes from it the major and minor axis vectors [major_x,
   * major_y] and [minor_x,minor_y] of the smallest ellipse containing
   * both the unit disk and the ellipse which is the image of the unit
   * disk by the linear transformation
   *
   * [ dux duy ] [S] = [s]
   * [ dvx dvy ] [T] = [t]
   *
   * (The vector [S,T] is the difference between a position in output
   * space and [X,Y]; the vector [s,t] is the difference between a
   * position in input space and [x,y].)
   */
  /*
   * Output:
   *
   * major_mag is the half-length of the major axis of the "new"
   * ellipse.
   *
   * minor_mag is the half-length of the minor axis of the "new"
   * ellipse.
   *
   * major_unit_x is the x-coordinate of the major axis direction vector
   * of both the "old" and "new" ellipses.
   *
   * major_unit_y is the y-coordinate of the major axis direction vector.
   *
   * minor_unit_x is the x-coordinate of the minor axis direction vector.
   *
   * minor_unit_y is the y-coordinate of the minor axis direction vector.
   *
   * Unit vectors are useful for computing projections, in particular,
   * to compute the distance between a point in output space and the
   * center of a unit disk in output space, using the position of the
   * corresponding point [s,t] in input space. Following the clamping,
   * the square of this distance is
   *
   * ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
   * +
   * ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
   *
   * If such distances will be computed for many [s,t]'s, it makes
   * sense to actually compute the reciprocal of major_mag and
   * minor_mag and multiply them by the above unit lengths.
   *
   * Now, if you want to modify the input pair of tangent vectors so
   * that it defines the modified ellipse, all you have to do is set
   *
   * newdux = major_mag * major_unit_x
   * newdvx = major_mag * major_unit_y
   * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
   * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
   *
   * and use these tangent vectors as if they were the original ones.
   * Usually, this is a drastic change in the tangent vectors even if
   * the singular values are not clamped; for example, the minor axis
   * vector always points in a direction which is 90 degrees
   * counterclockwise from the direction of the major axis vector.
   */
  /*
   * Discussion:
   *
   * GOAL: Fix things so that the pullback, in input space, of a disk
   * of radius r in output space is an ellipse which contains, at
   * least, a disc of radius r. (Make this hold for any r>0.)
   *
   * ESSENCE OF THE METHOD: Compute the product of the first two
   * factors of an SVD of the linear transformation defining the
   * ellipse and make sure that both its columns have norm at least 1.
   * Because rotations and reflexions map disks to themselves, it is
   * not necessary to compute the third (rightmost) factor of the SVD.
   *
   * DETAILS: Find the singular values and (unit) left singular
   * vectors of Jinv, clamping up the singular values to 1, and
   * multiply the unit left singular vectors by the new singular
   * values in order to get the minor and major ellipse axis vectors.
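   *
   * A small worked example (an added illustration, not from the
   * original notes): for the pure-scaling pullback Jinv = [ 2 0 ; 0 0.5 ],
   * n = Jinv*transpose(Jinv) = [ 4 0 ; 0 0.25 ], so the singular values
   * are s1 = 2 and s2 = 0.5.  Clamping leaves the major axis at 2 but
   * lifts the minor axis from 0.5 up to 1, so the resulting ellipse
   * still contains the unit disk.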
* * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) * * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinv = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. 
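   *
   * Explicitly (restating the definitions used just below):
   *
   *   n = Jinv * transpose(Jinv) = [ a*a+b*b  a*c+b*d ]
   *                                [ a*c+b*d  c*c+d*d ]
   *
   * so its eigenvalues -- the squared singular values s1^2 and s2^2 --
   * are ( frobenius_squared +/- sqrt_discriminant ) / 2, where
   * frobenius_squared = n11+n22 is the trace of n and the discriminant
   * is trace^2 - 4*det(n), with det(n) = det(Jinv)^2.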
   */
  const double a = dux;
  const double b = duy;
  const double c = dvx;
  const double d = dvy;
  const double n11 = aa+bb;
  const double n12 = a*c+b*d;
  const double n21 = n12;
  const double n22 = cc+dd;
  const double det = a*d-b*c;
  const double twice_det = det+det;
  const double frobenius_squared = n11+n22;
  const double discriminant =
    (frobenius_squared+twice_det)*(frobenius_squared-twice_det);
  /*
   * In exact arithmetic, discriminant can't be negative. In floating
   * point, it can, because of the bad conditioning of SVD
   * decompositions done through the associated normal matrix.
   */
  const double sqrt_discriminant =
    sqrt(discriminant > 0.0 ? discriminant : 0.0);
  /*
   * s1 is the largest singular value of the inverse Jacobian
   * matrix. In other words, its reciprocal is the smallest singular
   * value of the Jacobian matrix itself.
   * If s1 = 0, both singular values are 0, and any orthogonal pair of
   * left and right factors produces a singular decomposition of Jinv.
   */
  /*
   * Initially, we only compute the squares of the singular values.
   */
  const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
  /*
   * s2 is the smallest singular value of the inverse Jacobian
   * matrix. Its reciprocal is the largest singular value of the
   * Jacobian matrix itself.
   */
  const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
  const double s1s1minusn11 = s1s1-n11;
  const double s1s1minusn22 = s1s1-n22;
  /*
   * u1, the first column of the U factor of a singular decomposition
   * of Jinv, is a (non-normalized) left singular vector corresponding
   * to s1. It has entries u11 and u21. We compute u1 from the fact
   * that it is an eigenvector of n corresponding to the eigenvalue
   * s1^2.
   */
  const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
  const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
  /*
   * The following selects the largest row of n-s1^2 I as the one
   * which is used to find the eigenvector. If both s1^2-n11 and
   * s1^2-n22 are zero, n-s1^2 I is the zero matrix.  In that case,
   * any vector is an eigenvector; in addition, norm below is equal to
   * zero, and, in exact arithmetic, this is the only case in which
   * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
   * if norm = 0 safely takes care of all cases.
   */
  const double temp_u11 =
    ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
  const double temp_u21 =
    ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
  const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
  /*
   * Finalize the entries of the first left singular vector (associated
   * with the largest singular value).
   */
  const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
  const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
  /*
   * Clamp the singular values up to 1.
   */
  *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
  *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
  /*
   * Return the unit major and minor axis direction vectors.
   */
  *major_unit_x = u11;
  *major_unit_y = u21;
  *minor_unit_x = -u21;
  *minor_unit_y = u11;
}

#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e R e s a m p l e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleResampleFilter() does all the calculations needed to resample an image
%  at a specific scale, defined by two scaling vectors.  This does not use an
%  orthogonal scaling, but two distorted scaling vectors, allowing the
%  generation of an angled ellipse.
%
%  As only two derivative scaling vectors are used, the center of the ellipse
%  must be the center of the lookup.
%  That is, any curvature that the distortion may produce is discounted.
%
%  The input vectors are produced by either finding the derivatives of the
%  distortion function, or the partial derivatives from a distortion mapping.
%  They do not need to be the orthogonal dx,dy scaling vectors, but can be
%  calculated from other derivatives.  For example you could use dr,da/r
%  polar coordinate scaling vectors.
%
%  If   u,v = DistortEquation(x,y)   OR   u = Fu(x,y);  v = Fv(x,y)
%  Then the scaling vectors are determined from the derivatives...
%      du/dx, dv/dx     and     du/dy, dv/dy
%  If the resulting scaling vectors are orthogonally aligned then...
%      dv/dx = 0   and   du/dy = 0
%  producing an orthogonally aligned ellipse in source space for the area
%  to be resampled.
%
%  Note that the scaling vector order differs from the argument order.
%  The argument order is the general order in which the derivatives are
%  extracted from the distortion equations, and not the scaling vectors.
%  As such the middle two values may be swapped from what you expect.
%  Caution is advised.
%
%  WARNING: It is assumed that any SetResampleFilter() method call will
%  always be performed before the ScaleResampleFilter() method, so that the
%  size of the ellipse will match the support for the resampling filter being
%  used.
%
%  The format of the ScaleResampleFilter method is:
%
%      void ScaleResampleFilter(ResampleFilter *resample_filter,
%        const double dux,const double duy,const double dvx,const double dvy)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resampling information defining the
%      image being resampled
%
%    o dux,duy,dvx,dvy:
%         The derivatives or scaling vectors defining the EWA ellipse.
%         NOTE: watch the order, which is based on the order derivatives
%         are usually determined from distortion equations (see above).
%         The middle two values may need to be swapped if you are thinking
%         in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf;   duy=%lf; dvy=%lf;\n",
       dux, dvx, duy, dvy);
#endif

  /* Find the ellipse coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     with u,v relative to the point around which we are resampling,
     and the given scaling dx,dy vectors in u,v space
        du/dx,dv/dx   and  du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients.
     However when magnifying images, the scaling vectors will be small,
     resulting in an ellipse that is too small to sample properly.  As
     such we need to clamp the major/minor axes to a minimum of 1.0 to
     prevent the ellipse getting too small.
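
     For example (an added illustration, derived from the raw formulas
     below): a pure magnification maps to the scaling vectors dux=dvy=s,
     duy=dvx=0 with s < 1, giving A=C=s^2, B=0, F=s^4, i.e. the circle
     u^2+v^2 = s^2 of radius s -- smaller than a single pixel, which is
     why the axes are clamped.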
  */
#if EWA_CLAMP
  { double major_mag,
           minor_mag,
           major_x,
           major_y,
           minor_x,
           minor_y;

  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
                &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag;  major_y *= major_mag;
  minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr,
       "major_x=%lf; major_y=%lf;  minor_x=%lf; minor_y=%lf;\n",
       major_x, major_y, minor_x, minor_y);
#endif
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */

#else /* HQ_EWA */
  /*
    This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in
    his thesis, which adds a unit circle to the elliptical area so as to
    do both Reconstruction and Prefiltering of the pixels in the
    resampling.  It also means it is always likely to have at least 4
    pixels within the area of the ellipse, for weighted averaging.

    No scaling will result with F == 4.0 and a circle of radius 2.0, and
    F smaller than this means magnification is being used.

    NOTE: This method produces a very blurry result at near unity scale,
    while producing perfect results for strong minification and
    magnification.  However filter support is fixed to 2.0 (no good for
    Windowed Sinc filters).
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information is not currently needed, but may be needed later
     for better limit determination.

     It is also good to have as a record for future debugging.
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;

    alpha = A+C;
    beta  = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major=MagickMaximumValue;
    else
      Major=sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about the ellipse includes... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf   Area=%lf\n",
         (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif

  /* If one or both of the scaling vectors is impossibly large (producing
     a very large raw F value), we may as well not bother doing any form
     of resampling, since the resampled area is very large.  In this case
     some alternative means of pixel sampling, such as the average of the
     whole image, is needed to get a reasonable result.  Calculate only as
     needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale the ellipse to match the filter's support (that is, multiply
     F by the square of the support).  Simpler to just multiply it by
     the support twice!
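
     (Since F scales as the square of the ellipse radius, multiplying it
     by support^2 scales Ulimit, Vlimit and Uwidth below by exactly the
     factor 'support'.)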
  */
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to the ellipse */
  resample_filter->Uwidth = sqrt(F/A);  /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A);  /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr,
       "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
       resample_filter->Ulimit, resample_filter->Vlimit,
       resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
         > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale the ellipse formula to directly index the Filter Lookup Table */
  { double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale=(double) WLUT_WIDTH*PerceptibleReciprocal(F);
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale=resample_filter->F*PerceptibleReciprocal(F);
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilter() sets the resampling filter lookup table based on a
%  specific filter.  Note that the filter is used as a radial filter, not
%  as a two-pass orthogonally aligned resampling filter.
%
%  The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterType filter)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resampling information structure
%
%    o filter: the resize filter for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterType filter)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return;  /* EWA turned off - nothing more to do */
  }

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
         ModuleError, "UnableToSetFilteringValue",
         "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return;  /* EWA turned off - nothing more to do */
  }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
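   *
   * (A note on the LUT filled in below: it is indexed by a scaled
   * *squared* radius -- entry Q holds the filter weight at radius
   * sqrt(Q)*r_scale -- so the WLUT_WIDTH entries span the full support
   * without needing a square root per sampled pixel.)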
   */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0;  /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  { int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
           GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for the filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle.  This assumes that any
    real scaling changes will always take place AFTER the filter method
    has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /*
    This is old code kept as a reference only.  Basically it generates a
    Gaussian bell curve, with sigma = 0.5 if the support is 2.0.

    Create a Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use   exp(Q*ALPHA)
    where  Q = distance squared from 0.0 (center) to 1.0 (edge)
    and    ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to a support radius of 2.0,
    thus needs to be scaled by  ALPHA*4/1024  and any blur factor squared.
    It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    if (IsStringTrue(GetImageArtifact(resample_filter->image,
             "resample:verbose")) != MagickFalse)
      {
        int
          Q;
        double
          r_scale;

        /* Debug output of the filter weighting LUT.
          Gnuplot the LUT data; the x scale index has been adjusted:
            plot [0:2][-.2:1] "lut.dat" with lines
          The filter values should be normalized for comparison.
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
                   WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
                   resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in the table use a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight,\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("#    plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");

        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
              GetMagickPrecision(),sqrt((double)Q)*r_scale,
              GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterInterpolateMethod()
sets the resample filter interpolation % method. % % The format of the SetResampleFilterInterpolateMethod method is: % % MagickBooleanType SetResampleFilterInterpolateMethod( % ResampleFilter *resample_filter,const InterpolateMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the interpolation method. % */ MagickExport MagickBooleanType SetResampleFilterInterpolateMethod( ResampleFilter *resample_filter,const PixelInterpolateMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->interpolate=method; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterVirtualPixelMethod() changes the virtual pixel method % associated with the specified resample filter. % % The format of the SetResampleFilterVirtualPixelMethod method is: % % MagickBooleanType SetResampleFilterVirtualPixelMethod( % ResampleFilter *resample_filter,const VirtualPixelMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the virtual pixel method. % */ MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod( ResampleFilter *resample_filter,const VirtualPixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickCoreSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->virtual_pixel=method; if (method != UndefinedVirtualPixelMethod) (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method); return(MagickTrue); }
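
/*
  Illustrative usage sketch (an addition for clarity, not part of the
  original source): how the entry points above fit together, following
  the outline in the AcquireResampleFilter() documentation.  The
  distortion here is a hypothetical identity placeholder -- a real
  caller would compute u0,v0 and the derivative scaling vectors from its
  own distortion equations.  Kept under "#if 0", matching the reference
  code convention used earlier in this file.
*/
#if 0
static MagickBooleanType ResampleSketch(const Image *image,
  ExceptionInfo *exception)
{
  PixelInfo
    pixel;

  ResampleFilter
    *resample_filter;

  ssize_t
    x,
    y;

  resample_filter=AcquireResampleFilter(image,exception);
  SetResampleFilter(resample_filter,GaussianFilter);
  for (y=0; y < (ssize_t) image->rows; y++)
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        u0 = (double) x,  /* hypothetical identity distortion */
        v0 = (double) y;

      /* derivatives du/dx, du/dy, dv/dx, dv/dy of the (identity)
         distortion at x,y -- note the argument order warning in
         ScaleResampleFilter() */
      ScaleResampleFilter(resample_filter,1.0,0.0,0.0,1.0);
      if (ResamplePixelColor(resample_filter,u0,v0,&pixel,exception)
            == MagickFalse)
        break;
      /* ... assign the resampled pixel value ... */
    }
  resample_filter=DestroyResampleFilter(resample_filter);
  return(MagickTrue);
}
#endif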
tree-vect-loop.c
/* Loop Vectorization Copyright (C) 2003-2018 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> and Ira Rosen <irar@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "optabs-tree.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "tree-ssa-loop-ivopts.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" #include "tree-ssa-loop.h" #include "cfgloop.h" #include "params.h" #include "tree-scalar-evolution.h" #include "tree-vectorizer.h" #include "gimple-fold.h" #include "cgraph.h" #include "tree-cfg.h" #include "tree-if-conv.h" #include "internal-fn.h" #include "tree-vector-builder.h" #include "vec-perm-indices.h" #include "tree-eh.h" /* Loop Vectorization Pass. This pass tries to vectorize loops. For example, the vectorizer transforms the following simple loop: short a[N]; short b[N]; short c[N]; int i; for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } as if it was manually vectorized by rewriting the source code into: typedef int __attribute__((mode(V8HI))) v8hi; short a[N]; short b[N]; short c[N]; int i; v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c; v8hi va, vb, vc; for (i=0; i<N/8; i++){ vb = pb[i]; vc = pc[i]; va = vb + vc; pa[i] = va; } The main entry to this pass is vectorize_loops(), in which the vectorizer applies a set of analyses on a given set of loops, followed by the actual vectorization transformation for the loops that had successfully passed the analysis phase. Throughout this pass we make a distinction between two types of data: scalars (which are represented by SSA_NAMES), and memory references ("data-refs"). These two types of data require different handling both during analysis and transformation. The types of data-refs that the vectorizer currently supports are ARRAY_REFS which base is an array DECL (not a pointer), and INDIRECT_REFS through pointers; both array and pointer accesses are required to have a simple (consecutive) access pattern. Analysis phase: =============== The driver for the analysis phase is vect_analyze_loop(). It applies a set of analyses, some of which rely on the scalar evolution analyzer (scev) developed by Sebastian Pop. During the analysis phase the vectorizer records some information per stmt in a "stmt_vec_info" struct which is attached to each stmt in the loop, as well as general information about the loop as a whole, which is recorded in a "loop_vec_info" struct attached to each loop. 
Transformation phase: ===================== The loop transformation phase scans all the stmts in the loop, and creates a vector stmt (or a sequence of stmts) for each scalar stmt S in the loop that needs to be vectorized. It inserts the vector code sequence just before the scalar stmt S, and records a pointer to the vector code in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct attached to S). This pointer will be used for the vectorization of following stmts which use the def of stmt S. Stmt S is removed if it writes to memory; otherwise, we rely on dead code elimination for removing it. For example, say stmt S1 was vectorized into stmt VS1: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 S2: a = b; To vectorize stmt S2, the vectorizer first finds the stmt that defines the operand 'b' (S1), and gets the relevant vector def 'vb' from the vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The resulting sequence would be: VS1: vb = px[i]; S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1 VS2: va = vb; S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2 Operands that are not SSA_NAMEs, are data-refs that appear in load/store operations (like 'x[i]' in S1), and are handled differently. Target modeling: ================= Currently the only target specific information that is used is the size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". Targets that can support different sizes of vectors, for now will need to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More flexibility will be added in the future. Since we only vectorize operations which vector form can be expressed using existing tree codes, to verify that an operation is supported, the vectorizer checks the relevant optab at the relevant machine_mode (e.g, optab_handler (add_optab, V8HImode)). If the value found is CODE_FOR_nothing, then there's no target support, and we can't vectorize the stmt. For additional information on this project see: http://gcc.gnu.org/projects/tree-ssa/vectorization.html */ static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *); /* Function vect_determine_vectorization_factor Determine the vectorization factor (VF). VF is the number of data elements that are operated upon in parallel in a single iteration of the vectorized loop. For example, when vectorizing a loop that operates on 4byte elements, on a target with vector size (VS) 16byte, the VF is set to 4, since 4 elements can fit in a single vector register. We currently support vectorization of loops in which all types operated upon are of the same size. Therefore this function currently sets VF according to the size of the types operated upon, and fails if there are multiple sizes in the loop. 
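
   (An added worked illustration, consistent with the 16-byte vector size
   used in the example above: a loop operating only on 2-byte shorts gets
   VF = 8, one operating only on 4-byte ints gets VF = 4; a loop mixing
   the two would need both factors at once, which is the "multiple sizes"
   case that currently fails.)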
VF is also the factor by which the loop iterations are strip-mined, e.g.: original loop: for (i=0; i<N; i++){ a[i] = b[i] + c[i]; } vectorized loop: for (i=0; i<N; i+=VF){ a[i:VF] = b[i:VF] + c[i:VF]; } */ static bool vect_determine_vectorization_factor (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); unsigned nbbs = loop->num_nodes; poly_uint64 vectorization_factor = 1; tree scalar_type = NULL_TREE; gphi *phi; tree vectype; stmt_vec_info stmt_info; unsigned i; HOST_WIDE_INT dummy; gimple *stmt, *pattern_stmt = NULL; gimple_seq pattern_def_seq = NULL; gimple_stmt_iterator pattern_def_si = gsi_none (); bool analyze_pattern_stmt = false; bool bool_result; auto_vec<stmt_vec_info> mask_producers; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_determine_vectorization_factor ===\n"); for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { phi = si.phi (); stmt_info = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } gcc_assert (stmt_info); if (STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) { gcc_assert (!STMT_VINFO_VECTYPE (stmt_info)); scalar_type = TREE_TYPE (PHI_RESULT (phi)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported " "data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); dump_printf (MSG_NOTE, "\n"); } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype)); dump_printf (MSG_NOTE, "\n"); } vect_update_max_nunits (&vectorization_factor, vectype); } } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si) || analyze_pattern_stmt;) { tree vf_vectype; if (analyze_pattern_stmt) stmt = pattern_stmt; else stmt = gsi_stmt (si); stmt_info = vinfo_for_stmt (stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } gcc_assert (stmt_info); /* Skip stmts which do not need to be vectorized. 
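	 (That is, statements that are neither relevant nor live, and
	 clobbers, which only convey lifetime information.  If such a
	 statement was replaced by a pattern statement whose own flags
	 are set, the pattern statement is analyzed in its place -- see
	 the code below.)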
*/ if ((!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) || gimple_clobber_p (stmt)) { if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) { stmt = pattern_stmt; stmt_info = vinfo_for_stmt (pattern_stmt); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } } else { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "skip.\n"); gsi_next (&si); continue; } } else if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) analyze_pattern_stmt = true; /* If a pattern statement has def stmts, analyze them too. */ if (is_pattern_stmt_p (stmt_info)) { if (pattern_def_seq == NULL) { pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); pattern_def_si = gsi_start (pattern_def_seq); } else if (!gsi_end_p (pattern_def_si)) gsi_next (&pattern_def_si); if (pattern_def_seq != NULL) { gimple *pattern_def_stmt = NULL; stmt_vec_info pattern_def_stmt_info = NULL; while (!gsi_end_p (pattern_def_si)) { pattern_def_stmt = gsi_stmt (pattern_def_si); pattern_def_stmt_info = vinfo_for_stmt (pattern_def_stmt); if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) break; gsi_next (&pattern_def_si); } if (!gsi_end_p (pattern_def_si)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> examining pattern def stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); } stmt = pattern_def_stmt; stmt_info = pattern_def_stmt_info; } else { pattern_def_si = gsi_none (); analyze_pattern_stmt = false; } } else analyze_pattern_stmt = false; } if (gimple_get_lhs (stmt) == NULL_TREE /* MASK_STORE has no lhs, but is ok. */ && (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt) || gimple_call_internal_fn (stmt) != IFN_MASK_STORE)) { if (is_gimple_call (stmt)) { /* Ignore calls with no lhs. These must be calls to #pragma omp simd functions, and what vectorization factor it really needs can't be determined until vectorizable_simd_clone_call. */ if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: irregular stmt."); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector stmt in loop:"); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } bool_result = false; if (STMT_VINFO_VECTYPE (stmt_info)) { /* The only case when a vectype had been already set is for stmts that contain a dataref, or for "pattern-stmts" (stmts generated by the vectorizer to represent/replace a certain idiom). 
*/ gcc_assert (STMT_VINFO_DATA_REF (stmt_info) || is_pattern_stmt_p (stmt_info) || !gsi_end_p (pattern_def_si)); vectype = STMT_VINFO_VECTYPE (stmt_info); } else { gcc_assert (!STMT_VINFO_DATA_REF (stmt_info)); if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3)); else scalar_type = TREE_TYPE (gimple_get_lhs (stmt)); /* Bool ops don't participate in vectorization factor computation. For comparison use compared types to compute a factor. */ if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type) && is_gimple_assign (stmt) && gimple_assign_rhs_code (stmt) != COND_EXPR) { if (STMT_VINFO_RELEVANT_P (stmt_info) || STMT_VINFO_LIVE_P (stmt_info)) mask_producers.safe_push (stmt_info); bool_result = true; if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); else { if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vectype = get_vectype_for_scalar_type (scalar_type); if (!vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported " "data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (!bool_result) STMT_VINFO_VECTYPE (stmt_info) = vectype; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype); dump_printf (MSG_NOTE, "\n"); } } /* Don't try to compute VF out scalar types if we stmt produces boolean vector. Use result vectype instead. */ if (VECTOR_BOOLEAN_TYPE_P (vectype)) vf_vectype = vectype; else { /* The vectorization factor is according to the smallest scalar type (or the largest vector size, but we only support one vector size per loop). 
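
	     (An added illustration, assuming 16-byte vectors: for
	     int_dest[i] = (int) short_src[i] the smallest scalar type is
	     the 2-byte short, so this statement asks for nunits = 8 even
	     though each vector of ints holds only 4 elements.)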
*/ if (!bool_result) scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "get vectype for scalar type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type); dump_printf (MSG_NOTE, "\n"); } vf_vectype = get_vectype_for_scalar_type (scalar_type); } if (!vf_vectype) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported data-type "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)), GET_MODE_SIZE (TYPE_MODE (vf_vectype)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: different sized vector " "types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vf_vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectype: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype); dump_printf (MSG_NOTE, "\n"); } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "nunits = "); dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype)); dump_printf (MSG_NOTE, "\n"); } vect_update_max_nunits (&vectorization_factor, vf_vectype); if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } } } /* TODO: Analyze cost. Decide if worth while to vectorize. */ if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = "); dump_dec (MSG_NOTE, vectorization_factor); dump_printf (MSG_NOTE, "\n"); } if (known_le (vectorization_factor, 1U)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported data-type\n"); return false; } LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; for (i = 0; i < mask_producers.length (); i++) { tree mask_type = NULL; stmt = STMT_VINFO_STMT (mask_producers[i]); if (is_gimple_assign (stmt) && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison && !VECT_SCALAR_BOOLEAN_TYPE_P (TREE_TYPE (gimple_assign_rhs1 (stmt)))) { scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); mask_type = get_mask_type_for_scalar_type (scalar_type); if (!mask_type) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported mask\n"); return false; } } else { tree rhs; ssa_op_iter iter; gimple *def_stmt; enum vect_def_type dt; FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE) { if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo, &def_stmt, &dt, &vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't compute mask type " "for statement, "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } /* No vectype probably means external definition. Allow it in case there is another operand which allows to determine mask type. 
*/ if (!vectype) continue; if (!mask_type) mask_type = vectype; else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type), TYPE_VECTOR_SUBPARTS (vectype))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: different sized mask " "types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_type); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } else if (VECTOR_BOOLEAN_TYPE_P (mask_type) != VECTOR_BOOLEAN_TYPE_P (vectype)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: mixed mask and " "nonmask vector types in statement, "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, mask_type); dump_printf (MSG_MISSED_OPTIMIZATION, " and "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, vectype); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return false; } } /* We may compare a boolean value loaded as a vector of integers. Fix mask_type in such a case. */ if (mask_type && !VECTOR_BOOLEAN_TYPE_P (mask_type) && gimple_code (stmt) == GIMPLE_ASSIGN && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) mask_type = build_same_sized_truth_vector_type (mask_type); } /* No mask_type should mean a loop-invariant predicate. This is probably a subject for optimization in if-conversion. */ if (!mask_type) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't compute mask type " "for statement, "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0); } return false; } STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type; } return true; } /* Function vect_is_simple_iv_evolution. FORNOW: A simple evolution of an induction variable in the loop is considered a polynomial evolution. */ static bool vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init, tree * step) { tree init_expr; tree step_expr; tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb); basic_block bb; /* When there is no evolution in this loop, the evolution function is not "simple". */ if (evolution_part == NULL_TREE) return false; /* When the evolution is a polynomial of degree >= 2 the evolution function is not "simple". */ if (tree_is_chrec (evolution_part)) return false; step_expr = evolution_part; init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb)); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "step: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr); dump_printf (MSG_NOTE, ", init: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr); dump_printf (MSG_NOTE, "\n"); } *init = init_expr; *step = step_expr; if (TREE_CODE (step_expr) != INTEGER_CST && (TREE_CODE (step_expr) != SSA_NAME || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr))) && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb)) || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr)) && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)) || !flag_associative_math))) && (TREE_CODE (step_expr) != REAL_CST || !flag_associative_math)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "step unknown.\n"); return false; } return true; } /* Function vect_analyze_scalar_cycles_1. Examine the cross iteration def-use cycles of scalar variables in LOOP.
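Each relevant loop-header PHI is classified as an induction, a reduction, a double reduction, or a nested cycle; anything else is left as vect_unknown_def_type.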
LOOP_VINFO represents the loop that is now being considered for vectorization (can be LOOP, or an outer-loop enclosing LOOP). */ static void vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop) { basic_block bb = loop->header; tree init, step; auto_vec<gimple *, 64> worklist; gphi_iterator gsi; bool double_reduc; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_scalar_cycles ===\n"); /* First - identify all inductions. Reduction detection assumes that all the inductions have been identified, therefore, this order must not be changed. */ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gphi *phi = gsi.phi (); tree access_fn = NULL; tree def = PHI_RESULT (phi); stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } /* Skip virtual phi's. The data dependences that are associated with virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */ if (virtual_operand_p (def)) continue; STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type; /* Analyze the evolution function. */ access_fn = analyze_scalar_evolution (loop, def); if (access_fn) { STRIP_NOPS (access_fn); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Access function of PHI: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn); dump_printf (MSG_NOTE, "\n"); } STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) = initial_condition_in_loop_num (access_fn, loop->num); STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) = evolution_part_in_loop_num (access_fn, loop->num); } if (!access_fn || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step) || (LOOP_VINFO_LOOP (loop_vinfo) != loop && TREE_CODE (step) != INTEGER_CST)) { worklist.safe_push (phi); continue; } gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo) != NULL_TREE); gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def; } /* Second - identify all reductions and nested cycles. 
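An illustrative shape of such a cycle: sum_1 = PHI <sum_0 (preheader), sum_2 (latch)> in the loop header together with sum_2 = sum_1 + a[i] in the body; vect_force_simple_reduction below is what recognizes the cycle through sum_1 and sum_2.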
*/ while (worklist.length () > 0) { gimple *phi = worklist.pop (); tree def = PHI_RESULT (phi); stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi); gimple *reduc_stmt; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } gcc_assert (!virtual_operand_p (def) && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type); reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi, &double_reduc, false); if (reduc_stmt) { if (double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected double reduction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_double_reduction_def; } else { if (loop != LOOP_VINFO_LOOP (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected vectorizable nested cycle.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_nested_cycle; } else { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Detected reduction.\n"); STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def; STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) = vect_reduction_def; /* Store the reduction cycles for possible vectorization in loop-aware SLP if it was not detected as a reduction chain. */ if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt))) LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt); } } } else if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unknown def-use cycle pattern.\n"); } } /* Function vect_analyze_scalar_cycles. Examine the cross iteration def-use cycles of scalar variables, by analyzing the loop-header PHIs of scalar variables. Classify each cycle as one of the following: invariant, induction, reduction, unknown. We do that for the loop represented by LOOP_VINFO, and also for its inner-loop, if it exists. Examples for scalar cycles: Example1: reduction: loop1: for (i=0; i<N; i++) sum += a[i]; Example2: induction: loop2: for (i=0; i<N; i++) a[i] = i; */ static void vect_analyze_scalar_cycles (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); vect_analyze_scalar_cycles_1 (loop_vinfo, loop); /* When vectorizing an outer-loop, the inner-loop is executed sequentially. Reductions in such an inner-loop therefore have different properties than the reductions in the nest that gets vectorized: 1. When vectorized, they are executed in the same order as in the original scalar loop, so we can't change the order of computation when vectorizing them. 2. FIXME: Inner-loop reductions can be used in the inner-loop, so the current checks are too strict. */ if (loop->inner) vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner); } /* Transfer group and reduction information from STMT to its pattern stmt.
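For example (illustrative): if every statement of a reduction chain was replaced by a pattern statement, the GROUP_FIRST_ELEMENT/GROUP_NEXT_ELEMENT links are redirected to those pattern statements so that later analysis sees the chain in its pattern form.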
*/ static void vect_fixup_reduc_chain (gimple *stmt) { gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); gimple *stmtp; gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp)) && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))); GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt)); do { stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)); if (stmt) GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp)) = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); } while (stmt); STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def; } /* Fix up scalar cycles that now have their stmts detected as patterns. */ static void vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo) { gimple *first; unsigned i; FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first) if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first))) { gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)); while (next) { if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))) break; next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next)); } /* If not all stmts in the chain are patterns, try to handle the chain without patterns. */ if (! next) { vect_fixup_reduc_chain (first); LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i] = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first)); } } } /* Function vect_get_loop_niters. Determine how many iterations the loop executes and place it in NUMBER_OF_ITERATIONS. Place the number of latch iterations in NUMBER_OF_ITERATIONSM1. Place the condition under which the niter information holds in ASSUMPTIONS. Return the loop exit condition. */ static gcond * vect_get_loop_niters (struct loop *loop, tree *assumptions, tree *number_of_iterations, tree *number_of_iterationsm1) { edge exit = single_exit (loop); struct tree_niter_desc niter_desc; tree niter_assumptions, niter, may_be_zero; gcond *cond = get_loop_exit_condition (loop); *assumptions = boolean_true_node; *number_of_iterationsm1 = chrec_dont_know; *number_of_iterations = chrec_dont_know; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== get_loop_niters ===\n"); if (!exit) return cond; niter = chrec_dont_know; may_be_zero = NULL_TREE; niter_assumptions = boolean_true_node; if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL) || chrec_contains_undetermined (niter_desc.niter)) return cond; niter_assumptions = niter_desc.assumptions; may_be_zero = niter_desc.may_be_zero; niter = niter_desc.niter; if (may_be_zero && integer_zerop (may_be_zero)) may_be_zero = NULL_TREE; if (may_be_zero) { if (COMPARISON_CLASS_P (may_be_zero)) { /* Try to combine may_be_zero with assumptions; this can simplify the computation of the niter expression.
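For example (illustrative): with niter = n_4 - 1 and may_be_zero = (n_4 == 0), folding the negation of the predicate into the assumptions keeps niter a plain expression instead of wrapping it in the COND_EXPR built in the other arm below.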
*/ if (niter_assumptions && !integer_nonzerop (niter_assumptions)) niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, niter_assumptions, fold_build1 (TRUTH_NOT_EXPR, boolean_type_node, may_be_zero)); else niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero, build_int_cst (TREE_TYPE (niter), 0), rewrite_to_non_trapping_overflow (niter)); may_be_zero = NULL_TREE; } else if (integer_nonzerop (may_be_zero)) { *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0); *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1); return cond; } else return cond; } *assumptions = niter_assumptions; *number_of_iterationsm1 = niter; /* We want the number of loop header executions, which is the number of latch executions plus one. ??? For UINT_MAX latch executions this number overflows to zero for loops like do { n++; } while (n != 0); */ if (niter && !chrec_contains_undetermined (niter)) niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter), build_int_cst (TREE_TYPE (niter), 1)); *number_of_iterations = niter; return cond; } /* Function bb_in_loop_p Used as predicate for dfs order traversal of the loop bbs. */ static bool bb_in_loop_p (const_basic_block bb, const void *data) { const struct loop *const loop = (const struct loop *)data; if (flow_bb_inside_loop_p (loop, bb)) return true; return false; } /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as stmt_vec_info structs for all the stmts in LOOP_IN. */ _loop_vec_info::_loop_vec_info (struct loop *loop_in) : vec_info (vec_info::loop, init_cost (loop_in)), loop (loop_in), bbs (XCNEWVEC (basic_block, loop->num_nodes)), num_itersm1 (NULL_TREE), num_iters (NULL_TREE), num_iters_unchanged (NULL_TREE), num_iters_assumptions (NULL_TREE), th (0), versioning_threshold (0), vectorization_factor (0), max_vectorization_factor (0), mask_skip_niters (NULL_TREE), mask_compare_type (NULL_TREE), unaligned_dr (NULL), peeling_for_alignment (0), ptr_mask (0), ivexpr_map (NULL), slp_unrolling_factor (1), single_scalar_iteration_cost (0), vectorizable (false), can_fully_mask_p (true), fully_masked_p (false), peeling_for_gaps (false), peeling_for_niter (false), operands_swapped (false), no_data_dependencies (false), has_mask_store (false), scalar_loop (NULL), orig_loop_info (NULL) { /* Create/Update stmt_info for all stmts in the loop. */ basic_block *body = get_loop_body (loop); for (unsigned int i = 0; i < loop->num_nodes; i++) { basic_block bb = body[i]; gimple_stmt_iterator si; for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *phi = gsi_stmt (si); gimple_set_uid (phi, 0); set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, this)); } for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); gimple_set_uid (stmt, 0); set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, this)); } } free (body); /* CHECKME: We want to visit all BBs before their successors (except for latch blocks, for which this assertion wouldn't hold). In the simple case of the loop forms we allow, a dfs order of the BBs would be the same as reversed postorder traversal, so we are safe. */ unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p, bbs, loop->num_nodes, loop); gcc_assert (nbbs == loop->num_nodes); } /* Free all levels of MASKS.
*/ void release_vec_loop_masks (vec_loop_masks *masks) { rgroup_masks *rgm; unsigned int i; FOR_EACH_VEC_ELT (*masks, i, rgm) rgm->masks.release (); masks->release (); } /* Free all memory used by the _loop_vec_info, as well as all the stmt_vec_info structs of all the stmts in the loop. */ _loop_vec_info::~_loop_vec_info () { int nbbs; gimple_stmt_iterator si; int j; nbbs = loop->num_nodes; for (j = 0; j < nbbs; j++) { basic_block bb = bbs[j]; for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) free_stmt_vec_info (gsi_stmt (si)); for (si = gsi_start_bb (bb); !gsi_end_p (si); ) { gimple *stmt = gsi_stmt (si); /* We may have broken canonical form by moving a constant into RHS1 of a commutative op. Fix such occurrences. */ if (operands_swapped && is_gimple_assign (stmt)) { enum tree_code code = gimple_assign_rhs_code (stmt); if ((code == PLUS_EXPR || code == POINTER_PLUS_EXPR || code == MULT_EXPR) && CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt))) swap_ssa_operands (stmt, gimple_assign_rhs1_ptr (stmt), gimple_assign_rhs2_ptr (stmt)); else if (code == COND_EXPR && CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt))) { tree cond_expr = gimple_assign_rhs1 (stmt); enum tree_code cond_code = TREE_CODE (cond_expr); if (TREE_CODE_CLASS (cond_code) == tcc_comparison) { bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0)); cond_code = invert_tree_comparison (cond_code, honor_nans); if (cond_code != ERROR_MARK) { TREE_SET_CODE (cond_expr, cond_code); swap_ssa_operands (stmt, gimple_assign_rhs2_ptr (stmt), gimple_assign_rhs3_ptr (stmt)); } } } } /* Free stmt_vec_info. */ free_stmt_vec_info (stmt); gsi_next (&si); } } free (bbs); release_vec_loop_masks (&masks); delete ivexpr_map; loop->aux = NULL; } /* Return an invariant or register for EXPR and emit necessary computations in the LOOP_VINFO loop preheader. */ tree cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr) { if (is_gimple_reg (expr) || is_gimple_min_invariant (expr)) return expr; if (! loop_vinfo->ivexpr_map) loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>; tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr); if (! cached) { gimple_seq stmts = NULL; cached = force_gimple_operand (unshare_expr (expr), &stmts, true, NULL_TREE); if (stmts) { edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo)); gsi_insert_seq_on_edge_immediate (e, stmts); } } return cached; } /* Return true if we can use CMP_TYPE as the comparison type to produce all masks required to mask LOOP_VINFO. */ static bool can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type) { rgroup_masks *rgm; unsigned int i; FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm) if (rgm->mask_type != NULL_TREE && !direct_internal_fn_supported_p (IFN_WHILE_ULT, cmp_type, rgm->mask_type, OPTIMIZE_FOR_SPEED)) return false; return true; } /* Calculate the maximum number of scalars per iteration for every rgroup in LOOP_VINFO. */ static unsigned int vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo) { unsigned int res = 1; unsigned int i; rgroup_masks *rgm; FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm) res = MAX (res, rgm->max_nscalars_per_iter); return res; } /* Each statement in LOOP_VINFO can be masked where necessary. Check whether we can actually generate the masks required. Return true if so, storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. 
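As an illustrative sizing example: if the loop runs at most 1000 iterations and the widest rgroup replicates each mask bit 4 times, max_ni becomes 4000, which needs 12 bits, so the first integer mode with at least that precision for which WHILE_ULT is supported can serve as the comparison type.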
*/ static bool vect_verify_full_masking (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned int min_ni_width; /* Use a normal loop if there are no statements that need masking. This only happens in rare degenerate cases: it means that the loop has no loads, no stores, and no live-out values. */ if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ()) return false; /* Get the maximum number of iterations that is representable in the counter type. */ tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo)); widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1; /* Get a more refined estimate for the number of iterations. */ widest_int max_back_edges; if (max_loop_iterations (loop, &max_back_edges)) max_ni = wi::smin (max_ni, max_back_edges + 1); /* Account for rgroup masks, in which each bit is replicated N times. */ max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo); /* Work out how many bits we need to represent the limit. */ min_ni_width = wi::min_precision (max_ni, UNSIGNED); /* Find a scalar mode for which WHILE_ULT is supported. */ opt_scalar_int_mode cmp_mode_iter; tree cmp_type = NULL_TREE; FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT) { unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ()); if (cmp_bits >= min_ni_width && targetm.scalar_mode_supported_p (cmp_mode_iter.require ())) { tree this_type = build_nonstandard_integer_type (cmp_bits, true); if (this_type && can_produce_all_loop_masks_p (loop_vinfo, this_type)) { /* Although we could stop as soon as we find a valid mode, it's often better to continue until we hit Pmode, since the operands to the WHILE are more likely to be reusable in address calculations. */ cmp_type = this_type; if (cmp_bits >= GET_MODE_BITSIZE (Pmode)) break; } } } if (!cmp_type) return false; LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type; return true; } /* Calculate the cost of one scalar iteration of the loop. */ static void vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes, factor; int innerloop_iters, i; /* Gather costs for statements in the scalar loop. */ /* FORNOW. */ innerloop_iters = 1; if (loop->inner) innerloop_iters = 50; /* FIXME */ for (i = 0; i < nbbs; i++) { gimple_stmt_iterator si; basic_block bb = bbs[i]; if (bb->loop_father == loop->inner) factor = innerloop_iters; else factor = 1; for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); if (!is_gimple_assign (stmt) && !is_gimple_call (stmt)) continue; /* Skip stmts that are not vectorized inside the loop. */ if (stmt_info && !STMT_VINFO_RELEVANT_P (stmt_info) && (!STMT_VINFO_LIVE_P (stmt_info) || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) && !STMT_VINFO_IN_PATTERN_P (stmt_info)) continue; vect_cost_for_stmt kind; if (STMT_VINFO_DATA_REF (stmt_info)) { if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))) kind = scalar_load; else kind = scalar_store; } else kind = scalar_stmt; record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), factor, kind, stmt_info, 0, vect_prologue); } } /* Now accumulate cost. */ void *target_cost_data = init_cost (loop); stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? 
vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (target_cost_data, si->count, si->kind, stmt_info, si->misalign, vect_body); } unsigned dummy, body_cost = 0; finish_cost (target_cost_data, &dummy, &body_cost, &dummy); destroy_cost_data (target_cost_data); LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost; } /* Function vect_analyze_loop_form_1. Verify that certain CFG restrictions hold, including: - the loop has a pre-header - the loop has a single entry and exit - the loop exit condition is simple enough - the number of iterations can be analyzed, i.e, a countable loop. The niter could be analyzed under some assumptions. */ bool vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond, tree *assumptions, tree *number_of_iterationsm1, tree *number_of_iterations, gcond **inner_loop_cond) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_loop_form ===\n"); /* Different restrictions apply when we are considering an inner-most loop, vs. an outer (nested) loop. (FORNOW. May want to relax some of these restrictions in the future). */ if (!loop->inner) { /* Inner-most loop. We currently require that the number of BBs is exactly 2 (the header and latch). Vectorizable inner-most loops look like this: (pre-header) | header <--------+ | | | | +--> latch --+ | (exit-bb) */ if (loop->num_nodes != 2) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: control flow in loop.\n"); return false; } if (empty_block_p (loop->header)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: empty loop.\n"); return false; } } else { struct loop *innerloop = loop->inner; edge entryedge; /* Nested loop. We currently require that the loop is doubly-nested, contains a single inner loop, and the number of BBs is exactly 5. Vectorizable outer-loops look like this: (pre-header) | header <---+ | | inner-loop | | | tail ------+ | (exit-bb) The inner-loop has the properties expected of inner-most loops as described above. */ if ((loop->inner)->inner || (loop->inner)->next) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: multiple nested loops.\n"); return false; } if (loop->num_nodes != 5) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: control flow in loop.\n"); return false; } entryedge = loop_preheader_edge (innerloop); if (entryedge->src != loop->header || !single_exit (innerloop) || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: unsupported outerloop form.\n"); return false; } /* Analyze the inner-loop. */ tree inner_niterm1, inner_niter, inner_assumptions; if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond, &inner_assumptions, &inner_niterm1, &inner_niter, NULL) /* Don't support analyzing niter under assumptions for inner loop. 
*/ || !integer_onep (inner_assumptions)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: Bad inner loop.\n"); return false; } if (!expr_invariant_in_loop_p (loop, inner_niter)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: inner-loop count not" " invariant.\n"); return false; } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Considering outer-loop vectorization.\n"); } if (!single_exit (loop) || EDGE_COUNT (loop->header->preds) != 2) { if (dump_enabled_p ()) { if (!single_exit (loop)) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: multiple exits.\n"); else if (EDGE_COUNT (loop->header->preds) != 2) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: too many incoming edges.\n"); } return false; } /* We assume that the loop exit condition is at the end of the loop. i.e, that the loop is represented as a do-while (with a proper if-guard before the loop if needed), where the loop header contains all the executable statements, and the latch is empty. */ if (!empty_block_p (loop->latch) || !gimple_seq_empty_p (phi_nodes (loop->latch))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: latch block not empty.\n"); return false; } /* Make sure the exit is not abnormal. */ edge e = single_exit (loop); if (e->flags & EDGE_ABNORMAL) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: abnormal loop exit edge.\n"); return false; } *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations, number_of_iterationsm1); if (!*loop_cond) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: complicated exit condition.\n"); return false; } if (integer_zerop (*assumptions) || !*number_of_iterations || chrec_contains_undetermined (*number_of_iterations)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: number of iterations cannot be " "computed.\n"); return false; } if (integer_zerop (*number_of_iterations)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: number of iterations = 0.\n"); return false; } return true; } /* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */ loop_vec_info vect_analyze_loop_form (struct loop *loop) { tree assumptions, number_of_iterations, number_of_iterationsm1; gcond *loop_cond, *inner_loop_cond = NULL; if (! vect_analyze_loop_form_1 (loop, &loop_cond, &assumptions, &number_of_iterationsm1, &number_of_iterations, &inner_loop_cond)) return NULL; loop_vec_info loop_vinfo = new _loop_vec_info (loop); LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1; LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations; LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations; if (!integer_onep (assumptions)) { /* We consider to vectorize this loop by versioning it under some assumptions. In order to do this, we need to clear existing information computed by scev and niter analyzer. */ scev_reset_htab (); free_numbers_of_iterations_estimates (loop); /* Also set flag for this loop so that following scev and niter analysis are done under the assumptions. */ loop_constraint_set (loop, LOOP_C_FINITE); /* Also record the assumptions for versioning. 
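For example (illustrative): if the niter expression is only valid under n_5 > 0, that predicate is recorded here and the loop is later versioned on it, so the vector loop runs only when the assumption holds.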
*/ LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions; } if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Symbolic number of iterations is "); dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations); dump_printf (MSG_NOTE, "\n"); } } STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type; if (inner_loop_cond) STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond)) = loop_exit_ctrl_vec_info_type; gcc_assert (!loop->aux); loop->aux = loop_vinfo; return loop_vinfo; } /* Scan the loop stmts and, depending on whether there are any (non-)SLP statements, update the vectorization factor. */ static void vect_update_vf_for_slp (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; poly_uint64 vectorization_factor; int i; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_update_vf_for_slp ===\n"); vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (known_ne (vectorization_factor, 0U)); /* If all the stmts in the loop can be SLPed, we perform only SLP, and the vectorization factor of the loop is the unrolling factor required by the SLP instances. If that unrolling factor is 1, we say that we perform pure SLP on the loop - cross-iteration parallelism is not exploited. */ bool only_slp_in_loop = true; for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); if (STMT_VINFO_IN_PATTERN_P (stmt_info) && STMT_VINFO_RELATED_STMT (stmt_info)) { stmt = STMT_VINFO_RELATED_STMT (stmt_info); stmt_info = vinfo_for_stmt (stmt); } if ((STMT_VINFO_RELEVANT_P (stmt_info) || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info))) && !PURE_SLP_STMT (stmt_info)) /* STMT needs both SLP and loop-based vectorization. */ only_slp_in_loop = false; } } if (only_slp_in_loop) { dump_printf_loc (MSG_NOTE, vect_location, "Loop contains only SLP stmts\n"); vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo); } else { dump_printf_loc (MSG_NOTE, vect_location, "Loop contains SLP and non-SLP stmts\n"); /* Both the vectorization factor and unroll factor have the form current_vector_size * X for some rational X, so they must have a common multiple. */ vectorization_factor = force_common_multiple (vectorization_factor, LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo)); } LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Updating vectorization factor to "); dump_dec (MSG_NOTE, vectorization_factor); dump_printf (MSG_NOTE, ".\n"); } } /* Return true if STMT_INFO describes a double reduction phi and if the other phi in the reduction is also relevant for vectorization. This rejects cases such as: outer1: x_1 = PHI <x_3(outer2), ...>; ... inner: x_2 = ...; ... outer2: x_3 = PHI <x_2(inner)>; if nothing in x_2 or elsewhere makes x_1 relevant. */ static bool vect_active_double_reduction_p (stmt_vec_info stmt_info) { if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def) return false; gimple *other_phi = STMT_VINFO_REDUC_DEF (stmt_info); return STMT_VINFO_RELEVANT_P (vinfo_for_stmt (other_phi)); } /* Function vect_analyze_loop_operations. Scan the loop stmts and make sure they are all vectorizable.
*/ static bool vect_analyze_loop_operations (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; int i; stmt_vec_info stmt_info; bool need_to_vectorize = false; bool ok; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_loop_operations ===\n"); for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); ok = true; stmt_info = vinfo_for_stmt (phi); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "examining phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } if (virtual_operand_p (gimple_phi_result (phi))) continue; /* Inner-loop loop-closed exit phi in outer-loop vectorization (i.e., a phi in the tail of the outer-loop). */ if (! is_loop_header_bb_p (bb)) { /* FORNOW: we currently don't support the case that these phis are not used in the outer loop (unless it is a double reduction, i.e., this phi is vect_reduction_def), because this case requires us to actually do something here. */ if (STMT_VINFO_LIVE_P (stmt_info) && !vect_active_double_reduction_p (stmt_info)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Unsupported loop-closed phi in " "outer-loop.\n"); return false; } /* If PHI is used in the outer loop, we check that its operand is defined in the inner loop. */ if (STMT_VINFO_RELEVANT_P (stmt_info)) { tree phi_op; gimple *op_def_stmt; if (gimple_phi_num_args (phi) != 1) return false; phi_op = PHI_ARG_DEF (phi, 0); if (TREE_CODE (phi_op) != SSA_NAME) return false; op_def_stmt = SSA_NAME_DEF_STMT (phi_op); if (gimple_nop_p (op_def_stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt)) || !vinfo_for_stmt (op_def_stmt)) return false; if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) != vect_used_in_outer && STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt)) != vect_used_in_outer_by_reduction) return false; } continue; } gcc_assert (stmt_info); if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope || STMT_VINFO_LIVE_P (stmt_info)) && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) { /* A scalar-dependence cycle that we don't support. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: scalar dependence cycle.\n"); return false; } if (STMT_VINFO_RELEVANT_P (stmt_info)) { need_to_vectorize = true; if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def && ! PURE_SLP_STMT (stmt_info)) ok = vectorizable_induction (phi, NULL, NULL, NULL); else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) && ! PURE_SLP_STMT (stmt_info)) ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL); } /* SLP PHIs are tested by vect_slp_analyze_node_operations.
*/ if (ok && STMT_VINFO_LIVE_P (stmt_info) && !PURE_SLP_STMT (stmt_info)) ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL); if (!ok) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: relevant phi not " "supported: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0); } return false; } } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { gimple *stmt = gsi_stmt (si); if (!gimple_clobber_p (stmt) && !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL)) return false; } } /* bbs */ /* All operations in the loop are either irrelevant (deal with loop control, or dead), or only used outside the loop and can be moved out of the loop (e.g. invariants, inductions). The loop can be optimized away by scalar optimizations. We're better off not touching this loop. */ if (!need_to_vectorize) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "All the computation can be taken out of the loop.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: redundant loop. no profit to " "vectorize.\n"); return false; } return true; } /* Analyze the cost of the loop described by LOOP_VINFO. Decide if it is worthwhile to vectorize. Return 1 if definitely yes, 0 if definitely no, or -1 if it's worth retrying. */ static int vect_analyze_loop_costing (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo); /* Only fully-masked loops can have iteration counts less than the vectorization factor. */ if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { HOST_WIDE_INT max_niter; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo); else max_niter = max_stmt_executions_int (loop); if (max_niter != -1 && (unsigned HOST_WIDE_INT) max_niter < assumed_vf) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: iteration count smaller than " "vectorization factor.\n"); return 0; } } int min_profitable_iters, min_profitable_estimate; vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters, &min_profitable_estimate); if (min_profitable_iters < 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vector version will never be " "profitable.\n"); return -1; } int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND) * assumed_vf); /* Use the cost model only if it is more conservative than user specified threshold. 
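E.g. (illustrative arithmetic): with --param min-vect-loop-bound=2, an assumed VF of 4 and min_profitable_iters of 10, min_scalar_loop_bound is 2 * 4 == 8 and the threshold computed below is MAX (8, 10) == 10 iterations.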
*/ unsigned int th = (unsigned) MAX (min_scalar_loop_bound, min_profitable_iters); LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_INT_NITERS (loop_vinfo) < th) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: vectorization not profitable.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: iteration count smaller than user " "specified loop bound parameter or minimum profitable " "iterations (whichever is more conservative).\n"); return 0; } HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop); if (estimated_niter == -1) estimated_niter = likely_max_stmt_executions_int (loop); if (estimated_niter != -1 && ((unsigned HOST_WIDE_INT) estimated_niter < MAX (th, (unsigned) min_profitable_estimate))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: estimated iteration count too " "small.\n"); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "not vectorized: estimated iteration count smaller " "than specified loop bound parameter or minimum " "profitable iterations (whichever is more " "conservative).\n"); return -1; } return 1; } /* Function vect_analyze_loop_2. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. */ static bool vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal) { bool ok; int res; unsigned int max_vf = MAX_VECTORIZATION_FACTOR; poly_uint64 min_vf = 2; unsigned int n_stmts = 0; /* The first group of checks is independent of the vector size. */ fatal = true; /* Find all data references in the loop (which correspond to vdefs/vuses) and analyze their evolution in the loop. */ basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); loop_p loop = LOOP_VINFO_LOOP (loop_vinfo); if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop nest containing two " "or more consecutive inner loops cannot be " "vectorized\n"); return false; } for (unsigned i = 0; i < loop->num_nodes; i++) for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) continue; ++n_stmts; if (!find_data_references_in_stmt (loop, stmt, &LOOP_VINFO_DATAREFS (loop_vinfo))) { if (is_gimple_call (stmt) && loop->safelen) { tree fndecl = gimple_call_fndecl (stmt), op; if (fndecl != NULL_TREE) { cgraph_node *node = cgraph_node::get (fndecl); if (node != NULL && node->simd_clones != NULL) { unsigned int j, n = gimple_call_num_args (stmt); for (j = 0; j < n; j++) { op = gimple_call_arg (stmt, j); if (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))) break; } op = gimple_call_lhs (stmt); /* Ignore #pragma omp declare simd functions if they don't have data references in the call stmt itself. */ if (j == n && !(op && (DECL_P (op) || (REFERENCE_CLASS_P (op) && get_base_address (op))))) continue; } } } if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: loop contains function " "calls or data references that cannot " "be analyzed\n"); return false; } } /* Analyze the data references and also adjust the minimal vectorization factor according to the loads and stores. 
*/ ok = vect_analyze_data_refs (loop_vinfo, &min_vf); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data references.\n"); return false; } /* Classify all cross-iteration scalar data-flow cycles. Cross-iteration cycles caused by virtual phis are analyzed separately. */ vect_analyze_scalar_cycles (loop_vinfo); vect_pattern_recog (loop_vinfo); vect_fixup_scalar_cycles_with_patterns (loop_vinfo); /* Analyze the access patterns of the data-refs in the loop (consecutive, complex, etc.). FORNOW: Only handle consecutive access pattern. */ ok = vect_analyze_data_ref_accesses (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data access.\n"); return false; } /* Data-flow analysis to detect stmts that do not need to be vectorized. */ ok = vect_mark_stmts_to_be_vectorized (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unexpected pattern.\n"); return false; } /* While the rest of the analysis below depends on it in some way. */ fatal = false; /* Analyze data dependences between the data-refs in the loop and adjust the maximum vectorization factor according to the dependences. FORNOW: fail at the first data dependence that we encounter. */ ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf); if (!ok || (max_vf != MAX_VECTORIZATION_FACTOR && maybe_lt (max_vf, min_vf))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf; ok = vect_determine_vectorization_factor (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't determine vectorization factor.\n"); return false; } if (max_vf != MAX_VECTORIZATION_FACTOR && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data dependence.\n"); return false; } /* Compute the scalar iteration cost. */ vect_compute_single_scalar_iteration_cost (loop_vinfo); poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); unsigned th; /* Check the SLP opportunities in the loop, analyze and build SLP trees. */ ok = vect_analyze_slp (loop_vinfo, n_stmts); if (!ok) return false; /* If there are any SLP instances mark them as pure_slp. */ bool slp = vect_make_slp_decision (loop_vinfo); if (slp) { /* Find stmts that need to be both vectorized and SLPed. */ vect_detect_hybrid_slp (loop_vinfo); /* Update the vectorization factor based on the SLP decision. */ vect_update_vf_for_slp (loop_vinfo); } bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo); /* We don't expect to have to roll back to anything other than an empty set of rgroups. */ gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ()); /* This is the point where we can re-start analysis with SLP forced off. */ start_over: /* Now the vectorization factor is final. 
*/ poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo); gcc_assert (known_ne (vectorization_factor, 0U)); if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vectorization_factor = "); dump_dec (MSG_NOTE, vectorization_factor); dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n", LOOP_VINFO_INT_NITERS (loop_vinfo)); } HOST_WIDE_INT max_niter = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo)); /* Analyze the alignment of the data-refs in the loop. Fail if a data reference is found that cannot be vectorized. */ ok = vect_analyze_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } /* Prune the list of ddrs to be tested at run-time by versioning for alias. It is important to call pruning after vect_analyze_data_ref_accesses, since we use grouping information gathered by interleaving analysis. */ ok = vect_prune_runtime_alias_test_list (loop_vinfo); if (!ok) return false; /* Do not invoke vect_enhance_data_refs_alignment for epilogue vectorization. */ if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { /* This pass will decide on using loop versioning and/or loop peeling in order to enhance the alignment of data references in the loop. */ ok = vect_enhance_data_refs_alignment (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad data alignment.\n"); return false; } } if (slp) { /* Analyze operations in the SLP instances. Note this may remove unsupported SLP instances which makes the above SLP kind detection invalid. */ unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length (); vect_slp_analyze_operations (loop_vinfo); if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size) goto again; } /* Scan all the remaining operations in the loop that are not subject to SLP and make sure they are vectorizable. */ ok = vect_analyze_loop_operations (loop_vinfo); if (!ok) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad operation or unsupported loop bound.\n"); return false; } /* Decide whether to use a fully-masked loop for this vectorization factor. */ LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) && vect_verify_full_masking (loop_vinfo)); if (dump_enabled_p ()) { if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) dump_printf_loc (MSG_NOTE, vect_location, "using a fully-masked loop.\n"); else dump_printf_loc (MSG_NOTE, vect_location, "not using a fully-masked loop.\n"); } /* If an epilog loop is required because of data accesses with gaps, one additional iteration needs to be peeled. Check if there are enough iterations for vectorization. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo); if (known_lt (wi::to_widest (scalar_niters), vf)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop has not enough iterations to support" " peeling for gaps.\n"); return false; } } /* Check that the costings of the loop make vectorizing worthwhile.
*/ res = vect_analyze_loop_costing (loop_vinfo); if (res < 0) goto again; if (!res) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Loop costings not worthwhile.\n"); return false; } /* Decide whether we need to create an epilogue loop to handle remaining scalar iterations. */ th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); unsigned HOST_WIDE_INT const_vf; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) /* The main loop handles all iterations. */ LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false; else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0) { /* Work out the (constant) number of iterations that need to be peeled for reasons other than niters. */ unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) peel_niter += 1; if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter, LOOP_VINFO_VECT_FACTOR (loop_vinfo))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; } else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) /* ??? When peeling for gaps but not alignment, we could try to check whether the (variable) niters is known to be VF * N + 1. That's something of a niche case though. */ || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf) || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo)) < (unsigned) exact_log2 (const_vf)) /* In case of versioning, check if the maximum number of iterations is greater than th. If they are identical, the epilogue is unnecessary. */ && (!LOOP_REQUIRES_VERSIONING (loop_vinfo) || ((unsigned HOST_WIDE_INT) max_niter > (th / const_vf) * const_vf)))) LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true; /* If an epilogue loop is required make sure we can create one. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n"); if (!vect_can_advance_ivs_p (loop_vinfo) || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo), single_exit (LOOP_VINFO_LOOP (loop_vinfo)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not vectorized: can't create required " "epilog loop\n"); goto again; } } /* During peeling, we need to check if number of loop iterations is enough for both peeled prolog loop and vector loop. This check can be merged along with threshold check of loop versioning, so increase threshold for this case if necessary. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) { poly_uint64 niters_th = 0; if (!vect_use_loop_mask_for_alignment_p (loop_vinfo)) { /* Niters for peeled prolog loop. */ if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) { struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))); niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1; } else niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); } /* Niters for at least one iteration of vectorized loop. */ if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo); /* One additional iteration because of peeling for gap. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) niters_th += 1; LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th; } gcc_assert (known_eq (vectorization_factor, LOOP_VINFO_VECT_FACTOR (loop_vinfo))); /* Ok to vectorize! 
*/ return true; again: /* Try again with SLP forced off, but if we didn't do any SLP there is no point in re-trying. */ if (!slp) return false; /* If there are reduction chains, re-trying will fail anyway. */ if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ()) return false; /* Likewise if the grouped loads or stores in the SLP cannot be handled via interleaving or lane instructions. */ slp_instance instance; slp_tree node; unsigned i, j; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) { stmt_vec_info vinfo; vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]); if (! STMT_VINFO_GROUPED_ACCESS (vinfo)) continue; vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo); tree vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_store_lanes_supported (vectype, size, false) && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U) && ! vect_grouped_store_supported (vectype, size)) return false; FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node) { vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]); vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo)); bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo); size = STMT_VINFO_GROUP_SIZE (vinfo); vectype = STMT_VINFO_VECTYPE (vinfo); if (! vect_load_lanes_supported (vectype, size, false) && ! vect_grouped_load_supported (vectype, single_element_p, size)) return false; } } if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "re-trying with SLP disabled\n"); /* Roll back state appropriately. No SLP this time. */ slp = false; /* Restore the vectorization factor as it was without SLP. */ LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor; /* Free the SLP instances. */ FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Reset SLP type to loop_vect on all stmts. */ for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i) { basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i]; for (gimple_stmt_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si)); STMT_SLP_TYPE (stmt_info) = loop_vect; } for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) { stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si)); STMT_SLP_TYPE (stmt_info) = loop_vect; if (STMT_VINFO_IN_PATTERN_P (stmt_info)) { stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info)); STMT_SLP_TYPE (stmt_info) = loop_vect; for (gimple_stmt_iterator pi = gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info)); !gsi_end_p (pi); gsi_next (&pi)) { gimple *pstmt = gsi_stmt (pi); STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect; } } } } /* Free optimized alias test DDRS. */ LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0); LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release (); LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release (); /* Reset target cost data. */ destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)); LOOP_VINFO_TARGET_COST_DATA (loop_vinfo) = init_cost (LOOP_VINFO_LOOP (loop_vinfo)); /* Reset accumulated rgroup information. */ release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo)); /* Reset assorted flags.
*/ LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false; LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false; LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0; LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0; LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p; goto start_over; } /* Function vect_analyze_loop. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must be vectorized. */ loop_vec_info vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo) { loop_vec_info loop_vinfo; auto_vector_sizes vector_sizes; /* Autodetect first vector size we try. */ current_vector_size = 0; targetm.vectorize.autovectorize_vector_sizes (&vector_sizes); unsigned int next_size = 0; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "===== analyze_loop_nest =====\n"); if (loop_outer (loop) && loop_vec_info_for_loop (loop_outer (loop)) && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop)))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "outer-loop already vectorized.\n"); return NULL; } poly_uint64 autodetected_vector_size = 0; while (1) { /* Check the CFG characteristics of the loop (nesting, entry/exit). */ loop_vinfo = vect_analyze_loop_form (loop); if (!loop_vinfo) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "bad loop form.\n"); return NULL; } bool fatal = false; if (orig_loop_vinfo) LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo; if (vect_analyze_loop_2 (loop_vinfo, fatal)) { LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1; return loop_vinfo; } delete loop_vinfo; if (next_size == 0) autodetected_vector_size = current_vector_size; if (next_size < vector_sizes.length () && known_eq (vector_sizes[next_size], autodetected_vector_size)) next_size += 1; if (fatal || next_size == vector_sizes.length () || known_eq (current_vector_size, 0U)) return NULL; /* Try the next biggest vector size. */ current_vector_size = vector_sizes[next_size++]; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "***** Re-trying analysis with " "vector size "); dump_dec (MSG_NOTE, current_vector_size); dump_printf (MSG_NOTE, "\n"); } } } /* Return true if there is an in-order reduction function for CODE, storing it in *REDUC_FN if so. */ static bool fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn) { switch (code) { case PLUS_EXPR: *reduc_fn = IFN_FOLD_LEFT_PLUS; return true; default: return false; } } /* Function reduction_fn_for_scalar_code Input: CODE - tree_code of a reduction operations. Output: REDUC_FN - the corresponding internal function to be used to reduce the vector of partial results into a single scalar result, or IFN_LAST if the operation is a supported reduction operation, but does not have such an internal function. Return FALSE if CODE currently cannot be vectorized as reduction. 
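For instance, MAX_EXPR maps to IFN_REDUC_MAX below, while MULT_EXPR is a supported reduction without a direct internal function, so *REDUC_FN is set to IFN_LAST and the reduction epilogue presumably falls back to an element-extraction sequence.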
*/ static bool reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn) { switch (code) { case MAX_EXPR: *reduc_fn = IFN_REDUC_MAX; return true; case MIN_EXPR: *reduc_fn = IFN_REDUC_MIN; return true; case PLUS_EXPR: *reduc_fn = IFN_REDUC_PLUS; return true; case BIT_AND_EXPR: *reduc_fn = IFN_REDUC_AND; return true; case BIT_IOR_EXPR: *reduc_fn = IFN_REDUC_IOR; return true; case BIT_XOR_EXPR: *reduc_fn = IFN_REDUC_XOR; return true; case MULT_EXPR: case MINUS_EXPR: *reduc_fn = IFN_LAST; return true; default: return false; } } /* If there is a neutral value X such that SLP reduction NODE would not be affected by the introduction of additional X elements, return that X, otherwise return null. CODE is the code of the reduction. REDUC_CHAIN is true if the SLP statements perform a single reduction, false if each statement performs an independent reduction. */ static tree neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code, bool reduc_chain) { vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node); gimple *stmt = stmts[0]; stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo); tree scalar_type = TREE_TYPE (vector_type); struct loop *loop = gimple_bb (stmt)->loop_father; gcc_assert (loop); switch (code) { case WIDEN_SUM_EXPR: case DOT_PROD_EXPR: case SAD_EXPR: case PLUS_EXPR: case MINUS_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: return build_zero_cst (scalar_type); case MULT_EXPR: return build_one_cst (scalar_type); case BIT_AND_EXPR: return build_all_ones_cst (scalar_type); case MAX_EXPR: case MIN_EXPR: /* For MIN/MAX the initial values are neutral. A reduction chain has only a single initial value, so that value is neutral for all statements. */ if (reduc_chain) return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop)); return NULL_TREE; default: return NULL_TREE; } } /* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement STMT is printed with a message MSG. */ static void report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg) { dump_printf_loc (msg_type, vect_location, "%s", msg); dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0); } /* Detect SLP reduction of the form: #a1 = phi <a5, a0> a2 = operation (a1) a3 = operation (a2) a4 = operation (a3) a5 = operation (a4) #a = phi <a5> PHI is the reduction phi node (#a1 = phi <a5, a0> above) FIRST_STMT is the first reduction stmt in the chain (a2 = operation (a1)). Return TRUE if a reduction chain was detected. */ static bool vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi, gimple *first_stmt) { struct loop *loop = (gimple_bb (phi))->loop_father; struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); enum tree_code code; gimple *loop_use_stmt = NULL; stmt_vec_info use_stmt_info; tree lhs; imm_use_iterator imm_iter; use_operand_p use_p; int nloop_uses, size = 0, n_out_of_loop_uses; bool found = false; if (loop != vect_loop) return false; auto_vec<stmt_vec_info, 8> reduc_chain; lhs = PHI_RESULT (phi); code = gimple_assign_rhs_code (first_stmt); while (1) { nloop_uses = 0; n_out_of_loop_uses = 0; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; /* Check if we got back to the reduction phi. 
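If so, the walk a2 -> a3 -> a4 -> a5 from the example above has closed the cycle and we have a candidate reduction chain.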
*/ if (use_stmt == phi) { loop_use_stmt = use_stmt; found = true; break; } if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) { loop_use_stmt = use_stmt; nloop_uses++; } else n_out_of_loop_uses++; /* There can be either a single use in the loop or two uses in phi nodes. */ if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses)) return false; } if (found) break; /* We reached a statement with no loop uses. */ if (nloop_uses == 0) return false; /* This is a loop exit phi, and we haven't reached the reduction phi. */ if (gimple_code (loop_use_stmt) == GIMPLE_PHI) return false; if (!is_gimple_assign (loop_use_stmt) || code != gimple_assign_rhs_code (loop_use_stmt) || !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt))) return false; /* Insert USE_STMT into reduction chain. */ use_stmt_info = vinfo_for_stmt (loop_use_stmt); reduc_chain.safe_push (use_stmt_info); lhs = gimple_assign_lhs (loop_use_stmt); size++; } if (!found || loop_use_stmt != phi || size < 2) return false; /* Swap the operands, if needed, to make the reduction operand be the second operand. */ lhs = PHI_RESULT (phi); for (unsigned i = 0; i < reduc_chain.length (); ++i) { gassign *next_stmt = as_a <gassign *> (reduc_chain[i]->stmt); if (gimple_assign_rhs2 (next_stmt) == lhs) { tree op = gimple_assign_rhs1 (next_stmt); gimple *def_stmt = NULL; if (TREE_CODE (op) == SSA_NAME) def_stmt = SSA_NAME_DEF_STMT (op); /* Check that the other def is either defined in the loop ("vect_internal_def"), or it's an induction (defined by a loop-header phi-node). */ if (def_stmt && gimple_bb (def_stmt) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && (is_gimple_assign (def_stmt) || is_gimple_call (def_stmt) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_induction_def || (gimple_code (def_stmt) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def_stmt))))) { lhs = gimple_assign_lhs (next_stmt); continue; } return false; } else { tree op = gimple_assign_rhs2 (next_stmt); gimple *def_stmt = NULL; if (TREE_CODE (op) == SSA_NAME) def_stmt = SSA_NAME_DEF_STMT (op); /* Check that the other def is either defined in the loop ("vect_internal_def"), or it's an induction (defined by a loop-header phi-node). */ if (def_stmt && gimple_bb (def_stmt) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && (is_gimple_assign (def_stmt) || is_gimple_call (def_stmt) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_induction_def || (gimple_code (def_stmt) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def_stmt))))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0); } swap_ssa_operands (next_stmt, gimple_assign_rhs1_ptr (next_stmt), gimple_assign_rhs2_ptr (next_stmt)); update_stmt (next_stmt); if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt))) LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; } else return false; } lhs = gimple_assign_lhs (next_stmt); } /* Build up the actual chain. */ for (unsigned i = 0; i < reduc_chain.length () - 1; ++i) { GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0]->stmt; GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1]->stmt; } GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0]->stmt; GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL; /* Save the chain for further analysis in SLP detection.
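Only the head statement is pushed; the rest of the chain remains reachable through the GROUP_NEXT_ELEMENT links built above.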
*/ LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]->stmt); GROUP_SIZE (reduc_chain[0]) = size; return true; } /* Return true if we need an in-order reduction for operation CODE on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer overflow must wrap. */ static bool needs_fold_left_reduction_p (tree type, tree_code code, bool need_wrapping_integral_overflow) { /* CHECKME: check for !flag_finite_math_only too? */ if (SCALAR_FLOAT_TYPE_P (type)) switch (code) { case MIN_EXPR: case MAX_EXPR: return false; default: return !flag_associative_math; } if (INTEGRAL_TYPE_P (type)) { if (!operation_no_trapping_overflow (type, code)) return true; if (need_wrapping_integral_overflow && !TYPE_OVERFLOW_WRAPS (type) && operation_can_overflow (code)) return true; return false; } if (SAT_FIXED_POINT_TYPE_P (type)) return true; return false; } /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and reduction operation CODE has a handled computation expression. */ bool check_reduction_path (location_t loc, loop_p loop, gphi *phi, tree loop_arg, enum tree_code code) { auto_vec<std::pair<ssa_op_iter, use_operand_p> > path; auto_bitmap visited; tree lookfor = PHI_RESULT (phi); ssa_op_iter curri; use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE); while (USE_FROM_PTR (curr) != loop_arg) curr = op_iter_next_use (&curri); curri.i = curri.numops; do { path.safe_push (std::make_pair (curri, curr)); tree use = USE_FROM_PTR (curr); if (use == lookfor) break; gimple *def = SSA_NAME_DEF_STMT (use); if (gimple_nop_p (def) || ! flow_bb_inside_loop_p (loop, gimple_bb (def))) { pop: do { std::pair<ssa_op_iter, use_operand_p> x = path.pop (); curri = x.first; curr = x.second; do curr = op_iter_next_use (&curri); /* Skip already visited or non-SSA operands (from iterating over PHI args). */ while (curr != NULL_USE_OPERAND_P && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME || ! bitmap_set_bit (visited, SSA_NAME_VERSION (USE_FROM_PTR (curr))))); } while (curr == NULL_USE_OPERAND_P && ! path.is_empty ()); if (curr == NULL_USE_OPERAND_P) break; } else { if (gimple_code (def) == GIMPLE_PHI) curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE); else curr = op_iter_init_use (&curri, def, SSA_OP_USE); while (curr != NULL_USE_OPERAND_P && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME || ! bitmap_set_bit (visited, SSA_NAME_VERSION (USE_FROM_PTR (curr))))) curr = op_iter_next_use (&curri); if (curr == NULL_USE_OPERAND_P) goto pop; } } while (1); if (dump_file && (dump_flags & TDF_DETAILS)) { dump_printf_loc (MSG_NOTE, loc, "reduction path: "); unsigned i; std::pair<ssa_op_iter, use_operand_p> *x; FOR_EACH_VEC_ELT (path, i, x) { dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second)); dump_printf (MSG_NOTE, " "); } dump_printf (MSG_NOTE, "\n"); } /* Check whether the reduction path detected is valid. */ bool fail = path.length () == 0; bool neg = false; for (unsigned i = 1; i < path.length (); ++i) { gimple *use_stmt = USE_STMT (path[i].second); tree op = USE_FROM_PTR (path[i].second); if (! has_single_use (op) || ! is_gimple_assign (use_stmt)) { fail = true; break; } if (gimple_assign_rhs_code (use_stmt) != code) { if (code == PLUS_EXPR && gimple_assign_rhs_code (use_stmt) == MINUS_EXPR) { /* Track whether we negate the reduction value each iteration. */ if (gimple_assign_rhs2 (use_stmt) == op) neg = ! neg; } else { fail = true; break; } } } return ! fail && ! 
neg; } /* Function vect_is_simple_reduction (1) Detect a cross-iteration def-use cycle that represents a simple reduction computation. We look for the following pattern: loop_header: a1 = phi < a0, a2 > a3 = ... a2 = operation (a3, a1) or a3 = ... loop_header: a1 = phi < a0, a2 > a2 = operation (a3, a1) such that: 1. operation is commutative and associative and it is safe to change the order of the computation 2. no uses for a2 in the loop (a2 is used out of the loop) 3. no uses of a1 in the loop besides the reduction operation 4. no uses of a1 outside the loop. Conditions 1,4 are tested here. Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized. (2) Detect a cross-iteration def-use cycle in nested loops, i.e., nested cycles. (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double reductions: a1 = phi < a0, a2 > inner loop (def of a3) a2 = phi < a3 > (4) Detect condition expressions, ie: for (int i = 0; i < N; i++) if (a[i] < val) ret_val = a[i]; */ static gimple * vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi, bool *double_reduc, bool need_wrapping_integral_overflow, enum vect_reduction_type *v_reduc_type) { struct loop *loop = (gimple_bb (phi))->loop_father; struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info); gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL; enum tree_code orig_code, code; tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE; tree type; int nloop_uses; tree name; imm_use_iterator imm_iter; use_operand_p use_p; bool phi_def; *double_reduc = false; *v_reduc_type = TREE_CODE_REDUCTION; tree phi_name = PHI_RESULT (phi); /* ??? If there are no uses of the PHI result the inner loop reduction won't be detected as possibly double-reduction by vectorizable_reduction because that tries to walk the PHI arg from the preheader edge which can be constant. See PR60382. */ if (has_zero_uses (phi_name)) return NULL; nloop_uses = 0; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "intermediate value used outside loop.\n"); return NULL; } nloop_uses++; if (nloop_uses > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction value used in loop.\n"); return NULL; } phi_use_stmt = use_stmt; } edge latch_e = loop_latch_edge (loop); tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); if (TREE_CODE (loop_arg) != SSA_NAME) { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction: not ssa_name: "); dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg); dump_printf (MSG_MISSED_OPTIMIZATION, "\n"); } return NULL; } def_stmt = SSA_NAME_DEF_STMT (loop_arg); if (is_gimple_assign (def_stmt)) { name = gimple_assign_lhs (def_stmt); phi_def = false; } else if (gimple_code (def_stmt) == GIMPLE_PHI) { name = PHI_RESULT (def_stmt); phi_def = true; } else { if (dump_enabled_p ()) { dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction: unhandled reduction operation: "); dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0); } return NULL; } if (! 
flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) return NULL; nloop_uses = 0; auto_vec<gphi *, 3> lcphis; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))) nloop_uses++; else /* We can have more than one loop-closed PHI. */ lcphis.safe_push (as_a <gphi *> (use_stmt)); if (nloop_uses > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction used in loop.\n"); return NULL; } } /* If DEF_STMT is a phi node itself, we expect it to have a single argument defined in the inner loop. */ if (phi_def) { op1 = PHI_ARG_DEF (def_stmt, 0); if (gimple_phi_num_args (def_stmt) != 1 || TREE_CODE (op1) != SSA_NAME) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported phi node definition.\n"); return NULL; } def1 = SSA_NAME_DEF_STMT (op1); if (gimple_bb (def1) && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && loop->inner && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1)) && is_gimple_assign (def1) && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected double reduction: "); *double_reduc = true; return def_stmt; } return NULL; } /* If we are vectorizing an inner reduction we are executing that in the original order only in case we are not dealing with a double reduction. */ bool check_reduction = true; if (flow_loop_nested_p (vect_loop, loop)) { gphi *lcphi; unsigned i; check_reduction = false; FOR_EACH_VEC_ELT (lcphis, i, lcphi) FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi)) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt))) check_reduction = true; } } bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop); code = orig_code = gimple_assign_rhs_code (def_stmt); /* We can handle "res -= x[i]", which is non-associative by simply rewriting this into "res += -x[i]". Avoid changing gimple instruction for the first simple tests and only do this if we're allowed to change code at all. */ if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name) code = PLUS_EXPR; if (code == COND_EXPR) { if (! 
nested_in_vect_loop) *v_reduc_type = COND_REDUCTION; op3 = gimple_assign_rhs1 (def_stmt); if (COMPARISON_CLASS_P (op3)) { op4 = TREE_OPERAND (op3, 1); op3 = TREE_OPERAND (op3, 0); } if (op3 == phi_name || op4 == phi_name) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: condition depends on previous" " iteration: "); return NULL; } op1 = gimple_assign_rhs2 (def_stmt); op2 = gimple_assign_rhs3 (def_stmt); } else if (!commutative_tree_code (code) || !associative_tree_code (code)) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: not commutative/associative: "); return NULL; } else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS) { op1 = gimple_assign_rhs1 (def_stmt); op2 = gimple_assign_rhs2 (def_stmt); } else { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: not handled operation: "); return NULL; } if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME) { if (dump_enabled_p ()) report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: both uses not ssa_names: "); return NULL; } type = TREE_TYPE (gimple_assign_lhs (def_stmt)); if ((TREE_CODE (op1) == SSA_NAME && !types_compatible_p (type,TREE_TYPE (op1))) || (TREE_CODE (op2) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op2))) || (op3 && TREE_CODE (op3) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op3))) || (op4 && TREE_CODE (op4) == SSA_NAME && !types_compatible_p (type, TREE_TYPE (op4)))) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "reduction: multiple types: operation type: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, type); dump_printf (MSG_NOTE, ", operands types: "); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op1)); dump_printf (MSG_NOTE, ","); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op2)); if (op3) { dump_printf (MSG_NOTE, ","); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op3)); } if (op4) { dump_printf (MSG_NOTE, ","); dump_generic_expr (MSG_NOTE, TDF_SLIM, TREE_TYPE (op4)); } dump_printf (MSG_NOTE, "\n"); } return NULL; } /* Check whether it's ok to change the order of the computation. Generally, when vectorizing a reduction we change the order of the computation. This may change the behavior of the program in some cases, so we need to check that this is ok. One exception is when vectorizing an outer-loop: the inner-loop is executed sequentially, and therefore vectorizing reductions in the inner-loop during outer-loop vectorization is safe. */ if (check_reduction && *v_reduc_type == TREE_CODE_REDUCTION && needs_fold_left_reduction_p (type, code, need_wrapping_integral_overflow)) *v_reduc_type = FOLD_LEFT_REDUCTION; /* Reduction is safe. We're dealing with one of the following: 1) integer arithmetic and no trapv 2) floating point arithmetic, and special flags permit this optimization 3) nested cycle (i.e., outer loop vectorization). */ if (TREE_CODE (op1) == SSA_NAME) def1 = SSA_NAME_DEF_STMT (op1); if (TREE_CODE (op2) == SSA_NAME) def2 = SSA_NAME_DEF_STMT (op2); if (code != COND_EXPR && ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2)))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: "); return NULL; } /* Check that one def is the reduction def, defined by PHI, the other def is either defined in the loop ("vect_internal_def"), or it's an induction (defined by a loop-header phi-node). 
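For example, given a2 = b + a1 with a1 the result of PHI and b defined by a load inside the loop, DEF2 == PHI and DEF1 is a vect_internal_def, so the first case below matches and no operand swap is needed.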
*/ if (def2 && def2 == phi && (code == COND_EXPR || !def1 || gimple_nop_p (def1) || !flow_bb_inside_loop_p (loop, gimple_bb (def1)) || (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1)) && (is_gimple_assign (def1) || is_gimple_call (def1) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_induction_def || (gimple_code (def1) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def1))))))) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); return def_stmt; } if (def1 && def1 == phi && (code == COND_EXPR || !def2 || gimple_nop_p (def2) || !flow_bb_inside_loop_p (loop, gimple_bb (def2)) || (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2)) && (is_gimple_assign (def2) || is_gimple_call (def2) || STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_induction_def || (gimple_code (def2) == GIMPLE_PHI && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2)) == vect_internal_def && !is_loop_header_bb_p (gimple_bb (def2))))))) { if (! nested_in_vect_loop && orig_code != MINUS_EXPR) { /* Check if we can swap operands (just for simplicity - so that the rest of the code can assume that the reduction variable is always the last (second) argument). */ if (code == COND_EXPR) { /* Swap cond_expr by inverting the condition. */ tree cond_expr = gimple_assign_rhs1 (def_stmt); enum tree_code invert_code = ERROR_MARK; enum tree_code cond_code = TREE_CODE (cond_expr); if (TREE_CODE_CLASS (cond_code) == tcc_comparison) { bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0)); invert_code = invert_tree_comparison (cond_code, honor_nans); } if (invert_code != ERROR_MARK) { TREE_SET_CODE (cond_expr, invert_code); swap_ssa_operands (def_stmt, gimple_assign_rhs2_ptr (def_stmt), gimple_assign_rhs3_ptr (def_stmt)); } else { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: cannot swap operands " "for cond_expr"); return NULL; } } else swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt), gimple_assign_rhs2_ptr (def_stmt)); if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: need to swap operands: "); if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt))) LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true; } else { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "detected reduction: "); } return def_stmt; } /* Try to find SLP reduction chain. */ if (! nested_in_vect_loop && code != COND_EXPR && orig_code != MINUS_EXPR && vect_is_slp_reduction (loop_info, phi, def_stmt)) { if (dump_enabled_p ()) report_vect_op (MSG_NOTE, def_stmt, "reduction: detected reduction chain: "); return def_stmt; } /* Dissolve group eventually half-built by vect_is_slp_reduction. */ gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt)); while (first) { gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)); GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL; GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL; first = next; } /* Look for the expression computing loop_arg from loop PHI result. */ if (check_reduction_path (vect_location, loop, as_a <gphi *> (phi), loop_arg, code)) return def_stmt; if (dump_enabled_p ()) { report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt, "reduction: unknown pattern: "); } return NULL; } /* Wrapper around vect_is_simple_reduction, which will modify code in-place if it enables detection of more reductions. Arguments as there. 
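On success the detected reduction type and the phi/def pair are recorded on the stmt_vec_infos of both statements, as done below.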
*/ gimple * vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi, bool *double_reduc, bool need_wrapping_integral_overflow) { enum vect_reduction_type v_reduc_type; gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc, need_wrapping_integral_overflow, &v_reduc_type); if (def) { stmt_vec_info reduc_def_info = vinfo_for_stmt (phi); STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type; STMT_VINFO_REDUC_DEF (reduc_def_info) = def; reduc_def_info = vinfo_for_stmt (def); STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type; STMT_VINFO_REDUC_DEF (reduc_def_info) = phi; } return def; } /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */ int vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue, int *peel_iters_epilogue, stmt_vector_for_cost *scalar_cost_vec, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *epilogue_cost_vec) { int retval = 0; int assumed_vf = vect_vf_for_cost (loop_vinfo); if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { *peel_iters_epilogue = assumed_vf / 2; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "cost model: epilogue peel iters set to vf/2 " "because loop iterations are unknown .\n"); /* If peeled iterations are known but number of scalar loop iterations are unknown, count a taken branch per peeled loop. */ retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken, NULL, 0, vect_prologue); retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken, NULL, 0, vect_epilogue); } else { int niters = LOOP_VINFO_INT_NITERS (loop_vinfo); peel_iters_prologue = niters < peel_iters_prologue ? niters : peel_iters_prologue; *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf; /* If we need to peel for gaps, but no peeling is required, we have to peel VF iterations. */ if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue) *peel_iters_epilogue = assumed_vf; } stmt_info_for_cost *si; int j; if (peel_iters_prologue) FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si) { stmt_vec_info stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; retval += record_stmt_cost (prologue_cost_vec, si->count * peel_iters_prologue, si->kind, stmt_info, si->misalign, vect_prologue); } if (*peel_iters_epilogue) FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si) { stmt_vec_info stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; retval += record_stmt_cost (epilogue_cost_vec, si->count * *peel_iters_epilogue, si->kind, stmt_info, si->misalign, vect_epilogue); } return retval; } /* Function vect_estimate_min_profitable_iters Return the number of iterations required for the vector version of the loop to be profitable relative to the cost of the scalar version of the loop. *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold of iterations for vectorization. -1 value means loop vectorization is not profitable. This returned value may be used for dynamic profitability check. *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used for static check against estimated number of iterations. 
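The first value may be used for a dynamic profitability check at runtime; the second is compared against the estimated number of iterations at compile time, and is never smaller than the first (see the MAX below).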
*/ static void vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo, int *ret_min_profitable_niters, int *ret_min_profitable_estimate) { int min_profitable_iters; int min_profitable_estimate; int peel_iters_prologue; int peel_iters_epilogue; unsigned vec_inside_cost = 0; int vec_outside_cost = 0; unsigned vec_prologue_cost = 0; unsigned vec_epilogue_cost = 0; int scalar_single_iter_cost = 0; int scalar_outside_cost = 0; int assumed_vf = vect_vf_for_cost (loop_vinfo); int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); /* Cost model disabled. */ if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo))) { dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n"); *ret_min_profitable_niters = 0; *ret_min_profitable_estimate = 0; return; } /* Requires loop versioning tests to handle misalignment. */ if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length (); (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning to treat misalignment.\n"); } /* Requires loop versioning with alias checks. */ if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length (); (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0, vect_prologue); len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length (); if (len) /* Count LEN - 1 ANDs and LEN comparisons. */ (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt, NULL, 0, vect_prologue); len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length (); if (len) { /* Count LEN - 1 ANDs and LEN comparisons. */ unsigned int nstmts = len * 2 - 1; /* +1 for each bias that needs adding. */ for (unsigned int i = 0; i < len; ++i) if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p) nstmts += 1; (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt, NULL, 0, vect_prologue); } dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning aliasing.\n"); } /* Requires loop versioning with niter checks. */ if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo)) { /* FIXME: Make cost depend on complexity of individual check. */ (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0, vect_prologue); dump_printf (MSG_NOTE, "cost model: Adding cost of checks for loop " "versioning niters.\n"); } if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); /* Count statements in scalar loop. Using this as scalar cost for a single iteration for now. TODO: Add outer loop support. TODO: Consider assigning different costs to different scalar statements. */ scalar_single_iter_cost = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo); /* Add additional cost for the peeled instructions in prologue and epilogue loop. (For fully-masked loops there will be no peeling.) FORNOW: If we don't know the value of peel_iters for prologue or epilogue at compile-time - we assume it's vf/2 (the worst would be vf-1). TODO: Build an expression that represents peel_iters for prologue and epilogue to be used in a run-time test. 
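For example, with assumed_vf == 8 and unknown peeling (npeel < 0) we charge 8/2 == 4 scalar iterations on each side, plus a taken and a not-taken branch for each guard, as done below.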
*/ if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { peel_iters_prologue = 0; peel_iters_epilogue = 0; if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)) { /* We need to peel exactly one iteration. */ peel_iters_epilogue += 1; stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (target_cost_data, si->count, si->kind, stmt_info, si->misalign, vect_epilogue); } } } else if (npeel < 0) { peel_iters_prologue = assumed_vf / 2; dump_printf (MSG_NOTE, "cost model: " "prologue peel iters set to vf/2.\n"); /* If peeling for alignment is unknown, loop bound of main loop becomes unknown. */ peel_iters_epilogue = assumed_vf / 2; dump_printf (MSG_NOTE, "cost model: " "epilogue peel iters set to vf/2 because " "peeling for alignment is unknown.\n"); /* If peeled iterations are unknown, count a taken branch and a not taken branch per peeled loop. Even if scalar loop iterations are known, vector iterations are not known since peeled prologue iterations are not known. Hence guards remain the same. */ (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_prologue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_prologue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0, vect_epilogue); (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken, NULL, 0, vect_epilogue); stmt_info_for_cost *si; int j; FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (target_cost_data, si->count * peel_iters_prologue, si->kind, stmt_info, si->misalign, vect_prologue); (void) add_stmt_cost (target_cost_data, si->count * peel_iters_epilogue, si->kind, stmt_info, si->misalign, vect_epilogue); } } else { stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec; stmt_info_for_cost *si; int j; void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); prologue_cost_vec.create (2); epilogue_cost_vec.create (2); peel_iters_prologue = npeel; (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue, &peel_iters_epilogue, &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), &prologue_cost_vec, &epilogue_cost_vec); FOR_EACH_VEC_ELT (prologue_cost_vec, j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (data, si->count, si->kind, stmt_info, si->misalign, vect_prologue); } FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si) { struct _stmt_vec_info *stmt_info = si->stmt ? vinfo_for_stmt (si->stmt) : NULL; (void) add_stmt_cost (data, si->count, si->kind, stmt_info, si->misalign, vect_epilogue); } prologue_cost_vec.release (); epilogue_cost_vec.release (); } /* FORNOW: The scalar outside cost is incremented in one of the following ways: 1. The vectorizer checks for alignment and aliasing and generates a condition that allows dynamic vectorization. A cost model check is ANDED with the versioning condition. Hence scalar code path now has the added cost of the versioning check. if (cost > th & versioning_check) jmp to vector code Hence run-time scalar is incremented by not-taken branch cost. 2. The vectorizer then checks if a prologue is required. If the cost model check was not done before during versioning, it has to be done before the prologue check. 
if (cost <= th) prologue = scalar_iters if (prologue == 0) jmp to vector code else execute prologue if (prologue == num_iters) go to exit Hence the run-time scalar cost is incremented by a taken branch, plus a not-taken branch, plus a taken branch cost. 3. The vectorizer then checks if an epilogue is required. If the cost model check was not done before during prologue check, it has to be done with the epilogue check. if (prologue == 0) jmp to vector code else execute prologue if (prologue == num_iters) go to exit vector code: if ((cost <= th) | (scalar_iters-prologue-epilogue == 0)) jmp to epilogue Hence the run-time scalar cost should be incremented by 2 taken branches. TODO: The back end may reorder the BBs differently and reverse conditions/branch directions. Change the estimates below to something more reasonable. */ /* If the number of iterations is known and we do not do versioning, we can decide whether to vectorize at compile time. Hence the scalar version does not carry cost model guard costs. */ if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) || LOOP_REQUIRES_VERSIONING (loop_vinfo)) { /* Cost model check occurs at versioning. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken); else { /* Cost model check occurs at prologue generation. */ if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0) scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken) + vect_get_stmt_cost (cond_branch_not_taken); /* Cost model check occurs at epilogue generation. */ else scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); } } /* Complete the target-specific cost calculations. */ finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost, &vec_inside_cost, &vec_epilogue_cost); vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n"); dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n", vec_inside_cost); dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost); dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost); dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n", scalar_single_iter_cost); dump_printf (MSG_NOTE, " Scalar outside cost: %d\n", scalar_outside_cost); dump_printf (MSG_NOTE, " Vector outside cost: %d\n", vec_outside_cost); dump_printf (MSG_NOTE, " prologue iterations: %d\n", peel_iters_prologue); dump_printf (MSG_NOTE, " epilogue iterations: %d\n", peel_iters_epilogue); } /* Calculate number of iterations required to make the vector version profitable, relative to the loop bodies only. The following condition must hold true: SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC where SIC = scalar iteration cost, VIC = vector iteration cost, VOC = vector outside cost, VF = vectorization factor, PL_ITERS = prologue iterations, EP_ITERS = epilogue iterations, SOC = scalar outside cost for run time cost model check.
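As a worked example with hypothetical costs SIC = 4, VIC = 6, VF = 4, VOC = 14, SOC = 6 and no peeling, the condition becomes 4 * niters + 6 > 6 * (niters / 4) + 14, which first holds at niters = 4; the computation below indeed yields min_profitable_iters == 4 for these values.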
*/ if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost) { min_profitable_iters = ((vec_outside_cost - scalar_outside_cost) * assumed_vf - vec_inside_cost * peel_iters_prologue - vec_inside_cost * peel_iters_epilogue); if (min_profitable_iters <= 0) min_profitable_iters = 0; else { min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf) - vec_inside_cost); if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters) <= (((int) vec_inside_cost * min_profitable_iters) + (((int) vec_outside_cost - scalar_outside_cost) * assumed_vf))) min_profitable_iters++; } } /* vector version will never be profitable. */ else { if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize) warning_at (vect_location, OPT_Wopenmp_simd, "vectorization " "did not happen for a simd loop"); if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "cost model: the vector iteration cost = %d " "divided by the scalar iteration cost = %d " "is greater or equal to the vectorization factor = %d" ".\n", vec_inside_cost, scalar_single_iter_cost, assumed_vf); *ret_min_profitable_niters = -1; *ret_min_profitable_estimate = -1; return; } dump_printf (MSG_NOTE, " Calculated minimum iters for profitability: %d\n", min_profitable_iters); if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && min_profitable_iters < (assumed_vf + peel_iters_prologue)) /* We want the vectorized loop to execute at least once. */ min_profitable_iters = assumed_vf + peel_iters_prologue; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, " Runtime profitability threshold = %d\n", min_profitable_iters); *ret_min_profitable_niters = min_profitable_iters; /* Calculate number of iterations required to make the vector version profitable, relative to the loop bodies only. Non-vectorized variant is SIC * niters and it must win over vector variant on the expected loop trip count. The following condition must hold true: SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */ if (vec_outside_cost <= 0) min_profitable_estimate = 0; else { min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost) * assumed_vf - vec_inside_cost * peel_iters_prologue - vec_inside_cost * peel_iters_epilogue) / ((scalar_single_iter_cost * assumed_vf) - vec_inside_cost); } min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, " Static estimate profitability threshold = %d\n", min_profitable_estimate); *ret_min_profitable_estimate = min_profitable_estimate; } /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET vector elements (not bits) for a vector with NELT elements. */ static void calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt, vec_perm_builder *sel) { /* The encoding is a single stepped pattern. Any wrap-around is handled by vec_perm_indices. */ sel->new_vector (nelt, 1, 3); for (unsigned int i = 0; i < 3; i++) sel->quick_push (i + offset); } /* Checks whether the target supports whole-vector shifts for vectors of mode MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_ it supports vec_perm_const with masks for all necessary shift amounts. */ static bool have_whole_vector_shift (machine_mode mode) { if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) return true; /* Variable-length vectors should be handled via the optab. 
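For fixed-length vectors we instead check every power-of-two shift amount the reduction epilogue may use; e.g. for nelt == 8 the shift amounts 4, 2 and 1 are tried, each encoded as the stepped series {i, i+1, i+2, ...}.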
*/ unsigned int nelt; if (!GET_MODE_NUNITS (mode).is_constant (&nelt)) return false; vec_perm_builder sel; vec_perm_indices indices; for (unsigned int i = nelt / 2; i >= 1; i /= 2) { calc_vec_perm_mask_for_shift (i, nelt, &sel); indices.new_vector (sel, 2, nelt); if (!can_vec_perm_const_p (mode, indices, false)) return false; } return true; } /* TODO: Close dependency between vect_model_*_cost and vectorizable_* functions. Design better to avoid maintenance issues. */ /* Function vect_model_reduction_cost. Models cost for a reduction operation, including the vector ops generated within the strip-mine loop, the initial definition before the loop, and the epilogue code that must be generated. */ static void vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn, int ncopies) { int prologue_cost = 0, epilogue_cost = 0, inside_cost; enum tree_code code; optab optab; tree vectype; gimple *orig_stmt; machine_mode mode; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = NULL; void *target_cost_data; if (loop_vinfo) { loop = LOOP_VINFO_LOOP (loop_vinfo); target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); } else target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info)); /* Condition reductions generate two reductions in the loop. */ vect_reduction_type reduction_type = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info); if (reduction_type == COND_REDUCTION) ncopies *= 2; vectype = STMT_VINFO_VECTYPE (stmt_info); mode = TYPE_MODE (vectype); orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); if (!orig_stmt) orig_stmt = STMT_VINFO_STMT (stmt_info); code = gimple_assign_rhs_code (orig_stmt); if (reduction_type == EXTRACT_LAST_REDUCTION || reduction_type == FOLD_LEFT_REDUCTION) { /* No extra instructions needed in the prologue. */ prologue_cost = 0; if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST) /* Count one reduction-like operation per vector. */ inside_cost = add_stmt_cost (target_cost_data, ncopies, vec_to_scalar, stmt_info, 0, vect_body); else { /* Use NELEMENTS extracts and NELEMENTS scalar ops. */ unsigned int nelements = ncopies * vect_nunits_for_cost (vectype); inside_cost = add_stmt_cost (target_cost_data, nelements, vec_to_scalar, stmt_info, 0, vect_body); inside_cost += add_stmt_cost (target_cost_data, nelements, scalar_stmt, stmt_info, 0, vect_body); } } else { /* Add in cost for initial definition. For cond reduction we have four vectors: initial index, step, initial result of the data reduction, initial value of the index reduction. */ int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1; prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts, scalar_to_vec, stmt_info, 0, vect_prologue); /* Cost of reduction op inside loop. */ inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt, stmt_info, 0, vect_body); } /* Determine cost of epilogue code. We have a reduction operator that will reduce the vector in one statement. Also requires scalar extract. */ if (!loop || !nested_in_vect_loop_p (loop, orig_stmt)) { if (reduc_fn != IFN_LAST) { if (reduction_type == COND_REDUCTION) { /* An EQ stmt and a COND_EXPR stmt. */ epilogue_cost += add_stmt_cost (target_cost_data, 2, vector_stmt, stmt_info, 0, vect_epilogue); /* Reduction of the max index and a reduction of the found values. */ epilogue_cost += add_stmt_cost (target_cost_data, 2, vec_to_scalar, stmt_info, 0, vect_epilogue); /* A broadcast of the max value.
*/ epilogue_cost += add_stmt_cost (target_cost_data, 1, scalar_to_vec, stmt_info, 0, vect_epilogue); } else { epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt, stmt_info, 0, vect_epilogue); epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar, stmt_info, 0, vect_epilogue); } } else if (reduction_type == COND_REDUCTION) { unsigned estimated_nunits = vect_nunits_for_cost (vectype); /* Extraction of scalar elements. */ epilogue_cost += add_stmt_cost (target_cost_data, 2 * estimated_nunits, vec_to_scalar, stmt_info, 0, vect_epilogue); /* Scalar max reductions via COND_EXPR / MAX_EXPR. */ epilogue_cost += add_stmt_cost (target_cost_data, 2 * estimated_nunits - 3, scalar_stmt, stmt_info, 0, vect_epilogue); } else if (reduction_type == EXTRACT_LAST_REDUCTION || reduction_type == FOLD_LEFT_REDUCTION) /* No extra instructions are needed in the epilogue. */ ; else { int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); tree bitsize = TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt))); int element_bitsize = tree_to_uhwi (bitsize); int nelements = vec_size_in_bits / element_bitsize; if (code == COND_EXPR) code = MAX_EXPR; optab = optab_for_tree_code (code, vectype, optab_default); /* We have a whole vector shift available. */ if (optab != unknown_optab && VECTOR_MODE_P (mode) && optab_handler (optab, mode) != CODE_FOR_nothing && have_whole_vector_shift (mode)) { /* Final reduction via vector shifts and the reduction operator. Also requires scalar extract. */ epilogue_cost += add_stmt_cost (target_cost_data, exact_log2 (nelements) * 2, vector_stmt, stmt_info, 0, vect_epilogue); epilogue_cost += add_stmt_cost (target_cost_data, 1, vec_to_scalar, stmt_info, 0, vect_epilogue); } else /* Use extracts and reduction op for final reduction. For N elements, we have N extracts and N-1 reduction ops. */ epilogue_cost += add_stmt_cost (target_cost_data, nelements + nelements - 1, vector_stmt, stmt_info, 0, vect_epilogue); } } if (dump_enabled_p ()) dump_printf (MSG_NOTE, "vect_model_reduction_cost: inside_cost = %d, " "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost, prologue_cost, epilogue_cost); } /* Function vect_model_induction_cost. Models cost for induction operations. */ static void vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies) { loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo); unsigned inside_cost, prologue_cost; if (PURE_SLP_STMT (stmt_info)) return; /* loop cost for vec_loop. */ inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt, stmt_info, 0, vect_body); /* prologue cost for vec_init and vec_step. */ prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec, stmt_info, 0, vect_prologue); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "vect_model_induction_cost: inside_cost = %d, " "prologue_cost = %d .\n", inside_cost, prologue_cost); } /* Function get_initial_def_for_reduction Input: STMT - a stmt that performs a reduction operation in the loop. INIT_VAL - the initial value of the reduction variable Output: ADJUSTMENT_DEF - a tree that holds a value to be added to the final result of the reduction (used for adjusting the epilog - see below). Return a vector variable, initialized according to the operation that STMT performs. This vector will be used as the initial value of the vector of partial results.
Option1 (adjust in epilog): Initialize the vector as follows: add/bit or/xor: [0,0,...,0,0] mult/bit and: [1,1,...,1,1] min/max/cond_expr: [init_val,init_val,..,init_val,init_val] and when necessary (e.g. add/mult case) let the caller know that it needs to adjust the result by init_val. Option2: Initialize the vector as follows: add/bit or/xor: [init_val,0,0,...,0] mult/bit and: [init_val,1,1,...,1] min/max/cond_expr: [init_val,init_val,...,init_val] and no adjustments are needed. For example, for the following code: s = init_val; for (i=0;i<n;i++) s = s + a[i]; STMT is 's = s + a[i]', and the reduction variable is 's'. For a vector of 4 units, we want to return either [0,0,0,init_val], or [0,0,0,0] and let the caller know that it needs to adjust the result at the end by 'init_val'. FORNOW, we are using the 'adjust in epilog' scheme, because this way the initialization vector is simpler (same element in all entries), if ADJUSTMENT_DEF is not NULL, and Option2 otherwise. A cost model should help decide between these two schemes. */ tree get_initial_def_for_reduction (gimple *stmt, tree init_val, tree *adjustment_def) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree scalar_type = TREE_TYPE (init_val); tree vectype = get_vectype_for_scalar_type (scalar_type); enum tree_code code = gimple_assign_rhs_code (stmt); tree def_for_init; tree init_def; bool nested_in_vect_loop = false; REAL_VALUE_TYPE real_init_val = dconst0; int int_init_val = 0; gimple *def_stmt = NULL; gimple_seq stmts = NULL; gcc_assert (vectype); gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type) || SCALAR_FLOAT_TYPE_P (scalar_type)); if (nested_in_vect_loop_p (loop, stmt)) nested_in_vect_loop = true; else gcc_assert (loop == (gimple_bb (stmt))->loop_father); /* In case of double reduction we only create a vector variable to be put in the reduction phi node. The actual statement creation is done in vect_create_epilog_for_reduction. */ if (adjustment_def && nested_in_vect_loop && TREE_CODE (init_val) == SSA_NAME && (def_stmt = SSA_NAME_DEF_STMT (init_val)) && gimple_code (def_stmt) == GIMPLE_PHI && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)) && vinfo_for_stmt (def_stmt) && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt)) == vect_double_reduction_def) { *adjustment_def = NULL; return vect_create_destination_var (init_val, vectype); } vect_reduction_type reduction_type = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo); /* In case of a nested reduction do not use an adjustment def as that case is not supported by the epilogue generation correctly if ncopies is not one. */ if (adjustment_def && nested_in_vect_loop) { *adjustment_def = NULL; return vect_get_vec_def_for_operand (init_val, stmt); } switch (code) { case WIDEN_SUM_EXPR: case DOT_PROD_EXPR: case SAD_EXPR: case PLUS_EXPR: case MINUS_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case MULT_EXPR: case BIT_AND_EXPR: { /* ADJUSTMENT_DEF is NULL when called from vect_create_epilog_for_reduction to vectorize double reduction. */ if (adjustment_def) *adjustment_def = init_val; if (code == MULT_EXPR) { real_init_val = dconst1; int_init_val = 1; } if (code == BIT_AND_EXPR) int_init_val = -1; if (SCALAR_FLOAT_TYPE_P (scalar_type)) def_for_init = build_real (scalar_type, real_init_val); else def_for_init = build_int_cst (scalar_type, int_init_val); if (adjustment_def) /* Option1: the first element is '0' or '1' as well. 
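(E.g. {0, 0, 0, 0} for a sum; the caller then adds INIT_VAL back when adjusting the result in the epilog.)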
*/ init_def = gimple_build_vector_from_val (&stmts, vectype, def_for_init); else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ()) { /* Option2 (variable length): the first element is INIT_VAL. */ init_def = build_vector_from_val (vectype, def_for_init); gcall *call = gimple_build_call_internal (IFN_VEC_SHL_INSERT, 2, init_def, init_val); init_def = make_ssa_name (vectype); gimple_call_set_lhs (call, init_def); gimple_seq_add_stmt (&stmts, call); } else { /* Option2: the first element is INIT_VAL. */ tree_vector_builder elts (vectype, 1, 2); elts.quick_push (init_val); elts.quick_push (def_for_init); init_def = gimple_build_vector (&stmts, &elts); } } break; case MIN_EXPR: case MAX_EXPR: case COND_EXPR: { if (adjustment_def) { *adjustment_def = NULL_TREE; if (reduction_type != COND_REDUCTION && reduction_type != EXTRACT_LAST_REDUCTION) { init_def = vect_get_vec_def_for_operand (init_val, stmt); break; } } init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val); init_def = gimple_build_vector_from_val (&stmts, vectype, init_val); } break; default: gcc_unreachable (); } if (stmts) gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts); return init_def; } /* Get at the initial defs for the reduction PHIs in SLP_NODE. NUMBER_OF_VECTORS is the number of vector defs to create. If NEUTRAL_OP is nonnull, introducing extra elements of that value will not change the result. */ static void get_initial_defs_for_reduction (slp_tree slp_node, vec<tree> *vec_oprnds, unsigned int number_of_vectors, bool reduc_chain, tree neutral_op) { vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node); gimple *stmt = stmts[0]; stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); unsigned HOST_WIDE_INT nunits; unsigned j, number_of_places_left_in_vector; tree vector_type; tree vop; int group_size = stmts.length (); unsigned int vec_num, i; unsigned number_of_copies = 1; vec<tree> voprnds; voprnds.create (number_of_vectors); struct loop *loop; auto_vec<tree, 16> permute_results; vector_type = STMT_VINFO_VECTYPE (stmt_vinfo); gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def); loop = (gimple_bb (stmt))->loop_father; gcc_assert (loop); edge pe = loop_preheader_edge (loop); gcc_assert (!reduc_chain || neutral_op); /* NUMBER_OF_COPIES is the number of times we need to use the same values in created vectors. It is greater than 1 if unrolling is performed. For example, we have two scalar operands, s1 and s2 (e.g., group of strided accesses of size two), while NUNITS is four (i.e., four scalars of this type can be packed in a vector). The output vector will contain two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES will be 2). If GROUP_SIZE > NUNITS, the scalars will be split into several vectors containing the operands. For example, NUNITS is four as before, and the group size is 8 (s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and {s5, s6, s7, s8}. */ if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits)) nunits = group_size; number_of_copies = nunits * number_of_vectors / group_size; number_of_places_left_in_vector = nunits; bool constant_p = true; tree_vector_builder elts (vector_type, nunits, 1); elts.quick_grow (nunits); for (j = 0; j < number_of_copies; j++) { for (i = group_size - 1; stmts.iterate (i, &stmt); i--) { tree op; /* Get the def before the loop. In reduction chain we have only one initial value. 
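Hence all lanes except the one for the head of the chain are padded with NEUTRAL_OP; for instance a four-statement sum chain in a four-lane vector gets init_val in the head's lane and 0 elsewhere.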
*/ if ((j != (number_of_copies - 1) || (reduc_chain && i != 0)) && neutral_op) op = neutral_op; else op = PHI_ARG_DEF_FROM_EDGE (stmt, pe); /* Create 'vect_ = {op0,op1,...,opn}'. */ number_of_places_left_in_vector--; elts[number_of_places_left_in_vector] = op; if (!CONSTANT_CLASS_P (op)) constant_p = false; if (number_of_places_left_in_vector == 0) { gimple_seq ctor_seq = NULL; tree init; if (constant_p && !neutral_op ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits) : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits)) /* Build the vector directly from ELTS. */ init = gimple_build_vector (&ctor_seq, &elts); else if (neutral_op) { /* Build a vector of the neutral value and shift the other elements into place. */ init = gimple_build_vector_from_val (&ctor_seq, vector_type, neutral_op); int k = nunits; while (k > 0 && elts[k - 1] == neutral_op) k -= 1; while (k > 0) { k -= 1; gcall *call = gimple_build_call_internal (IFN_VEC_SHL_INSERT, 2, init, elts[k]); init = make_ssa_name (vector_type); gimple_call_set_lhs (call, init); gimple_seq_add_stmt (&ctor_seq, call); } } else { /* First time round, duplicate ELTS to fill the required number of vectors, then cherry pick the appropriate result for each iteration. */ if (vec_oprnds->is_empty ()) duplicate_and_interleave (&ctor_seq, vector_type, elts, number_of_vectors, permute_results); init = permute_results[number_of_vectors - j - 1]; } if (ctor_seq != NULL) gsi_insert_seq_on_edge_immediate (pe, ctor_seq); voprnds.quick_push (init); number_of_places_left_in_vector = nunits; elts.new_vector (vector_type, nunits, 1); elts.quick_grow (nunits); constant_p = true; } } } /* Since the vectors are created in the reverse order, we should invert them. */ vec_num = voprnds.length (); for (j = vec_num; j != 0; j--) { vop = voprnds[j - 1]; vec_oprnds->quick_push (vop); } voprnds.release (); /* In case that VF is greater than the unrolling factor needed for the SLP group of stmts, NUMBER_OF_VECTORS to be created is greater than NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have to replicate the vectors. */ tree neutral_vec = NULL; while (number_of_vectors > vec_oprnds->length ()) { if (neutral_op) { if (!neutral_vec) { gimple_seq ctor_seq = NULL; neutral_vec = gimple_build_vector_from_val (&ctor_seq, vector_type, neutral_op); if (ctor_seq != NULL) gsi_insert_seq_on_edge_immediate (pe, ctor_seq); } vec_oprnds->quick_push (neutral_vec); } else { for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++) vec_oprnds->quick_push (vop); } } } /* Function vect_create_epilog_for_reduction Create code at the loop-epilog to finalize the result of a reduction computation. VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector reduction statements. STMT is the scalar reduction stmt that is being vectorized. NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits). In this case we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. REDUC_FN is the internal function for the epilog reduction. REDUCTION_PHIS is a list of the phi-nodes that carry the reduction computation. REDUC_INDEX is the index of the operand in the right hand side of the statement that is defined by REDUCTION_PHI. DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled. SLP_NODE is an SLP node containing a group of reduction statements. 
The first one in this group is STMT. INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to be smaller than any value of the IV in the loop, for MIN_EXPR larger than any value of the IV in the loop. INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION. NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is null if this is not an SLP reduction This function: 1. Creates the reduction def-use cycles: sets the arguments for REDUCTION_PHIS: The loop-entry argument is the vectorized initial-value of the reduction. The loop-latch argument is taken from VECT_DEFS - the vector of partial sums. 2. "Reduces" each vector of partial results VECT_DEFS into a single result, by calling the function specified by REDUC_FN if available, or by other means (whole-vector shifts or a scalar loop). The function also creates a new phi node at the loop exit to preserve loop-closed form, as illustrated below. The flow at the entry to this function: loop: vec_def = phi <null, null> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT s_loop = scalar_stmt # (scalar) STMT loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI use <s_out0> use <s_out0> The above is transformed by this function into: loop: vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT s_loop = scalar_stmt # (scalar) STMT loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out4> use <s_out4> */ static void vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt, gimple *reduc_def_stmt, int ncopies, internal_fn reduc_fn, vec<gimple *> reduction_phis, bool double_reduc, slp_tree slp_node, slp_instance slp_node_instance, tree induc_val, enum tree_code induc_code, tree neutral_op) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); stmt_vec_info prev_phi_info; tree vectype; machine_mode mode; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL; basic_block exit_bb; tree scalar_dest; tree scalar_type; gimple *new_phi = NULL, *phi; gimple_stmt_iterator exit_gsi; tree vec_dest; tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest; gimple *epilog_stmt = NULL; enum tree_code code = gimple_assign_rhs_code (stmt); gimple *exit_phi; tree bitsize; tree adjustment_def = NULL; tree vec_initial_def = NULL; tree expr, def, initial_def = NULL; tree orig_name, scalar_result; imm_use_iterator imm_iter, phi_imm_iter; use_operand_p use_p, phi_use_p; gimple *use_stmt, *orig_stmt, *reduction_phi = NULL; bool nested_in_vect_loop = false; auto_vec<gimple *> new_phis; auto_vec<gimple *> inner_phis; enum vect_def_type dt = vect_unknown_def_type; int j, i; auto_vec<tree> scalar_results; unsigned int group_size = 1, k, ratio; auto_vec<tree> vec_initial_defs; auto_vec<gimple *> phis; bool slp_reduc = false; bool direct_slp_reduc; tree new_phi_result; gimple *inner_phi = NULL; tree induction_index = NULL_TREE; if (slp_node) group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); if (nested_in_vect_loop_p (loop, stmt)) { outer_loop = loop; loop = loop->inner; nested_in_vect_loop = true; gcc_assert (!slp_node); } vectype = STMT_VINFO_VECTYPE (stmt_info); gcc_assert (vectype); mode = TYPE_MODE (vectype); /* 1. 
Create the reduction def-use cycle: Set the arguments of REDUCTION_PHIS, i.e., transform loop: vec_def = phi <null, null> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT ... into: loop: vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI VECT_DEF = vector_stmt # vectorized form of STMT ... (in case of SLP, do it for all the phis). */ /* Get the loop-entry arguments. */ enum vect_def_type initial_def_dt = vect_unknown_def_type; if (slp_node) { unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); vec_initial_defs.reserve (vec_num); get_initial_defs_for_reduction (slp_node_instance->reduc_phis, &vec_initial_defs, vec_num, GROUP_FIRST_ELEMENT (stmt_info), neutral_op); } else { /* Get at the scalar def before the loop that defines the initial value of the reduction variable. */ gimple *def_stmt; initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt, loop_preheader_edge (loop)); /* Optimize: if initial_def is for REDUC_MAX smaller than the base and we can't use zero for induc_val, use initial_def. Similarly for REDUC_MIN and initial_def larger than the base. */ if (TREE_CODE (initial_def) == INTEGER_CST && (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) && !integer_zerop (induc_val) && ((induc_code == MAX_EXPR && tree_int_cst_lt (initial_def, induc_val)) || (induc_code == MIN_EXPR && tree_int_cst_lt (induc_val, initial_def)))) induc_val = initial_def; vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt); vec_initial_def = get_initial_def_for_reduction (stmt, initial_def, &adjustment_def); vec_initial_defs.create (1); vec_initial_defs.quick_push (vec_initial_def); } /* Set the phi nodes' arguments. */ FOR_EACH_VEC_ELT (reduction_phis, i, phi) { tree vec_init_def = vec_initial_defs[i]; tree def = vect_defs[i]; for (j = 0; j < ncopies; j++) { if (j != 0) { phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); if (nested_in_vect_loop) vec_init_def = vect_get_vec_def_for_stmt_copy (initial_def_dt, vec_init_def); } /* Set the loop-entry arg of the reduction-phi. */ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) { /* Initialise the reduction phi to zero. This prevents non-zero initial values from interfering with the reduction op. */ gcc_assert (ncopies == 1); gcc_assert (i == 0); tree vec_init_def_type = TREE_TYPE (vec_init_def); tree induc_val_vec = build_vector_from_val (vec_init_def_type, induc_val); add_phi_arg (as_a <gphi *> (phi), induc_val_vec, loop_preheader_edge (loop), UNKNOWN_LOCATION); } else add_phi_arg (as_a <gphi *> (phi), vec_init_def, loop_preheader_edge (loop), UNKNOWN_LOCATION); /* Set the loop-latch arg for the reduction-phi. */ if (j > 0) def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def); add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "transform reduction: created def-use cycle: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0); } } } /* For cond reductions we want to create a new vector (INDEX_COND_EXPR) which is updated with the current index of the loop for every match of the original loop's cond_expr (VEC_STMT). This results in a vector containing the last time the condition passed for that vector lane. The first match will be a 1 to allow 0 to be used for non-matching indexes. If there are no matches at all then the vector will be all zeroes.
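As an illustrative sketch (assumed values, not from the sources): with four lanes and step 4 the index IV takes the values {1,2,3,4}, then {5,6,7,8}, and so on. If the condition holds only in lane 1 of the first iteration and lane 2 of the second, the accumulated vector is {0, 2, 7, 0}: each lane holds the last index at which it matched, and lanes that never matched keep 0.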
*/ if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION) { tree indx_before_incr, indx_after_incr; poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype); gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info); gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR); int scalar_precision = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype))); tree cr_index_scalar_type = make_unsigned_type (scalar_precision); tree cr_index_vector_type = build_vector_type (cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype)); /* First we create a simple vector induction variable which starts with the values {1,2,3,...} (SERIES_VECT) and increments by the vector size (STEP). */ /* Create a {1,2,3,...} vector. */ tree series_vect = build_index_vector (cr_index_vector_type, 1, 1); /* Create a vector of the step value. */ tree step = build_int_cst (cr_index_scalar_type, nunits_out); tree vec_step = build_vector_from_val (cr_index_vector_type, step); /* Create an induction variable. */ gimple_stmt_iterator incr_gsi; bool insert_after; standard_iv_increment_position (loop, &incr_gsi, &insert_after); create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr); /* Next create a new phi node vector (NEW_PHI_TREE) which starts filled with zeros (VEC_ZERO). */ /* Create a vector of 0s. */ tree zero = build_zero_cst (cr_index_scalar_type); tree vec_zero = build_vector_from_val (cr_index_vector_type, zero); /* Create a vector phi node. */ tree new_phi_tree = make_ssa_name (cr_index_vector_type); new_phi = create_phi_node (new_phi_tree, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); add_phi_arg (as_a <gphi *> (new_phi), vec_zero, loop_preheader_edge (loop), UNKNOWN_LOCATION); /* Now take the condition from the loop's original cond_expr (VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which for every match uses values from the induction variable (INDEX_BEFORE_INCR) and otherwise uses values from the phi node (NEW_PHI_TREE). Finally, we update the phi (NEW_PHI_TREE) to take the value of the new cond_expr (INDEX_COND_EXPR). */ /* Duplicate the condition from vec_stmt. */ tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt)); /* Create a conditional, where the condition is taken from vec_stmt (CCOMPARE), the "then" value is the induction index (INDEX_BEFORE_INCR) and the "else" value is the phi (NEW_PHI_TREE). */ tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type, ccompare, indx_before_incr, new_phi_tree); induction_index = make_ssa_name (cr_index_vector_type); gimple *index_condition = gimple_build_assign (induction_index, index_cond_expr); gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT); stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition, loop_vinfo); STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type; set_vinfo_for_stmt (index_condition, index_vec_info); /* Update the phi with the vec cond. */ add_phi_arg (as_a <gphi *> (new_phi), induction_index, loop_latch_edge (loop), UNKNOWN_LOCATION); } /* 2. Create epilog code. The reduction epilog code operates across the elements of the vector of partial results computed by the vectorized loop. The reduction epilog code consists of: step 1: compute the scalar result in a vector (v_out2) step 2: extract the scalar result (s_out3) from the vector (v_out2) step 3: adjust the scalar result (s_out3) if needed. Step 1 can be accomplished using one of the following three schemes: (scheme 1) using reduc_fn, if available.
(scheme 2) using whole-vector shifts, if available. (scheme 3) using a scalar loop. In this case steps 1+2 above are combined. The overall epilog code looks like this: s_out0 = phi <s_loop> # original EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> # step 1 s_out3 = extract_field <v_out2, 0> # step 2 s_out4 = adjust_result <s_out3> # step 3 (step 3 is optional, and steps 1 and 2 may be combined). Lastly, the uses of s_out0 are replaced by s_out4. */ /* 2.1 Create new loop-exit-phis to preserve loop-closed form: v_out1 = phi <VECT_DEF> Store them in NEW_PHIS. */ exit_bb = single_exit (loop)->dest; prev_phi_info = NULL; new_phis.create (vect_defs.length ()); FOR_EACH_VEC_ELT (vect_defs, i, def) { for (j = 0; j < ncopies; j++) { tree new_def = copy_ssa_name (def); phi = create_phi_node (new_def, exit_bb); set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo)); if (j == 0) new_phis.quick_push (phi); else { def = vect_get_vec_def_for_stmt_copy (dt, def); STMT_VINFO_RELATED_STMT (prev_phi_info) = phi; } SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def); prev_phi_info = vinfo_for_stmt (phi); } } /* The epilogue is created for the outer-loop, i.e., for the loop being vectorized. Create exit phis for the outer loop. */ if (double_reduc) { loop = outer_loop; exit_bb = single_exit (loop)->dest; inner_phis.create (vect_defs.length ()); FOR_EACH_VEC_ELT (new_phis, i, phi) { tree new_result = copy_ssa_name (PHI_RESULT (phi)); gphi *outer_phi = create_phi_node (new_result, exit_bb); SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx, PHI_RESULT (phi)); set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi, loop_vinfo)); inner_phis.quick_push (phi); new_phis[i] = outer_phi; prev_phi_info = vinfo_for_stmt (outer_phi); while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi))) { phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)); new_result = copy_ssa_name (PHI_RESULT (phi)); outer_phi = create_phi_node (new_result, exit_bb); SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx, PHI_RESULT (phi)); set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi, loop_vinfo)); STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi; prev_phi_info = vinfo_for_stmt (outer_phi); } } } exit_gsi = gsi_after_labels (exit_bb); /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3 (i.e. when reduc_fn is not available) and in the final adjustment code (if needed). Also get the original scalar reduction variable as defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it represents a reduction pattern), the tree-code and scalar-def are taken from the original stmt that the pattern-stmt (STMT) replaces. Otherwise (it is a regular reduction) - the tree-code and scalar-def are taken from STMT. */ orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info); if (!orig_stmt) { /* Regular reduction */ orig_stmt = stmt; } else { /* Reduction pattern */ stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt); gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo)); gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt); } code = gimple_assign_rhs_code (orig_stmt); /* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore, partial results are added and not subtracted. 
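A worked sketch with assumed values: for s -= a[i] over {1,2,3,4} with two lanes and initial value 10, the lanes start as {10, 0} and finish as {10-1-3, 0-2-4} = {6, -6}; adding the two lanes gives 0 = 10 - (1+2+3+4), so PLUS_EXPR is the correct epilog code.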
*/ if (code == MINUS_EXPR) code = PLUS_EXPR; scalar_dest = gimple_assign_lhs (orig_stmt); scalar_type = TREE_TYPE (scalar_dest); scalar_results.create (group_size); new_scalar_dest = vect_create_destination_var (scalar_dest, NULL); bitsize = TYPE_SIZE (scalar_type); /* In case this is a reduction in an inner-loop while vectorizing an outer loop - we don't need to extract a single scalar result at the end of the inner-loop (unless it is double reduction, i.e., the use of reduction is outside the outer-loop). The final vector of partial results will be used in the vectorized outer-loop, or reduced to a scalar result at the end of the outer-loop. */ if (nested_in_vect_loop && !double_reduc) goto vect_finalize_reduction; /* SLP reduction without reduction chain, e.g., # a1 = phi <a2, a0> # b1 = phi <b2, b0> a2 = operation (a1) b2 = operation (b1) */ slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))); /* True if we should implement SLP_REDUC using native reduction operations instead of scalar operations. */ direct_slp_reduc = (reduc_fn != IFN_LAST && slp_reduc && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ()); /* In case of reduction chain, e.g., # a1 = phi <a3, a0> a2 = operation (a1) a3 = operation (a2), we may end up with more than one vector result. Here we reduce them to one vector. */ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc) { tree first_vect = PHI_RESULT (new_phis[0]); gassign *new_vec_stmt = NULL; vec_dest = vect_create_destination_var (scalar_dest, vectype); for (k = 1; k < new_phis.length (); k++) { gimple *next_phi = new_phis[k]; tree second_vect = PHI_RESULT (next_phi); tree tem = make_ssa_name (vec_dest, new_vec_stmt); new_vec_stmt = gimple_build_assign (tem, code, first_vect, second_vect); gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT); first_vect = tem; } new_phi_result = first_vect; if (new_vec_stmt) { new_phis.truncate (0); new_phis.safe_push (new_vec_stmt); } } /* Likewise if we couldn't use a single defuse cycle. */ else if (ncopies > 1) { gcc_assert (new_phis.length () == 1); tree first_vect = PHI_RESULT (new_phis[0]); gassign *new_vec_stmt = NULL; vec_dest = vect_create_destination_var (scalar_dest, vectype); gimple *next_phi = new_phis[0]; for (int k = 1; k < ncopies; ++k) { next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi)); tree second_vect = PHI_RESULT (next_phi); tree tem = make_ssa_name (vec_dest, new_vec_stmt); new_vec_stmt = gimple_build_assign (tem, code, first_vect, second_vect); gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT); first_vect = tem; } new_phi_result = first_vect; new_phis.truncate (0); new_phis.safe_push (new_vec_stmt); } else new_phi_result = PHI_RESULT (new_phis[0]); if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION && reduc_fn != IFN_LAST) { /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing various data values where the condition matched and another vector (INDUCTION_INDEX) containing all the indexes of those matches. We need to extract the last matching index (which will be the index with highest value) and use this to index into the data vector. For the case where there were no matches, the data vector will contain all default values and the index vector will be all zeros. */ /* Get various versions of the type of the vector of indexes. 
*/ tree index_vec_type = TREE_TYPE (induction_index); gcc_checking_assert (TYPE_UNSIGNED (index_vec_type)); tree index_scalar_type = TREE_TYPE (index_vec_type); tree index_vec_cmp_type = build_same_sized_truth_vector_type (index_vec_type); /* Get an unsigned integer version of the type of the data vector. */ int scalar_precision = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type)); tree scalar_type_unsigned = make_unsigned_type (scalar_precision); tree vectype_unsigned = build_vector_type (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype)); /* First we need to create a vector (ZERO_VEC) of zeros and another vector (MAX_INDEX_VEC) filled with the last matching index, which we can create using a MAX reduction and then expanding. In the case where the loop never made any matches, the max index will be zero. */ /* Vector of {0, 0, 0,...}. */ tree zero_vec = make_ssa_name (vectype); tree zero_vec_rhs = build_zero_cst (vectype); gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs); gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT); /* Find maximum value from the vector of found indexes. */ tree max_index = make_ssa_name (index_scalar_type); gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX, 1, induction_index); gimple_call_set_lhs (max_index_stmt, max_index); gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT); /* Vector of {max_index, max_index, max_index,...}. */ tree max_index_vec = make_ssa_name (index_vec_type); tree max_index_vec_rhs = build_vector_from_val (index_vec_type, max_index); gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec, max_index_vec_rhs); gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT); /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes with the vector (INDUCTION_INDEX) of found indexes, choosing values from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC) otherwise. Only one value should match, resulting in a vector (VEC_COND) with one data value and the rest zeros. In the case where the loop never made any matches, every index will match, resulting in a vector with all data values (which will all be the default value). */ /* Compare the max index vector to the vector of found indexes to find the position of the max value. */ tree vec_compare = make_ssa_name (index_vec_cmp_type); gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR, induction_index, max_index_vec); gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT); /* Use the compare to choose either values from the data vector or zero. */ tree vec_cond = make_ssa_name (vectype); gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR, vec_compare, new_phi_result, zero_vec); gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT); /* Finally we need to extract the data value from the vector (VEC_COND) into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR reduction, but because this doesn't exist, we can use a MAX reduction instead. The data value might be signed or a float, so we need to cast it first. In the case where the loop never made any matches, the data values are all identical, and so will reduce down correctly. */ /* Make the matched data values unsigned.
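As a hedged illustration with assumed values: if VEC_COND is the V4SF vector {0.0, 5.0, 0.0, 0.0}, the VIEW_CONVERT below produces the unsigned lanes {0, 0x40a00000, 0, 0}; the MAX reduction then selects 0x40a00000, the bit pattern of 5.0, which is converted back to the scalar type afterwards.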
*/ tree vec_cond_cast = make_ssa_name (vectype_unsigned); tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned, vec_cond); gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast, VIEW_CONVERT_EXPR, vec_cond_cast_rhs); gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT); /* Reduce down to a scalar value. */ tree data_reduc = make_ssa_name (scalar_type_unsigned); gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX, 1, vec_cond_cast); gimple_call_set_lhs (data_reduc_stmt, data_reduc); gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT); /* Convert the reduced value back to the result type and set as the result. */ gimple_seq stmts = NULL; new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type, data_reduc); gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT); scalar_results.safe_push (new_temp); } else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION && reduc_fn == IFN_LAST) { /* Condition reduction without supported IFN_REDUC_MAX. Generate idx = 0; idx_val = induction_index[0]; val = data_reduc[0]; for (idx = 0, val = init, i = 0; i < nelts; ++i) if (induction_index[i] > idx_val) val = data_reduc[i], idx_val = induction_index[i]; return val; */ tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result)); tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index)); unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype)); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index)); /* Enforced by vectorizable_reduction, which ensures we have target support before allowing a conditional reduction on variable-length vectors. */ unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant (); tree idx_val = NULL_TREE, val = NULL_TREE; for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size) { tree old_idx_val = idx_val; tree old_val = val; idx_val = make_ssa_name (idx_eltype); epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF, build3 (BIT_FIELD_REF, idx_eltype, induction_index, bitsize_int (el_size), bitsize_int (off))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); val = make_ssa_name (data_eltype); epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF, build3 (BIT_FIELD_REF, data_eltype, new_phi_result, bitsize_int (el_size), bitsize_int (off))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (off != 0) { tree new_idx_val = idx_val; tree new_val = val; if (off != v_size - el_size) { new_idx_val = make_ssa_name (idx_eltype); epilog_stmt = gimple_build_assign (new_idx_val, MAX_EXPR, idx_val, old_idx_val); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } new_val = make_ssa_name (data_eltype); epilog_stmt = gimple_build_assign (new_val, COND_EXPR, build2 (GT_EXPR, boolean_type_node, idx_val, old_idx_val), val, old_val); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); idx_val = new_idx_val; val = new_val; } } /* Convert the reduced value back to the result type and set as the result. */ gimple_seq stmts = NULL; val = gimple_convert (&stmts, scalar_type, val); gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT); scalar_results.safe_push (val); } /* 2.3 Create the reduction code, using one of the three schemes described above. In SLP we simply need to extract all the elements from the vector (without reducing them), so we use scalar shifts. 
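To sketch scheme 2 on a four-element vector {a,b,c,d} (illustrative only): a whole-vector shift by two lanes gives {c,d,0,0}, and adding yields {a+c,b+d,c,d}; shifting that by one lane and adding puts a+b+c+d in element 0, which step 2.4 below then extracts.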
*/ else if (reduc_fn != IFN_LAST && !slp_reduc) { tree tmp; tree vec_elem_type; /* Case 1: Create: v_out2 = reduc_expr <v_out1> */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using direct vector reduction.\n"); vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result)); if (!useless_type_conversion_p (scalar_type, vec_elem_type)) { tree tmp_dest = vect_create_destination_var (scalar_dest, vec_elem_type); epilog_stmt = gimple_build_call_internal (reduc_fn, 1, new_phi_result); gimple_set_lhs (epilog_stmt, tmp_dest); new_temp = make_ssa_name (tmp_dest, epilog_stmt); gimple_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR, new_temp); } else { epilog_stmt = gimple_build_call_internal (reduc_fn, 1, new_phi_result); gimple_set_lhs (epilog_stmt, new_scalar_dest); } new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) && !operand_equal_p (initial_def, induc_val, 0)) { /* Earlier we set the initial value to be a vector of induc_val values. Check the result and if it is induc_val then replace with the original initial value, unless induc_val is the same as initial_def already. */ tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, induc_val); tmp = make_ssa_name (new_scalar_dest); epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare, initial_def, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); new_temp = tmp; } scalar_results.safe_push (new_temp); } else if (direct_slp_reduc) { /* Here we create one vector for each of the GROUP_SIZE results, with the elements for other SLP statements replaced with the neutral value. We can then do a normal reduction on each vector. */ /* Enforced by vectorizable_reduction. */ gcc_assert (new_phis.length () == 1); gcc_assert (pow2p_hwi (group_size)); slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis; vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node); gimple_seq seq = NULL; /* Build a vector {0, 1, 2, ...}, with the same number of elements and the same element size as VECTYPE. */ tree index = build_index_vector (vectype, 0, 1); tree index_type = TREE_TYPE (index); tree index_elt_type = TREE_TYPE (index_type); tree mask_type = build_same_sized_truth_vector_type (index_type); /* Create a vector that, for each element, identifies which of the GROUP_SIZE results should use it. */ tree index_mask = build_int_cst (index_elt_type, group_size - 1); index = gimple_build (&seq, BIT_AND_EXPR, index_type, index, build_vector_from_val (index_type, index_mask)); /* Get a neutral vector value. This is simply a splat of the neutral scalar value if we have one, otherwise the initial scalar value is itself a neutral value. */ tree vector_identity = NULL_TREE; if (neutral_op) vector_identity = gimple_build_vector_from_val (&seq, vectype, neutral_op); for (unsigned int i = 0; i < group_size; ++i) { /* If there's no universal neutral value, we can use the initial scalar value from the original PHI. This is used for MIN and MAX reduction, for example.
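For instance (a sketch with an assumed lane layout): a two-statement MIN SLP group whose lanes interleave as {a0,b0,a1,b1} builds, for statement a, the vector {a0, ia, a1, ia} where ia is a's initial scalar value; padding with extra copies of the initial value cannot change a MIN or MAX result, so the full-vector reduction stays correct.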
*/ if (!neutral_op) { tree scalar_value = PHI_ARG_DEF_FROM_EDGE (orig_phis[i], loop_preheader_edge (loop)); vector_identity = gimple_build_vector_from_val (&seq, vectype, scalar_value); } /* Calculate the equivalent of: sel[j] = (index[j] == i); which selects the elements of NEW_PHI_RESULT that should be included in the result. */ tree compare_val = build_int_cst (index_elt_type, i); compare_val = build_vector_from_val (index_type, compare_val); tree sel = gimple_build (&seq, EQ_EXPR, mask_type, index, compare_val); /* Calculate the equivalent of: vec = seq ? new_phi_result : vector_identity; VEC is now suitable for a full vector reduction. */ tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype, sel, new_phi_result, vector_identity); /* Do the reduction and convert it to the appropriate type. */ gcall *call = gimple_build_call_internal (reduc_fn, 1, vec); tree scalar = make_ssa_name (TREE_TYPE (vectype)); gimple_call_set_lhs (call, scalar); gimple_seq_add_stmt (&seq, call); scalar = gimple_convert (&seq, scalar_type, scalar); scalar_results.safe_push (scalar); } gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT); } else { bool reduce_with_shift; tree vec_temp; /* COND reductions all do the final reduction with MAX_EXPR or MIN_EXPR. */ if (code == COND_EXPR) { if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) code = induc_code; else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == CONST_COND_REDUCTION) code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info); else code = MAX_EXPR; } /* See if the target wants to do the final (shift) reduction in a vector mode of smaller size and first reduce upper/lower halves against each other. */ enum machine_mode mode1 = mode; tree vectype1 = vectype; unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype)); unsigned sz1 = sz; if (!slp_reduc && (mode1 = targetm.vectorize.split_reduction (mode)) != mode) sz1 = GET_MODE_SIZE (mode1).to_constant (); vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1); reduce_with_shift = have_whole_vector_shift (mode1); if (!VECTOR_MODE_P (mode1)) reduce_with_shift = false; else { optab optab = optab_for_tree_code (code, vectype1, optab_default); if (optab_handler (optab, mode1) == CODE_FOR_nothing) reduce_with_shift = false; } /* First reduce the vector to the desired vector size we should do shift reduction on by combining upper and lower halves. */ new_temp = new_phi_result; while (sz > sz1) { gcc_assert (!slp_reduc); sz /= 2; vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz); /* The target has to make sure we support lowpart/highpart extraction, either via direct vector extract or through an integer mode punning. */ tree dst1, dst2; if (convert_optab_handler (vec_extract_optab, TYPE_MODE (TREE_TYPE (new_temp)), TYPE_MODE (vectype1)) != CODE_FOR_nothing) { /* Extract sub-vectors directly once vec_extract becomes a conversion optab. */ dst1 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst1, BIT_FIELD_REF, build3 (BIT_FIELD_REF, vectype1, new_temp, TYPE_SIZE (vectype1), bitsize_int (0))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); dst2 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst2, BIT_FIELD_REF, build3 (BIT_FIELD_REF, vectype1, new_temp, TYPE_SIZE (vectype1), bitsize_int (sz * BITS_PER_UNIT))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } else { /* Extract via punning to appropriately sized integer mode vector. 
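For example, under an assumed target where a 16-byte V4SF must be halved to V2SF without a direct sub-vector extract: NEW_TEMP is view-converted to a two-element vector of 64-bit integers, each integer lane is pulled out with BIT_FIELD_REF, and each lane is view-converted back to V2SF.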
*/ tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT, 1); tree etype = build_vector_type (eltype, 2); gcc_assert (convert_optab_handler (vec_extract_optab, TYPE_MODE (etype), TYPE_MODE (eltype)) != CODE_FOR_nothing); tree tem = make_ssa_name (etype); epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, etype, new_temp)); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); new_temp = tem; tem = make_ssa_name (eltype); epilog_stmt = gimple_build_assign (tem, BIT_FIELD_REF, build3 (BIT_FIELD_REF, eltype, new_temp, TYPE_SIZE (eltype), bitsize_int (0))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); dst1 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype1, tem)); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); tem = make_ssa_name (eltype); epilog_stmt = gimple_build_assign (tem, BIT_FIELD_REF, build3 (BIT_FIELD_REF, eltype, new_temp, TYPE_SIZE (eltype), bitsize_int (sz * BITS_PER_UNIT))); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); dst2 = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype1, tem)); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } new_temp = make_ssa_name (vectype1); epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } if (reduce_with_shift && !slp_reduc) { int element_bitsize = tree_to_uhwi (bitsize); /* Enforced by vectorizable_reduction, which disallows SLP reductions for variable-length vectors and also requires direct target support for loop reductions. */ int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1)); int nelements = vec_size_in_bits / element_bitsize; vec_perm_builder sel; vec_perm_indices indices; int elt_offset; tree zero_vec = build_zero_cst (vectype1); /* Case 2: Create: for (offset = nelements/2; offset >= 1; offset/=2) { Create: va' = vec_shift <va, offset> Create: va = vop <va, va'> } */ tree rhs; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using vector shifts\n"); mode1 = TYPE_MODE (vectype1); vec_dest = vect_create_destination_var (scalar_dest, vectype1); for (elt_offset = nelements / 2; elt_offset >= 1; elt_offset /= 2) { calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel); indices.new_vector (sel, 2, nelements); tree mask = vect_gen_perm_mask_any (vectype1, indices); epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR, new_temp, zero_vec, mask); new_name = make_ssa_name (vec_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_name); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); epilog_stmt = gimple_build_assign (vec_dest, code, new_name, new_temp); new_temp = make_ssa_name (vec_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } /* 2.4 Extract the final scalar result. 
Create: s_out3 = extract_field <v_out2, bitpos> */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "extract scalar result\n"); rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp, bitsize, bitsize_zero_node); epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); scalar_results.safe_push (new_temp); } else { /* Case 3: Create: s = extract_field <v_out2, 0> for (offset = element_size; offset < vector_size; offset += element_size) { Create: s' = extract_field <v_out2, offset> Create: s = op <s, s'> // For non SLP cases } */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Reduce using scalar code.\n"); int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1)); int element_bitsize = tree_to_uhwi (bitsize); FOR_EACH_VEC_ELT (new_phis, i, new_phi) { int bit_offset; if (gimple_code (new_phi) == GIMPLE_PHI) vec_temp = PHI_RESULT (new_phi); else vec_temp = gimple_assign_lhs (new_phi); tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, bitsize_zero_node); epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); /* In SLP we don't need to apply the reduction operation, so we just collect s' values in SCALAR_RESULTS. */ if (slp_reduc) scalar_results.safe_push (new_temp); for (bit_offset = element_bitsize; bit_offset < vec_size_in_bits; bit_offset += element_bitsize) { tree bitpos = bitsize_int (bit_offset); tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize, bitpos); epilog_stmt = gimple_build_assign (new_scalar_dest, rhs); new_name = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_name); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (slp_reduc) { /* In SLP we don't need to apply the reduction operation, so we just collect s' values in SCALAR_RESULTS. */ new_temp = new_name; scalar_results.safe_push (new_name); } else { epilog_stmt = gimple_build_assign (new_scalar_dest, code, new_name, new_temp); new_temp = make_ssa_name (new_scalar_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); } } } /* The only case where we need to reduce scalar results in SLP is unrolling. If the size of SCALAR_RESULTS is greater than GROUP_SIZE, we reduce them combining elements modulo GROUP_SIZE. */ if (slp_reduc) { tree res, first_res, new_res; gimple *new_stmt; /* Reduce multiple scalar results in case of SLP unrolling. */ for (j = group_size; scalar_results.iterate (j, &res); j++) { first_res = scalar_results[j % group_size]; new_stmt = gimple_build_assign (new_scalar_dest, code, first_res, res); new_res = make_ssa_name (new_scalar_dest, new_stmt); gimple_assign_set_lhs (new_stmt, new_res); gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT); scalar_results[j % group_size] = new_res; } } else /* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */ scalar_results.safe_push (new_temp); } if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == INTEGER_INDUC_COND_REDUCTION) && !operand_equal_p (initial_def, induc_val, 0)) { /* Earlier we set the initial value to be a vector of induc_val values.
Check the result and if it is induc_val then replace with the original initial value, unless induc_val is the same as initial_def already. */ tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp, induc_val); tree tmp = make_ssa_name (new_scalar_dest); epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare, initial_def, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); scalar_results[0] = tmp; } } vect_finalize_reduction: if (double_reduc) loop = loop->inner; /* 2.5 Adjust the final result by the initial value of the reduction variable. (When such adjustment is not needed, then 'adjustment_def' is zero). For example, if code is PLUS we create: new_temp = loop_exit_def + adjustment_def */ if (adjustment_def) { gcc_assert (!slp_reduc); if (nested_in_vect_loop) { new_phi = new_phis[0]; gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE); expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def); new_dest = vect_create_destination_var (scalar_dest, vectype); } else { new_temp = scalar_results[0]; gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE); expr = build2 (code, scalar_type, new_temp, adjustment_def); new_dest = vect_create_destination_var (scalar_dest, scalar_type); } epilog_stmt = gimple_build_assign (new_dest, expr); new_temp = make_ssa_name (new_dest, epilog_stmt); gimple_assign_set_lhs (epilog_stmt, new_temp); gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT); if (nested_in_vect_loop) { set_vinfo_for_stmt (epilog_stmt, new_stmt_vec_info (epilog_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi)); if (!double_reduc) scalar_results.quick_push (new_temp); else scalar_results[0] = new_temp; } else scalar_results[0] = new_temp; new_phis[0] = epilog_stmt; } /* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit phis with new adjusted scalar results, i.e., replace use <s_out0> with use <s_out4>. Transform: loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out0> use <s_out0> into: loop_exit: s_out0 = phi <s_loop> # (scalar) EXIT_PHI v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI v_out2 = reduce <v_out1> s_out3 = extract_field <v_out2, 0> s_out4 = adjust_result <s_out3> use <s_out4> use <s_out4> */ /* In SLP reduction chain we reduce vector results into one vector if necessary, hence we set here GROUP_SIZE to 1. SCALAR_DEST is the LHS of the last stmt in the reduction chain, since we are looking for the loop exit phi node. */ if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]; /* Handle reduction patterns. */ if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt))) dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)); scalar_dest = gimple_assign_lhs (dest_stmt); group_size = 1; } /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in case that GROUP_SIZE is greater than vectorization factor). Therefore, we need to match SCALAR_RESULTS with corresponding statements. The first (GROUP_SIZE / number of new vector stmts) scalar results correspond to the first vector stmt, etc. (RATIO is equal to (GROUP_SIZE / number of new vector stmts)). 
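A sketch with assumed sizes: if GROUP_SIZE is 4 and two vector stmts were created, RATIO is 2, so scalar results 0 and 1 are matched with the first vector stmt and scalar results 2 and 3 with the second.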
*/ if (group_size > new_phis.length ()) { ratio = group_size / new_phis.length (); gcc_assert (!(group_size % new_phis.length ())); } else ratio = 1; for (k = 0; k < group_size; k++) { if (k % ratio == 0) { epilog_stmt = new_phis[k / ratio]; reduction_phi = reduction_phis[k / ratio]; if (double_reduc) inner_phi = inner_phis[k / ratio]; } if (slp_reduc) { gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k]; orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt)); /* SLP statements can't participate in patterns. */ gcc_assert (!orig_stmt); scalar_dest = gimple_assign_lhs (current_stmt); } phis.create (3); /* Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses - one at the latch block, and one at the loop exit). */ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))) && !is_gimple_debug (USE_STMT (use_p))) phis.safe_push (USE_STMT (use_p)); /* While we expect to have found an exit_phi because of loop-closed-ssa form we can end up without one if the scalar cycle is dead. */ FOR_EACH_VEC_ELT (phis, i, exit_phi) { if (outer_loop) { stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); gphi *vect_phi; /* FORNOW. Currently not supporting the case that an inner-loop reduction is not used in the outer-loop (but only outside the outer-loop), unless it is double reduction. */ gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo) && !STMT_VINFO_LIVE_P (exit_phi_vinfo)) || double_reduc); if (double_reduc) STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi; else STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt; if (!double_reduc || STMT_VINFO_DEF_TYPE (exit_phi_vinfo) != vect_double_reduction_def) continue; /* Handle double reduction: stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop) stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop) stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop) stmt4: s2 = phi <s4> - double reduction stmt (outer loop) At that point the regular reduction (stmt2 and stmt3) is already vectorized, as well as the exit phi node, stmt4. Here we vectorize the phi node of double reduction, stmt1, and update all relevant statements. */ /* Go through all the uses of s2 to find double reduction phi node, i.e., stmt1 above. */ orig_name = PHI_RESULT (exit_phi); FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) { stmt_vec_info use_stmt_vinfo; stmt_vec_info new_phi_vinfo; tree vect_phi_init, preheader_arg, vect_phi_res; basic_block bb = gimple_bb (use_stmt); gimple *use; /* Check that USE_STMT is really double reduction phi node. */ if (gimple_code (use_stmt) != GIMPLE_PHI || gimple_phi_num_args (use_stmt) != 2 || bb->loop_father != outer_loop) continue; use_stmt_vinfo = vinfo_for_stmt (use_stmt); if (!use_stmt_vinfo || STMT_VINFO_DEF_TYPE (use_stmt_vinfo) != vect_double_reduction_def) continue; /* Create vector phi node for double reduction: vs1 = phi <vs0, vs2> vs1 was created previously in this function by a call to vect_get_vec_def_for_operand and is stored in vec_initial_def; vs2 is defined by INNER_PHI, the vectorized EXIT_PHI; vs0 is created here. */ /* Create vector phi node. */ vect_phi = create_phi_node (vec_initial_def, bb); new_phi_vinfo = new_stmt_vec_info (vect_phi, loop_vec_info_for_loop (outer_loop)); set_vinfo_for_stmt (vect_phi, new_phi_vinfo); /* Create vs0 - initial def of the double reduction phi. 
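It is derived below from the initial value on the outer loop's preheader edge, via get_initial_def_for_reduction.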
*/ preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt, loop_preheader_edge (outer_loop)); vect_phi_init = get_initial_def_for_reduction (stmt, preheader_arg, NULL); /* Update phi node arguments with vs0 and vs2. */ add_phi_arg (vect_phi, vect_phi_init, loop_preheader_edge (outer_loop), UNKNOWN_LOCATION); add_phi_arg (vect_phi, PHI_RESULT (inner_phi), loop_latch_edge (outer_loop), UNKNOWN_LOCATION); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "created double reduction phi node: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0); } vect_phi_res = PHI_RESULT (vect_phi); /* Replace the use, i.e., set the correct vs1 in the regular reduction phi node. FORNOW, NCOPIES is always 1, so the loop is redundant. */ use = reduction_phi; for (j = 0; j < ncopies; j++) { edge pr_edge = loop_preheader_edge (loop); SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res); use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use)); } } } } phis.release (); if (nested_in_vect_loop) { if (double_reduc) loop = outer_loop; else continue; } phis.create (3); /* Find the loop-closed-use at the loop exit of the original scalar result. (The reduction result is expected to have two immediate uses, one at the latch block, and one at the loop exit). For double reductions we are looking for exit phis of the outer loop. */ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest) { if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))) { if (!is_gimple_debug (USE_STMT (use_p))) phis.safe_push (USE_STMT (use_p)); } else { if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI) { tree phi_res = PHI_RESULT (USE_STMT (use_p)); FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res) { if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (phi_use_p))) && !is_gimple_debug (USE_STMT (phi_use_p))) phis.safe_push (USE_STMT (phi_use_p)); } } } } FOR_EACH_VEC_ELT (phis, i, exit_phi) { /* Replace the uses: */ orig_name = PHI_RESULT (exit_phi); scalar_result = scalar_results[k]; FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name) FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) SET_USE (use_p, scalar_result); } phis.release (); } } /* Return a vector of type VECTYPE that is equal to the vector select operation "MASK ? VEC : IDENTITY". Insert the select statements before GSI. */ static tree merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype, tree vec, tree identity) { tree cond = make_temp_ssa_name (vectype, NULL, "cond"); gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR, mask, vec, identity); gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); return cond; } /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right order, starting with LHS. Insert the extraction statements before GSI and associate the new scalar SSA names with variable SCALAR_DEST. Return the SSA name for the result. 
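For example, for a four-element VECTOR_RHS v and CODE PLUS_EXPR the emitted sequence is effectively t0 = lhs + v[0]; t1 = t0 + v[1]; t2 = t1 + v[2]; t3 = t2 + v[3]; with each v[i] extracted via BIT_FIELD_REF, and t3 is returned.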
*/ static tree vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest, tree_code code, tree lhs, tree vector_rhs) { tree vectype = TREE_TYPE (vector_rhs); tree scalar_type = TREE_TYPE (vectype); tree bitsize = TYPE_SIZE (scalar_type); unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize); for (unsigned HOST_WIDE_INT bit_offset = 0; bit_offset < vec_size_in_bits; bit_offset += element_bitsize) { tree bitpos = bitsize_int (bit_offset); tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs, bitsize, bitpos); gassign *stmt = gimple_build_assign (scalar_dest, rhs); rhs = make_ssa_name (scalar_dest, stmt); gimple_assign_set_lhs (stmt, rhs); gsi_insert_before (gsi, stmt, GSI_SAME_STMT); stmt = gimple_build_assign (scalar_dest, code, lhs, rhs); tree new_name = make_ssa_name (scalar_dest, stmt); gimple_assign_set_lhs (stmt, new_name); gsi_insert_before (gsi, stmt, GSI_SAME_STMT); lhs = new_name; } return lhs; } /* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the statement that sets the live-out value. REDUC_DEF_STMT is the phi statement. CODE is the operation performed by STMT and OPS are its scalar operands. REDUC_INDEX is the index of the operand in OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that implements in-order reduction, or IFN_LAST if we should open-code it. VECTYPE_IN is the type of the vector input. MASKS specifies the masks that should be used to control the operation in a fully-masked loop. */ static bool vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, slp_tree slp_node, gimple *reduc_def_stmt, tree_code code, internal_fn reduc_fn, tree ops[3], tree vectype_in, int reduc_index, vec_loop_masks *masks) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); gimple *new_stmt = NULL; int ncopies; if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype_in); gcc_assert (!nested_in_vect_loop_p (loop, stmt)); gcc_assert (ncopies == 1); gcc_assert (TREE_CODE_LENGTH (code) == binary_op); gcc_assert (reduc_index == (code == MINUS_EXPR ? 
0 : 1)); gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == FOLD_LEFT_REDUCTION); if (slp_node) gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out), TYPE_VECTOR_SUBPARTS (vectype_in))); tree op0 = ops[1 - reduc_index]; int group_size = 1; gimple *scalar_dest_def; auto_vec<tree> vec_oprnds0; if (slp_node) { vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node); group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]; } else { tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt); vec_oprnds0.create (1); vec_oprnds0.quick_push (loop_vec_def0); scalar_dest_def = stmt; } tree scalar_dest = gimple_assign_lhs (scalar_dest_def); tree scalar_type = TREE_TYPE (scalar_dest); tree reduc_var = gimple_phi_result (reduc_def_stmt); int vec_num = vec_oprnds0.length (); gcc_assert (vec_num == 1 || slp_node); tree vec_elem_type = TREE_TYPE (vectype_out); gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type)); tree vector_identity = NULL_TREE; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) vector_identity = build_zero_cst (vectype_out); tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL); int i; tree def0; FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) { tree mask = NULL_TREE; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i); /* Handle MINUS by adding the negative. */ if (reduc_fn != IFN_LAST && code == MINUS_EXPR) { tree negated = make_ssa_name (vectype_out); new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0); gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); def0 = negated; } if (mask) def0 = merge_with_identity (gsi, mask, vectype_out, def0, vector_identity); /* On the first iteration the input is simply the scalar phi result, and for subsequent iterations it is the output of the preceding operation. */ if (reduc_fn != IFN_LAST) { new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0); /* For chained SLP reductions the output of the previous reduction operation serves as the input of the next. For the final statement the output cannot be a temporary - we reuse the original scalar destination of the last statement. */ if (i != vec_num - 1) { gimple_set_lhs (new_stmt, scalar_dest_var); reduc_var = make_ssa_name (scalar_dest_var, new_stmt); gimple_set_lhs (new_stmt, reduc_var); } } else { reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code, reduc_var, def0); new_stmt = SSA_NAME_DEF_STMT (reduc_var); /* Remove the statement, so that we can use the same code paths as for statements that we've just created. */ gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt); gsi_remove (&tmp_gsi, true); } if (i == vec_num - 1) { gimple_set_lhs (new_stmt, scalar_dest); vect_finish_replace_stmt (scalar_dest_def, new_stmt); } else vect_finish_stmt_generation (scalar_dest_def, new_stmt, gsi); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } if (!slp_node) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; return true; } /* Function is_nonwrapping_integer_induction. Check if STMT (which is part of loop LOOP) both increments and does not cause overflow. 
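An illustrative case with assumed types: an unsigned char IV with base 0 and step 1 in a loop executing at most 200 times reaches at most 200, which fits the type's 8-bit precision, so the function returns true; raise the trip-count bound to 300 and the value would need 9 bits, so it returns false.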
*/ static bool is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo); tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo); tree lhs_type = TREE_TYPE (gimple_phi_result (stmt)); widest_int ni, max_loop_value, lhs_max; bool overflow = false; /* Make sure the loop is integer based. */ if (TREE_CODE (base) != INTEGER_CST || TREE_CODE (step) != INTEGER_CST) return false; /* Check that the max size of the loop will not wrap. */ if (TYPE_OVERFLOW_UNDEFINED (lhs_type)) return true; if (! max_stmt_executions (loop, &ni)) return false; max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type), &overflow); if (overflow) return false; max_loop_value = wi::add (wi::to_widest (base), max_loop_value, TYPE_SIGN (lhs_type), &overflow); if (overflow) return false; return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type)) <= TYPE_PRECISION (lhs_type)); } /* Function vectorizable_reduction. Check if STMT performs a reduction operation that can be vectorized. If VEC_STMT is also passed, vectorize the STMT: create a vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI. Return FALSE if not a vectorizable STMT, TRUE otherwise. This function also handles reduction idioms (patterns) that have been recognized in advance during vect_pattern_recog. In this case, STMT may be of this form: X = pattern_expr (arg0, arg1, ..., X) and its STMT_VINFO_RELATED_STMT points to the last stmt in the original sequence that had been detected and replaced by the pattern-stmt (STMT). This function also handles reduction of condition expressions, for example: for (int i = 0; i < N; i++) if (a[i] < value) last = a[i]; This is handled by vectorising the loop and creating an additional vector containing the loop indexes for which "a[i] < value" was true. In the function epilogue this is reduced to a single max value and then used to index into the vector of results. In some cases of reduction patterns, the type of the reduction variable X is different than the type of the other arguments of STMT. In such cases, the vectype that is used when transforming STMT into a vector stmt is different than the vectype that is used to determine the vectorization factor, because it consists of a different number of elements than the actual number of elements that are being operated upon in parallel. For example, consider an accumulation of shorts into an int accumulator. On some targets it's possible to vectorize this pattern operating on 8 shorts at a time (hence, the vectype for purposes of determining the vectorization factor should be V8HI); on the other hand, the vectype that is used to create the vector form is actually V4SI (the type of the result). Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that indicates what is the actual level of parallelism (V8HI in the example), so that the right vectorization factor would be derived. This vectype corresponds to the type of arguments to the reduction stmt, and should *NOT* be used to create the vectorized stmt. The right vectype for the vectorized stmt is obtained from the type of the result X: get_vectype_for_scalar_type (TREE_TYPE (X)) This means that, contrary to "regular" reductions (or "regular" stmts in general), the following equation: STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X)) does *NOT* necessarily hold for reduction patterns.
*/ bool vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt, slp_tree slp_node, slp_instance slp_node_instance) { tree vec_dest; tree scalar_dest; stmt_vec_info stmt_info = vinfo_for_stmt (stmt); tree vectype_out = STMT_VINFO_VECTYPE (stmt_info); tree vectype_in = NULL_TREE; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); enum tree_code code, orig_code; internal_fn reduc_fn; machine_mode vec_mode; int op_type; optab optab; tree new_temp = NULL_TREE; gimple *def_stmt; enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type; gimple *cond_reduc_def_stmt = NULL; enum tree_code cond_reduc_op_code = ERROR_MARK; tree scalar_type; bool is_simple_use; gimple *orig_stmt; stmt_vec_info orig_stmt_info = NULL; int i; int ncopies; int epilog_copies; stmt_vec_info prev_stmt_info, prev_phi_info; bool single_defuse_cycle = false; gimple *new_stmt = NULL; int j; tree ops[3]; enum vect_def_type dts[3]; bool nested_cycle = false, found_nested_cycle_def = false; bool double_reduc = false; basic_block def_bb; struct loop * def_stmt_loop, *outer_loop = NULL; tree def_arg; gimple *def_arg_stmt; auto_vec<tree> vec_oprnds0; auto_vec<tree> vec_oprnds1; auto_vec<tree> vec_oprnds2; auto_vec<tree> vect_defs; auto_vec<gimple *> phis; int vec_num; tree def0, tem; bool first_p = true; tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE; tree cond_reduc_val = NULL_TREE; /* Make sure it was already recognized as a reduction computation. */ if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle) return false; if (nested_in_vect_loop_p (loop, stmt)) { outer_loop = loop; loop = loop->inner; nested_cycle = true; } /* In case of reduction chain we switch to the first stmt in the chain, but we don't update STMT_INFO, since only the last stmt is marked as reduction and has reduction properties. */ if (GROUP_FIRST_ELEMENT (stmt_info) && GROUP_FIRST_ELEMENT (stmt_info) != stmt) { stmt = GROUP_FIRST_ELEMENT (stmt_info); first_p = false; } if (gimple_code (stmt) == GIMPLE_PHI) { /* Analysis is fully done on the reduction stmt invocation. */ if (! vec_stmt) { if (slp_node) slp_node_instance->reduc_phis = slp_node; STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; return true; } if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION) /* Leave the scalar phi in place. Note that checking STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works for reductions involving a single statement. */ return true; gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info); if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt))) reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt)); if (STMT_VINFO_VEC_REDUCTION_TYPE (vinfo_for_stmt (reduc_stmt)) == EXTRACT_LAST_REDUCTION) /* Leave the scalar phi in place. 
*/ return true; gcc_assert (is_gimple_assign (reduc_stmt)); for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k) { tree op = gimple_op (reduc_stmt, k); if (op == gimple_phi_result (stmt)) continue; if (k == 1 && gimple_assign_rhs_code (reduc_stmt) == COND_EXPR) continue; if (!vectype_in || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in))) < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op))))) vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op)); break; } gcc_assert (vectype_in); if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype_in); use_operand_p use_p; gimple *use_stmt; if (ncopies > 1 && (STMT_VINFO_RELEVANT (vinfo_for_stmt (reduc_stmt)) <= vect_used_only_live) && single_imm_use (gimple_phi_result (stmt), &use_p, &use_stmt) && (use_stmt == reduc_stmt || (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == reduc_stmt))) single_defuse_cycle = true; /* Create the destination vector */ scalar_dest = gimple_assign_lhs (reduc_stmt); vec_dest = vect_create_destination_var (scalar_dest, vectype_out); if (slp_node) /* The size vect_schedule_slp_instance computes is off for us. */ vec_num = vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo) * SLP_TREE_SCALAR_STMTS (slp_node).length (), vectype_in); else vec_num = 1; /* Generate the reduction PHIs upfront. */ prev_phi_info = NULL; for (j = 0; j < ncopies; j++) { if (j == 0 || !single_defuse_cycle) { for (i = 0; i < vec_num; i++) { /* Create the reduction-phi that defines the reduction operand. */ gimple *new_phi = create_phi_node (vec_dest, loop->header); set_vinfo_for_stmt (new_phi, new_stmt_vec_info (new_phi, loop_vinfo)); if (slp_node) SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi); else { if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi; else STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi; prev_phi_info = vinfo_for_stmt (new_phi); } } } } return true; } /* 1. Is vectorizable reduction? */ /* Not supportable if the reduction variable is used in the loop, unless it's a reduction chain. */ if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer && !GROUP_FIRST_ELEMENT (stmt_info)) return false; /* Reductions that are not used even in an enclosing outer-loop, are expected to be "live" (used out of the loop). */ if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope && !STMT_VINFO_LIVE_P (stmt_info)) return false; /* 2. Has this been recognized as a reduction pattern? Check if STMT represents a pattern that has been recognized in earlier analysis stages. For stmts that represent a pattern, the STMT_VINFO_RELATED_STMT field records the last stmt in the original sequence that constitutes the pattern. */ orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt)); if (orig_stmt) { orig_stmt_info = vinfo_for_stmt (orig_stmt); gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info)); gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info)); } /* 3. Check the operands of the operation. The first operands are defined inside the loop body. The last operand is the reduction variable, which is defined by the loop-header-phi. */ gcc_assert (is_gimple_assign (stmt)); /* Flatten RHS. 
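That is, copy the rhs operands of STMT into OPS according to whether the rhs is a binary or ternary operation.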
switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt))) { case GIMPLE_BINARY_RHS: code = gimple_assign_rhs_code (stmt); op_type = TREE_CODE_LENGTH (code); gcc_assert (op_type == binary_op); ops[0] = gimple_assign_rhs1 (stmt); ops[1] = gimple_assign_rhs2 (stmt); break; case GIMPLE_TERNARY_RHS: code = gimple_assign_rhs_code (stmt); op_type = TREE_CODE_LENGTH (code); gcc_assert (op_type == ternary_op); ops[0] = gimple_assign_rhs1 (stmt); ops[1] = gimple_assign_rhs2 (stmt); ops[2] = gimple_assign_rhs3 (stmt); break; case GIMPLE_UNARY_RHS: return false; default: gcc_unreachable (); } if (code == COND_EXPR && slp_node) return false; scalar_dest = gimple_assign_lhs (stmt); scalar_type = TREE_TYPE (scalar_dest); if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type) && !SCALAR_FLOAT_TYPE_P (scalar_type)) return false; /* Do not try to vectorize bit-precision reductions. */ if (!type_has_mode_precision_p (scalar_type)) return false; /* All uses but the last are expected to be defined in the loop. The last use is the reduction variable. In case of a nested cycle this assumption is not true: we use reduc_index to record the index of the reduction variable. */ gimple *reduc_def_stmt = NULL; int reduc_index = -1; for (i = 0; i < op_type; i++) { /* The condition of COND_EXPR is checked in vectorizable_condition(). */ if (i == 0 && code == COND_EXPR) continue; is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &dts[i], &tem); dt = dts[i]; gcc_assert (is_simple_use); if (dt == vect_reduction_def) { reduc_def_stmt = def_stmt; reduc_index = i; continue; } else if (tem) { /* To properly compute ncopies we are interested in the widest input type in case we're looking at a widening accumulation. */ if (!vectype_in || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in))) < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem))))) vectype_in = tem; } if (dt != vect_internal_def && dt != vect_external_def && dt != vect_constant_def && dt != vect_induction_def && !(dt == vect_nested_cycle && nested_cycle)) return false; if (dt == vect_nested_cycle) { found_nested_cycle_def = true; reduc_def_stmt = def_stmt; reduc_index = i; } if (i == 1 && code == COND_EXPR) { /* Record how the value of COND_EXPR is defined. */ if (dt == vect_constant_def) { cond_reduc_dt = dt; cond_reduc_val = ops[i]; } if (dt == vect_induction_def && def_stmt != NULL && is_nonwrapping_integer_induction (def_stmt, loop)) { cond_reduc_dt = dt; cond_reduc_def_stmt = def_stmt; } } } if (!vectype_in) vectype_in = vectype_out; /* When vectorizing a reduction chain w/o SLP the reduction PHI is not directly used in stmt. */ if (reduc_index == -1) { if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "in-order reduction chain without SLP.\n"); return false; } if (orig_stmt) reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info); else reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info); } if (!
reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI) return false; if (!(reduc_index == -1 || dts[reduc_index] == vect_reduction_def || dts[reduc_index] == vect_nested_cycle || ((dts[reduc_index] == vect_internal_def || dts[reduc_index] == vect_external_def || dts[reduc_index] == vect_constant_def || dts[reduc_index] == vect_induction_def) && nested_cycle && found_nested_cycle_def))) { /* For pattern recognized stmts, orig_stmt might be a reduction, but some helper statements for the pattern might not, or might be COND_EXPRs with reduction uses in the condition. */ gcc_assert (orig_stmt); return false; } stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt); enum vect_reduction_type v_reduc_type = STMT_VINFO_REDUC_TYPE (reduc_def_info); gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type; /* If we have a condition reduction, see if we can simplify it further. */ if (v_reduc_type == COND_REDUCTION) { /* TODO: We can't yet handle reduction chains, since we need to treat each COND_EXPR in the chain specially, not just the last one. E.g. for: x_1 = PHI <x_3, ...> x_2 = a_2 ? ... : x_1; x_3 = a_3 ? ... : x_2; we're interested in the last element in x_3 for which a_2 || a_3 is true, whereas the current reduction chain handling would vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3 as a reduction operation. */ if (reduc_index == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "conditional reduction chains not supported\n"); return false; } /* vect_is_simple_reduction ensured that operand 2 is the loop-carried operand. */ gcc_assert (reduc_index == 2); /* Loop peeling modifies the initial value of the reduction PHI, which makes the reduction stmt to be transformed differ from the original stmt analyzed. We need to record the reduction code for a CONST_COND_REDUCTION type reduction at the analysis stage, so that it can be used directly at the transform stage. */ if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR || STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR) { /* Also set the reduction type to CONST_COND_REDUCTION. */ gcc_assert (cond_reduc_dt == vect_constant_def); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST, vectype_in, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "optimizing condition reduction with" " FOLD_EXTRACT_LAST.\n"); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION; } else if (cond_reduc_dt == vect_induction_def) { stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt); tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo); tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo); gcc_assert (TREE_CODE (base) == INTEGER_CST && TREE_CODE (step) == INTEGER_CST); cond_reduc_val = NULL_TREE; /* Find a suitable value, for MAX_EXPR below base, for MIN_EXPR above base; punt if base is the minimum value of the type for MAX_EXPR or the maximum value of the type for MIN_EXPR for now.
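A worked instance (an illustrative sketch): a decreasing induction with base 16 selects MIN_EXPR and cond_reduc_val = base + 1 = 17, a value the induction can never produce; a negative base simply uses 0 instead.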
*/ if (tree_int_cst_sgn (step) == -1) { cond_reduc_op_code = MIN_EXPR; if (tree_int_cst_sgn (base) == -1) cond_reduc_val = build_int_cst (TREE_TYPE (base), 0); else if (tree_int_cst_lt (base, TYPE_MAX_VALUE (TREE_TYPE (base)))) cond_reduc_val = int_const_binop (PLUS_EXPR, base, integer_one_node); } else { cond_reduc_op_code = MAX_EXPR; if (tree_int_cst_sgn (base) == 1) cond_reduc_val = build_int_cst (TREE_TYPE (base), 0); else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)), base)) cond_reduc_val = int_const_binop (MINUS_EXPR, base, integer_one_node); } if (cond_reduc_val) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "integer induction.\n"); STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = INTEGER_INDUC_COND_REDUCTION; } } else if (cond_reduc_dt == vect_constant_def) { enum vect_def_type cond_initial_dt; gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]); tree cond_initial_val = PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop)); gcc_assert (cond_reduc_val != NULL_TREE); vect_is_simple_use (cond_initial_val, loop_vinfo, &def_stmt, &cond_initial_dt); if (cond_initial_dt == vect_constant_def && types_compatible_p (TREE_TYPE (cond_initial_val), TREE_TYPE (cond_reduc_val))) { tree e = fold_binary (LE_EXPR, boolean_type_node, cond_initial_val, cond_reduc_val); if (e && (integer_onep (e) || integer_zerop (e))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "condition expression based on " "compile time constant.\n"); /* Record reduction code at analysis stage. */ STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) = integer_onep (e) ? MAX_EXPR : MIN_EXPR; STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION; } } } } if (orig_stmt) gcc_assert (tmp == orig_stmt || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt); else /* We changed STMT to be the first stmt in reduction chain, hence we check that in this case the first element in the chain is STMT. */ gcc_assert (stmt == tmp || GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt); if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt))) return false; if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype_in); gcc_assert (ncopies >= 1); vec_mode = TYPE_MODE (vectype_in); poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out); if (code == COND_EXPR) { /* Only call during the analysis stage, otherwise we'll lose STMT_VINFO_TYPE. */ if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL, ops[reduc_index], 0, NULL)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported condition in reduction\n"); return false; } } else { /* 4. Supportable by target? */ if (code == LSHIFT_EXPR || code == RSHIFT_EXPR || code == LROTATE_EXPR || code == RROTATE_EXPR) { /* Shifts and rotates are only supported by vectorizable_shifts, not vectorizable_reduction. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported shift or rotation.\n"); return false; } /* 4.1. 
check support for the operation in the loop */ optab = optab_for_tree_code (code, vectype_in, optab_default); if (!optab) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no optab.\n"); return false; } if (optab_handler (optab, vec_mode) == CODE_FOR_nothing) { if (dump_enabled_p ()) dump_printf (MSG_NOTE, "op not supported by target.\n"); if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD) || !vect_worthwhile_without_simd_p (loop_vinfo, code)) return false; if (dump_enabled_p ()) dump_printf (MSG_NOTE, "proceeding using word mode.\n"); } /* Worthwhile without SIMD support? */ if (!VECTOR_MODE_P (TYPE_MODE (vectype_in)) && !vect_worthwhile_without_simd_p (loop_vinfo, code)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "not worthwhile without SIMD support.\n"); return false; } } /* 4.2. Check support for the epilog operation. If STMT represents a reduction pattern, then the type of the reduction variable may be different from the type of the rest of the arguments. For example, consider the case of accumulation of shorts into an int accumulator. The original code: S1: int_a = (int) short_a; orig_stmt-> S2: int_acc = plus <int_a, int_acc>; was replaced with: STMT: int_acc = widen_sum <short_a, int_acc> This means that: 1. The tree-code that is used to create the vector operation in the epilog code (that reduces the partial results) is not the tree-code of STMT, but is rather the tree-code of the original stmt from the pattern that STMT is replacing. I.e., in the example above we want to use 'widen_sum' in the loop, but 'plus' in the epilog. 2. The type (mode) we use to check available target support for the vector operation to be created in the *epilog*, is determined by the type of the reduction variable (in the example above we'd check this: optab_handler (plus_optab, vect_int_mode)). However the type (mode) we use to check available target support for the vector operation to be created *inside the loop*, is determined by the type of the other arguments to STMT (in the example we'd check this: optab_handler (widen_sum_optab, vect_short_mode)). This is contrary to "regular" reductions, in which the types of all the arguments are the same as the type of the reduction variable. For "regular" reductions we can therefore use the same vector type (and also the same tree-code) when generating the epilog code and when generating the code inside the loop. */ vect_reduction_type reduction_type = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info); if (orig_stmt && (reduction_type == TREE_CODE_REDUCTION || reduction_type == FOLD_LEFT_REDUCTION)) { /* This is a reduction pattern: get the vectype from the type of the reduction variable, and get the tree-code from orig_stmt. */ orig_code = gimple_assign_rhs_code (orig_stmt); gcc_assert (vectype_out); vec_mode = TYPE_MODE (vectype_out); } else { /* Regular reduction: the same vectype and tree-code as used for the vector code inside the loop can also be used for the epilog code. */ orig_code = code; if (code == MINUS_EXPR) orig_code = PLUS_EXPR; /* For simple condition reductions, replace with the actual expression we want to base our reduction around.
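E.g. (an illustrative sketch): x = a[i] ? 3 : x with an initial value of 0 behaves exactly like a max reduction, so MAX_EXPR was recorded during analysis and is used below in place of the COND_EXPR code.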
*/ if (reduction_type == CONST_COND_REDUCTION) { orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info); gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR); } else if (reduction_type == INTEGER_INDUC_COND_REDUCTION) orig_code = cond_reduc_op_code; } if (nested_cycle) { def_bb = gimple_bb (reduc_def_stmt); def_stmt_loop = def_bb->loop_father; def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt, loop_preheader_edge (def_stmt_loop)); if (TREE_CODE (def_arg) == SSA_NAME && (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg)) && gimple_code (def_arg_stmt) == GIMPLE_PHI && flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt)) && vinfo_for_stmt (def_arg_stmt) && STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt)) == vect_double_reduction_def) double_reduc = true; } reduc_fn = IFN_LAST; if (reduction_type == TREE_CODE_REDUCTION || reduction_type == FOLD_LEFT_REDUCTION || reduction_type == INTEGER_INDUC_COND_REDUCTION || reduction_type == CONST_COND_REDUCTION) { if (reduction_type == FOLD_LEFT_REDUCTION ? fold_left_reduction_fn (orig_code, &reduc_fn) : reduction_fn_for_scalar_code (orig_code, &reduc_fn)) { if (reduc_fn != IFN_LAST && !direct_internal_fn_supported_p (reduc_fn, vectype_out, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduc op not supported by target.\n"); reduc_fn = IFN_LAST; } } else { if (!nested_cycle || double_reduc) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "no reduc code for scalar code.\n"); return false; } } } else if (reduction_type == COND_REDUCTION) { int scalar_precision = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type)); cr_index_scalar_type = make_unsigned_type (scalar_precision); cr_index_vector_type = build_vector_type (cr_index_scalar_type, nunits_out); if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type, OPTIMIZE_FOR_SPEED)) reduc_fn = IFN_REDUC_MAX; } if (reduction_type != EXTRACT_LAST_REDUCTION && reduc_fn == IFN_LAST && !nunits_out.is_constant ()) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "missing target support for reduction on" " variable-length vectors.\n"); return false; } if ((double_reduc || reduction_type != TREE_CODE_REDUCTION) && ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multiple types in double reduction or condition " "reduction.\n"); return false; } /* For SLP reductions, see if there is a neutral value we can use. */ tree neutral_op = NULL_TREE; if (slp_node) neutral_op = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code, GROUP_FIRST_ELEMENT (stmt_info) != NULL); if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION) { /* We can't support in-order reductions of code such as this: for (int i = 0; i < n1; ++i) for (int j = 0; j < n2; ++j) l += a[j]; since GCC effectively transforms the loop when vectorizing: for (int i = 0; i < n1 / VF; ++i) for (int j = 0; j < n2; ++j) for (int k = 0; k < VF; ++k) l += a[j]; which is a reassociation of the original operation. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "in-order double reduction not supported.\n"); return false; } if (reduction_type == FOLD_LEFT_REDUCTION && slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) { /* We cannot use in-order reductions in this case because there is an implicit reassociation of the operations involved. 
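Illustrative sketch: with res0 += a[2*i]; res1 += a[2*i+1]; and a four-lane vector, each accumulator is spread across two lanes, and combining those lanes in the epilogue reorders the scalar additions, which an in-order (fold-left) reduction must not do.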
if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "in-order unchained SLP reductions not supported.\n"); return false; } /* For double reductions, and for SLP reductions with a neutral value, we construct a variable-length initial vector by loading a vector full of the neutral value and then shift-and-inserting the start values into the low-numbered elements. */ if ((double_reduc || neutral_op) && !nunits_out.is_constant () && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT, vectype_out, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "reduction on variable-length vectors requires" " target support for a vector-shift-and-insert" " operation.\n"); return false; } /* Check extra constraints for variable-length unchained SLP reductions. */ if (STMT_SLP_TYPE (stmt_info) && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) && !nunits_out.is_constant ()) { /* We checked above that we could build the initial vector when there's a neutral element value. Check here for the case in which each SLP statement has its own initial value and in which that value needs to be repeated for every instance of the statement within the initial vector. */ unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out)); if (!neutral_op && !can_duplicate_and_interleave_p (group_size, elt_mode)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported form of SLP reduction for" " variable-length vectors: cannot build" " initial vector.\n"); return false; } /* The epilogue code relies on the number of elements being a multiple of the group size. The duplicate-and-interleave approach to setting up the initial vector does too. */ if (!multiple_p (nunits_out, group_size)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "unsupported form of SLP reduction for" " variable-length vectors: the vector size" " is not a multiple of the number of results.\n"); return false; } } /* In case of widening multiplication by a constant, we update the type of the constant to be the type of the other operand. We check that the constant fits the type in the pattern recognition pass. */ if (code == DOT_PROD_EXPR && !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1]))) { if (TREE_CODE (ops[0]) == INTEGER_CST) ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]); else if (TREE_CODE (ops[1]) == INTEGER_CST) ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]); else { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "invalid types in dot-prod\n"); return false; } } if (reduction_type == COND_REDUCTION) { widest_int ni; if (! max_loop_iterations (loop, &ni)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop count not known, cannot create cond " "reduction.\n"); return false; } /* Convert the backedge count to an iteration count. */ ni += 1; /* The additional index will be the same type as the condition. Check that the loop can fit into this, less one (because we'll use up the zero slot for when there are no matches.
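For instance (illustrative): an unsigned char index allows at most 254 iterations, since index 0 is reserved for "no match" and the check below requires the iteration count to stay strictly below the type maximum of 255.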
*/ tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type); if (wi::geu_p (ni, wi::to_widest (max_index))) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "loop size is greater than data size.\n"); return false; } } /* In case the vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. */ /* If the reduction is used in an outer loop we need to generate VF intermediate results, like so (e.g. for ncopies=2): r0 = phi (init, r0) r1 = phi (init, r1) r0 = x0 + r0; r1 = x1 + r1; (i.e. we generate VF results in 2 registers). In this case we have a separate def-use cycle for each copy, and therefore for each copy we get the vector def for the reduction variable from the respective phi node created for this copy. Otherwise (the reduction is unused in the loop nest), we can combine together intermediate results, like so (e.g. for ncopies=2): r = phi (init, r) r = x0 + r; r = x1 + r; (i.e. we generate VF/2 results in a single register). In this case for each copy we get the vector def for the reduction variable from the vectorized reduction operation generated in the previous iteration. This only works when we see both the reduction PHI and its only consumer in vectorizable_reduction and there are no intermediate stmts participating. */ use_operand_p use_p; gimple *use_stmt; if (ncopies > 1 && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live) && single_imm_use (gimple_phi_result (reduc_def_stmt), &use_p, &use_stmt) && (use_stmt == stmt || STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == stmt)) { single_defuse_cycle = true; epilog_copies = 1; } else epilog_copies = ncopies; /* If the reduction stmt is one of the patterns that have lane reduction embedded we cannot handle the case of ! single_defuse_cycle. */ if ((ncopies > 1 && ! single_defuse_cycle) && (code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multi def-use cycle not possible for lane-reducing " "reduction operation\n"); return false; } if (slp_node) vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); else vec_num = 1; internal_fn cond_fn = get_conditional_internal_fn (code); vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo); if (!vec_stmt) /* transformation not required. 
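Only costing and bookkeeping happen on this path; e.g. (illustrative) a candidate for a fully-masked loop records a need for ncopies * vec_num loop masks of the input vector type via vect_record_loop_mask below.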
*/ { if (first_p) vect_model_reduction_cost (stmt_info, reduc_fn, ncopies); if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) { if (reduction_type != FOLD_LEFT_REDUCTION && (cond_fn == IFN_LAST || !direct_internal_fn_supported_p (cond_fn, vectype_in, OPTIMIZE_FOR_SPEED))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because no" " conditional operation is available.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else if (reduc_index == -1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop for chained" " reductions.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num, vectype_in); } if (dump_enabled_p () && reduction_type == FOLD_LEFT_REDUCTION) dump_printf_loc (MSG_NOTE, vect_location, "using an in-order (fold-left) reduction.\n"); STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type; return true; } /* Transform. */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n"); /* FORNOW: Multiple types are not supported for condition. */ if (code == COND_EXPR) gcc_assert (ncopies == 1); bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo); if (reduction_type == FOLD_LEFT_REDUCTION) return vectorize_fold_left_reduction (stmt, gsi, vec_stmt, slp_node, reduc_def_stmt, code, reduc_fn, ops, vectype_in, reduc_index, masks); if (reduction_type == EXTRACT_LAST_REDUCTION) { gcc_assert (!slp_node); return vectorizable_condition (stmt, gsi, vec_stmt, NULL, reduc_index, NULL); } /* Create the destination vector */ vec_dest = vect_create_destination_var (scalar_dest, vectype_out); prev_stmt_info = NULL; prev_phi_info = NULL; if (!slp_node) { vec_oprnds0.create (1); vec_oprnds1.create (1); if (op_type == ternary_op) vec_oprnds2.create (1); } phis.create (vec_num); vect_defs.create (vec_num); if (!slp_node) vect_defs.quick_push (NULL_TREE); if (slp_node) phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis)); else phis.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt))); for (j = 0; j < ncopies; j++) { if (code == COND_EXPR) { gcc_assert (!slp_node); vectorizable_condition (stmt, gsi, vec_stmt, PHI_RESULT (phis[0]), reduc_index, NULL); /* Multiple types are not supported for condition. */ break; } /* Handle uses. */ if (j == 0) { if (slp_node) { /* Get vec defs for all the operands except the reduction index, ensuring the ordering of the ops in the vector is kept. */ auto_vec<tree, 3> slp_ops; auto_vec<vec<tree>, 3> vec_defs; slp_ops.quick_push (ops[0]); slp_ops.quick_push (ops[1]); if (op_type == ternary_op) slp_ops.quick_push (ops[2]); vect_get_slp_defs (slp_ops, slp_node, &vec_defs); vec_oprnds0.safe_splice (vec_defs[0]); vec_defs[0].release (); vec_oprnds1.safe_splice (vec_defs[1]); vec_defs[1].release (); if (op_type == ternary_op) { vec_oprnds2.safe_splice (vec_defs[2]); vec_defs[2].release (); } } else { vec_oprnds0.quick_push (vect_get_vec_def_for_operand (ops[0], stmt)); vec_oprnds1.quick_push (vect_get_vec_def_for_operand (ops[1], stmt)); if (op_type == ternary_op) vec_oprnds2.quick_push (vect_get_vec_def_for_operand (ops[2], stmt)); } } else { if (!slp_node) { gcc_assert (reduc_index != -1 || ! 
single_defuse_cycle); if (single_defuse_cycle && reduc_index == 0) vec_oprnds0[0] = gimple_get_lhs (new_stmt); else vec_oprnds0[0] = vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]); if (single_defuse_cycle && reduc_index == 1) vec_oprnds1[0] = gimple_get_lhs (new_stmt); else vec_oprnds1[0] = vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]); if (op_type == ternary_op) { if (single_defuse_cycle && reduc_index == 2) vec_oprnds2[0] = gimple_get_lhs (new_stmt); else vec_oprnds2[0] = vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]); } } } FOR_EACH_VEC_ELT (vec_oprnds0, i, def0) { tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE }; if (masked_loop_p) { /* Make sure that the reduction accumulator is vop[0]. */ if (reduc_index == 1) { gcc_assert (commutative_tree_code (code)); std::swap (vop[0], vop[1]); } tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies, vectype_in, i * ncopies + j); gcall *call = gimple_build_call_internal (cond_fn, 3, mask, vop[0], vop[1]); new_temp = make_ssa_name (vec_dest, call); gimple_call_set_lhs (call, new_temp); gimple_call_set_nothrow (call, true); new_stmt = call; } else { if (op_type == ternary_op) vop[2] = vec_oprnds2[i]; new_temp = make_ssa_name (vec_dest, new_stmt); new_stmt = gimple_build_assign (new_temp, code, vop[0], vop[1], vop[2]); } vect_finish_stmt_generation (stmt, new_stmt, gsi); if (slp_node) { SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); vect_defs.quick_push (new_temp); } else vect_defs[0] = new_temp; } if (slp_node) continue; if (j == 0) STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt; else STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt; prev_stmt_info = vinfo_for_stmt (new_stmt); } /* Finalize the reduction-phi (set its arguments) and create the epilog reduction code. */ if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node) vect_defs[0] = gimple_get_lhs (*vec_stmt); vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt, epilog_copies, reduc_fn, phis, double_reduc, slp_node, slp_node_instance, cond_reduc_val, cond_reduc_op_code, neutral_op); return true; } /* Function vect_min_worthwhile_factor. For a loop where we could vectorize the operation indicated by CODE, return the minimum vectorization factor that makes it worthwhile to use generic vectors. */ static unsigned int vect_min_worthwhile_factor (enum tree_code code) { switch (code) { case PLUS_EXPR: case MINUS_EXPR: case NEGATE_EXPR: return 4; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_NOT_EXPR: return 2; default: return INT_MAX; } } /* Return true if VINFO indicates we are doing loop vectorization and if it is worth decomposing CODE operations into scalar operations for that loop's vectorization factor. */ bool vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code) { loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); unsigned HOST_WIDE_INT value; return (loop_vinfo && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value) && value >= vect_min_worthwhile_factor (code)); } /* Function vectorizable_induction Check if PHI performs an induction computation that can be vectorized. If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized phi to replace it, put it in VEC_STMT, and add it to the same basic block. Return FALSE if not a vectorizable STMT, TRUE otherwise. 
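A minimal illustrative candidate: in for (i = 0; i < n; ++i) { b[i] = j; j += s; } the PHI for J performs the induction and is replaced by a vector IV that is updated once per vector iteration.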
*/ bool vectorizable_induction (gimple *phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, gimple **vec_stmt, slp_tree slp_node) { stmt_vec_info stmt_info = vinfo_for_stmt (phi); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); unsigned ncopies; bool nested_in_vect_loop = false; struct loop *iv_loop; tree vec_def; edge pe = loop_preheader_edge (loop); basic_block new_bb; tree new_vec, vec_init, vec_step, t; tree new_name; gimple *new_stmt; gphi *induction_phi; tree induc_def, vec_dest; tree init_expr, step_expr; poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); unsigned i; tree expr; gimple_seq stmts; imm_use_iterator imm_iter; use_operand_p use_p; gimple *exit_phi; edge latch_e; tree loop_arg; gimple_stmt_iterator si; basic_block bb = gimple_bb (phi); if (gimple_code (phi) != GIMPLE_PHI) return false; if (!STMT_VINFO_RELEVANT_P (stmt_info)) return false; /* Make sure it was recognized as induction computation. */ if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def) return false; tree vectype = STMT_VINFO_VECTYPE (stmt_info); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype); gcc_assert (ncopies >= 1); /* FORNOW. These restrictions should be relaxed. */ if (nested_in_vect_loop_p (loop, phi)) { imm_use_iterator imm_iter; use_operand_p use_p; gimple *exit_phi; edge latch_e; tree loop_arg; if (ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "multiple types in nested loop.\n"); return false; } /* FORNOW: outer loop induction with SLP not supported. */ if (STMT_SLP_TYPE (stmt_info)) return false; exit_phi = NULL; latch_e = loop_latch_edge (loop->inner); loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt))) { exit_phi = use_stmt; break; } } if (exit_phi) { stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi); if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo) && !STMT_VINFO_LIVE_P (exit_phi_vinfo))) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "inner-loop induction only used outside " "of the outer vectorized loop.\n"); return false; } } nested_in_vect_loop = true; iv_loop = loop->inner; } else iv_loop = loop; gcc_assert (iv_loop == (gimple_bb (phi))->loop_father); if (slp_node && !nunits.is_constant ()) { /* The current SLP code creates the initial value element-by-element. */ if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "SLP induction not supported for variable-length" " vectors.\n"); return false; } if (!vec_stmt) /* transformation not required. */ { STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_induction ===\n"); vect_model_induction_cost (stmt_info, ncopies); return true; } /* Transform. */ /* Compute a vector variable, initialized with the first VF values of the induction variable. E.g., for an iv with IV_PHI='X' and evolution S, for a vector of 4 units, we want to compute: [X, X + S, X + 2*S, X + 3*S]. 
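Concretely (illustrative numbers): X = 10 and S = 3 give vec_init = [10, 13, 16, 19], and each vector iteration then adds the uniform step [4*S, 4*S, 4*S, 4*S] = [12, 12, 12, 12].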
*/ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n"); latch_e = loop_latch_edge (iv_loop); loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e); step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info); gcc_assert (step_expr != NULL_TREE); pe = loop_preheader_edge (iv_loop); init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (iv_loop)); stmts = NULL; if (!nested_in_vect_loop) { /* Convert the initial value to the desired type. */ tree new_type = TREE_TYPE (vectype); init_expr = gimple_convert (&stmts, new_type, init_expr); /* If we are using the loop mask to "peel" for alignment then we need to adjust the start value here. */ tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo); if (skip_niters != NULL_TREE) { if (FLOAT_TYPE_P (vectype)) skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type, skip_niters); else skip_niters = gimple_convert (&stmts, new_type, skip_niters); tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type, skip_niters, step_expr); init_expr = gimple_build (&stmts, MINUS_EXPR, new_type, init_expr, skip_step); } } /* Convert the step to the desired type. */ step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr); if (stmts) { new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } /* Find the first insertion point in the BB. */ si = gsi_after_labels (bb); /* For SLP induction we have to generate several IVs as for example with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S] [i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform [VF*S, VF*S, VF*S, VF*S] for all. */ if (slp_node) { /* Enforced above. */ unsigned int const_nunits = nunits.to_constant (); /* Generate [VF*S, VF*S, ... ]. */ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, vf); expr = fold_convert (TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), vf); new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (! CONSTANT_CLASS_P (new_name)) new_name = vect_init_vector (phi, new_name, TREE_TYPE (step_expr), NULL); new_vec = build_vector_from_val (vectype, new_name); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); /* Now generate the IVs. */ unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); unsigned elts = const_nunits * nvects; unsigned nivs = least_common_multiple (group_size, const_nunits) / const_nunits; gcc_assert (elts % group_size == 0); tree elt = init_expr; unsigned ivn; for (ivn = 0; ivn < nivs; ++ivn) { tree_vector_builder elts (vectype, const_nunits, 1); stmts = NULL; for (unsigned eltn = 0; eltn < const_nunits; ++eltn) { if (ivn*const_nunits + eltn >= group_size && (ivn * const_nunits + eltn) % group_size == 0) elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt), elt, step_expr); elts.quick_push (elt); } vec_init = gimple_build_vector (&stmts, &elts); if (stmts) { new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } /* Create the induction-phi that defines the induction-operand. 
*/ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); induction_phi = create_phi_node (vec_dest, iv_loop->header); set_vinfo_for_stmt (induction_phi, new_stmt_vec_info (induction_phi, loop_vinfo)); induc_def = PHI_RESULT (induction_phi); /* Create the iv update inside the loop */ vec_def = make_ssa_name (vec_dest); new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step); gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); /* Set the arguments of the phi node: */ add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION); add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop), UNKNOWN_LOCATION); SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi); } /* Re-use IVs when we can. */ if (ivn < nvects) { unsigned vfp = least_common_multiple (group_size, const_nunits) / group_size; /* Generate [VF'*S, VF'*S, ... ]. */ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, vfp); expr = fold_convert (TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), vfp); new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (! CONSTANT_CLASS_P (new_name)) new_name = vect_init_vector (phi, new_name, TREE_TYPE (step_expr), NULL); new_vec = build_vector_from_val (vectype, new_name); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); for (; ivn < nvects; ++ivn) { gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]; tree def; if (gimple_code (iv) == GIMPLE_PHI) def = gimple_phi_result (iv); else def = gimple_assign_lhs (iv); new_stmt = gimple_build_assign (make_ssa_name (vectype), PLUS_EXPR, def, vec_step); if (gimple_code (iv) == GIMPLE_PHI) gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); else { gimple_stmt_iterator tgsi = gsi_for_stmt (iv); gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING); } set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt); } } return true; } /* Create the vector that holds the initial_value of the induction. */ if (nested_in_vect_loop) { /* iv_loop is nested in the loop to be vectorized. init_expr had already been created during vectorization of previous stmts. We obtain it from the STMT_VINFO_VEC_STMT of the defining stmt. */ vec_init = vect_get_vec_def_for_operand (init_expr, phi); /* If the initial value is not of proper type, convert it. */ if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init))) { new_stmt = gimple_build_assign (vect_get_new_ssa_name (vectype, vect_simple_var, "vec_iv_"), VIEW_CONVERT_EXPR, build1 (VIEW_CONVERT_EXPR, vectype, vec_init)); vec_init = gimple_assign_lhs (new_stmt); new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop), new_stmt); gcc_assert (!new_bb); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); } } else { /* iv_loop is the loop to be vectorized. 
Create: vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */ stmts = NULL; new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr); unsigned HOST_WIDE_INT const_nunits; if (nunits.is_constant (&const_nunits)) { tree_vector_builder elts (vectype, const_nunits, 1); elts.quick_push (new_name); for (i = 1; i < const_nunits; i++) { /* Create: new_name_i = new_name + step_expr */ new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name), new_name, step_expr); elts.quick_push (new_name); } /* Create a vector from [new_name_0, new_name_1, ..., new_name_nunits-1] */ vec_init = gimple_build_vector (&stmts, &elts); } else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr))) /* Build the initial value directly from a VEC_SERIES_EXPR. */ vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype, new_name, step_expr); else { /* Build: [base, base, base, ...] + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */ gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))); gcc_assert (flag_associative_math); tree index = build_index_vector (vectype, 0, 1); tree base_vec = gimple_build_vector_from_val (&stmts, vectype, new_name); tree step_vec = gimple_build_vector_from_val (&stmts, vectype, step_expr); vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index); vec_init = gimple_build (&stmts, MULT_EXPR, vectype, vec_init, step_vec); vec_init = gimple_build (&stmts, PLUS_EXPR, vectype, vec_init, base_vec); } if (stmts) { new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts); gcc_assert (!new_bb); } } /* Create the vector that holds the step of the induction. */ if (nested_in_vect_loop) /* iv_loop is nested in the loop to be vectorized. Generate: vec_step = [S, S, S, S] */ new_name = step_expr; else { /* iv_loop is the loop to be vectorized. Generate: vec_step = [VF*S, VF*S, VF*S, VF*S] */ gimple_seq seq = NULL; if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, vf); expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), vf); new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (seq) { new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); gcc_assert (!new_bb); } } t = unshare_expr (new_name); gcc_assert (CONSTANT_CLASS_P (new_name) || TREE_CODE (new_name) == SSA_NAME); new_vec = build_vector_from_val (vectype, t); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); /* Create the following def-use cycle: loop prolog: vec_init = ... vec_step = ... loop: vec_iv = PHI <vec_init, vec_loop> ... STMT ... vec_loop = vec_iv + vec_step; */ /* Create the induction-phi that defines the induction-operand. 
*/ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_"); induction_phi = create_phi_node (vec_dest, iv_loop->header); set_vinfo_for_stmt (induction_phi, new_stmt_vec_info (induction_phi, loop_vinfo)); induc_def = PHI_RESULT (induction_phi); /* Create the iv update inside the loop */ vec_def = make_ssa_name (vec_dest); new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step); gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); /* Set the arguments of the phi node: */ add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION); add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop), UNKNOWN_LOCATION); STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi; /* In case that vectorization factor (VF) is bigger than the number of elements that we can fit in a vectype (nunits), we have to generate more than one vector stmt - i.e - we need to "unroll" the vector stmt by a factor VF/nunits. For more details see documentation in vectorizable_operation. */ if (ncopies > 1) { gimple_seq seq = NULL; stmt_vec_info prev_stmt_vinfo; /* FORNOW. This restriction should be relaxed. */ gcc_assert (!nested_in_vect_loop); /* Create the vector that holds the step of the induction. */ if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))) { expr = build_int_cst (integer_type_node, nunits); expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr); } else expr = build_int_cst (TREE_TYPE (step_expr), nunits); new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr), expr, step_expr); if (seq) { new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); gcc_assert (!new_bb); } t = unshare_expr (new_name); gcc_assert (CONSTANT_CLASS_P (new_name) || TREE_CODE (new_name) == SSA_NAME); new_vec = build_vector_from_val (vectype, t); vec_step = vect_init_vector (phi, new_vec, vectype, NULL); vec_def = induc_def; prev_stmt_vinfo = vinfo_for_stmt (induction_phi); for (i = 1; i < ncopies; i++) { /* vec_i = vec_prev + vec_step */ new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR, vec_def, vec_step); vec_def = make_ssa_name (vec_dest, new_stmt); gimple_assign_set_lhs (new_stmt, vec_def); gsi_insert_before (&si, new_stmt, GSI_SAME_STMT); set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo)); STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt; prev_stmt_vinfo = vinfo_for_stmt (new_stmt); } } if (nested_in_vect_loop) { /* Find the loop-closed exit-phi of the induction, and record the final vector of induction results: */ exit_phi = NULL; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg) { gimple *use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt))) { exit_phi = use_stmt; break; } } if (exit_phi) { stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi); /* FORNOW. Currently not supporting the case that an inner-loop induction is not used in the outer-loop (i.e. only outside the outer-loop). 
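An illustrative supported shape: for (i) { for (j) k++; b[i] = k; } where the final inner value of K is used by the outer loop body; a K used only after the outer loop is the unsupported case asserted below.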
*/ gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo) && !STMT_VINFO_LIVE_P (stmt_vinfo)); STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt; if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "vector of inductions after inner-loop:"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0); } } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "transform induction: created def-use cycle: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (vec_def), 0); } return true; } /* Function vectorizable_live_operation. STMT computes a value that is used outside the loop. Check if it can be supported. */ bool vectorizable_live_operation (gimple *stmt, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED, slp_tree slp_node, int slp_index, gimple **vec_stmt) { stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); imm_use_iterator imm_iter; tree lhs, lhs_type, bitsize, vec_bitsize; tree vectype = STMT_VINFO_VECTYPE (stmt_info); poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); int ncopies; gimple *use_stmt; auto_vec<tree> vec_oprnds; int vec_entry = 0; poly_uint64 vec_index = 0; gcc_assert (STMT_VINFO_LIVE_P (stmt_info)); if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def) return false; /* FORNOW. CHECKME. */ if (nested_in_vect_loop_p (loop, stmt)) return false; /* If STMT is not relevant and it is a simple assignment and its inputs are invariant then it can remain in place, unvectorized. The original last scalar value that it computes will be used. */ if (!STMT_VINFO_RELEVANT_P (stmt_info)) { gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo)); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "statement is simple and uses invariant. Leaving in " "place.\n"); return true; } if (slp_node) ncopies = 1; else ncopies = vect_get_num_copies (loop_vinfo, vectype); if (slp_node) { gcc_assert (slp_index >= 0); int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length (); int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); /* Get the last occurrence of the scalar index from the concatenation of all the slp vectors. Calculate which slp vector it is and the index within. */ poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index; /* Calculate which vector contains the result, and which lane of that vector we need. */ if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "Cannot determine which vector holds the" " final result.\n"); return false; } } if (!vec_stmt) { /* No transformation required. 
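This path only decides whether the live-out value is compatible with a fully-masked loop, recording a loop mask when it is.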
*/ if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)) { if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype, OPTIMIZE_FOR_SPEED)) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because " "the target doesn't support extract last " "reduction.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else if (slp_node) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because an " "SLP statement is live after the loop.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else if (ncopies > 1) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, "can't use a fully-masked loop because" " ncopies is greater than 1.\n"); LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false; } else { gcc_assert (ncopies == 1 && !slp_node); vect_record_loop_mask (loop_vinfo, &LOOP_VINFO_MASKS (loop_vinfo), 1, vectype); } } return true; } /* If stmt has a related stmt, then use that for getting the lhs. */ if (is_pattern_stmt_p (stmt_info)) stmt = STMT_VINFO_RELATED_STMT (stmt_info); lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt) : gimple_get_lhs (stmt); lhs_type = TREE_TYPE (lhs); bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype) ? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype))) : TYPE_SIZE (TREE_TYPE (vectype))); vec_bitsize = TYPE_SIZE (vectype); /* Get the vectorized lhs of STMT and the lane to use (counted in bits). */ tree vec_lhs, bitstart; if (slp_node) { gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); /* Get the correct slp vectorized stmt. */ gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry]; if (gphi *phi = dyn_cast <gphi *> (vec_stmt)) vec_lhs = gimple_phi_result (phi); else vec_lhs = gimple_get_lhs (vec_stmt); /* Get entry to use. */ bitstart = bitsize_int (vec_index); bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart); } else { enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info); vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt); gcc_checking_assert (ncopies == 1 || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)); /* For multiple copies, get the last copy. */ for (int i = 1; i < ncopies; ++i) vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, vec_lhs); /* Get the last lane in the vector. */ bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize); } gimple_seq stmts = NULL; tree new_tree; if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { /* Emit: SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK> where VEC_LHS is the vectorized live-out result and MASK is the loop mask for the final iteration. */ gcc_assert (ncopies == 1 && !slp_node); tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info)); tree scalar_res = make_ssa_name (scalar_type); tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo), 1, vectype, 0); gcall *new_stmt = gimple_build_call_internal (IFN_EXTRACT_LAST, 2, mask, vec_lhs); gimple_call_set_lhs (new_stmt, scalar_res); gimple_seq_add_stmt (&stmts, new_stmt); /* Convert the extracted vector element to the required scalar type. 
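(scalar_res has the vector's element type, which may differ from the type of the original scalar LHS.)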
*/ new_tree = gimple_convert (&stmts, lhs_type, scalar_res); } else { tree bftype = TREE_TYPE (vectype); if (VECTOR_BOOLEAN_TYPE_P (vectype)) bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1); new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart); new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree), &stmts, true, NULL_TREE); } if (stmts) gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts); /* Replace use of lhs with newly computed result. If the use stmt is a single arg PHI, just replace all uses of PHI result. It's necessary because lcssa PHI defining lhs may be before newly inserted stmt. */ use_operand_p use_p; FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs) if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)) && !is_gimple_debug (use_stmt)) { if (gimple_code (use_stmt) == GIMPLE_PHI && gimple_phi_num_args (use_stmt) == 1) { replace_uses_by (gimple_phi_result (use_stmt), new_tree); } else { FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter) SET_USE (use_p, new_tree); } update_stmt (use_stmt); } return true; } /* Kill any debug uses outside LOOP of SSA names defined in STMT. */ static void vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt) { ssa_op_iter op_iter; imm_use_iterator imm_iter; def_operand_p def_p; gimple *ustmt; FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF) { FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p)) { basic_block bb; if (!is_gimple_debug (ustmt)) continue; bb = gimple_bb (ustmt); if (!flow_bb_inside_loop_p (loop, bb)) { if (gimple_debug_bind_p (ustmt)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "killing debug use\n"); gimple_debug_bind_reset_value (ustmt); update_stmt (ustmt); } else gcc_unreachable (); } } } } /* Given loop represented by LOOP_VINFO, return true if computation of LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false otherwise. */ static bool loop_niters_no_overflow (loop_vec_info loop_vinfo) { /* Constant case. */ if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo); tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo); gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST); gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST); if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters)) return true; } widest_int max; struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); /* Check the upper bound of loop niters. */ if (get_max_loop_iterations (loop, &max)) { tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)); signop sgn = TYPE_SIGN (type); widest_int type_max = widest_int::from (wi::max_value (type), sgn); if (max < type_max) return true; } return false; } /* Return a mask type with half the number of elements as TYPE. */ tree vect_halve_mask_nunits (tree type) { poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2); return build_truth_vector_type (nunits, current_vector_size); } /* Return a mask type with twice as many elements as TYPE. */ tree vect_double_mask_nunits (tree type) { poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2; return build_truth_vector_type (nunits, current_vector_size); } /* Record that a fully-masked version of LOOP_VINFO would need MASKS to contain a sequence of NVECTORS masks that each control a vector of type VECTYPE. 
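For example (an illustrative sketch): with a vectorization factor of 8, a statement that operates on two vectors of eight shorts per iteration records nvectors = 2, giving nscalars_per_iter = 2 * 8 / 8 = 2 for its rgroup.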
*/ void vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks, unsigned int nvectors, tree vectype) { gcc_assert (nvectors != 0); if (masks->length () < nvectors) masks->safe_grow_cleared (nvectors); rgroup_masks *rgm = &(*masks)[nvectors - 1]; /* The number of scalars per iteration and the number of vectors are both compile-time constants. */ unsigned int nscalars_per_iter = exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype), LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant (); if (rgm->max_nscalars_per_iter < nscalars_per_iter) { rgm->max_nscalars_per_iter = nscalars_per_iter; rgm->mask_type = build_same_sized_truth_vector_type (vectype); } } /* Given a complete set of masks MASKS, extract mask number INDEX for an rgroup that operates on NVECTORS vectors of type VECTYPE, where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI. See the comment above vec_loop_masks for more details about the mask arrangement. */ tree vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks, unsigned int nvectors, tree vectype, unsigned int index) { rgroup_masks *rgm = &(*masks)[nvectors - 1]; tree mask_type = rgm->mask_type; /* Populate the rgroup's mask array, if this is the first time we've used it. */ if (rgm->masks.is_empty ()) { rgm->masks.safe_grow_cleared (nvectors); for (unsigned int i = 0; i < nvectors; ++i) { tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask"); /* Provide a dummy definition until the real one is available. */ SSA_NAME_DEF_STMT (mask) = gimple_build_nop (); rgm->masks[i] = mask; } } tree mask = rgm->masks[index]; if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type), TYPE_VECTOR_SUBPARTS (vectype))) { /* A loop mask for data type X can be reused for data type Y if X has N times more elements than Y and if Y's elements are N times bigger than X's. In this case each sequence of N elements in the loop mask will be all-zero or all-one. We can then view-convert the mask so that each sequence of N elements is replaced by a single element. */ gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type), TYPE_VECTOR_SUBPARTS (vectype))); gimple_seq seq = NULL; mask_type = build_same_sized_truth_vector_type (vectype); mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask); if (seq) gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT); } return mask; } /* Scale profiling counters by estimation for LOOP which is vectorized by factor VF. */ static void scale_profile_for_vect_loop (struct loop *loop, unsigned vf) { edge preheader = loop_preheader_edge (loop); /* Reduce loop iterations by the vectorization factor. */ gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf); profile_count freq_h = loop->header->count, freq_e = preheader->count (); if (freq_h.nonzero_p ()) { profile_probability p; /* Avoid dropping loop body profile counter to 0 because of zero count in loop's preheader. */ if (!(freq_e == profile_count::zero ())) freq_e = freq_e.force_nonzero (); p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h); scale_loop_frequencies (loop, p); } edge exit_e = single_exit (loop); exit_e->probability = profile_probability::always () .apply_scale (1, new_est_niter + 1); edge exit_l = single_pred_edge (loop->latch); profile_probability prob = exit_l->probability; exit_l->probability = exit_e->probability.invert (); if (prob.initialized_p () && exit_l->probability.initialized_p ()) scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob); } /* Function vect_transform_loop. 
The analysis phase has determined that the loop is vectorizable. Vectorize the loop - create vectorized stmts to replace the scalar stmts in the loop, and update the loop exit condition. Returns the scalar epilogue loop, if any. */ struct loop * vect_transform_loop (loop_vec_info loop_vinfo) { struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo); struct loop *epilogue = NULL; basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo); int nbbs = loop->num_nodes; int i; tree niters_vector = NULL_TREE; tree step_vector = NULL_TREE; tree niters_vector_mult_vf = NULL_TREE; poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo); unsigned int lowest_vf = constant_lower_bound (vf); bool grouped_store; bool slp_scheduled = false; gimple *stmt, *pattern_stmt; gimple_seq pattern_def_seq = NULL; gimple_stmt_iterator pattern_def_si = gsi_none (); bool transform_pattern_stmt = false; bool check_profitability = false; unsigned int th; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== vect_transform_loop ===\n"); /* Use the more conservative vectorization threshold. If the number of iterations is constant assume the cost check has been performed by our caller. If the threshold makes all loops profitable that run at least the (estimated) vectorization factor number of times, checking is pointless, too. */ th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo); if (th >= vect_vf_for_cost (loop_vinfo) && !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Profitability threshold is %d loop iterations.\n", th); check_profitability = true; } /* Make sure there exists a single-predecessor exit bb. Do this before versioning. */ edge e = single_exit (loop); if (! single_pred_p (e->dest)) { split_loop_exit_edge (e); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "split exit edge\n"); } /* Version the loop first, if required, so the profitability check comes first. */ if (LOOP_REQUIRES_VERSIONING (loop_vinfo)) { poly_uint64 versioning_threshold = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo); if (check_profitability && ordered_p (poly_uint64 (th), versioning_threshold)) { versioning_threshold = ordered_max (poly_uint64 (th), versioning_threshold); check_profitability = false; } vect_loop_versioning (loop_vinfo, th, check_profitability, versioning_threshold); check_profitability = false; } /* Make sure there exists a single-predecessor exit bb also on the scalar loop copy. Do this after versioning but before peeling so CFG structure is fine for both scalar and if-converted loop to make slpeel_duplicate_current_defs_from_edges face matched loop closed PHI nodes on the exit. */ if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)) { e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)); if (!
single_pred_p (e->dest)) { split_loop_exit_edge (e); if (dump_enabled_p ()) dump_printf (MSG_NOTE, "split exit edge of scalar loop\n"); } } tree niters = vect_build_loop_niters (loop_vinfo); LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters; tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo)); bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo); epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector, &step_vector, &niters_vector_mult_vf, th, check_profitability, niters_no_overflow); if (niters_vector == NULL_TREE) { if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && known_eq (lowest_vf, vf)) { niters_vector = build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)), LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf); step_vector = build_one_cst (TREE_TYPE (niters)); } else vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector, &step_vector, niters_no_overflow); } /* 1) Make sure the loop header has exactly two entries 2) Make sure we have a preheader basic block. */ gcc_assert (EDGE_COUNT (loop->header->preds) == 2); split_edge (loop_preheader_edge (loop)); if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo) && vect_use_loop_mask_for_alignment_p (loop_vinfo)) /* This will deal with any possible peeling. */ vect_prepare_for_masked_peels (loop_vinfo); /* FORNOW: the vectorizer supports only loops which body consist of one basic block (header + empty latch). When the vectorizer will support more involved loop forms, the order by which the BBs are traversed need to be reconsidered. */ for (i = 0; i < nbbs; i++) { basic_block bb = bbs[i]; stmt_vec_info stmt_info; for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) { gphi *phi = si.phi (); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "------>vectorizing phi: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0); } stmt_info = vinfo_for_stmt (phi); if (!stmt_info) continue; if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, phi); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) continue; if (STMT_VINFO_VECTYPE (stmt_info) && (maybe_ne (TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf)) && dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle) && ! PURE_SLP_STMT (stmt_info)) { if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n"); vect_transform_stmt (phi, NULL, NULL, NULL, NULL); } } pattern_stmt = NULL; for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si) || transform_pattern_stmt;) { bool is_store; if (transform_pattern_stmt) stmt = pattern_stmt; else { stmt = gsi_stmt (si); /* During vectorization remove existing clobber stmts. */ if (gimple_clobber_p (stmt)) { unlink_stmt_vdef (stmt); gsi_remove (&si, true); release_defs (stmt); continue; } } if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "------>vectorizing statement: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); } stmt_info = vinfo_for_stmt (stmt); /* vector stmts created in the outer-loop during vectorization of stmts in an inner-loop may not have a stmt_info, and do not need to be vectorized. 
*/ if (!stmt_info) { gsi_next (&si); continue; } if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info)) vect_loop_kill_debug_uses (loop, stmt); if (!STMT_VINFO_RELEVANT_P (stmt_info) && !STMT_VINFO_LIVE_P (stmt_info)) { if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) { stmt = pattern_stmt; stmt_info = vinfo_for_stmt (stmt); } else { gsi_next (&si); continue; } } else if (STMT_VINFO_IN_PATTERN_P (stmt_info) && (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info)) && (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt)) || STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt)))) transform_pattern_stmt = true; /* If pattern statement has def stmts, vectorize them too. */ if (is_pattern_stmt_p (stmt_info)) { if (pattern_def_seq == NULL) { pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info); pattern_def_si = gsi_start (pattern_def_seq); } else if (!gsi_end_p (pattern_def_si)) gsi_next (&pattern_def_si); if (pattern_def_seq != NULL) { gimple *pattern_def_stmt = NULL; stmt_vec_info pattern_def_stmt_info = NULL; while (!gsi_end_p (pattern_def_si)) { pattern_def_stmt = gsi_stmt (pattern_def_si); pattern_def_stmt_info = vinfo_for_stmt (pattern_def_stmt); if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info) || STMT_VINFO_LIVE_P (pattern_def_stmt_info)) break; gsi_next (&pattern_def_si); } if (!gsi_end_p (pattern_def_si)) { if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "==> vectorizing pattern def " "stmt: "); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0); } stmt = pattern_def_stmt; stmt_info = pattern_def_stmt_info; } else { pattern_def_si = gsi_none (); transform_pattern_stmt = false; } } else transform_pattern_stmt = false; } if (STMT_VINFO_VECTYPE (stmt_info)) { poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)); if (!STMT_SLP_TYPE (stmt_info) && maybe_ne (nunits, vf) && dump_enabled_p ()) /* For SLP VF is set according to unrolling factor, and not to vector size, hence for SLP this print is not valid. */ dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n"); } /* SLP. Schedule all the SLP instances when the first SLP stmt is reached. */ if (STMT_SLP_TYPE (stmt_info)) { if (!slp_scheduled) { slp_scheduled = true; if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "=== scheduling SLP instances ===\n"); vect_schedule_slp (loop_vinfo); } /* Hybrid SLP stmts must be vectorized in addition to SLP. */ if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info)) { if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } continue; } } /* -------- vectorize statement ------------ */ if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n"); grouped_store = false; is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL); if (is_store) { if (STMT_VINFO_GROUPED_ACCESS (stmt_info)) { /* Interleaving. If IS_STORE is TRUE, the vectorization of the interleaving chain was completed - free all the stores in the chain. */ gsi_next (&si); vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info)); } else { /* Free the attached stmt_vec_info and remove the stmt. */ gimple *store = gsi_stmt (si); free_stmt_vec_info (store); unlink_stmt_vdef (store); gsi_remove (&si, true); release_defs (store); } /* Stores can only appear at the end of pattern statements. 
*/ gcc_assert (!transform_pattern_stmt); pattern_def_seq = NULL; } else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si)) { pattern_def_seq = NULL; gsi_next (&si); } } /* stmts in BB */ /* Stub out scalar statements that must not survive vectorization. Doing this here helps with grouped statements, or statements that are involved in patterns. */ for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi)); if (call && gimple_call_internal_p (call, IFN_MASK_LOAD)) { tree lhs = gimple_get_lhs (call); if (!VECTOR_TYPE_P (TREE_TYPE (lhs))) { tree zero = build_zero_cst (TREE_TYPE (lhs)); gimple *new_stmt = gimple_build_assign (lhs, zero); gsi_replace (&gsi, new_stmt, true); } } } } /* BBs in loop */ /* The vectorization factor is always > 1, so if we use an IV increment of 1. a zero NITERS becomes a nonzero NITERS_VECTOR. */ if (integer_onep (step_vector)) niters_no_overflow = true; vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector, niters_vector_mult_vf, !niters_no_overflow); unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo); scale_profile_for_vect_loop (loop, assumed_vf); /* True if the final iteration might not handle a full vector's worth of scalar iterations. */ bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo); /* The minimum number of iterations performed by the epilogue. This is 1 when peeling for gaps because we always need a final scalar iteration. */ int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0; /* +1 to convert latch counts to loop iteration counts, -min_epilogue_iters to remove iterations that cannot be performed by the vector code. */ int bias_for_lowest = 1 - min_epilogue_iters; int bias_for_assumed = bias_for_lowest; int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo); if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) { /* When the amount of peeling is known at compile time, the first iteration will have exactly alignment_npeels active elements. In the worst case it will have at least one. */ int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1); bias_for_lowest += lowest_vf - min_first_active; bias_for_assumed += assumed_vf - min_first_active; } /* In these calculations the "- 1" converts loop iteration counts back to latch counts. */ if (loop->any_upper_bound) loop->nb_iterations_upper_bound = (final_iter_may_be_partial ? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest, lowest_vf) - 1 : wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest, lowest_vf) - 1); if (loop->any_likely_upper_bound) loop->nb_iterations_likely_upper_bound = (final_iter_may_be_partial ? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound + bias_for_lowest, lowest_vf) - 1 : wi::udiv_floor (loop->nb_iterations_likely_upper_bound + bias_for_lowest, lowest_vf) - 1); if (loop->any_estimate) loop->nb_iterations_estimate = (final_iter_may_be_partial ? 
wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed, assumed_vf) - 1 : wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed, assumed_vf) - 1); if (dump_enabled_p ()) { if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo)) { dump_printf_loc (MSG_NOTE, vect_location, "LOOP VECTORIZED\n"); if (loop->inner) dump_printf_loc (MSG_NOTE, vect_location, "OUTER LOOP VECTORIZED\n"); dump_printf (MSG_NOTE, "\n"); } else { dump_printf_loc (MSG_NOTE, vect_location, "LOOP EPILOGUE VECTORIZED (VS="); dump_dec (MSG_NOTE, current_vector_size); dump_printf (MSG_NOTE, ")\n"); } } /* Free SLP instances here because otherwise stmt reference counting won't work. */ slp_instance instance; FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance) vect_free_slp_instance (instance); LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release (); /* Clear-up safelen field since its value is invalid after vectorization since vectorized loop can have loop-carried dependencies. */ loop->safelen = 0; /* Don't vectorize epilogue for epilogue. */ if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)) epilogue = NULL; if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK)) epilogue = NULL; if (epilogue) { auto_vector_sizes vector_sizes; targetm.vectorize.autovectorize_vector_sizes (&vector_sizes); unsigned int next_size = 0; if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0 && known_eq (vf, lowest_vf)) { unsigned int eiters = (LOOP_VINFO_INT_NITERS (loop_vinfo) - LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)); eiters = eiters % lowest_vf; epilogue->nb_iterations_upper_bound = eiters - 1; unsigned int ratio; while (next_size < vector_sizes.length () && !(constant_multiple_p (current_vector_size, vector_sizes[next_size], &ratio) && eiters >= lowest_vf / ratio)) next_size += 1; } else while (next_size < vector_sizes.length () && maybe_lt (current_vector_size, vector_sizes[next_size])) next_size += 1; if (next_size == vector_sizes.length ()) epilogue = NULL; } if (epilogue) { epilogue->force_vectorize = loop->force_vectorize; epilogue->safelen = loop->safelen; epilogue->dont_vectorize = false; /* We may need to if-convert epilogue to vectorize it. */ if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo)) tree_if_conversion (epilogue); } return epilogue; } /* The code below is trying to perform simple optimization - revert if-conversion for masked stores, i.e. if the mask of a store is zero do not perform it and all stored value producers also if possible. For example, for (i=0; i<n; i++) if (c[i]) { p1[i] += 1; p2[i] = p3[i] +2; } this transformation will produce the following semi-hammock: if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 }) { vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165); vect__12.22_172 = vect__11.19_170 + vect_cst__171; MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172); vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165); vect__19.28_184 = vect__18.25_182 + vect_cst__183; MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184); } */ void optimize_mask_stores (struct loop *loop) { basic_block *bbs = get_loop_body (loop); unsigned nbbs = loop->num_nodes; unsigned i; basic_block bb; struct loop *bb_loop; gimple_stmt_iterator gsi; gimple *stmt; auto_vec<gimple *> worklist; vect_location = find_loop_location (loop); /* Pick up all masked stores in loop if any. 
*/ for (i = 0; i < nbbs; i++) { bb = bbs[i]; for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { stmt = gsi_stmt (gsi); if (gimple_call_internal_p (stmt, IFN_MASK_STORE)) worklist.safe_push (stmt); } } free (bbs); if (worklist.is_empty ()) return; /* Loop has masked stores. */ while (!worklist.is_empty ()) { gimple *last, *last_store; edge e, efalse; tree mask; basic_block store_bb, join_bb; gimple_stmt_iterator gsi_to; tree vdef, new_vdef; gphi *phi; tree vectype; tree zero; last = worklist.pop (); mask = gimple_call_arg (last, 2); bb = gimple_bb (last); /* Create then_bb and if-then structure in CFG, then_bb belongs to the same loop as if_bb. It could be different to LOOP when two level loop-nest is vectorized and mask_store belongs to the inner one. */ e = split_block (bb, last); bb_loop = bb->loop_father; gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop)); join_bb = e->dest; store_bb = create_empty_bb (bb); add_bb_to_loop (store_bb, bb_loop); e->flags = EDGE_TRUE_VALUE; efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE); /* Put STORE_BB to likely part. */ efalse->probability = profile_probability::unlikely (); store_bb->count = efalse->count (); make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU); if (dom_info_available_p (CDI_DOMINATORS)) set_immediate_dominator (CDI_DOMINATORS, store_bb, bb); if (dump_enabled_p ()) dump_printf_loc (MSG_NOTE, vect_location, "Create new block %d to sink mask stores.", store_bb->index); /* Create vector comparison with boolean result. */ vectype = TREE_TYPE (mask); zero = build_zero_cst (vectype); stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE); gsi = gsi_last_bb (bb); gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); /* Create new PHI node for vdef of the last masked store: .MEM_2 = VDEF <.MEM_1> will be converted to .MEM.3 = VDEF <.MEM_1> and new PHI node will be created in join bb .MEM_2 = PHI <.MEM_1, .MEM_3> */ vdef = gimple_vdef (last); new_vdef = make_ssa_name (gimple_vop (cfun), last); gimple_set_vdef (last, new_vdef); phi = create_phi_node (vdef, join_bb); add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION); /* Put all masked stores with the same mask to STORE_BB if possible. */ while (true) { gimple_stmt_iterator gsi_from; gimple *stmt1 = NULL; /* Move masked store to STORE_BB. */ last_store = last; gsi = gsi_for_stmt (last); gsi_from = gsi; /* Shift GSI to the previous stmt for further traversal. */ gsi_prev (&gsi); gsi_to = gsi_start_bb (store_bb); gsi_move_before (&gsi_from, &gsi_to); /* Setup GSI_TO to the non-empty block start. */ gsi_to = gsi_start_bb (store_bb); if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Move stmt to created bb\n"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0); } /* Move all stored value producers if possible. */ while (!gsi_end_p (gsi)) { tree lhs; imm_use_iterator imm_iter; use_operand_p use_p; bool res; /* Skip debug statements. */ if (is_gimple_debug (gsi_stmt (gsi))) { gsi_prev (&gsi); continue; } stmt1 = gsi_stmt (gsi); /* Do not consider statements writing to memory or having volatile operand. */ if (gimple_vdef (stmt1) || gimple_has_volatile_ops (stmt1)) break; gsi_from = gsi; gsi_prev (&gsi); lhs = gimple_get_lhs (stmt1); if (!lhs) break; /* LHS of vectorized stmt must be SSA_NAME. */ if (TREE_CODE (lhs) != SSA_NAME) break; if (!VECTOR_TYPE_P (TREE_TYPE (lhs))) { /* Remove dead scalar statement. 
*/ if (has_zero_uses (lhs)) { gsi_remove (&gsi_from, true); continue; } } /* Check that LHS does not have uses outside of STORE_BB. */ res = true; FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs) { gimple *use_stmt; use_stmt = USE_STMT (use_p); if (is_gimple_debug (use_stmt)) continue; if (gimple_bb (use_stmt) != store_bb) { res = false; break; } } if (!res) break; if (gimple_vuse (stmt1) && gimple_vuse (stmt1) != gimple_vuse (last_store)) break; /* Can move STMT1 to STORE_BB. */ if (dump_enabled_p ()) { dump_printf_loc (MSG_NOTE, vect_location, "Move stmt to created bb\n"); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0); } gsi_move_before (&gsi_from, &gsi_to); /* Shift GSI_TO for further insertion. */ gsi_prev (&gsi_to); } /* Put other masked stores with the same mask to STORE_BB. */ if (worklist.is_empty () || gimple_call_arg (worklist.last (), 2) != mask || worklist.last () != stmt1) break; last = worklist.pop (); } add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION); } }
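/* Illustrative sketch (not from the GCC sources): a concrete instance of
   the latch-count arithmetic in vect_transform_loop above.  The numbers are
   assumptions chosen for the example: a scalar loop with
   nb_iterations_upper_bound = 17 (a latch count, i.e. 18 iterations),
   lowest_vf = 4, not fully masked and no peeling for gaps, so
   min_epilogue_iters = 0, bias_for_lowest = 1, and the udiv_floor variant
   applies.  */
#include <stdio.h>

int
main (void)
{
  unsigned long scalar_latch_bound = 17;  /* assumed scalar latch count */
  unsigned long lowest_vf = 4;            /* assumed vectorization factor */
  unsigned long min_epilogue_iters = 0;   /* no peeling for gaps */
  unsigned long bias = 1 - min_epilogue_iters;
  /* floor ((17 + 1) / 4) - 1 = 3: the vector loop's latch runs at most
     3 times, i.e. 4 vector iterations covering 16 scalars, with the
     remaining 2 scalars left to the epilogue loop.  */
  unsigned long vector_latch_bound
    = (scalar_latch_bound + bias) / lowest_vf - 1;
  printf ("vector latch bound: %lu\n", vector_latch_bound);
  return 0;
}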
mexutil.h
///////////////////////////////////////////////////////////////////////////////
//
//   Name: mexutil.h
//   Purpose: Macros and helper functions for creating MATLAB MEX-files.
//   Author: Daeyun Shin <daeyun@dshin.org>
//   Created: 01.15.2015
//   Modified: 02.01.2015
//   Version: 0.1
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//
///////////////////////////////////////////////////////////////////////////////
#pragma once

#include "mex.h"

#include <string>
#include <algorithm>
#include <cctype>
#include <vector>
#include <sstream>
#include <iostream>

#ifdef _OPENMP
#include <omp.h>
#endif

#ifndef N_LHS_VAR
#define N_LHS_VAR nargout
#endif

#ifndef N_RHS_VAR
#define N_RHS_VAR nargin
#endif

#ifndef MEX_COMPONENT_NAME
#define MEX_COMPONENT_NAME "MATLAB"
#endif

#define BOLD(str) "<strong>" str "</strong>"
#define ORANGE(str) "[\b" str "]\b"

namespace mexutil {

enum CompOp { EQ, GT, LT, NEQ, GE, LE };
enum ArgType {
  kDouble, kSingle, kStruct, kLogical, kChar,
  kInt8, kUint8, kInt16, kUint16, kInt32, kUint32
};

// Redirect stderr to a file or stringstream.
void CaptureErrorMsg(std::stringstream &stderr_content);
void CaptureErrorMsg(const std::string &filename);

// e.g. double* mat = GetArg<kDouble,EQ,GT>(0, prhs, 3, 3); throws an error
// if prhs[0] doesn't have exactly 3 rows, or has 3 or fewer columns (GT
// requires strictly more). Sizes given as 0 are not checked.
template <ArgType argtype, CompOp row_comp = EQ, CompOp col_comp = EQ>
void *GetArg(const mwSize index, const mxArray *input[], mwSize nrows = 0,
             mwSize ncols = 0);

// Constructs the identifier token used in error messages.
std::string MatlabIdStringFromFilename(std::string str);
std::string FilenameFromPath(std::string str);

// Retrieves the workspace global variable mexVerboseLevel (default: 1).
int VerboseLevel();

const int kDefaultVerboseLevel = 1;
const std::string kFilename = FilenameFromPath(__FILE__);
const std::string kFunctionIdentifier = MatlabIdStringFromFilename(kFilename);
const int kVerboseLevel = VerboseLevel();

// Force pass-by-value behavior to prevent accidentally modifying shared
// memory content in-place. Undocumented.
// http://undocumentedmatlab.com/blog/matlab-mex-in-place-editing
extern "C" bool mxUnshareArray(mxArray *array_ptr, bool noDeepCopy);

// Copy and transpose.
template <size_t nrows_in, typename T>
void Transpose(const std::vector<T> &in, T *out);

// Useful when zero-based indexing is used.
template <size_t nrows_in, typename T>
void TransposeAddOne(const std::vector<T> &in, T *out);

mxArray *UnshareArray(int index, const mxArray *prhs[]) {
  mxArray *unshared = const_cast<mxArray *>(prhs[index]);
  mxUnshareArray(unshared, true);
  return unshared;
}

std::string MatlabIdStringFromFilename(std::string str) {
  (void)(MatlabIdStringFromFilename);
  auto is_invalid_id_char = [](char ch) {
    return !(isalnum((int)ch) || ch == '_');
  };
  if (int i = str.find_first_of('.')) str = str.substr(0, i);
  if (!isalpha(str[0])) str = "mex_" + str;
  std::replace_if(str.begin(), str.end(), is_invalid_id_char, '_');
  return str;
}

std::string FilenameFromPath(std::string str) {
  (void)(FilenameFromPath);
  if (int i = str.find_last_of('/')) str = str.substr(i + 1, str.length());
  return str;
}

int VerboseLevel() {
  (void)(VerboseLevel);
  mxArray *ptr = mexGetVariable("global", "mexVerboseLevel");
  if (ptr == NULL) return kDefaultVerboseLevel;
  return mxGetScalar(ptr);
}

void CaptureErrorMsg(std::stringstream &stderr_content) {
  std::cerr.rdbuf(stderr_content.rdbuf());
}

void CaptureErrorMsg(const std::string &filename) {
  freopen(filename.c_str(), "a", stderr);
}

template <size_t nrows_in, typename T>
void Transpose(const std::vector<T> &in, T *out) {
  const size_t ncols_in = in.size() / nrows_in;
#pragma omp parallel for
  for (size_t i = 0; i < in.size(); i += nrows_in) {
    for (size_t j = 0; j < nrows_in; ++j) {
      *(out + (i / nrows_in) + ncols_in * j) = in[i + j];
    }
  }
}

template <size_t nrows_in, typename T>
void TransposeAddOne(const std::vector<T> &in, T *out) {
  const size_t ncols_in = in.size() / nrows_in;
#pragma omp parallel for
  for (size_t i = 0; i < in.size(); i += nrows_in) {
    for (size_t j = 0; j < nrows_in; ++j) {
      *(out + (i / nrows_in) + ncols_in * j) = in[i + j] + 1;
    }
  }
}

// e.g. LEVEL(2, MPRINTF("Not printed if logging level is less than 2."))
#define LEVEL(verbose_level, expr)            \
  {                                           \
    if (kVerboseLevel >= verbose_level) expr; \
  }

// Construct an identifier string e.g. MATLAB:mexutil:myErrorIdentifier
#define MEX_IDENTIFIER(mnemonic)                               \
  (std::string(MEX_COMPONENT_NAME ":") + kFunctionIdentifier + \
   std::string(":" mnemonic)).c_str()

// Assert number of input variables.
#define N_IN_RANGE(min, max)                                           \
  {                                                                    \
    if (N_RHS_VAR < min || N_RHS_VAR > max) {                          \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("InputSizeError"),              \
                        "Number of inputs must be between %d and %d.", \
                        min, max);                                     \
    }                                                                  \
  }

// Assert number of output variables.
#define N_OUT_RANGE(min, max)                                           \
  {                                                                     \
    if (N_LHS_VAR < min || N_LHS_VAR > max) {                           \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("OutputSizeError"),              \
                        "Number of outputs must be between %d and %d.", \
                        min, max);                                      \
    }                                                                   \
  }

#define N_IN(num)                                             \
  {                                                           \
    if (N_RHS_VAR != num) {                                   \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("InputSizeError"),     \
                        "Number of inputs must be %d.", num); \
    }                                                         \
  }

#define N_OUT(num)                                             \
  {                                                            \
    if (N_LHS_VAR != num) {                                    \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("OutputSizeError"),     \
                        "Number of outputs must be %d.", num); \
    }                                                          \
  }

#define VAR(name)                                                      \
  {                                                                    \
    std::ostringstream val_str;                                        \
    val_str << name;                                                   \
    DisplayVariable(#name, val_str.str(), sizeof(name), (void *)&name, \
                    __FILE__, __LINE__, __func__);                     \
  }

// Print message to MATLAB console.
// e.g. MPRINTF(BOLD("%d"), argc);
#define MPRINTF(...)           \
  {                            \
    mexPrintf(__VA_ARGS__);    \
    mexEvalString("drawnow;"); \
  }

// Display error and exit.
#define ERR_EXIT(errname, ...) \
  { mexErrMsgIdAndTxt(MEX_IDENTIFIER(errname), ##__VA_ARGS__); }

// Macros starting with an underscore are internal.
#define _ASSERT(condition)                                                    \
  {                                                                           \
    if (!(condition)) {                                                       \
      MPRINTF("[ERROR] (%s:%d %s) ", kFilename.c_str(), __LINE__, __func__); \
      mexErrMsgTxt("assertion " #condition " failed\n");                      \
    }                                                                         \
  }

// mexErrMsgTxt takes a single string, so the formatted variant goes through
// mexErrMsgIdAndTxt instead.
#define _ASSERT_MSG(condition, msg)                                           \
  {                                                                           \
    if (!(condition)) {                                                       \
      MPRINTF("[ERROR] (%s:%d %s) ", kFilename.c_str(), __LINE__, __func__); \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("AssertionError"),                     \
                        "assertion " #condition " failed\n%s\n", msg);        \
    }                                                                         \
  }

// Select _ASSERT or _ASSERT_MSG depending on whether a message was given.
#define _CHOOSE_MACRO(_1, _2, func, ...) func
#define ASSERT(...) \
  _CHOOSE_MACRO(__VA_ARGS__, _ASSERT_MSG, _ASSERT)(__VA_ARGS__)

#define ASSERT_FMT(condition, fmt, ...)                                       \
  {                                                                           \
    if (!(condition)) {                                                       \
      MPRINTF("[ERROR] (%s:%d %s) ", kFilename.c_str(), __LINE__, __func__); \
      mexErrMsgIdAndTxt(MEX_IDENTIFIER("AssertionError"),                     \
                        "assertion " #condition " failed\n" fmt "\n",         \
                        ##__VA_ARGS__);                                       \
    }                                                                         \
  }

template <ArgType argtype, CompOp row_comp, CompOp col_comp>
void *GetArg(mwSize index, const mxArray *input[], mwSize nrows,
             mwSize ncols) {
  if (nrows > 0) {
    switch (row_comp) {
      case EQ:
        ASSERT_FMT(mxGetM(input[index]) == nrows,
                   "size(input[%d], 1) must be %d.", index, nrows);
        break;
      case GT:
        ASSERT_FMT(mxGetM(input[index]) > nrows,
                   "size(input[%d], 1) must be greater than %d.", index,
                   nrows);
        break;
      case LT:
        ASSERT_FMT(mxGetM(input[index]) < nrows,
                   "size(input[%d], 1) must be less than %d.", index, nrows);
        break;
      case NEQ:
        ASSERT_FMT(mxGetM(input[index]) != nrows,
                   "size(input[%d], 1) must not equal %d.", index, nrows);
        break;
      case GE:
        ASSERT_FMT(mxGetM(input[index]) >= nrows,
                   "size(input[%d], 1) must be at least %d.", index, nrows);
        break;
      case LE:
        ASSERT_FMT(mxGetM(input[index]) <= nrows,
                   "size(input[%d], 1) can be at most %d.", index, nrows);
        break;
      default:
        break;
    }
  }
  if (ncols > 0) {
    switch (col_comp) {
      case EQ:
        ASSERT_FMT(mxGetN(input[index]) == ncols,
                   "size(input[%d], 2) must be %d.", index, ncols);
        break;
      case GT:
        ASSERT_FMT(mxGetN(input[index]) > ncols,
                   "size(input[%d], 2) must be greater than %d.", index,
                   ncols);
        break;
      case LT:
        ASSERT_FMT(mxGetN(input[index]) < ncols,
                   "size(input[%d], 2) must be less than %d.", index, ncols);
        break;
      case NEQ:
        ASSERT_FMT(mxGetN(input[index]) != ncols,
                   "size(input[%d], 2) must not equal %d.", index, ncols);
        break;
      case GE:
        ASSERT_FMT(mxGetN(input[index]) >= ncols,
                   "size(input[%d], 2) must be at least %d.", index, ncols);
        break;
      case LE:
        ASSERT_FMT(mxGetN(input[index]) <= ncols,
                   "size(input[%d], 2) can be at most %d.", index, ncols);
        break;
      default:
        break;
    }
  }
  switch (argtype) {
    case kDouble:
      ASSERT_FMT(mxIsDouble(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetPr(input[index]);  // double*
    case kSingle:
      ASSERT_FMT(mxIsSingle(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // float*
    case kStruct:
      ASSERT_FMT(mxIsStruct(input[index]),
                 "Invalid data type for input index %d.", index);
      // TODO
      ERR_EXIT("UnknownDataTypeError", "Not implemented");
      break;
    case kLogical:
      ASSERT_FMT(mxIsLogical(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetLogicals(input[index]);  // mxLogical*
    case kChar:
      ASSERT_FMT(mxIsChar(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetChars(input[index]);  // mxChar*
    case kInt8:
      ASSERT_FMT(mxIsInt8(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // int8_t*
    case kUint8:
      ASSERT_FMT(mxIsUint8(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // uint8_t*
    case kInt16:
      ASSERT_FMT(mxIsInt16(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // int16_t*
    case kUint16:
      ASSERT_FMT(mxIsUint16(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // uint16_t*
    case kInt32:
      ASSERT_FMT(mxIsInt32(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // int32_t*
    case kUint32:
      ASSERT_FMT(mxIsUint32(input[index]),
                 "Invalid data type for input index %d.", index);
      return mxGetData(input[index]);  // uint32_t*
    default:
      ERR_EXIT("UnknownDataTypeError", "Unknown argtype");
  }
  return NULL;  // Not reached: every case above returns or raises an error.
}

}  // namespace mexutil

void DisplayVariable(std::string name, std::string value, size_t size,
                     void *ptr, std::string file, int line, std::string func) {
  MPRINTF("[INFO] (%s:%d %s) %s=%s &%s=%p %d\n", file.c_str(), line,
          func.c_str(), name.c_str(), value.c_str(), name.c_str(), ptr,
          (int)size);
}
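// Illustrative usage sketch (an assumption, not shipped with mexutil.h):
// a minimal MEX entry point built on the macros above.  The function name
// "times_two" and its behavior are made up for the example; build with
// something like `mex times_two.cpp`.
#define N_LHS_VAR nlhs  // Map the argument-count macros onto the
#define N_RHS_VAR nrhs  // conventional mexFunction parameter names.
#include "mexutil.h"

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
  N_IN(1);            // Exactly one input...
  N_OUT_RANGE(0, 1);  // ...and at most one output.
  // prhs[0] must be a double matrix with exactly 3 rows; any column count
  // is accepted because a size of 0 disables the column check.
  double *in = (double *)mexutil::GetArg<mexutil::kDouble>(0, prhs, 3, 0);
  mwSize n = mxGetN(prhs[0]);
  plhs[0] = mxCreateDoubleMatrix(3, n, mxREAL);
  double *out = mxGetPr(plhs[0]);
  for (mwSize i = 0; i < 3 * n; ++i) out[i] = 2.0 * in[i];  // y = 2 * x
}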
GB_binop__land_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__land_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__land_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__land_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_fp32) // A*D function (colscale): GB (_AxD__land_fp32) // D*A function (rowscale): GB (_DxB__land_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__land_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__land_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_fp32) // C=scalar+B GB (_bind1st__land_fp32) // C=scalar+B' GB (_bind1st_tran__land_fp32) // C=A+scalar GB (_bind2nd__land_fp32) // C=A'+scalar GB (_bind2nd_tran__land_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 0 // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_FP32 || GxB_NO_LAND_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__land_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) 
beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, 
int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
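/* Illustrative sketch (not part of SuiteSparse:GraphBLAS): the scalar
   semantics of the LAND_FP32 bind2nd kernel above, applied to a plain
   dense array.  The function name and test values are made up for the
   example.  */
#include <stdio.h>

static void apply_land_bind2nd (float *Cx, const float *Ax, float y, int n)
{
    for (int p = 0 ; p < n ; p++)
    {
        /* same operator as GB_BINOP: cij = ((aij != 0) && (y != 0)),
           stored in float as 0.0 or 1.0 */
        Cx [p] = (float) ((Ax [p] != 0) && (y != 0)) ;
    }
}

int main (void)
{
    float Ax [4] = { 0.0f, 2.5f, -1.0f, 0.0f } ;
    float Cx [4] ;
    apply_land_bind2nd (Cx, Ax, 3.0f, 4) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%g ", Cx [p]) ;  /* 0 1 1 0 */
    printf ("\n") ;
    return (0) ;
}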
470.lbm.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <stdlib.h> #include <sys/stat.h> #include <math.h> /* $Id: main.c,v 1.4 2004/04/21 04:23:43 pohlt Exp $ */ /*############################################################################*/ /* $Id: main.h,v 1.4 2004/04/21 04:23:43 pohlt Exp $ */ /*############################################################################*/ /* $Id: config.h,v 1.5 2004/04/20 14:42:56 pohlt Exp $ */ /*############################################################################*/ #ifndef _CONFIG_H_ #define _CONFIG_H_ /*############################################################################*/ #define SIZE (100) #define SIZE_X (1*SIZE) #define SIZE_Y (1*SIZE) #define SIZE_Z (130) #define OMEGA (1.95) #define OUTPUT_PRECISION float #define BOOL int #define TRUE (-1) #define FALSE (0) /*############################################################################*/ #endif /* _CONFIG_H_ */ #ifndef _MAIN_H_ #define _MAIN_H_ /*############################################################################*/ /*############################################################################*/ #if !defined(SPEC_CPU) typedef struct { double timeScale; clock_t tickStart, tickStop; // struct tms timeStart, timeStop; } MAIN_Time; #endif typedef enum {NOTHING = 0, COMPARE, STORE} MAIN_Action; typedef enum {LDC = 0, CHANNEL} MAIN_SimType; typedef struct { int nTimeSteps; char* resultFilename; MAIN_Action action; MAIN_SimType simType; char* obstacleFilename; } MAIN_Param; /*############################################################################*/ #if !defined(SPEC_CPU) void MAIN_startClock( MAIN_Time* time ); void MAIN_stopClock( MAIN_Time* time, const MAIN_Param* param ); #endif /*############################################################################*/ #endif /* _MAIN_H_ */ /* $Id: lbm.h,v 1.1 2004/04/20 14:33:59 pohlt Exp $ */ /*############################################################################*/ #ifndef _LBM_H_ #define _LBM_H_ /*############################################################################*/ /*############################################################################*/ typedef enum {C = 0, N, S, E, W, T, B, NE, NW, SE, SW, NT, NB, ST, SB, ET, EB, WT, WB, FLAGS, N_CELL_ENTRIES} CELL_ENTRIES; #define N_DISTR_FUNCS FLAGS typedef enum {OBSTACLE = 1 << 0, ACCEL = 1 << 1, IN_OUT_FLOW = 1 << 2} CELL_FLAGS; /* $Id: lbm_1d_array.h,v 1.1 2004/04/20 14:33:59 pohlt Exp $ */ #ifndef _LBM_MACROS_H_ #define _LBM_MACROS_H_ /*############################################################################*/ typedef double LBM_Grid[SIZE_Z*SIZE_Y*SIZE_X*N_CELL_ENTRIES]; typedef LBM_Grid* LBM_GridPtr; static LBM_GridPtr srcGrid, dstGrid; /*############################################################################*/ #define CALC_INDEX(x,y,z,e) ((e)+N_CELL_ENTRIES*((x)+ \ (y)*SIZE_X+(z)*SIZE_X*SIZE_Y)) #define SWEEP_VAR int i; #define SWEEP_START(x1,y1,z1,x2,y2,z2) \ for( i = CALC_INDEX(x1, y1, z1, 0); \ i < CALC_INDEX(x2, y2, z2, 0); \ i += N_CELL_ENTRIES ) { #define SWEEP_END } #define SWEEP_X ((i / N_CELL_ENTRIES) % SIZE_X) #define SWEEP_Y (((i / N_CELL_ENTRIES) / SIZE_X) % SIZE_Y) #define SWEEP_Z ((i / N_CELL_ENTRIES) / (SIZE_X*SIZE_Y)) #define GRID_ENTRY(g,x,y,z,e) ((g)[CALC_INDEX( x, y, z, e)]) #define GRID_ENTRY_SWEEP(g,dx,dy,dz,e) ((g)[CALC_INDEX(dx, dy, dz, e)+(i)]) #define LOCAL(g,e) (GRID_ENTRY_SWEEP( g, 0, 0, 0, e )) #define NEIGHBOR_C(g,e) (GRID_ENTRY_SWEEP( g, 0, 0, 0, e )) #define NEIGHBOR_N(g,e) (GRID_ENTRY_SWEEP( g, 0, +1, 0, e )) 
#define NEIGHBOR_S(g,e) (GRID_ENTRY_SWEEP( g, 0, -1, 0, e )) #define NEIGHBOR_E(g,e) (GRID_ENTRY_SWEEP( g, +1, 0, 0, e )) #define NEIGHBOR_W(g,e) (GRID_ENTRY_SWEEP( g, -1, 0, 0, e )) #define NEIGHBOR_T(g,e) (GRID_ENTRY_SWEEP( g, 0, 0, +1, e )) #define NEIGHBOR_B(g,e) (GRID_ENTRY_SWEEP( g, 0, 0, -1, e )) #define NEIGHBOR_NE(g,e) (GRID_ENTRY_SWEEP( g, +1, +1, 0, e )) #define NEIGHBOR_NW(g,e) (GRID_ENTRY_SWEEP( g, -1, +1, 0, e )) #define NEIGHBOR_SE(g,e) (GRID_ENTRY_SWEEP( g, +1, -1, 0, e )) #define NEIGHBOR_SW(g,e) (GRID_ENTRY_SWEEP( g, -1, -1, 0, e )) #define NEIGHBOR_NT(g,e) (GRID_ENTRY_SWEEP( g, 0, +1, +1, e )) #define NEIGHBOR_NB(g,e) (GRID_ENTRY_SWEEP( g, 0, +1, -1, e )) #define NEIGHBOR_ST(g,e) (GRID_ENTRY_SWEEP( g, 0, -1, +1, e )) #define NEIGHBOR_SB(g,e) (GRID_ENTRY_SWEEP( g, 0, -1, -1, e )) #define NEIGHBOR_ET(g,e) (GRID_ENTRY_SWEEP( g, +1, 0, +1, e )) #define NEIGHBOR_EB(g,e) (GRID_ENTRY_SWEEP( g, +1, 0, -1, e )) #define NEIGHBOR_WT(g,e) (GRID_ENTRY_SWEEP( g, -1, 0, +1, e )) #define NEIGHBOR_WB(g,e) (GRID_ENTRY_SWEEP( g, -1, 0, -1, e )) #define COLLIDE_STREAM #ifdef COLLIDE_STREAM #define SRC_C(g) (LOCAL( g, C )) #define SRC_N(g) (LOCAL( g, N )) #define SRC_S(g) (LOCAL( g, S )) #define SRC_E(g) (LOCAL( g, E )) #define SRC_W(g) (LOCAL( g, W )) #define SRC_T(g) (LOCAL( g, T )) #define SRC_B(g) (LOCAL( g, B )) #define SRC_NE(g) (LOCAL( g, NE )) #define SRC_NW(g) (LOCAL( g, NW )) #define SRC_SE(g) (LOCAL( g, SE )) #define SRC_SW(g) (LOCAL( g, SW )) #define SRC_NT(g) (LOCAL( g, NT )) #define SRC_NB(g) (LOCAL( g, NB )) #define SRC_ST(g) (LOCAL( g, ST )) #define SRC_SB(g) (LOCAL( g, SB )) #define SRC_ET(g) (LOCAL( g, ET )) #define SRC_EB(g) (LOCAL( g, EB )) #define SRC_WT(g) (LOCAL( g, WT )) #define SRC_WB(g) (LOCAL( g, WB )) #define DST_C(g) (NEIGHBOR_C ( g, C )) #define DST_N(g) (NEIGHBOR_N ( g, N )) #define DST_S(g) (NEIGHBOR_S ( g, S )) #define DST_E(g) (NEIGHBOR_E ( g, E )) #define DST_W(g) (NEIGHBOR_W ( g, W )) #define DST_T(g) (NEIGHBOR_T ( g, T )) #define DST_B(g) (NEIGHBOR_B ( g, B )) #define DST_NE(g) (NEIGHBOR_NE( g, NE )) #define DST_NW(g) (NEIGHBOR_NW( g, NW )) #define DST_SE(g) (NEIGHBOR_SE( g, SE )) #define DST_SW(g) (NEIGHBOR_SW( g, SW )) #define DST_NT(g) (NEIGHBOR_NT( g, NT )) #define DST_NB(g) (NEIGHBOR_NB( g, NB )) #define DST_ST(g) (NEIGHBOR_ST( g, ST )) #define DST_SB(g) (NEIGHBOR_SB( g, SB )) #define DST_ET(g) (NEIGHBOR_ET( g, ET )) #define DST_EB(g) (NEIGHBOR_EB( g, EB )) #define DST_WT(g) (NEIGHBOR_WT( g, WT )) #define DST_WB(g) (NEIGHBOR_WB( g, WB )) #else /* COLLIDE_STREAM */ #define SRC_C(g) (NEIGHBOR_C ( g, C )) #define SRC_N(g) (NEIGHBOR_S ( g, N )) #define SRC_S(g) (NEIGHBOR_N ( g, S )) #define SRC_E(g) (NEIGHBOR_W ( g, E )) #define SRC_W(g) (NEIGHBOR_E ( g, W )) #define SRC_T(g) (NEIGHBOR_B ( g, T )) #define SRC_B(g) (NEIGHBOR_T ( g, B )) #define SRC_NE(g) (NEIGHBOR_SW( g, NE )) #define SRC_NW(g) (NEIGHBOR_SE( g, NW )) #define SRC_SE(g) (NEIGHBOR_NW( g, SE )) #define SRC_SW(g) (NEIGHBOR_NE( g, SW )) #define SRC_NT(g) (NEIGHBOR_SB( g, NT )) #define SRC_NB(g) (NEIGHBOR_ST( g, NB )) #define SRC_ST(g) (NEIGHBOR_NB( g, ST )) #define SRC_SB(g) (NEIGHBOR_NT( g, SB )) #define SRC_ET(g) (NEIGHBOR_WB( g, ET )) #define SRC_EB(g) (NEIGHBOR_WT( g, EB )) #define SRC_WT(g) (NEIGHBOR_EB( g, WT )) #define SRC_WB(g) (NEIGHBOR_ET( g, WB )) #define DST_C(g) (LOCAL( g, C )) #define DST_N(g) (LOCAL( g, N )) #define DST_S(g) (LOCAL( g, S )) #define DST_E(g) (LOCAL( g, E )) #define DST_W(g) (LOCAL( g, W )) #define DST_T(g) (LOCAL( g, T )) #define DST_B(g) (LOCAL( g, B )) #define 
DST_NE(g) (LOCAL( g, NE )) #define DST_NW(g) (LOCAL( g, NW )) #define DST_SE(g) (LOCAL( g, SE )) #define DST_SW(g) (LOCAL( g, SW )) #define DST_NT(g) (LOCAL( g, NT )) #define DST_NB(g) (LOCAL( g, NB )) #define DST_ST(g) (LOCAL( g, ST )) #define DST_SB(g) (LOCAL( g, SB )) #define DST_ET(g) (LOCAL( g, ET )) #define DST_EB(g) (LOCAL( g, EB )) #define DST_WT(g) (LOCAL( g, WT )) #define DST_WB(g) (LOCAL( g, WB )) #endif /* COLLIDE_STREAM */ #define MAGIC_CAST(v) ((unsigned int*) ((void*) (&(v)))) #define FLAG_VAR(v) unsigned int* const _aux_ = MAGIC_CAST(v) #define TEST_FLAG_SWEEP(g,f) ((*MAGIC_CAST(LOCAL(g, FLAGS))) & (f)) #define SET_FLAG_SWEEP(g,f) {FLAG_VAR(LOCAL(g, FLAGS)); (*_aux_) |= (f);} #define CLEAR_FLAG_SWEEP(g,f) {FLAG_VAR(LOCAL(g, FLAGS)); (*_aux_) &= ~(f);} #define CLEAR_ALL_FLAGS_SWEEP(g) {FLAG_VAR(LOCAL(g, FLAGS)); (*_aux_) = 0;} #define TEST_FLAG(g,x,y,z,f) ((*MAGIC_CAST(GRID_ENTRY(g, x, y, z, FLAGS))) & (f)) #define SET_FLAG(g,x,y,z,f) {FLAG_VAR(GRID_ENTRY(g, x, y, z, FLAGS)); (*_aux_) |= (f);} #define CLEAR_FLAG(g,x,y,z,f) {FLAG_VAR(GRID_ENTRY(g, x, y, z, FLAGS)); (*_aux_) &= ~(f);} #define CLEAR_ALL_FLAGS(g,x,y,z) {FLAG_VAR(GRID_ENTRY(g, x, y, z, FLAGS)); (*_aux_) = 0;} /*############################################################################*/ #endif /* _LBM_MACROS_H_ */ /*############################################################################*/ void LBM_allocateGrid( double** ptr ); void LBM_freeGrid( double** ptr ); void LBM_initializeGrid( LBM_Grid grid ); void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ); void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ); void LBM_initializeSpecialCellsForChannel( LBM_Grid grid ); void LBM_swapGrids( LBM_GridPtr* grid1, LBM_GridPtr* grid2 ); void LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ); void LBM_handleInOutFlow( LBM_Grid srcGrid ); void LBM_showGridStatistics( LBM_Grid Grid ); void LBM_storeVelocityField( LBM_Grid grid, const char* filename, const BOOL binary ); void LBM_compareVelocityField( LBM_Grid grid, const char* filename, const BOOL binary ); /*############################################################################*/ #endif /* _LBM_H_ */ #define DFL1 (1.0/ 3.0) #define DFL2 (1.0/18.0) #define DFL3 (1.0/36.0) /*############################################################################*/ void LBM_allocateGrid( double** ptr ) { const size_t margin = 2*SIZE_X*SIZE_Y*N_CELL_ENTRIES, size = sizeof( LBM_Grid ) + 2*margin*sizeof( double ); *ptr = malloc( size ); if( ! 
*ptr ) { printf( "LBM_allocateGrid: could not allocate %.1f MByte\n", size / (1024.0*1024.0) ); exit( 1 ); } #if !defined(SPEC_CPU) printf( "LBM_allocateGrid: allocated %.1f MByte\n", size / (1024.0*1024.0) ); #endif *ptr += margin; } /*############################################################################*/ void LBM_freeGrid( double** ptr ) { const size_t margin = 2*SIZE_X*SIZE_Y*N_CELL_ENTRIES; free( *ptr-margin ); *ptr = NULL; } /*############################################################################*/ void LBM_initializeGrid( LBM_Grid grid ) { SWEEP_VAR /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for #endif #endif SWEEP_START( 0, 0, -2, 0, 0, SIZE_Z+2 ) LOCAL( grid, C ) = DFL1; LOCAL( grid, N ) = DFL2; LOCAL( grid, S ) = DFL2; LOCAL( grid, E ) = DFL2; LOCAL( grid, W ) = DFL2; LOCAL( grid, T ) = DFL2; LOCAL( grid, B ) = DFL2; LOCAL( grid, NE ) = DFL3; LOCAL( grid, NW ) = DFL3; LOCAL( grid, SE ) = DFL3; LOCAL( grid, SW ) = DFL3; LOCAL( grid, NT ) = DFL3; LOCAL( grid, NB ) = DFL3; LOCAL( grid, ST ) = DFL3; LOCAL( grid, SB ) = DFL3; LOCAL( grid, ET ) = DFL3; LOCAL( grid, EB ) = DFL3; LOCAL( grid, WT ) = DFL3; LOCAL( grid, WB ) = DFL3; CLEAR_ALL_FLAGS_SWEEP( grid ); SWEEP_END } /*############################################################################*/ void LBM_swapGrids( LBM_GridPtr* grid1, LBM_GridPtr* grid2 ) { LBM_GridPtr aux = *grid1; *grid1 = *grid2; *grid2 = aux; } /*############################################################################*/ void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ) { int x, y, z; FILE* file = fopen( filename, "rb" ); for( z = 0; z < SIZE_Z; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { if( fgetc( file ) != '.' ) SET_FLAG( grid, x, y, z, OBSTACLE ); } fgetc( file ); } fgetc( file ); } fclose( file ); } /*############################################################################*/ void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ) { int x, y, z; /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for private( x, y ) #endif #endif for( z = -2; z < SIZE_Z+2; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { if( x == 0 || x == SIZE_X-1 || y == 0 || y == SIZE_Y-1 || z == 0 || z == SIZE_Z-1 ) { SET_FLAG( grid, x, y, z, OBSTACLE ); } else { if( (z == 1 || z == SIZE_Z-2) && x > 1 && x < SIZE_X-2 && y > 1 && y < SIZE_Y-2 ) { SET_FLAG( grid, x, y, z, ACCEL ); } } } } } } /*############################################################################*/ void LBM_initializeSpecialCellsForChannel( LBM_Grid grid ) { int x, y, z; /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for private( x, y ) #endif #endif for( z = -2; z < SIZE_Z+2; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { if( x == 0 || x == SIZE_X-1 || y == 0 || y == SIZE_Y-1 ) { SET_FLAG( grid, x, y, z, OBSTACLE ); if( (z == 0 || z == SIZE_Z-1) && ! 
TEST_FLAG( grid, x, y, z, OBSTACLE )) SET_FLAG( grid, x, y, z, IN_OUT_FLOW ); } } } } } /*############################################################################*/ void LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ) { SWEEP_VAR double ux, uy, uz, u2, rho; /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for private( ux, uy, uz, u2, rho ) #endif #endif SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z ) if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) { DST_C ( dstGrid ) = SRC_C ( srcGrid ); DST_S ( dstGrid ) = SRC_N ( srcGrid ); DST_N ( dstGrid ) = SRC_S ( srcGrid ); DST_W ( dstGrid ) = SRC_E ( srcGrid ); DST_E ( dstGrid ) = SRC_W ( srcGrid ); DST_B ( dstGrid ) = SRC_T ( srcGrid ); DST_T ( dstGrid ) = SRC_B ( srcGrid ); DST_SW( dstGrid ) = SRC_NE( srcGrid ); DST_SE( dstGrid ) = SRC_NW( srcGrid ); DST_NW( dstGrid ) = SRC_SE( srcGrid ); DST_NE( dstGrid ) = SRC_SW( srcGrid ); DST_SB( dstGrid ) = SRC_NT( srcGrid ); DST_ST( dstGrid ) = SRC_NB( srcGrid ); DST_NB( dstGrid ) = SRC_ST( srcGrid ); DST_NT( dstGrid ) = SRC_SB( srcGrid ); DST_WB( dstGrid ) = SRC_ET( srcGrid ); DST_WT( dstGrid ) = SRC_EB( srcGrid ); DST_EB( dstGrid ) = SRC_WT( srcGrid ); DST_ET( dstGrid ) = SRC_WB( srcGrid ); continue; } rho = + SRC_C ( srcGrid ) + SRC_N ( srcGrid ) + SRC_S ( srcGrid ) + SRC_E ( srcGrid ) + SRC_W ( srcGrid ) + SRC_T ( srcGrid ) + SRC_B ( srcGrid ) + SRC_NE( srcGrid ) + SRC_NW( srcGrid ) + SRC_SE( srcGrid ) + SRC_SW( srcGrid ) + SRC_NT( srcGrid ) + SRC_NB( srcGrid ) + SRC_ST( srcGrid ) + SRC_SB( srcGrid ) + SRC_ET( srcGrid ) + SRC_EB( srcGrid ) + SRC_WT( srcGrid ) + SRC_WB( srcGrid ); ux = + SRC_E ( srcGrid ) - SRC_W ( srcGrid ) + SRC_NE( srcGrid ) - SRC_NW( srcGrid ) + SRC_SE( srcGrid ) - SRC_SW( srcGrid ) + SRC_ET( srcGrid ) + SRC_EB( srcGrid ) - SRC_WT( srcGrid ) - SRC_WB( srcGrid ); uy = + SRC_N ( srcGrid ) - SRC_S ( srcGrid ) + SRC_NE( srcGrid ) + SRC_NW( srcGrid ) - SRC_SE( srcGrid ) - SRC_SW( srcGrid ) + SRC_NT( srcGrid ) + SRC_NB( srcGrid ) - SRC_ST( srcGrid ) - SRC_SB( srcGrid ); uz = + SRC_T ( srcGrid ) - SRC_B ( srcGrid ) + SRC_NT( srcGrid ) - SRC_NB( srcGrid ) + SRC_ST( srcGrid ) - SRC_SB( srcGrid ) + SRC_ET( srcGrid ) - SRC_EB( srcGrid ) + SRC_WT( srcGrid ) - SRC_WB( srcGrid ); ux /= rho; uy /= rho; uz /= rho; if( TEST_FLAG_SWEEP( srcGrid, ACCEL )) { ux = 0.005; uy = 0.002; uz = 0.000; } u2 = 1.5 * (ux*ux + uy*uy + uz*uz); DST_C ( dstGrid ) = (1.0-OMEGA)*SRC_C ( srcGrid ) + DFL1*OMEGA*rho*(1.0 - u2); DST_N ( dstGrid ) = (1.0-OMEGA)*SRC_N ( srcGrid ) + DFL2*OMEGA*rho*(1.0 + uy*(4.5*uy + 3.0) - u2); DST_S ( dstGrid ) = (1.0-OMEGA)*SRC_S ( srcGrid ) + DFL2*OMEGA*rho*(1.0 + uy*(4.5*uy - 3.0) - u2); DST_E ( dstGrid ) = (1.0-OMEGA)*SRC_E ( srcGrid ) + DFL2*OMEGA*rho*(1.0 + ux*(4.5*ux + 3.0) - u2); DST_W ( dstGrid ) = (1.0-OMEGA)*SRC_W ( srcGrid ) + DFL2*OMEGA*rho*(1.0 + ux*(4.5*ux - 3.0) - u2); DST_T ( dstGrid ) = (1.0-OMEGA)*SRC_T ( srcGrid ) + DFL2*OMEGA*rho*(1.0 + uz*(4.5*uz + 3.0) - u2); DST_B ( dstGrid ) = (1.0-OMEGA)*SRC_B ( srcGrid ) + DFL2*OMEGA*rho*(1.0 + uz*(4.5*uz - 3.0) - u2); DST_NE( dstGrid ) = (1.0-OMEGA)*SRC_NE( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2); DST_NW( dstGrid ) = (1.0-OMEGA)*SRC_NW( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2); DST_SE( dstGrid ) = (1.0-OMEGA)*SRC_SE( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2); DST_SW( dstGrid ) = (1.0-OMEGA)*SRC_SW( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2); DST_NT( dstGrid ) = (1.0-OMEGA)*SRC_NT( srcGrid 
) + DFL3*OMEGA*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2); DST_NB( dstGrid ) = (1.0-OMEGA)*SRC_NB( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2); DST_ST( dstGrid ) = (1.0-OMEGA)*SRC_ST( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2); DST_SB( dstGrid ) = (1.0-OMEGA)*SRC_SB( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2); DST_ET( dstGrid ) = (1.0-OMEGA)*SRC_ET( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2); DST_EB( dstGrid ) = (1.0-OMEGA)*SRC_EB( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2); DST_WT( dstGrid ) = (1.0-OMEGA)*SRC_WT( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2); DST_WB( dstGrid ) = (1.0-OMEGA)*SRC_WB( srcGrid ) + DFL3*OMEGA*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2); SWEEP_END } /*############################################################################*/ void LBM_handleInOutFlow( LBM_Grid srcGrid ) { double ux , uy , uz , rho , ux1, uy1, uz1, rho1, ux2, uy2, uz2, rho2, u2, px, py; SWEEP_VAR /* inflow */ /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \ ux2, uy2, uz2, rho2, u2, px, py ) #endif #endif SWEEP_START( 0, 0, 0, 0, 0, 1 ) rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 1, WB ); rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, 2, WB ); rho = 2.0*rho1 - rho2; px = (SWEEP_X / (0.5*(SIZE_X-1))) - 1.0; py = (SWEEP_Y / (0.5*(SIZE_Y-1))) - 1.0; ux = 0.00; uy = 0.00; uz = 0.01 * (1.0-px*px) * (1.0-py*py); u2 = 1.5 * (ux*ux + uy*uy + uz*uz); LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2); LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2); LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2); LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2); LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2); LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2); LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2); 
LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2); LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2); LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2); LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2); LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2); LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2); LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2); LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2); LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2); LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2); LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2); LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2); SWEEP_END /* outflow */ /*voption indep*/ #if !defined(SPEC_CPU) #ifdef _OPENMP #pragma omp parallel for private( ux, uy, uz, rho, ux1, uy1, uz1, rho1, \ ux2, uy2, uz2, rho2, u2, px, py ) #endif #endif SWEEP_START( 0, 0, SIZE_Z-1, 0, 0, SIZE_Z ) rho1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); ux1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); uy1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NW ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ); uz1 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -1, WB ); ux1 /= rho1; uy1 /= rho1; uz1 /= rho1; rho2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, C ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) + 
GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); ux2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, E ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, W ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); uy2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, N ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, S ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NE ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NW ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SE ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SW ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ); uz2 = + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, T ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, B ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, NB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ST ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, SB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, ET ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, EB ) + GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WT ) - GRID_ENTRY_SWEEP( srcGrid, 0, 0, -2, WB ); ux2 /= rho2; uy2 /= rho2; uz2 /= rho2; rho = 1.0; ux = 2*ux1 - ux2; uy = 2*uy1 - uy2; uz = 2*uz1 - uz2; u2 = 1.5 * (ux*ux + uy*uy + uz*uz); LOCAL( srcGrid, C ) = DFL1*rho*(1.0 - u2); LOCAL( srcGrid, N ) = DFL2*rho*(1.0 + uy*(4.5*uy + 3.0) - u2); LOCAL( srcGrid, S ) = DFL2*rho*(1.0 + uy*(4.5*uy - 3.0) - u2); LOCAL( srcGrid, E ) = DFL2*rho*(1.0 + ux*(4.5*ux + 3.0) - u2); LOCAL( srcGrid, W ) = DFL2*rho*(1.0 + ux*(4.5*ux - 3.0) - u2); LOCAL( srcGrid, T ) = DFL2*rho*(1.0 + uz*(4.5*uz + 3.0) - u2); LOCAL( srcGrid, B ) = DFL2*rho*(1.0 + uz*(4.5*uz - 3.0) - u2); LOCAL( srcGrid, NE) = DFL3*rho*(1.0 + (+ux+uy)*(4.5*(+ux+uy) + 3.0) - u2); LOCAL( srcGrid, NW) = DFL3*rho*(1.0 + (-ux+uy)*(4.5*(-ux+uy) + 3.0) - u2); LOCAL( srcGrid, SE) = DFL3*rho*(1.0 + (+ux-uy)*(4.5*(+ux-uy) + 3.0) - u2); LOCAL( srcGrid, SW) = DFL3*rho*(1.0 + (-ux-uy)*(4.5*(-ux-uy) + 3.0) - u2); LOCAL( srcGrid, NT) = DFL3*rho*(1.0 + (+uy+uz)*(4.5*(+uy+uz) + 3.0) - u2); LOCAL( srcGrid, NB) = DFL3*rho*(1.0 + (+uy-uz)*(4.5*(+uy-uz) + 3.0) - u2); LOCAL( srcGrid, ST) = DFL3*rho*(1.0 + (-uy+uz)*(4.5*(-uy+uz) + 3.0) - u2); LOCAL( srcGrid, SB) = DFL3*rho*(1.0 + (-uy-uz)*(4.5*(-uy-uz) + 3.0) - u2); LOCAL( srcGrid, ET) = DFL3*rho*(1.0 + (+ux+uz)*(4.5*(+ux+uz) + 3.0) - u2); LOCAL( srcGrid, EB) = DFL3*rho*(1.0 + (+ux-uz)*(4.5*(+ux-uz) + 3.0) - u2); LOCAL( srcGrid, WT) = DFL3*rho*(1.0 + (-ux+uz)*(4.5*(-ux+uz) + 3.0) - u2); LOCAL( srcGrid, WB) = DFL3*rho*(1.0 + (-ux-uz)*(4.5*(-ux-uz) + 3.0) - u2); SWEEP_END } /*############################################################################*/ void 
LBM_showGridStatistics( LBM_Grid grid ) { int nObstacleCells = 0, nAccelCells = 0, nFluidCells = 0; double ux, uy, uz; double minU2 = 1e+30, maxU2 = -1e+30, u2; double minRho = 1e+30, maxRho = -1e+30, rho; double mass = 0; SWEEP_VAR SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z ) rho = + LOCAL( grid, C ) + LOCAL( grid, N ) + LOCAL( grid, S ) + LOCAL( grid, E ) + LOCAL( grid, W ) + LOCAL( grid, T ) + LOCAL( grid, B ) + LOCAL( grid, NE ) + LOCAL( grid, NW ) + LOCAL( grid, SE ) + LOCAL( grid, SW ) + LOCAL( grid, NT ) + LOCAL( grid, NB ) + LOCAL( grid, ST ) + LOCAL( grid, SB ) + LOCAL( grid, ET ) + LOCAL( grid, EB ) + LOCAL( grid, WT ) + LOCAL( grid, WB ); if( rho < minRho ) minRho = rho; if( rho > maxRho ) maxRho = rho; mass += rho; if( TEST_FLAG_SWEEP( grid, OBSTACLE )) { nObstacleCells++; } else { if( TEST_FLAG_SWEEP( grid, ACCEL )) nAccelCells++; else nFluidCells++; ux = + LOCAL( grid, E ) - LOCAL( grid, W ) + LOCAL( grid, NE ) - LOCAL( grid, NW ) + LOCAL( grid, SE ) - LOCAL( grid, SW ) + LOCAL( grid, ET ) + LOCAL( grid, EB ) - LOCAL( grid, WT ) - LOCAL( grid, WB ); uy = + LOCAL( grid, N ) - LOCAL( grid, S ) + LOCAL( grid, NE ) + LOCAL( grid, NW ) - LOCAL( grid, SE ) - LOCAL( grid, SW ) + LOCAL( grid, NT ) + LOCAL( grid, NB ) - LOCAL( grid, ST ) - LOCAL( grid, SB ); uz = + LOCAL( grid, T ) - LOCAL( grid, B ) + LOCAL( grid, NT ) - LOCAL( grid, NB ) + LOCAL( grid, ST ) - LOCAL( grid, SB ) + LOCAL( grid, ET ) - LOCAL( grid, EB ) + LOCAL( grid, WT ) - LOCAL( grid, WB ); u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho); if( u2 < minU2 ) minU2 = u2; if( u2 > maxU2 ) maxU2 = u2; } SWEEP_END printf( "LBM_showGridStatistics:\n" "\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n" "\tminRho: %8.4f maxRho: %8.4f mass: %e\n" "\tminU: %e maxU: %e\n\n", nObstacleCells, nAccelCells, nFluidCells, minRho, maxRho, mass, sqrt( minU2 ), sqrt( maxU2 ) ); } /*############################################################################*/ static void storeValue( FILE* file, OUTPUT_PRECISION* v ) { const int litteBigEndianTest = 1; if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) { /* big endian */ const char* vPtr = (char*) v; char buffer[sizeof( OUTPUT_PRECISION )]; int i; for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++) buffer[i] = vPtr[sizeof( OUTPUT_PRECISION ) - i - 1]; fwrite( buffer, sizeof( OUTPUT_PRECISION ), 1, file ); } else { /* little endian */ fwrite( v, sizeof( OUTPUT_PRECISION ), 1, file ); } } /*############################################################################*/ static void loadValue( FILE* file, OUTPUT_PRECISION* v ) { const int litteBigEndianTest = 1; if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) { /* big endian */ char* vPtr = (char*) v; char buffer[sizeof( OUTPUT_PRECISION )]; int i; fread( buffer, sizeof( OUTPUT_PRECISION ), 1, file ); for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++) vPtr[i] = buffer[sizeof( OUTPUT_PRECISION ) - i - 1]; } else { /* little endian */ fread( v, sizeof( OUTPUT_PRECISION ), 1, file ); } } /*############################################################################*/ void LBM_storeVelocityField( LBM_Grid grid, const char* filename, const int binary ) { int x, y, z; OUTPUT_PRECISION rho, ux, uy, uz; FILE* file = fopen( filename, (binary ? 
"wb" : "w") ); for( z = 0; z < SIZE_Z; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { rho = + GRID_ENTRY( grid, x, y, z, C ) + GRID_ENTRY( grid, x, y, z, N ) + GRID_ENTRY( grid, x, y, z, S ) + GRID_ENTRY( grid, x, y, z, E ) + GRID_ENTRY( grid, x, y, z, W ) + GRID_ENTRY( grid, x, y, z, T ) + GRID_ENTRY( grid, x, y, z, B ) + GRID_ENTRY( grid, x, y, z, NE ) + GRID_ENTRY( grid, x, y, z, NW ) + GRID_ENTRY( grid, x, y, z, SE ) + GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, NT ) + GRID_ENTRY( grid, x, y, z, NB ) + GRID_ENTRY( grid, x, y, z, ST ) + GRID_ENTRY( grid, x, y, z, SB ) + GRID_ENTRY( grid, x, y, z, ET ) + GRID_ENTRY( grid, x, y, z, EB ) + GRID_ENTRY( grid, x, y, z, WT ) + GRID_ENTRY( grid, x, y, z, WB ); ux = + GRID_ENTRY( grid, x, y, z, E ) - GRID_ENTRY( grid, x, y, z, W ) + GRID_ENTRY( grid, x, y, z, NE ) - GRID_ENTRY( grid, x, y, z, NW ) + GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, ET ) + GRID_ENTRY( grid, x, y, z, EB ) - GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB ); uy = + GRID_ENTRY( grid, x, y, z, N ) - GRID_ENTRY( grid, x, y, z, S ) + GRID_ENTRY( grid, x, y, z, NE ) + GRID_ENTRY( grid, x, y, z, NW ) - GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, NT ) + GRID_ENTRY( grid, x, y, z, NB ) - GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB ); uz = + GRID_ENTRY( grid, x, y, z, T ) - GRID_ENTRY( grid, x, y, z, B ) + GRID_ENTRY( grid, x, y, z, NT ) - GRID_ENTRY( grid, x, y, z, NB ) + GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB ) + GRID_ENTRY( grid, x, y, z, ET ) - GRID_ENTRY( grid, x, y, z, EB ) + GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB ); ux /= rho; uy /= rho; uz /= rho; if( binary ) { /* fwrite( &ux, sizeof( ux ), 1, file ); fwrite( &uy, sizeof( uy ), 1, file ); fwrite( &uz, sizeof( uz ), 1, file ); */ storeValue( file, &ux ); storeValue( file, &uy ); storeValue( file, &uz ); } else fprintf( file, "%e %e %e\n", ux, uy, uz ); } } } fclose( file ); } /*############################################################################*/ void LBM_compareVelocityField( LBM_Grid grid, const char* filename, const int binary ) { int x, y, z; double rho, ux, uy, uz; OUTPUT_PRECISION fileUx, fileUy, fileUz, dUx, dUy, dUz, diff2, maxDiff2 = -1e+30; FILE* file = fopen( filename, (binary ? 
"rb" : "r") ); for( z = 0; z < SIZE_Z; z++ ) { for( y = 0; y < SIZE_Y; y++ ) { for( x = 0; x < SIZE_X; x++ ) { rho = + GRID_ENTRY( grid, x, y, z, C ) + GRID_ENTRY( grid, x, y, z, N ) + GRID_ENTRY( grid, x, y, z, S ) + GRID_ENTRY( grid, x, y, z, E ) + GRID_ENTRY( grid, x, y, z, W ) + GRID_ENTRY( grid, x, y, z, T ) + GRID_ENTRY( grid, x, y, z, B ) + GRID_ENTRY( grid, x, y, z, NE ) + GRID_ENTRY( grid, x, y, z, NW ) + GRID_ENTRY( grid, x, y, z, SE ) + GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, NT ) + GRID_ENTRY( grid, x, y, z, NB ) + GRID_ENTRY( grid, x, y, z, ST ) + GRID_ENTRY( grid, x, y, z, SB ) + GRID_ENTRY( grid, x, y, z, ET ) + GRID_ENTRY( grid, x, y, z, EB ) + GRID_ENTRY( grid, x, y, z, WT ) + GRID_ENTRY( grid, x, y, z, WB ); ux = + GRID_ENTRY( grid, x, y, z, E ) - GRID_ENTRY( grid, x, y, z, W ) + GRID_ENTRY( grid, x, y, z, NE ) - GRID_ENTRY( grid, x, y, z, NW ) + GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, ET ) + GRID_ENTRY( grid, x, y, z, EB ) - GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB ); uy = + GRID_ENTRY( grid, x, y, z, N ) - GRID_ENTRY( grid, x, y, z, S ) + GRID_ENTRY( grid, x, y, z, NE ) + GRID_ENTRY( grid, x, y, z, NW ) - GRID_ENTRY( grid, x, y, z, SE ) - GRID_ENTRY( grid, x, y, z, SW ) + GRID_ENTRY( grid, x, y, z, NT ) + GRID_ENTRY( grid, x, y, z, NB ) - GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB ); uz = + GRID_ENTRY( grid, x, y, z, T ) - GRID_ENTRY( grid, x, y, z, B ) + GRID_ENTRY( grid, x, y, z, NT ) - GRID_ENTRY( grid, x, y, z, NB ) + GRID_ENTRY( grid, x, y, z, ST ) - GRID_ENTRY( grid, x, y, z, SB ) + GRID_ENTRY( grid, x, y, z, ET ) - GRID_ENTRY( grid, x, y, z, EB ) + GRID_ENTRY( grid, x, y, z, WT ) - GRID_ENTRY( grid, x, y, z, WB ); ux /= rho; uy /= rho; uz /= rho; if( binary ) { loadValue( file, &fileUx ); loadValue( file, &fileUy ); loadValue( file, &fileUz ); } else { if( sizeof( OUTPUT_PRECISION ) == sizeof( double )) { fscanf( file, "%lf %lf %lf\n", &fileUx, &fileUy, &fileUz ); } else { fscanf( file, "%f %f %f\n", &fileUx, &fileUy, &fileUz ); } } dUx = ux - fileUx; dUy = uy - fileUy; dUz = uz - fileUz; diff2 = dUx*dUx + dUy*dUy + dUz*dUz; if( diff2 > maxDiff2 ) maxDiff2 = diff2; } } } #if defined(SPEC_CPU) printf( "LBM_compareVelocityField: maxDiff = %e \n\n", sqrt( maxDiff2 ) ); #else printf( "LBM_compareVelocityField: maxDiff = %e ==> %s\n\n", sqrt( maxDiff2 ), sqrt( maxDiff2 ) > 1e-5 ? 
"##### ERROR #####" : "OK" ); #endif fclose( file ); } /*############################################################################*/ /*############################################################################*/ /*############################################################################*/ void MAIN_parseCommandLine( int nArgs, char* arg[], MAIN_Param* param ) { struct stat fileStat; if( nArgs < 5 || nArgs > 6 ) { printf( "syntax: lbm <time steps> <result file> <0: nil, 1: cmp, 2: str> <0: ldc, 1: channel flow> [<obstacle file>]\n" ); exit( 1 ); } param->nTimeSteps = atoi( arg[1] ); param->resultFilename = arg[2]; param->action = (MAIN_Action) atoi( arg[3] ); param->simType = (MAIN_SimType) atoi( arg[4] ); if( nArgs == 6 ) { param->obstacleFilename = arg[5]; if( stat( param->obstacleFilename, &fileStat ) != 0 ) { printf( "MAIN_parseCommandLine: cannot stat obstacle file '%s'\n", param->obstacleFilename ); exit( 1 ); } if( fileStat.st_size != SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z ) { printf( "MAIN_parseCommandLine:\n" "\tsize of file '%s' is %i bytes\n" "\texpected size is %i bytes\n", param->obstacleFilename, (int) fileStat.st_size, SIZE_X*SIZE_Y*SIZE_Z+(SIZE_Y+1)*SIZE_Z ); exit( 1 ); } } else param->obstacleFilename = NULL; if( param->action == COMPARE && stat( param->resultFilename, &fileStat ) != 0 ) { printf( "MAIN_parseCommandLine: cannot stat result file '%s'\n", param->resultFilename ); exit( 1 ); } } /*############################################################################*/ void MAIN_printInfo( const MAIN_Param* param ) { const char actionString[3][32] = {"nothing", "compare", "store"}; const char simTypeString[3][32] = {"lid-driven cavity", "channel flow"}; printf( "MAIN_printInfo:\n" "\tgrid size : %i x %i x %i = %.2f * 10^6 Cells\n" "\tnTimeSteps : %i\n" "\tresult file : %s\n" "\taction : %s\n" "\tsimulation type: %s\n" "\tobstacle file : %s\n\n", SIZE_X, SIZE_Y, SIZE_Z, 1e-6*SIZE_X*SIZE_Y*SIZE_Z, param->nTimeSteps, param->resultFilename, actionString[param->action], simTypeString[param->simType], (param->obstacleFilename == NULL) ? 
"<none>" : param->obstacleFilename ); } /*############################################################################*/ void MAIN_initialize( const MAIN_Param* param ) { LBM_allocateGrid( (double**) &srcGrid ); LBM_allocateGrid( (double**) &dstGrid ); LBM_initializeGrid( *srcGrid ); LBM_initializeGrid( *dstGrid ); if( param->obstacleFilename != NULL ) { LBM_loadObstacleFile( *srcGrid, param->obstacleFilename ); LBM_loadObstacleFile( *dstGrid, param->obstacleFilename ); } if( param->simType == CHANNEL ) { LBM_initializeSpecialCellsForChannel( *srcGrid ); LBM_initializeSpecialCellsForChannel( *dstGrid ); } else { LBM_initializeSpecialCellsForLDC( *srcGrid ); LBM_initializeSpecialCellsForLDC( *dstGrid ); } LBM_showGridStatistics( *srcGrid ); } /*############################################################################*/ void MAIN_finalize( const MAIN_Param* param ) { LBM_showGridStatistics( *srcGrid ); if( param->action == COMPARE ) LBM_compareVelocityField( *srcGrid, param->resultFilename, TRUE ); if( param->action == STORE ) LBM_storeVelocityField( *srcGrid, param->resultFilename, TRUE ); LBM_freeGrid( (double**) &srcGrid ); LBM_freeGrid( (double**) &dstGrid ); } int main( ) { int nArgs; char** arg; MAIN_Param param; #if !defined(SPEC_CPU) MAIN_Time time; #endif int t; nArgs = 6; arg = malloc(6 * sizeof(char*)); arg[0] = "test.exe"; arg[1] = "20"; arg[2] = "reference.dat"; arg[3] = "0"; arg[4] = "1"; arg[5] = "100_100_130_cf_a.of"; MAIN_parseCommandLine( nArgs, arg, &param ); MAIN_printInfo( &param ); MAIN_initialize( &param ); #if !defined(SPEC_CPU) MAIN_startClock( &time ); #endif for( t = 1; t <= param.nTimeSteps; t++ ) { if( param.simType == CHANNEL ) { LBM_handleInOutFlow( *srcGrid ); } LBM_performStreamCollide( *srcGrid, *dstGrid ); LBM_swapGrids( &srcGrid, &dstGrid ); if( (t & 63) == 0 ) { printf( "timestep: %i\n", t ); LBM_showGridStatistics( *srcGrid ); } } #if !defined(SPEC_CPU) MAIN_stopClock( &time, &param ); #endif MAIN_finalize( &param ); return 0; } #if !defined(SPEC_CPU) /*############################################################################*/ void MAIN_startClock( MAIN_Time* time ) { /* time->timeScale = 1.0 / sysconf( _SC_CLK_TCK ); time->tickStart = times( &(time->timeStart) ); */ } /*############################################################################*/ void MAIN_stopClock( MAIN_Time* time, const MAIN_Param* param ) { /* time->tickStop = times( &(time->timeStop) ); printf( "MAIN_stopClock:\n" "\tusr: %7.2f sys: %7.2f tot: %7.2f wct: %7.2f MLUPS: %5.2f\n\n", (time->timeStop.tms_utime - time->timeStart.tms_utime) * time->timeScale, (time->timeStop.tms_stime - time->timeStart.tms_stime) * time->timeScale, (time->timeStop.tms_utime - time->timeStart.tms_utime + time->timeStop.tms_stime - time->timeStart.tms_stime) * time->timeScale, (time->tickStop - time->tickStart ) * time->timeScale, 1.0e-6 * SIZE_X * SIZE_Y * SIZE_Z * param->nTimeSteps / (time->tickStop - time->tickStart ) / time->timeScale ); */ } #endif
GB_unop__floor_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__floor_fc64_fc64) // op(A') function: GB (_unop_tran__floor_fc64_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_cfloor (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cfloor (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_cfloor (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FLOOR || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__floor_fc64_fc64) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cfloor (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_cfloor (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__floor_fc64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
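//------------------------------------------------------------------------------
// Hedged sketch (not taken from SuiteSparse itself): the generated kernel
// above applies GB_cfloor to GxB_FC64_t entries, GraphBLAS's double-precision
// complex type. A componentwise definition consistent with that usage --
// floor applied separately to the real and imaginary parts -- would look like
// the function below; cfloor_sketch is an illustrative name, not the
// library's internal code.
//------------------------------------------------------------------------------

#include <complex.h>
#include <math.h>

static inline double _Complex cfloor_sketch (double _Complex x)
{
    // floor each component; CMPLX (C11) assembles the result without doing
    // arithmetic on the imaginary part
    return CMPLX (floor (creal (x)), floor (cimag (x))) ;
}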
globals.h
#ifndef GLOBALS_H
#define GLOBALS_H

#define PROG_NAME "WVT ICs"
#define VERSION "1.0"

/* C std lib */
#include <stdlib.h> // system
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <limits.h>
#include <complex.h>

/* GNU Scientific Library */
#include <gsl/gsl_math.h>
#include <gsl/gsl_const_cgsm.h>
#include <gsl/gsl_const_num.h>
#include <gsl/gsl_rng.h>

#include <omp.h>

/* Global Prototypes */
#include "macro.h"
#include "peano.h"
#include "proto.h"

/* To eat PNG density files */
#ifdef EAT_PNG
#include "png.h"
#include "zlib.h"
#endif

#include "peanowalk.h"

/* Code parameters */
#define CHARBUFSIZE 512 // For any char buffer !
#define MAXTAGS 300     // In parameter file

#ifdef TWO_DIM
#ifdef SPH_CUBIC_SPLINE
#define DESNNGB 14   // SPH kernel weighted number of neighbours
#define NNGBDEV 0.01 // error tolerance in SPH kernel weighted neighb.
#define NGBMAX (DESNNGB*16) // size of neighbour list
#else
#ifdef SPH_WC2
#define DESNNGB 16   // SPH kernel weighted number of neighbours
#define NNGBDEV 0.01 // error tolerance in SPH kernel weighted neighb.
#define NGBMAX (DESNNGB*8) // size of neighbour list
#else
#define DESNNGB 44   // SPH kernel weighted number of neighbours
#define NNGBDEV 0.01 // error tolerance in SPH kernel weighted neighb.
#define NGBMAX (DESNNGB*8) // size of neighbour list
#endif // SPH_WC2
#endif // SPH_CUBIC_SPLINE
#else
#ifdef SPH_CUBIC_SPLINE
#define DESNNGB 50   // SPH kernel weighted number of neighbours
#define NNGBDEV 0.05 // error tolerance in SPH kernel weighted neighb.
#define NGBMAX (DESNNGB*16) // size of neighbour list
#else
#ifdef SPH_WC2
#define DESNNGB 64   // SPH kernel weighted number of neighbours
#define NNGBDEV 0.05 // error tolerance in SPH kernel weighted neighb.
#define NGBMAX (DESNNGB*8) // size of neighbour list
#else
#define DESNNGB 295  // SPH kernel weighted number of neighbours
#define NNGBDEV 0.05 // error tolerance in SPH kernel weighted neighb.
#define NGBMAX (DESNNGB*8) // size of neighbour list
#endif // SPH_WC2
#endif // SPH_CUBIC_SPLINE
#endif // TWO_DIM

/* mathematical constants */
#define pi M_PI
#define sqrt2 M_SQRT2
#define sqrt3 1.73205080756887719
#define fourpithird 4.18879032135009765

extern const double ULength, UMass, UVel; // Standard Gadget units (setup.c)

extern struct OpenMP_infos {
    int NThreads;           // Number of openMP threads
    int ThreadID;           // Thread ID of this thread
    unsigned short Seed[3]; // random number seed: erand48(Omp.Seed)
} Omp;
#pragma omp threadprivate(Omp)

extern struct Parameters {
    int Npart;
    int Maxiter;
    double MpsFraction;     // move this fraction of the mean particle sep
    double StepReduction;   // force convergence at this rate
    double BiasCorrection;  // density bias correction factor
    double LimitMps[4];     // Convergence criterion for particle movement
    double MoveFractionMin; // move at least this fraction of particles during redistribution steps
    double MoveFractionMax; // move at most this fraction of particles during redistribution steps
    double ProbesFraction;
    int RedistributionFrequency;
    int LastMoveStep;
    int Problem_Flag;
    int Problem_Subflag;
} Param;

extern struct ProblemParameters {
    char Name[CHARBUFSIZE];
    double Mpart;
    double Boxsize[3]; // [0] is ALWAYS the largest one !
    double Rho_Max;
    bool Periodic[3];
} Problem;

#ifdef EAT_PNG
extern struct ImageProperties {
    char Name[CHARBUFSIZE];
    float *Density;
    int Xpix;
    int Ypix;
    int Zpix;
} Image;
#endif

extern struct ParticleData {
    double Pos[3];
    double Vel[3];
    int32_t ID;
    int Type;
    peanoKey Key;
    int Tree_Parent;
    bool Redistributed;
} *P;

extern struct GasParticleData {
    float U;
    float Rho;
    float Hsml;
    float VarHsmlFac;
    float ID;
    float Rho_Model;
    float Bfld[3];
} *SphP;

extern float ( *Density_Func_Ptr ) ( const int, const double );
extern float ( *U_Func_Ptr ) ( const int );
extern void ( *Velocity_Func_Ptr ) ( const int, float * );
extern void ( *Magnetic_Field_Func_Ptr ) ( const int, float * );
extern void ( *PostProcessing_Func_Ptr ) ();

double G; // gravitational constant in code units

#endif
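/* Hedged usage sketch (not part of this header): problem modules are expected
 * to hook into the function pointers declared above. A minimal constant
 * density setup could look like this, assuming globals.h is included; the
 * names constant_density and setup_constant_density_problem are illustrative
 * only. */
static float constant_density( const int ipart, const double bias )
{
    (void) ipart; (void) bias; // unused for a uniform field

    return 1.0; // uniform target density in code units
}

static void setup_constant_density_problem()
{
    sprintf(Problem.Name, "Constant_Density");

    Problem.Mpart = 1.0; // illustrative particle mass

    Density_Func_Ptr = &constant_density; // matches float (*)(const int, const double)
}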
ParallelMovingAverageCpuCode.c
/*** Run a maxfile in parallel on multiple DFEs using DFE groups and OpenMP. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include "ParallelMovingAverage.h"
#include "MaxSLiCInterface.h"

// Using MaxelerOS managed groups of DFEs
void MovingAverageDFE_groups(int numEngines, max_file_t* maxfile, ParallelMovingAverage_actions_t** actions)
{
	max_group_t *group = max_load_group(maxfile, MAXOS_EXCLUSIVE, "local", numEngines);
	#pragma omp parallel for
	for (int i = 0; i < numEngines; i++)
		ParallelMovingAverage_run_group(group, actions[i]);
	max_unload_group(group);
}

// Using max_load/max_unload to load the bitstream on specific DFEs,
// may be required for more advanced scheduling
void MovingAverageDFE_custom(int numEngines, max_file_t* maxfile, ParallelMovingAverage_actions_t** actions, char** dfeIds)
{
	#pragma omp parallel for
	for (int i = 0; i < numEngines; i++) {
		char id[100];
		sprintf(id, "local:%s", dfeIds[i]); // select the DFE named by its id, e.g. "local:0"
		max_engine_t *engine = max_load(maxfile, id);
		ParallelMovingAverage_run(engine, actions[i]);
		max_unload(engine);
	}
}

void MovingAverageDFE(int width, int n, int *in, int* out, int numEngines, char** dfeIds, bool useGroups)
{
	if (width != 3) {
		printf("Error, only moving average of width three supported!");
		exit(1);
	}
	if (n % numEngines != 0) {
		printf("Error, stream size must be multiple of number of DFEs!");
		exit(1);
	}

	ParallelMovingAverage_actions_t *actions[numEngines];

	// split data in batches: one actions struct per engine, pointing at that
	// engine's slice of the input and output streams
	int itemsPerDfe = n / numEngines;
	for (int i = 0; i < numEngines; i++) {
		actions[i] = malloc(sizeof(ParallelMovingAverage_actions_t));
		actions[i]->param_N = itemsPerDfe;
		actions[i]->instream_a = in + itemsPerDfe * i;
		actions[i]->outstream_output = out + itemsPerDfe * i;
	}

	max_file_t *maxfile = ParallelMovingAverage_init();
	if (useGroups)
		MovingAverageDFE_groups(numEngines, maxfile, actions);
	else
		MovingAverageDFE_custom(numEngines, maxfile, actions, dfeIds);

	// need to redo the values around the boundaries between the batches,
	// where each engine's stencil was missing its neighbour's data
	for (int i = 0; i < numEngines; i++) {
		int boundary = i * itemsPerDfe;
		for (int j = 0; j < width / 2; j++) {
			int pos1 = boundary + j;
			if (i != 0)
				out[pos1] = (in[pos1 - 1] + in[pos1] + in[pos1 + 1]) / 3;
			int pos2 = boundary + itemsPerDfe - j - 1;
			if (i != numEngines - 1)
				out[pos2] = (in[pos2 - 1] + in[pos2] + in[pos2 + 1]) / 3;
		}
	}

	for (int i = 0; i < numEngines; i++)
		free(actions[i]);
	max_file_free(maxfile);
}

void check_results(int n, int* out, int* exp)
{
	for (int i = 1; i < n - 1; i++)
		if (out[i] != exp[i]) {
			printf("Output from DFE did not match CPU: %d : %d != %d\n", i, out[i], exp[i]);
			exit(1);
		}
}

int main(int argc, char** argv)
{
	int n = 384;
	const int numEngines = 8;

	int *a = calloc(n, sizeof(int));
	int *out = calloc(n, sizeof(int));
	for (int i = 0; i < n; ++i)
		a[i] = i + 1;

	int *exp = calloc(n, sizeof(int));
	for (int i = 1; i < n - 1; i++)
		exp[i] = (a[i - 1] + a[i] + a[i + 1]) / 3;

	char *dfeIds[] = {"0", "1", "2", "3", "4", "5", "6", "7"};

	printf("Running on DFE with groups.\n");
	MovingAverageDFE(3, n, a, out, numEngines, dfeIds, true);
	check_results(n, out, exp); // compare the DFE output, not the input, to the CPU reference

	// TODO: custom mode seems to produce errors
	// printf("Running on DFE with custom mode.\n");
	// MovingAverageDFE(3, n, a, out, numEngines, dfeIds, false);
	// check_results(n, out, exp);

	free(a);
	free(exp);
	free(out);
	printf("Test passed!\n");
	return 0;
}
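/* Hedged reference sketch (CPU only, no DFE needed): the boundary fix-up in
 * MovingAverageDFE above recomputes the width-3 average wherever a batch seam
 * cut the stencil. This host-side version produces the same values the DFE
 * output is checked against, and is handy for exercising check_results()
 * without hardware. The name MovingAverageCPU is illustrative only. */
void MovingAverageCPU(int n, const int *in, int *out)
{
	// interior points only: out[0] and out[n-1] are left untouched,
	// mirroring the expected-values loop in main()
	for (int i = 1; i < n - 1; i++)
		out[i] = (in[i - 1] + in[i] + in[i + 1]) / 3;
}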
truecrypt_fmt_plug.c
/* TrueCrypt volume support for John The Ripper
 *
 * Written by Alain Espinosa <alainesp at gmail.com> in 2012. No copyright
 * is claimed, and the software is hereby placed in the public domain.
 * In case this attempt to disclaim copyright and place the software in the
 * public domain is deemed null and void, then the software is
 * Copyright (c) 2012 Alain Espinosa and it is hereby released to the
 * general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * There's ABSOLUTELY NO WARRANTY, express or implied.
 *
 * (This is a heavily cut-down "BSD license".)
 *
 * Updated in Dec, 2014 by JimF. This is an ugly format, and it was converted
 * into a more standard (crypt_all based) one. The PKCS5_PBKDF2_HMAC can
 * be replaced with faster pbkdf2_xxxx functions (possibly with SIMD usage);
 * this has been done for sha512. ripemd160 and Whirlpool pbkdf2 header
 * files have been created. Also, proper decryption is now done (in cmp_exact),
 * and we test against the 'TRUE' signature, and against 2 crc32's which
 * are computed over the 448 bytes of decrypted data. So we now have a
 * full 96 bits of hash, and there is no way we get false positives from
 * this slow format. EVP_AES_XTS removed. Also, we now only pbkdf2 over
 * 64 bytes of data (all that is needed for the 2 AES keys), and that sped
 * up the crypts A LOT (~3x faster).
 */

#include "arch.h"

#if FMT_EXTERNS_H
extern struct fmt_main fmt_truecrypt;
extern struct fmt_main fmt_truecrypt_ripemd160;
extern struct fmt_main fmt_truecrypt_sha512;
extern struct fmt_main fmt_truecrypt_whirlpool;
#elif FMT_REGISTERS_H
john_register_one(&fmt_truecrypt);
john_register_one(&fmt_truecrypt_ripemd160);
john_register_one(&fmt_truecrypt_sha512);
john_register_one(&fmt_truecrypt_whirlpool);
#else

#include <openssl/aes.h>
#include <string.h>
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#define PBKDF2_HMAC_SHA512_ALSO_INCLUDE_CTX
#include "pbkdf2_hmac_sha512.h"
#include "pbkdf2_hmac_ripemd160.h"
#include "pbkdf2_hmac_whirlpool.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 1
#endif
#include "memdbg.h"

/* 64 is the actual maximum used by Truecrypt software as of version 7.1a */
#define PLAINTEXT_LENGTH 64
#define MAX_CIPHERTEXT_LENGTH (512*2+32)
#define SALT_SIZE sizeof(struct cust_salt)
#define SALT_ALIGN 4
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

static unsigned char (*key_buffer)[PLAINTEXT_LENGTH + 1];
static unsigned char (*first_block_dec)[16];

#define TAG_WHIRLPOOL "truecrypt_WHIRLPOOL$"
#define TAG_SHA512 "truecrypt_SHA_512$"
#define TAG_RIPEMD160 "truecrypt_RIPEMD_160$"
#define TAG_WHIRLPOOL_LEN (sizeof(TAG_WHIRLPOOL)-1)
#define TAG_SHA512_LEN (sizeof(TAG_SHA512)-1)
#define TAG_RIPEMD160_LEN (sizeof(TAG_RIPEMD160)-1)

#define IS_SHA512 1
#define IS_RIPEMD160 2
#define IS_WHIRLPOOL 3

struct cust_salt {
	unsigned char salt[64];
	unsigned char bin[512-64];
	int loop_inc;
	int num_iterations;
	int hash_type;
} *psalt;

static struct fmt_tests tests_ripemd160[] = {
{"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" }, {"truecrypt_RIPEMD_160$6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" }, {NULL} }; static struct fmt_tests tests_sha512[] = { {"truecrypt_SHA_512$aa582afe64197a3cfd4faf7697673e5e14414369da3f716400414f63f75447da7d3abdc65a25ea511b1772d67370d6c349d8000de66d65861403093fecfb85719e1d46158d24324e5a2c0ee598214b1b2e7eac761dbde8cb85bcb33f293df7f30c9e44a3fa97bf1c70e9986677855873fa2435d9154ccaed8f28d68f16b10adcce7032d7c1742d322739d02c05457859abdaa176faa95c674d2a1092c30832dd2afd9a319599b4d1db92ffe6e48b3b29e566d5c51af091839699f5ad1715730fef24e94e39a6f40770b8320e30bf972d810b588af88ce3450337adbec0a10255b20230bcfca93aa5a0a6592cd6038312181c0792c59ec9e5d95a6216497d39ae28131869b89368e82371718970bf9750a7114c83d87b1b0cd16b6e8d41c4925d15ec26107e92847ec1bb73363ca10f3ad62afa8b0f95ff13cdbe217a1e8a74508ef439ed2140b26d5538b8d011a0d1e469f2a6962e56964adc75b90d9c6a16e88ad0adb59a337f8abb3f9d76f7f9acad22853e9dbbce13a4f686c6a802243b0901972af3c6928511609ac7b957b352452c4347acd563a72faa86a46522942fdc57f32d48c5148a2bb0bc2c3dbc9851385f816f2ece958957082c0a8fe69f647be675d87fcb8244912abc277a3242ee17e1d522f85598417559cb3a9f60b755e5b613069cb54c05a4c5d2fbd3ca6ba793320aeb0e109f8b21852daf2d9ed74dd9", "password"}, 
{"truecrypt_SHA_512$73f6b08614dc4ffbd77d27a0815b0700d6b612f573ccd6c8937e8d154321e3c1c1c67dd348d4d3bc8304e94a3a6ec0c672de8396a9a6b26b12393195b7daa4225a9d3a134229be011f8179791bb00c31b5c132c8dbad5a6f8738487477c409b3c32d90b07be8d7a3a9faa95d37ab6faccc459d47f029e25adcea48cee83eaa35b7acc3f849717000421d92ac46e6f16ec3dccacd3ffae76a48280977d2a6727027d9d6ff9c4c98405359ee382f6dd1eca0d7007cbe804b81485c1085e74b58d3eb1e3c7ebdc1e1ab1384e4440ab6ca7beed7e0ef7d1e0da5ffc3cd89f7b6ac8a9257ee369d397ac1e112f75382ddbe6f7317ec20c46cb7b2111d0d91570e90b4c01a0b8205fcdf4d0cadcf4a067b8f285a541f1d649894fb3ade29a2ee0575524455d489c299dde215bea3254f7d43aa4e4011a39bdb6e7473bc29f588e659fdbf065cc4a336ba42f2b6c07479cf3e544978150fb013da7db22afcb4f8384e39e2edfa30a4cbe5e84a07c54ba66663bb9284836cc5a8ba7489d3f7f92aec6d9f4e264c90c2af6181082bd273197bc42c325cb1de31006dd55425e3f210d2ddd7973978eec865d3226bb1e30a9897146d90d79a73070e87f0182981ea85f15f948ae1958af7704fabecd6f07e20be70be9f9c38a5c5e5c8b17be648f011b2c40f62d6ac51de932add5bdb47bb428fd510b004a7aa79321b03ed7aa202be439fbf", "password" }, {"truecrypt_SHA_512$cfd9e5757da139b32d117cd60f86f649400615dc218981106dfadd44598599a7ec0ace42de61506fe8d81b5c885861cdb26e0c38cb9adfcff27ba88872220ccd0914d4fa44bab5a708fe6864e0f665ac71d87e7e97b3724d610cf1f6ec09fa99da40126f63868654fed3381eaa8176f689e8e292c3cb68e43601d5804bc2e19d86722c21d42204e158b26b720e7b8f7580edce15469195dd7ed711b0fcb6c8abc253d0fd93cc784d5279de527fbdcfb357780635a5c363b773b55957d7efb472f6e6012489a9f0d225573446e5251cfb277a1365eed787e0da52f02d835667d74cc41fa4002cc35ad1ce276fbf9d73d6553ac0f8ab6961901d292a66df814a2cbda1b41f29aeec88ed15e7d37fe84ac5306b5a1b8d2e1f2c132e5c7d40ca7bb76d4ff87980ca4d75eaac5066b3ed50b53259554b9f922f7cee8e91847359d06e448da02cbeeecc78ca9bee2899a33dfa04a478ca131d33c64d6de5f81b219f11bed6ff3c0d56f26b3a27c79e7c55b6f76567a612166ce71028e3d3ae7e5abd25faec5e2e9dc30719baa2c138e26d6f8e3799a72b5e7b1c2a07c12cea452073b72f6e429bb17dd23fe3934c9e406bb4060083f92aa100c2e82ca40664f65c02cbc800c5696659f8df84db17edb92de5d4f1ca9e5fe71844e1e8c4f8b19ce7362fb3ca5467bf65122067c53f011648a6663894b315e6c5c635bec5bd39da028041", "123" }, {NULL} }; static struct fmt_tests tests_whirlpool[] = { {"truecrypt_WHIRLPOOL$5724ba89229d705010ec56af416b16155682a0cab9cf48ac5a5fdd2086c9a251ae4bbea6cfb8464321a789852f7812095b0e0c4c4f9c6d14ba7beedaf3484b375ac7bc97b43c3e74bf1a0c259b7ac8725d990d2ff31935ca3443f2ce8df59de86515da3e0f53f728882b71c5cc704df0c87c282a7413db446e9a2e516a144311dd25092eb0a2c5df0240d899708289fc7141abd8538fa5791d9f96c39129cce9fe8a6e58e84364e2f4acc32274147431cb2d2480b1b54bffee485acee0925852b8a6ee71d275f028b92e540be595448e5f1d78560a3b8ad209962dd5981d7ca98db9a678a588a9296157d44502cd78f9e32f022dddc9bc8111b5704ee39a9b56d30b89898ae340e90f2e6c73be6ac64de97e32fc2eed0b66dcd5c1553eeab3950cf851624a5a4439435a6fd5717fda6d5f939f4a902321341964c16bda8975752ba150fb9d858d8eaff2a2086cb50d30abff741ee20223b4223b1783f0ed537a609a081afed952395ef0b5de6883db66cbb5a8bac70f2f757c7b6e6bb5d863672820f0d3d61b262b2b6c2ca0dc8e7137851aa450da1c1d915e005bff0e849a89bf67693ef97f5c17bf8d07a18c562dc783274f9ec580f9519a6dd1429b66160ddb04549506ad616dd0695da144fa2ad270eac7163983e9036f1bde3c7634b8a246b8dcd518ce3e12b881c838fbce59a0cfdffa3b21447e3f28124f63549c3962", "password" }, 
{"truecrypt_WHIRLPOOL$0650595770851981d70b088ff6ef4bf90573e08d03c8cac8b2dfded22e1653f5c45103758c68be344fdccae42b4683087da083a3841b92fb79856798eaee793c04cd95ae556d9616684da17e47bd2f775d8128f94b80b781e4cab4921b12c620721cf719ca72d3997cea829fd29b429282b597d5719c13423cdf7bd717fa12a56b8eddcf7b1ad2796c4ad078ab3a9bd944a694aa4b0078ed160440dd3db13dd1d04a7aaaa4dc016a95bd1cfafcd833ae933c627bf5512ae55c76069af7190823dba0133d6fe02e4421d3684ff2a2493da990a3cc5eed40a9e8c48c7a89a2f47030d45c324a3d78b941e772e24b285af6739ae1f5953ff838edaa69e79939f55d0fe00cd0e3a20a46db3a232009eabc800711342f7e580ba909f16c2039d4900fd4025845a385641a6037ceb6420fe7d37868e8c06e6146eddec9e6cb97e71048da5fa5898dac08152516ea1c6729e85d31596cd226aa218ce693989efb9fa8b05404bcc2debbc75c429a03fe31bfc49f10d595b898436ff6b02fc01d745b91280f26ae94a4969ce7f86c12e6b562c7b5377e3fb3247a8cda11a930c2a9e80f24966925de01afad5987ebee9c3de1d41667c6dc35cebbbc963f263c700d06a647ab7020385e3a7e30406f3e7a9b3142d39e0439c98948134d11166b621dfd3ea9d3a84d985b2aa7732b7ad9beba44334dd86292b0c94befb2cb8aa72a823129cb", "123" }, {NULL} }; static struct fmt_tests tests_all[] = { {"truecrypt_SHA_512$aa582afe64197a3cfd4faf7697673e5e14414369da3f716400414f63f75447da7d3abdc65a25ea511b1772d67370d6c349d8000de66d65861403093fecfb85719e1d46158d24324e5a2c0ee598214b1b2e7eac761dbde8cb85bcb33f293df7f30c9e44a3fa97bf1c70e9986677855873fa2435d9154ccaed8f28d68f16b10adcce7032d7c1742d322739d02c05457859abdaa176faa95c674d2a1092c30832dd2afd9a319599b4d1db92ffe6e48b3b29e566d5c51af091839699f5ad1715730fef24e94e39a6f40770b8320e30bf972d810b588af88ce3450337adbec0a10255b20230bcfca93aa5a0a6592cd6038312181c0792c59ec9e5d95a6216497d39ae28131869b89368e82371718970bf9750a7114c83d87b1b0cd16b6e8d41c4925d15ec26107e92847ec1bb73363ca10f3ad62afa8b0f95ff13cdbe217a1e8a74508ef439ed2140b26d5538b8d011a0d1e469f2a6962e56964adc75b90d9c6a16e88ad0adb59a337f8abb3f9d76f7f9acad22853e9dbbce13a4f686c6a802243b0901972af3c6928511609ac7b957b352452c4347acd563a72faa86a46522942fdc57f32d48c5148a2bb0bc2c3dbc9851385f816f2ece958957082c0a8fe69f647be675d87fcb8244912abc277a3242ee17e1d522f85598417559cb3a9f60b755e5b613069cb54c05a4c5d2fbd3ca6ba793320aeb0e109f8b21852daf2d9ed74dd9", "password"}, {"truecrypt_SHA_512$73f6b08614dc4ffbd77d27a0815b0700d6b612f573ccd6c8937e8d154321e3c1c1c67dd348d4d3bc8304e94a3a6ec0c672de8396a9a6b26b12393195b7daa4225a9d3a134229be011f8179791bb00c31b5c132c8dbad5a6f8738487477c409b3c32d90b07be8d7a3a9faa95d37ab6faccc459d47f029e25adcea48cee83eaa35b7acc3f849717000421d92ac46e6f16ec3dccacd3ffae76a48280977d2a6727027d9d6ff9c4c98405359ee382f6dd1eca0d7007cbe804b81485c1085e74b58d3eb1e3c7ebdc1e1ab1384e4440ab6ca7beed7e0ef7d1e0da5ffc3cd89f7b6ac8a9257ee369d397ac1e112f75382ddbe6f7317ec20c46cb7b2111d0d91570e90b4c01a0b8205fcdf4d0cadcf4a067b8f285a541f1d649894fb3ade29a2ee0575524455d489c299dde215bea3254f7d43aa4e4011a39bdb6e7473bc29f588e659fdbf065cc4a336ba42f2b6c07479cf3e544978150fb013da7db22afcb4f8384e39e2edfa30a4cbe5e84a07c54ba66663bb9284836cc5a8ba7489d3f7f92aec6d9f4e264c90c2af6181082bd273197bc42c325cb1de31006dd55425e3f210d2ddd7973978eec865d3226bb1e30a9897146d90d79a73070e87f0182981ea85f15f948ae1958af7704fabecd6f07e20be70be9f9c38a5c5e5c8b17be648f011b2c40f62d6ac51de932add5bdb47bb428fd510b004a7aa79321b03ed7aa202be439fbf", "password" }, 
{TAG_SHA512"cfd9e5757da139b32d117cd60f86f649400615dc218981106dfadd44598599a7ec0ace42de61506fe8d81b5c885861cdb26e0c38cb9adfcff27ba88872220ccd0914d4fa44bab5a708fe6864e0f665ac71d87e7e97b3724d610cf1f6ec09fa99da40126f63868654fed3381eaa8176f689e8e292c3cb68e43601d5804bc2e19d86722c21d42204e158b26b720e7b8f7580edce15469195dd7ed711b0fcb6c8abc253d0fd93cc784d5279de527fbdcfb357780635a5c363b773b55957d7efb472f6e6012489a9f0d225573446e5251cfb277a1365eed787e0da52f02d835667d74cc41fa4002cc35ad1ce276fbf9d73d6553ac0f8ab6961901d292a66df814a2cbda1b41f29aeec88ed15e7d37fe84ac5306b5a1b8d2e1f2c132e5c7d40ca7bb76d4ff87980ca4d75eaac5066b3ed50b53259554b9f922f7cee8e91847359d06e448da02cbeeecc78ca9bee2899a33dfa04a478ca131d33c64d6de5f81b219f11bed6ff3c0d56f26b3a27c79e7c55b6f76567a612166ce71028e3d3ae7e5abd25faec5e2e9dc30719baa2c138e26d6f8e3799a72b5e7b1c2a07c12cea452073b72f6e429bb17dd23fe3934c9e406bb4060083f92aa100c2e82ca40664f65c02cbc800c5696659f8df84db17edb92de5d4f1ca9e5fe71844e1e8c4f8b19ce7362fb3ca5467bf65122067c53f011648a6663894b315e6c5c635bec5bd39da028041", "123" }, {"truecrypt_RIPEMD_160$b9f118f89d2699cbe42cad7bc2c61b0822b3d6e57e8d43e79f55666aa30572676c3aced5f0900af223e9fcdf43ac39637640977f546eb714475f8e2dbf5368bfb80a671d7796d4a88c36594acd07081b7ef0fbead3d3a0ff2b295e9488a5a2747ed97905436c28c636f408b36b0898aad3c4e9566182bd55f80e97a55ad9cf20899599fb775f314067c9f7e6153b9544bfbcffb53eef5a34b515e38f186a2ddcc7cd3aed635a1fb4aab98b82d57341ec6ae52ad72e43f41aa251717082d0858bf2ccc69a7ca00daceb5b325841d70bb2216e1f0d4dc936b9f50ebf92dbe2abec9bc3babea7a4357fa74a7b2bcce542044552bbc0135ae35568526e9bd2afde0fa4969d6dc680cf96f7d82ec0a75b6170c94e3f2b6fd98f2e6f01db08ce63f1b6bcf5ea380ed6f927a5a8ced7995d83ea8e9c49238e8523d63d6b669ae0d165b94f1e19b49922b4748798129eed9aa2dae0d2798adabf35dc4cc30b25851a3469a9ee0877775abca26374a4176f8d237f8191fcc870f413ffdbfa73ee22790a548025c4fcafd40f631508f1f6c8d4c847e409c839d21ff146f469feff87198bc184db4b5c5a77f3402f491538503f68e0116dac76344b762627ad678de76cb768779f8f1c35338dd9f72dcc1ac337319b0e21551b9feb85f8cac67a2f35f305a39037bf96cd61869bf1761abcce644598dad254990d17f0faa4965926acb75abf", "password" }, {TAG_RIPEMD160"6ab053e5ebee8c56bce5705fb1e03bf8cf99e2930232e525befe1e45063aa2e30981585020a967a1c45520543847cdb281557e16c81cea9d329b666e232eeb008dbe3e1f1a181f69f073f0f314bc17e255d42aaa1dbab92231a4fb62d100f6930bae4ccf6726680554dea3e2419fb67230c186f6af2c8b4525eb8ebb73d957b01b8a124b736e45f94160266bcfaeda16b351ec750d980250ebb76672578e9e3a104dde89611bce6ee32179f35073be9f1dee8da002559c6fab292ff3af657cf5a0d864a7844235aeac441afe55f69e51c7a7c06f7330a1c8babae2e6476e3a1d6fb3d4eb63694218e53e0483659aad21f20a70817b86ce56c2b27bae3017727ff26866a00e75f37e6c8091a28582bd202f30a5790f5a90792de010aebc0ed81e9743d00518419f32ce73a8d3f07e55830845fe21c64a8a748cbdca0c3bf512a4938e68a311004538619b65873880f13b2a9486f1292d5c77116509a64eb0a1bba7307f97d42e7cfa36d2b58b71393e04e7e3e328a7728197b8bcdef14cf3f7708cd233c58031c695da5f6b671cc5066323cc86bb3c6311535ad223a44abd4eec9077d70ab0f257de5706a3ff5c15e3bc2bde6496a8414bc6a5ed84fe9462b65efa866312e0699e47338e879ae512a66f3f36fc086d2595bbcff2e744dd1ec283ba8e91299e62e4b2392608dd950ede0c1f3d5b317b2870ead59efe096c054ea1", "123" }, 
{"truecrypt_WHIRLPOOL$5724ba89229d705010ec56af416b16155682a0cab9cf48ac5a5fdd2086c9a251ae4bbea6cfb8464321a789852f7812095b0e0c4c4f9c6d14ba7beedaf3484b375ac7bc97b43c3e74bf1a0c259b7ac8725d990d2ff31935ca3443f2ce8df59de86515da3e0f53f728882b71c5cc704df0c87c282a7413db446e9a2e516a144311dd25092eb0a2c5df0240d899708289fc7141abd8538fa5791d9f96c39129cce9fe8a6e58e84364e2f4acc32274147431cb2d2480b1b54bffee485acee0925852b8a6ee71d275f028b92e540be595448e5f1d78560a3b8ad209962dd5981d7ca98db9a678a588a9296157d44502cd78f9e32f022dddc9bc8111b5704ee39a9b56d30b89898ae340e90f2e6c73be6ac64de97e32fc2eed0b66dcd5c1553eeab3950cf851624a5a4439435a6fd5717fda6d5f939f4a902321341964c16bda8975752ba150fb9d858d8eaff2a2086cb50d30abff741ee20223b4223b1783f0ed537a609a081afed952395ef0b5de6883db66cbb5a8bac70f2f757c7b6e6bb5d863672820f0d3d61b262b2b6c2ca0dc8e7137851aa450da1c1d915e005bff0e849a89bf67693ef97f5c17bf8d07a18c562dc783274f9ec580f9519a6dd1429b66160ddb04549506ad616dd0695da144fa2ad270eac7163983e9036f1bde3c7634b8a246b8dcd518ce3e12b881c838fbce59a0cfdffa3b21447e3f28124f63549c3962", "password" }, {TAG_WHIRLPOOL"0650595770851981d70b088ff6ef4bf90573e08d03c8cac8b2dfded22e1653f5c45103758c68be344fdccae42b4683087da083a3841b92fb79856798eaee793c04cd95ae556d9616684da17e47bd2f775d8128f94b80b781e4cab4921b12c620721cf719ca72d3997cea829fd29b429282b597d5719c13423cdf7bd717fa12a56b8eddcf7b1ad2796c4ad078ab3a9bd944a694aa4b0078ed160440dd3db13dd1d04a7aaaa4dc016a95bd1cfafcd833ae933c627bf5512ae55c76069af7190823dba0133d6fe02e4421d3684ff2a2493da990a3cc5eed40a9e8c48c7a89a2f47030d45c324a3d78b941e772e24b285af6739ae1f5953ff838edaa69e79939f55d0fe00cd0e3a20a46db3a232009eabc800711342f7e580ba909f16c2039d4900fd4025845a385641a6037ceb6420fe7d37868e8c06e6146eddec9e6cb97e71048da5fa5898dac08152516ea1c6729e85d31596cd226aa218ce693989efb9fa8b05404bcc2debbc75c429a03fe31bfc49f10d595b898436ff6b02fc01d745b91280f26ae94a4969ce7f86c12e6b562c7b5377e3fb3247a8cda11a930c2a9e80f24966925de01afad5987ebee9c3de1d41667c6dc35cebbbc963f263c700d06a647ab7020385e3a7e30406f3e7a9b3142d39e0439c98948134d11166b621dfd3ea9d3a84d985b2aa7732b7ad9beba44334dd86292b0c94befb2cb8aa72a823129cb", "123" }, {NULL} }; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif key_buffer = mem_calloc_tiny(sizeof(*key_buffer) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); first_block_dec = mem_calloc_tiny(sizeof(*first_block_dec) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); } static char *prepare(char *split_fields[10], struct fmt_main *self) { return split_fields[1]; } static char* ms_split(char *ciphertext, int index, struct fmt_main *self) { static char out[MAX_CIPHERTEXT_LENGTH + 1]; int i; for(i = 0; ciphertext[i] && i < MAX_CIPHERTEXT_LENGTH; i++) out[i] = ciphertext[i]; out[i] = 0; return out; } static int valid(char* ciphertext, int pos) { unsigned int i; // Very small if(pos + 512*2 != strlen(ciphertext)) return 0; // Not hexadecimal characters for (i = 0; i < 512*2; i++) if (atoi16[ARCH_INDEX((ciphertext+pos)[i])] == 0x7F) return 0; return 1; } static int valid_ripemd160(char* ciphertext, struct fmt_main *self) { // Not a supported hashing if (strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN)) return 0; return valid(ciphertext, TAG_RIPEMD160_LEN); } static int valid_sha512(char* ciphertext, struct fmt_main *self) { // Not a supported hashing if (strncmp(ciphertext, TAG_SHA512, TAG_SHA512_LEN)) return 0; return valid(ciphertext, TAG_SHA512_LEN); } static 
int valid_whirlpool(char* ciphertext, struct fmt_main *self) { // Not a supported hashing if (strncmp(ciphertext, TAG_WHIRLPOOL, TAG_WHIRLPOOL_LEN)) return 0; return valid(ciphertext, TAG_WHIRLPOOL_LEN); } static int valid_truecrypt(char *ciphertext, struct fmt_main *self) { if (valid_sha512(ciphertext, self) || valid_ripemd160(ciphertext, self) || valid_whirlpool(ciphertext, self)) return 1; return 0; } static void set_salt(void *salt) { psalt = salt; } static void* get_salt(char *ciphertext) { static char buf[sizeof(struct cust_salt)+4]; struct cust_salt *s = (struct cust_salt *)mem_align(buf, 4); unsigned int i; s->num_iterations = 1000; s->loop_inc = 1; if (!strncmp(ciphertext, TAG_WHIRLPOOL, TAG_WHIRLPOOL_LEN)) { ciphertext += TAG_WHIRLPOOL_LEN; s->hash_type = IS_WHIRLPOOL; } else if (!strncmp(ciphertext, TAG_SHA512, TAG_SHA512_LEN)) { ciphertext += TAG_SHA512_LEN; s->hash_type = IS_SHA512; #if SSE_GROUP_SZ_SHA512 s->loop_inc = SSE_GROUP_SZ_SHA512; #endif } else if (!strncmp(ciphertext, TAG_RIPEMD160, TAG_RIPEMD160_LEN)) { ciphertext += TAG_RIPEMD160_LEN; s->hash_type = IS_RIPEMD160; s->num_iterations = 2000; } else { // should never get here! valid() should catch all lines that do not have the tags. fprintf(stderr, "Error, unknown type in truecrypt::get_salt(), [%s]\n", ciphertext); exit(0); } // Convert the hexadecimal salt in binary for(i = 0; i < 64; i++) s->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])]; for(; i < 512; i++) s->bin[i-64] = (atoi16[ARCH_INDEX(ciphertext[2*i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2*i+1])]; return s; } /*********************************************************************************************************** * we know first sector has Tweak value of 0. For this, we just AES a null 16 bytes, then do the XeX using * the results for our xor, then modular mult GF(2) that value for the next round. NOTE, len MUST * be an even multiple of 16 bytes. We do NOT handle CT stealing. But the way we use it in the TC format * we only decrypt 16 bytes, and later (if it looks 'good'), we decrypt the whole first sector (512-64 bytes) * both which are even 16 byte data. * This code has NOT been optimized. It was based on simple reference code that I could get my hands on. However, * 'mostly' we do a single limb AES-XTS which is just 2 AES, and the buffers xored (before and after). There is * no mulmod GF(2) logic done in that case. NOTE, there was NO noticable change in speed, from using original * oSSL EVP_AES_256_XTS vs this code, so this code is deemed 'good enough' for usage in this location. 
 ***********************************************************************************************************/
static void AES_256_XTS_first_sector(const unsigned char *double_key, unsigned char *out, const unsigned char *data, unsigned len) {
	unsigned char tweak[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
	unsigned char buf[16];
	int i, j, cnt;
	AES_KEY key1, key2;
	AES_set_decrypt_key(double_key, 256, &key1);
	AES_set_encrypt_key(&double_key[32], 256, &key2);

	// first aes tweak (we do it right over the tweak buffer)
	AES_encrypt(tweak, tweak, &key2);

	cnt = len/16;
	for (j=0;;) {
		for (i = 0; i < 16; ++i) buf[i] = data[i]^tweak[i];
		AES_decrypt(buf, out, &key1);
		for (i = 0; i < 16; ++i) out[i]^=tweak[i];
		++j;
		if (j == cnt)
			break;
		else {
			// multiply the tweak by x in GF(2^128) for the next block
			unsigned char Cin, Cout;
			unsigned x;
			Cin = 0;
			for (x = 0; x < 16; ++x) {
				Cout = (tweak[x] >> 7) & 1;
				tweak[x] = ((tweak[x] << 1) + Cin) & 0xFF;
				Cin = Cout;
			}
			if (Cout)
				tweak[0] ^= 135; //GF_128_FDBK;
		}
		data += 16;
		out += 16;
	}
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i, count = *pcount;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (i = 0; i < count; i += psalt->loop_inc) {
		unsigned char key[64];
		int j;
#if SSE_GROUP_SZ_SHA512
		unsigned char Keys[SSE_GROUP_SZ_SHA512][64];
		if (psalt->hash_type == IS_SHA512) {
			int lens[SSE_GROUP_SZ_SHA512];
			unsigned char *pin[SSE_GROUP_SZ_SHA512];
			union {
				unsigned char *pout[SSE_GROUP_SZ_SHA512];
				unsigned char *poutc;
			} x;
			for (j = 0; j < SSE_GROUP_SZ_SHA512; ++j) {
				lens[j] = strlen((char*)(key_buffer[i+j]));
				pin[j] = key_buffer[i+j];
				x.pout[j] = Keys[j];
			}
			pbkdf2_sha512_sse((const unsigned char **)pin, lens, psalt->salt, 64, psalt->num_iterations, &(x.poutc), sizeof(key), 0);
		}
#else
		if (psalt->hash_type == IS_SHA512)
			pbkdf2_sha512((const unsigned char*)key_buffer[i], strlen((char*)(key_buffer[i])), psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
#endif
		else if (psalt->hash_type == IS_RIPEMD160)
			pbkdf2_ripemd160(key_buffer[i], strlen((char*)(key_buffer[i])), psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
		else
			pbkdf2_whirlpool(key_buffer[i], strlen((char*)(key_buffer[i])), psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);

		for (j = 0; j < psalt->loop_inc; ++j) {
#if SSE_GROUP_SZ_SHA512
			if (psalt->hash_type == IS_SHA512)
				memcpy(key, Keys[j], sizeof(key));
#endif
			// Try to decrypt using AES
			AES_256_XTS_first_sector(key, first_block_dec[i+j], psalt->bin, 16);
		}
	}
	return count;
}

static int cmp_all(void* binary, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		if (!memcmp(first_block_dec[i], "TRUE", 4))
			return 1;
	}
	return 0;
}

static int cmp_one(void* binary, int index)
{
	if (!memcmp(first_block_dec[index], "TRUE", 4))
		return 1;
	return 0;
}

// Compare a big-endian crc32 byte string against a computed crc32, in a way
// that is safe for CPUs that cannot do unaligned loads.
// This function is not really speed critical.
// compare a BE string crc32 against a crc32, in a way that is safe for non-aligned CPUs.
// this function is not really speed critical.
static int cmp_crc32s(unsigned char *given_crc32, CRC32_t comp_crc32) {
	return given_crc32[0] == ((comp_crc32>>24)&0xFF) &&
	       given_crc32[1] == ((comp_crc32>>16)&0xFF) &&
	       given_crc32[2] == ((comp_crc32>> 8)&0xFF) &&
	       given_crc32[3] == ((comp_crc32>> 0)&0xFF);
}

static int cmp_exact(char *source, int idx)
{
#if 0
	if (!memcmp(first_block_dec[idx], "TRUE", 4) && !memcmp(&first_block_dec[idx][12], "\0\0\0\0", 4))
		return 1;
#else
	unsigned char key[64];
	unsigned char decr_header[512-64];
	CRC32_t check_sum;
#if DEBUG
	static int cnt;
	char fname[20];
	FILE *fp;
#endif

	if (psalt->hash_type == IS_SHA512)
		pbkdf2_sha512((const unsigned char*)key_buffer[idx], strlen((char*)(key_buffer[idx])), psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
	else if (psalt->hash_type == IS_RIPEMD160)
		pbkdf2_ripemd160((const unsigned char*)key_buffer[idx], strlen((char*)(key_buffer[idx])), psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);
	else
		pbkdf2_whirlpool((const unsigned char*)key_buffer[idx], strlen((char*)(key_buffer[idx])), psalt->salt, 64, psalt->num_iterations, key, sizeof(key), 0);

	// we have 448 bytes of header (the first 64 bytes were the unencrypted salt).
	// decrypt it and look for 3 items.
	AES_256_XTS_first_sector(key, decr_header, psalt->bin, 512-64);

	// the first item we look for is the constant string 'TRUE' in the first 4 bytes
	if (memcmp(decr_header, "TRUE", 4))
		return 0;

	// now we look for 2 crc values. The first is at offset 8. This provided
	// CRC should be the crc32 of the last 256 bytes of the buffer.
	CRC32_Init(&check_sum);
	CRC32_Update(&check_sum, &decr_header[256-64], 256);
	if (!cmp_crc32s(&decr_header[8], ~check_sum))
		return 0;

	// now we compute the crc of the first part of the buffer, up to 4 bytes less
	// than the start of that last 256 bytes (i.e. 188 bytes in total). Immediately
	// after the region we compute this crc32 over sits the 4-byte block we are
	// given to match against (of course, those 4 bytes are not part of the crc32).
	// That provided crc32 is the only 4 bytes of the header which are not placed
	// into 'some' CRC32 computation.
	CRC32_Init(&check_sum);
	CRC32_Update(&check_sum, decr_header, 256-64-4);
	if (!cmp_crc32s(&decr_header[256-64-4], ~check_sum))
		return 0;

#if DEBUG
	sprintf(fname, "tc_decr_header-%04d.dat", cnt++);
	fp = fopen(fname, "wb");
	fwrite(decr_header, 1, 512-64, fp);
	fclose(fp);
#endif

	// Passed 96 bits of tests. This is the right password!
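	/*
	 * Added recap of the decrypted 448-byte header layout checked above
	 * (offsets relative to decr_header):
	 *   [0..3]     magic "TRUE"
	 *   [8..11]    big-endian CRC32 of bytes [192..447] (the last 256 bytes)
	 *   [188..191] big-endian CRC32 of bytes [0..187]
	 * 4 magic bytes plus two 32-bit CRCs give the "96 bits of tests" noted here.
	 */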
return 1; #endif return 0; } static void set_key(char* key, int index) { strcpy((char*)(key_buffer[index]), key); } static char *get_key(int index) { return (char*)(key_buffer[index]); } static int salt_hash(void *salt) { unsigned v=0, i; struct cust_salt *psalt = (struct cust_salt *)salt; for (i = 0; i < 64; ++i) { v *= 11; v += psalt->salt[i]; } return v & (SALT_HASH_SIZE - 1); } #if FMT_MAIN_VERSION > 11 static unsigned int tc_hash_algorithm(void *salt) { return (unsigned int)((struct cust_salt*)salt)->hash_type; } #endif struct fmt_main fmt_truecrypt = { { "tc_aes_xts", // FORMAT_LABEL "TrueCrypt (RIPEMD160/SHA512/WHIRLPOOL) AES256_XTS", // FORMAT_NAME #if SSE_GROUP_SZ_SHA512 SHA512_ALGORITHM_NAME, // ALGORITHM_NAME, #else #if ARCH_BITS >= 64 "64/" ARCH_BITS_STR, // ALGORITHM_NAME, #else "32/" ARCH_BITS_STR, // ALGORITHM_NAME, #endif #endif "", // BENCHMARK_COMMENT -1, // BENCHMARK_LENGTH PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { "hash algorithm [1:SHA512 2:RIPEMD160 3:Whirlpool]", }, #endif tests_all }, { init, fmt_default_done, fmt_default_reset, prepare, valid_truecrypt, ms_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { tc_hash_algorithm, }, #endif fmt_default_source, { fmt_default_binary_hash }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_truecrypt_ripemd160 = { { "tc_ripemd160", // FORMAT_LABEL "TrueCrypt RIPEMD160 AES256_XTS", // FORMAT_NAME "32/" ARCH_BITS_STR, // ALGORITHM_NAME, "", // BENCHMARK_COMMENT -1, // BENCHMARK_LENGTH PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests_ripemd160 }, { init, fmt_default_done, fmt_default_reset, prepare, valid_ripemd160, ms_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_truecrypt_sha512 = { { "tc_sha512", // FORMAT_LABEL "TrueCrypt SHA512 AES256_XTS", // FORMAT_NAME #if SSE_GROUP_SZ_SHA512 SHA512_ALGORITHM_NAME, // ALGORITHM_NAME, #else #if ARCH_BITS >= 64 "64/" ARCH_BITS_STR, // ALGORITHM_NAME, #else "32/" ARCH_BITS_STR, // ALGORITHM_NAME, #endif #endif "", // BENCHMARK_COMMENT -1, // BENCHMARK_LENGTH PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #if SSE_GROUP_SZ_SHA512 SSE_GROUP_SZ_SHA512, SSE_GROUP_SZ_SHA512, #else MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #endif FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests_sha512 }, { init, fmt_default_done, fmt_default_reset, prepare, valid_sha512, ms_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; struct fmt_main fmt_truecrypt_whirlpool = { { "tc_whirlpool", // FORMAT_LABEL "TrueCrypt WHIRLPOOL AES256_XTS", // FORMAT_NAME #if ARCH_BITS >= 64 "64/" ARCH_BITS_STR, // ALGORITHM_NAME, #else "32/" ARCH_BITS_STR, // ALGORITHM_NAME, #endif "", // BENCHMARK_COMMENT -1, // BENCHMARK_LENGTH PLAINTEXT_LENGTH, BINARY_SIZE, 
BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests_whirlpool }, { init, fmt_default_done, fmt_default_reset, prepare, valid_whirlpool, ms_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
repartition.h
#ifndef __REPARTITION_H__
#define __REPARTITION_H__

#include <cstdlib>
#include <cmath>
#include <omp.h>
#include "ompUtils.h"
#include "direct_knn.h"
#include <mpi.h>
#include <map>
#include <queue>
#include <vector>

namespace knn{
namespace repartition {

	void Collect_Query_Results(long *query_ids, int *neighbors_per_point, int *neighbor_ids, int nPts, MPI_Comm comm);

	/**
	 * Assigns each cluster to a group so that group loads stay balanced, and counts how many
	 * local points must be sent to each group.
	 * \param totalClusterSize An array of size numClusters containing the global size of each cluster.
	 * \param numClusters Number of clusters.
	 * \param clusterMembership An array of size n containing the cluster id of each local point.
	 * \param n The number of local points.
	 * \param num_of_groups The number of groups to distribute the clusters over.
	 * \param group_id_per_point [out] An array of size n; the group assigned to each local point.
	 * \param group_id_per_cluster [out] An array of size numClusters; the group assigned to each cluster.
	 * \param send_count [out] A pointer to an (internally allocated) array of size num_of_groups holding
	 *        the number of local points destined for each group.
	 * \param comm The MPI communicator to use.
	 */
	template<typename cSizeType, typename cMembType>
	void redistribute(cSizeType *totalClusterSize, int numClusters, cMembType *clusterMembership, int n,
	                  int num_of_groups, int *group_id_per_point, int *group_id_per_cluster,
	                  int **send_count, MPI_Comm comm)
	{
		int nproc, rank;
		MPI_Comm_size(comm, &nproc);
		MPI_Comm_rank(comm, &rank);

		//Priority queue for processes (min heap)
		std::priority_queue<std::pair<cSizeType, int>, std::vector<std::pair<cSizeType, int> >,
		                    std::greater<std::pair<cSizeType, int> > > process_queue;
		for( int i = 0; i < num_of_groups; i++ )
			process_queue.push( std::make_pair((cSizeType)0, i) );

		std::pair<cSizeType, int> *pClusters = new std::pair<cSizeType, int>[numClusters];
		#pragma omp parallel for schedule(static)
		for(int i = 0; i < numClusters; i++) {
			pClusters[i].first = totalClusterSize[i];
			pClusters[i].second = i;
		}
		omp_par::merge_sort(pClusters, &pClusters[numClusters], std::greater<std::pair<cSizeType,int> >());

		for(int i = 0; i < numClusters; i++) {
			std::pair<cSizeType, int> emptiestProcess;
			emptiestProcess.first = process_queue.top().first;
			emptiestProcess.second = process_queue.top().second;
			group_id_per_cluster[pClusters[i].second] = emptiestProcess.second;
			//Update queue
			emptiestProcess.first += pClusters[i].first;
			process_queue.pop();
			process_queue.push(emptiestProcess);
		}
		delete[] pClusters;

		*send_count = new int [num_of_groups];
		#pragma omp parallel for
		for(int i = 0; i < num_of_groups; i++)
			(*send_count)[i] = 0;
		#pragma omp parallel for
		for(int i = 0; i < n; i++)
			group_id_per_point[i] = group_id_per_cluster[ clusterMembership[i] ];
		for(int i = 0; i < n; i++)
			(*send_count)[ group_id_per_point[i] ]++;
	}
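	/*
	 * Added sketch (illustrative only, compiled out): the assignment loop above
	 * is the classic greedy bin-packing heuristic -- visit clusters from largest
	 * to smallest and give each one to the currently least-loaded group. The
	 * heap-free restatement below (hypothetical names) makes the invariant explicit.
	 */
#if 0
	static void greedy_assign(const long *sorted_desc_sizes, int num_clusters,
	                          long *group_load, int num_groups, int *group_of_cluster)
	{
		for (int c = 0; c < num_clusters; ++c) {
			int best = 0;
			for (int g = 1; g < num_groups; ++g)
				if (group_load[g] < group_load[best])
					best = g;          /* least-loaded group so far */
			group_of_cluster[c] = best;
			group_load[best] += sorted_desc_sizes[c];
		}
	}
#endif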
	/**
	 * Repartition query points according to the criterion
	 * dist(center, query) < clusterRadius + range.
	 * This code first duplicates query points if we need to search for knn
	 * on several different processors, and then uses the 'repartition'
	 * function to exchange data among processors.
	 */
	void query_repartition(long *queryIDs,        // in: query ids
	                       double *queryPts,      // in: query data
	                       double *centers,       // in: cluster centers
	                       long nPts,             // in: no. of query points
	                       long nClusters,        // in: no. of clusters
	                       int dim,               // in: dimensionality of data
	                       std::map<int, std::vector<int> > clusterPos, // in: defines which processor each cluster is on
	                       double *Radius,        // in: radius of each cluster [nClusters]
	                       double search_range,   // in: search knn within radius 'range'
	                       long **new_queryIDs,   // out: query ids after repartition (local)
	                       double **new_queryPts, // out: query points after repartition (local)
	                       long *new_nPts,        // out: no. of query points after repartition (local)
	                       MPI_Comm comm);

	/**
	 * Compute the radius of each cluster.
	 */
	void Calculate_Cluster_Radius(double * centers,       // in: cluster centers
	                              double * points,        // in: points
	                              int dim,                // in: dimensionality of each point
	                              int * localClusterSize, // in: indicates how many points each cluster has
	                              int numClusters,        // in: no. of clusters
	                              double ** Radius,       // out: radius of each cluster
	                              MPI_Comm comm);

	/**
	 * Redistributes data points and global ids to their respective new owners. Should be called after
	 * cluster_assign and local_rearrange.
	 * \param ids An array of size n containing the global identifiers of the corresponding points in data.
	 * \param data An array of size n*dim containing the initial data points stored on this node.
	 * \param n Number of data points (number of elements in data divided by dim)
	 * \param send_count An array of size nproc containing the number of points to send to each process
	 *        (obtained by calling cluster_assign).
	 * \param new_ids [out] A pointer to an (unallocated) array of new_n longs which will store the ids
	 *        after repartitioning.
	 * \param new_data [out] A pointer to an (unallocated) array of new_n*dim doubles which will store the
	 *        data points after repartitioning.
	 * \param new_n [out] The number of data points owned by this process after repartitioning.
	 * \param comm The MPI communicator to use.
	 */
	int repartition(long *ids, double *data, long n, int *send_count, int dim,
	                long **new_ids, double **new_data, long *new_n, MPI_Comm comm, int maxNewPoints = 0);

	/**
	 * Redistribute query points with individual search radii.
	 * \param ids An array of size n containing the global identifiers of the corresponding points in data.
	 * \param data An array of size n*dim containing the initial data points stored on this node.
	 * \param radii The search radii for the local input points.
	 * \param n Number of data points (number of elements in data divided by dim)
	 * \param send_count An array of size nproc containing the number of points to send to each process
	 *        (obtained by calling cluster_assign).
	 * \param new_ids [out] A pointer to an (unallocated) array of new_n longs which will store the ids
	 *        after repartitioning.
	 * \param new_data [out] A pointer to an (unallocated) array of new_n*dim doubles which will store the
	 *        data points after repartitioning.
	 * \param new_radii [out] The new local search radii after redistributing points (allocated internally).
	 * \param new_n [out] The number of data points owned by this process after repartitioning.
	 * \param comm The MPI communicator to use.
	 */
	void repartition(long *ids, double *data, double *radii, long n, int *send_count, int dim,
	                 long **new_ids, double **new_data, double **new_radii, long *new_n, MPI_Comm comm);
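	/*
	 * Added sketch (illustrative only, compiled out; not the library's actual
	 * implementation): the repartition() overloads take per-destination send
	 * counts, so the natural first step is learning the matching receive
	 * counts before moving ids and coordinates, e.g.:
	 */
#if 0
	static void exchange_counts(const int *send_count, int *recv_count, MPI_Comm comm)
	{
		/* every process tells every other how many points it will send to it */
		MPI_Alltoall((void*)send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);
	}
#endif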
	/**
	 * Redistributes data points, their global ids, and "secondary IDs" to their respective new owners.
	 * Should be called after cluster_assign and local_rearrange.
	 * \param ids An array of size n containing the global identifiers of the corresponding points in data.
	 * \param secondaryIDs An array of size n containing the secondary identifiers of the corresponding points in data.
	 * \param data An array of size n*dim containing the initial data points stored on this node.
	 * \param n Number of data points (number of elements in data divided by dim)
	 * \param send_count An array of size nproc containing the number of points to send to each process
	 *        (obtained by calling cluster_assign).
	 * \param new_ids [out] A pointer to an (unallocated) array of new_n longs which will store the ids
	 *        after repartitioning.
	 * \param new_secondaryIDs [out] A pointer to an (unallocated) array of new_n unsigned ints which will
	 *        store the secondary ids after repartitioning.
	 * \param new_data [out] A pointer to an (unallocated) array of new_n*dim doubles which will store the
	 *        data points after repartitioning.
	 * \param new_n [out] The number of data points owned by this process after repartitioning.
	 * \param comm The MPI communicator to use.
	 */
	void repartition(long *ids, unsigned int *secondaryIDs, double *data, long n, int *send_count, int dim,
	                 long **new_ids, unsigned int **new_secondaryIDs, double **new_data, long *new_n, MPI_Comm comm);

	/**
	 * Rearranges ids, cluster membership labels, and data points so that points sent to a given
	 * rank are contiguous and ordered by rank.
	 * \param ids A pointer to an array of size n containing the global ids of all locally stored points.
	 * \param clusterMembership A pointer to an array of size n containing the id of the cluster
	 *        that each point belongs to.
	 * \param data A pointer to an array of n*dim doubles containing the coordinates of the points.
	 * \param n The number of *local* points.
	 * \param dim The dimensionality of the data points.
	 */
	void local_rearrange(long **ids, int **clusterMembership, double **data, long n, int dim);

	/**
	 * Rearranges ids, cluster membership labels, and data points so that points sent to a given
	 * rank are contiguous and ordered by rank.
	 * \param ids A pointer to an array of size n containing the global ids of all locally stored points.
	 * \param clusterMembership A pointer to an array of size n containing the id of the cluster
	 *        that each point belongs to.
	 * \param data A pointer to an array of n*dim doubles containing the coordinates of the points.
	 * \param n The number of *local* points.
	 * \param dim The dimensionality of the data points.
	 */
	void local_rearrange(long **ids, unsigned int **clusterMembership, double **data, long n, int dim);

	/**
	 * Rearranges ids, cluster membership labels, and data points so that points sent to a given
	 * rank are contiguous and ordered by rank. A secondary identifier (for various uses) is associated
	 * with each point and rearranged accordingly.
	 * \param ids A pointer to an array of size n containing the global ids of all locally stored points.
	 * \param clusterMembership A pointer to an array of size n containing the id of the cluster
	 *        that each point belongs to.
	 * \param secondaryIDs An array of n "secondary identifiers" for each point (e.g., hash keys).
	 * \param data A pointer to an array of n*dim doubles containing the coordinates of the points.
	 * \param n The number of *local* points.
	 * \param dim The dimensionality of the data points.
	 */
	void local_rearrange(long **ids, int **clusterMembership, unsigned int **secondaryIDs, double **data, long n, int dim);

	/**
	 * Rearranges ids, projection values, and data points so that points sent to a given
	 * rank are contiguous and ordered by rank.
	 * \param ids A pointer to an array of size n containing the global ids of all locally stored points.
	 * \param clusterMembership A pointer to an array of size n containing the projection value
	 *        assigned to each point.
	 * \param data A pointer to an array of n*dim doubles containing the coordinates of the points.
	 * \param n The number of *local* points.
	 * \param dim The dimensionality of the data points.
	 */
	void local_rearrange(long **ids, double **clusterMembership, double **data, long n, int dim);

	void pre_all2all(long *ids, int *membership, double *data, long n, int dim);

	void rearrange_data( int *clusterMembership, double *data, long n, int dim, double *re_data);

	/**
	 * Calculates how many points this process will send to each process in comm.
	 * \param totalClusterSize An array of length numClusters which contains the number of *global*
	 *        points assigned to each cluster.
	 * \param numClusters The total number of clusters.
	 * \param clusterMembership An array of length n containing the id of the cluster to which
	 *        each point belongs.
	 * \param n The number of locally stored points.
	 * \param comm The MPI communicator that contains all processes that have points.
	 * \return An array of length equal to the size of comm containing the number of points to
	 *         send to each rank.
	 */
	int* calculate_send_counts(int *totalClusterSize, int numClusters, int *clusterMembership,
	                           int **clusterLocations, long n, MPI_Comm comm);

	/**
	 * Move points to make the workload balanced in comm.
	 * \param points An array of length 2*dim*n/p used to store points.
	 * \param gids An array of length 2*n/p used to store global ids.
	 * \param numof_points The number of locally stored points.
	 * \param dim The dimensionality of the points.
	 * \param n_over_p The average number of points per process (n/p).
	 * \param new_numof_points [out] The number of local points after load balancing.
	 * \param comm The MPI communicator that contains all processes that have points.
	 */
	void loadBalance(double *points, long *gids,
	                 int numof_points, int dim, int n_over_p,
	                 //output
	                 int &new_numof_points,
	                 MPI_Comm comm);

	/**
	 * Exchanges points between a pair of processes.
	 * \param partner_rank Rank of this process's communication partner in comm.
	 * \param maxpts The maximum number of points allowed per node.
	 * \param n_send Number of points to send to partner.
	 * \param dim Dimension of points.
	 * \param point_sendbuf Send buffer for data points.
	 * \param id_sendbuf Send buffer for point IDs.
	 * \param point_recvbuf Receive buffer for data points.
	 * \param id_recvbuf Receive buffer for point IDs.
	 * \param comm MPI communicator.
	 * \return The number of points received from partner.
	 */
	int pairwise_exchange( int partner_rank, int maxpts, int n_send, int dim,
	                       double *point_sendbuf, long *id_sendbuf,
	                       double *point_recvbuf, long *id_recvbuf, MPI_Comm comm );

	/**
	 * Repartition points for a binTree node split; points must already be correctly ordered (all left
	 * child points must be at the beginning of the array).
	 * \param ids [inout] An array of size n containing the global identifiers of the corresponding points
	 *        in data; must have at least 2*maxpoints space allocated.
	 * \param data [inout] An array of size n*dim containing the initial data points stored on this node;
	 *        must have at least 2*maxpoints*dim space allocated.
	 * \param nLocal The number of points stored locally.
	 * \param maxpts The maximum number of points allowed per node.
	 * \param child_tag An array of length nLocal indicating to which child each point belongs (0=left, 1=right).
	 * \param dim The dimensionality of the data points.
	 * \param comm The MPI communicator to use.
	 * \return The new number of points stored on this process.
*/ int tree_repartition(long *ids, double *data, int nLocal, int maxpts, int *child_tag, int dim, MPI_Comm comm); int tree_repartition_arbitraryN(std::vector<long> &ids, std::vector<double> &data, int nLocal, int *child_tag, int *rank_colors, int dim, MPI_Comm comm); void loadBalance_arbitraryN(std::vector<double> &points, std::vector<long> &gids, int numof_points, int dim, //output int &new_numof_points, MPI_Comm comm); } } #endif
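/*
 * Added usage sketch (hypothetical driver, for orientation only): the call
 * order implied by the comments above -- assign clusters to groups, rearrange
 * local points by destination rank, then exchange across the communicator.
 *
 *   int *send_count = NULL;
 *   knn::repartition::redistribute(totalClusterSize, numClusters,
 *                                  clusterMembership, n, nproc,
 *                                  group_id_per_point, group_id_per_cluster,
 *                                  &send_count, comm);
 *   knn::repartition::local_rearrange(&ids, &clusterMembership, &data, n, dim);
 *   knn::repartition::repartition(ids, data, n, send_count, dim,
 *                                 &new_ids, &new_data, &new_n, comm);
 */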
deconvolution_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { v4f32 _sum = (v4f32)__msa_fill_w(0); if (bias_data_ptr) { _sum = (v4f32)__msa_ld_w((const float*)bias_data_ptr + p * 4, 0); } const float* kptr = (const float*)weight_data_pack4.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float* sptr = m.row(sy) + sx * 4; int k = (y * kernel_w + x) * 16; v4f32 _val0 = (v4f32)__msa_fill_w_f32(*sptr++); v4f32 _val1 = (v4f32)__msa_fill_w_f32(*sptr++); v4f32 _val2 = (v4f32)__msa_fill_w_f32(*sptr++); v4f32 _val3 = (v4f32)__msa_fill_w_f32(*sptr++); v4f32 _w0 = (v4f32)__msa_ld_w(kptr + k, 0); v4f32 _w1 = (v4f32)__msa_ld_w(kptr + k + 4, 0); v4f32 _w2 = (v4f32)__msa_ld_w(kptr + k + 8, 0); v4f32 _w3 = (v4f32)__msa_ld_w(kptr + k + 12, 0); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val0, _w0)); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val1, _w1)); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val2, _w2)); _sum = __msa_fadd_w(_sum, __msa_fmul_w(_val3, _w3)); } } kptr += maxk * 16; } _sum = activation_ps(_sum, activation_type, activation_params); __msa_st_w((v4i32)_sum, outptr + j * 4, 0); } outptr += outw * 4; } } }
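// Added sketch (scalar reference, illustrative only; plain C, pack=1, single
// input channel -- not part of ncnn): the gather-style coordinate mapping used
// by the MSA kernel above. An output pixel (i, j) accumulates input (sy, sx)
// for kernel tap (y, x) only when the stride divides the shifted coordinate
// exactly.
#if 0
static float deconv_ref_one(const float* in, int w, int h,
                            const float* k, int kernel_w, int kernel_h,
                            int dilation_w, int dilation_h,
                            int stride_w, int stride_h,
                            int i, int j, float bias)
{
    const int ext_w = dilation_w * (kernel_w - 1) + 1;
    const int ext_h = dilation_h * (kernel_h - 1) + 1;

    float sum = bias;
    for (int y = 0; y < kernel_h; y++)
    {
        int sys = i + y * dilation_h - (ext_h - 1);
        if (sys < 0 || sys % stride_h != 0)
            continue;
        int sy = sys / stride_h;
        if (sy >= h)
            continue;
        for (int x = 0; x < kernel_w; x++)
        {
            int sxs = j + x * dilation_w - (ext_w - 1);
            if (sxs < 0 || sxs % stride_w != 0)
                continue;
            int sx = sxs / stride_w;
            if (sx >= w)
                continue;
            sum += in[sy * w + sx] * k[y * kernel_w + x];
        }
    }
    return sum;
}
#endif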
guess.c
#include <stdint.h> #include <omp.h> #include "geometry.h" #include "mesh2geo.h" #include "phy.h" /* Set the initial guess */ void iguess(struct geometry *g) { uint32_t i; #pragma omp parallel for for(i = 0; i < g->n->sz; i++) { g->q->q[i * g->c->b + 0] = P; g->q->q[i * g->c->b + 1] = U; g->q->q[i * g->c->b + 2] = V; g->q->q[i * g->c->b + 3] = W; } }
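/* Added note (inferred from the indexing above): g->q->q holds g->c->b state
 * components per mesh node, interleaved per node, so component c of node i
 * lives at q[i * g->c->b + c]; slots 0..3 are seeded with the free-stream
 * values P, U, V, W defined in phy.h. */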
enhance.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%              EEEEE  N   N  H   H   AAA   N   N   CCCC  EEEEE                %
%              E      NN  N  H   H  A   A  NN  N  C      E                    %
%              EEE    N N N  HHHHH  AAAAA  N N N  C      EEE                  %
%              E      N  NN  H   H  A   A  N  NN  C      E                    %
%              EEEEE  N   N  H   H  A   A  N   N   CCCC  EEEEE                %
%                                                                             %
%                                                                             %
%                    MagickCore Image Enhancement Methods                     %
%                                                                             %
%                              Software Design                                %
%                                   Cristy                                    %
%                                  July 1992                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/xml-tree.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o G a m m a I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoGammaImage() extracts the 'mean' from the image and adjusts the image
%  to try to set its gamma appropriately.
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image)
%      MagickBooleanType AutoGammaImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set, all given channels are adjusted in the same way using the
%      mean average of those channels.
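%
%  (Added note) The gamma chosen below solves mean^(1/gamma) = 1/2 in
%  normalized intensity, i.e. gamma = log(mean*QuantumScale)/log(0.5), and is
%  applied through LevelImageChannel() with the black and white points left
%  at 0 and QuantumRange.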
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image)
{
  return(AutoGammaImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoGammaImageChannel(Image *image,
  const ChannelType channel)
{
  double
    gamma,
    mean,
    logmean,
    sans;

  MagickStatusType
    status;

  logmean=log(0.5);
  if ((channel & SyncChannels) != 0)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageChannelMean(image,channel,&mean,&sans,&image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      return(LevelImageChannel(image,channel,0.0,(double) QuantumRange,gamma));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  if ((channel & RedChannel) != 0)
    {
      (void) GetImageChannelMean(image,RedChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,RedChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & GreenChannel) != 0)
    {
      (void) GetImageChannelMean(image,GreenChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,GreenChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if ((channel & BlueChannel) != 0)
    {
      (void) GetImageChannelMean(image,BlueChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,BlueChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    {
      (void) GetImageChannelMean(image,OpacityChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,OpacityChannel,0.0,(double) QuantumRange,
        gamma);
    }
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      (void) GetImageChannelMean(image,IndexChannel,&mean,&sans,
        &image->exception);
      gamma=log(mean*QuantumScale)/logmean;
      status&=LevelImageChannel(image,IndexChannel,0.0,(double) QuantumRange,
        gamma);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image)
%      MagickBooleanType AutoLevelImageChannel(Image *image,
%        const ChannelType channel)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o channel: The channels to auto-level.  If the special 'SyncChannels'
%      flag is set, the min/max/mean value of all given channels is used for
%      all given channels in the same way.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image)
{
  return(AutoLevelImageChannel(image,DefaultChannels));
}

MagickExport MagickBooleanType AutoLevelImageChannel(Image *image,
  const ChannelType channel)
{
  /*
    Convenience method for a min/max histogram stretch.
  */
  return(MinMaxStretchImage(image,channel,0.0,0.0));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B r i g h t n e s s C o n t r a s t I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.
%  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast)
%      MagickBooleanType BrightnessContrastImageChannel(Image *image,
%        const ChannelType channel,const double brightness,
%        const double contrast)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast)
{
  MagickBooleanType
    status;

  status=BrightnessContrastImageChannel(image,DefaultChannels,brightness,
    contrast);
  return(status);
}

MagickExport MagickBooleanType BrightnessContrastImageChannel(Image *image,
  const ChannelType channel,const double brightness,const double contrast)
{
#define BrightnessContrastImageTag  "BrightnessContrast/Image"

  double
    alpha,
    intercept,
    coefficients[2],
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImageChannel(image,channel,PolynomialFunction,2,coefficients,
    &image->exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o l o r D e c i s i o n L i s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%    <ColorCorrection id="cc03345">
%          <SOPNode>
%               <Slope> 0.9 1.2 0.5 </Slope>
%               <Offset> 0.4 -0.5 0.6 </Offset>
%               <Power> 1.0 0.8 1.5 </Power>
%          </SOPNode>
%          <SATNode>
%               <Saturation> 0.85 </Saturation>
%          </SATNode>
%    </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slope, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MaxTextExtent]; ColorCorrection color_correction; const char *content, *p; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; PixelPacket *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); exception=(&image->exception); ccc=NewXMLTree((const char *) color_correction_collection,&image->exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MaxTextExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { 
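          /* added note: a single scalar <Saturation> value applies to all
             three channels in the SAT step below */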
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          (void) GetNextToken(p,&p,MaxTextExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  cdl_map=(PixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelPacket *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power)))));
    cdl_map[i].green=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power)))));
    cdl_map[i].blue=ClampToQuantum((MagickRealType) ScaleMapToQuantum((
      MagickRealType) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power)))));
  }
  if (image->storage_class == PseudoClass)
    {
      /*
        Apply transfer function to colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          luma;

        luma=0.212656*image->colormap[i].red+0.715158*image->colormap[i].green+
          0.072186*image->colormap[i].blue;
        image->colormap[i].red=ClampToQuantum(luma+color_correction.saturation*
          (cdl_map[ScaleQuantumToMap(image->colormap[i].red)].red-luma));
        image->colormap[i].green=ClampToQuantum(luma+
          color_correction.saturation*(cdl_map[ScaleQuantumToMap(
          image->colormap[i].green)].green-luma));
        image->colormap[i].blue=ClampToQuantum(luma+color_correction.saturation*
          (cdl_map[ScaleQuantumToMap(image->colormap[i].blue)].blue-luma));
      }
    }
  /*
    Apply transfer function to image.
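    (Added note) Each channel is first pushed through the precomputed SOP map
    above, out = (slope*in + offset)^power, and the SAT step then blends the
    result with the Rec.709 luma of the original pixel:
    out = luma + saturation*(sop(in) - luma).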
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.212656*GetPixelRed(q)+0.715158*GetPixelGreen(q)+
        0.072186*GetPixelBlue(q);
      SetPixelRed(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(q))].red-luma)));
      SetPixelGreen(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(q))].green-luma)));
      SetPixelBlue(q,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(q))].blue-luma)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelPacket *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image by using it as an
%  index to look up a replacement color value in a Color Look UP Table in the
%  form of an image.  The values are extracted along a diagonal of the CLUT
%  image so either a horizontal or vertical gradient image can be used.
%
%  Typically this is used to either re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel, it is assumed that that image is a simple
%  gray-scale image that will affect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or
%  a histogram adjustment of existing alpha channel values.  If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image)
%      MagickBooleanType ClutImageChannel(Image *image,
%        const ChannelType channel,Image *clut_image)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o channel: the channel.
% */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image) { return(ClutImageChannel(image,DefaultChannels,clut_image)); } MagickExport MagickBooleanType ClutImageChannel(Image *image, const ChannelType channel,const Image *clut_image) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket *clut_map; register ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); exception=(&image->exception); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace); clut_map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*clut_map)); if (clut_map == (MagickPixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireAuthenticCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetMagickPixelPacket(clut_image,clut_map+i); status=InterpolateMagickPixelPacket(clut_image,clut_view, UndefinedInterpolatePixel,(double) i*(clut_image->columns-adjust)/MaxMap, (double) i*(clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); GetMagickPixelPacket(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,q,indexes+x,&pixel); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampPixelRed(clut_map+ ScaleQuantumToMap(GetPixelRed(q)))); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampPixelGreen(clut_map+ ScaleQuantumToMap(GetPixelGreen(q)))); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampPixelBlue(clut_map+ ScaleQuantumToMap(GetPixelBlue(q)))); if ((channel & OpacityChannel) != 0) { if (clut_image->matte == MagickFalse) SetPixelAlpha(q,MagickPixelIntensityToQuantum(clut_map+ ScaleQuantumToMap((Quantum) GetPixelAlpha(q)))); else if (image->matte == MagickFalse) SetPixelOpacity(q,ClampPixelOpacity(clut_map+ ScaleQuantumToMap((Quantum) MagickPixelIntensity(&pixel)))); else SetPixelOpacity(q,ClampPixelOpacity( clut_map+ScaleQuantumToMap(GetPixelOpacity(q)))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum((clut_map+(ssize_t) GetPixelIndex(indexes+x))->index)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == 
      MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(MagickPixelPacket *) RelinquishMagickMemory(clut_map);
  if ((clut_image->matte != MagickFalse) && ((channel & OpacityChannel) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to MagickTrue to increase the
%  image contrast, otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
*/

static void Contrast(const int sign,Quantum *red,Quantum *green,Quantum *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become lighter.
  */
  assert(red != (Quantum *) NULL);
  assert(green != (Quantum *) NULL);
  assert(blue != (Quantum *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
        Contrast(sign,&image->colormap[i].red,&image->colormap[i].green,
          &image->colormap[i].blue);
    }
  /*
    Contrast enhance image.
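    (Added note) Contrast() above works in HSB space: with
    S(b) = (sin(MagickPI*(b-0.5))+1)/2, brightness moves halfway toward S(b)
    when sharpening (sign=+1) and halfway away from it when reducing
    (sign=-1), leaving 0, 1/2, and 1 fixed.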
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateContrastImage(image,sharpen,&image->exception); if (status != MagickFalse) return status; #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum blue, green, red; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); Contrast(sign,&red,&green,&blue); SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by `stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % `enhancement' is less harsh. % % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const char *levels) % MagickBooleanType ContrastStretchImageChannel(Image *image, % const size_t channel,const double black_point, % const double white_point) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_point: the black point. % % o white_point: the white point. % % o levels: Specify the levels where the black and white points have the % range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const char *levels) { double black_point, white_point; GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; /* Parse levels. 
  */
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) image->columns*image->rows;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  if ((flags & PercentValue) != 0)
    {
      black_point*=(double) image->columns*image->rows/100.0;
      white_point*=(double) image->columns*image->rows/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) image->columns*image->rows-black_point;
  status=ContrastStretchImageChannel(image,DefaultChannels,black_point,
    white_point);
  return(status);
}

MagickExport MagickBooleanType ContrastStretchImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point)
{
#define MaxRange(color)  ((MagickRealType) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    intensity;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    black,
    *histogram,
    white;

  QuantumPixelPacket
    *stretch_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception=(&image->exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT) && 0
  /* Call OpenCL version */
  status=AccelerateContrastStretchImageChannel(image,channel,black_point,
    white_point,&image->exception);
  if (status != MagickFalse)
    return status;
#endif
  histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  stretch_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*stretch_map));
  if ((histogram == (MagickPixelPacket *) NULL) ||
      (stretch_map == (QuantumPixelPacket *) NULL))
    {
      if (stretch_map != (QuantumPixelPacket *) NULL)
        stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map);
      if (histogram != (MagickPixelPacket *) NULL)
        histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
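    (Added note) The histogram below counts map-scaled values per channel, or
    per overall pixel intensity when SyncChannels is set; the black and white
    levels are then located by accumulating counts from each end of the
    histogram until black_point (resp. the pixel count minus white_point)
    pixels have been passed.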
*/ if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace); status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { Quantum intensity; intensity=ClampToQuantum(GetPixelIntensity(image,p)); histogram[ScaleQuantumToMap(intensity)].red++; histogram[ScaleQuantumToMap(intensity)].green++; histogram[ScaleQuantumToMap(intensity)].blue++; histogram[ScaleQuantumToMap(intensity)].index++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } /* Find the histogram boundaries by locating the black/white levels. */ black.red=0.0; white.red=MaxRange(QuantumRange); if ((channel & RedChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].red; if (intensity > black_point) break; } black.red=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].red; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.red=(MagickRealType) i; } black.green=0.0; white.green=MaxRange(QuantumRange); if ((channel & GreenChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].green; if (intensity > black_point) break; } black.green=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].green; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.green=(MagickRealType) i; } black.blue=0.0; white.blue=MaxRange(QuantumRange); if ((channel & BlueChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].blue; if (intensity > black_point) break; } black.blue=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].blue; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.blue=(MagickRealType) i; } black.opacity=0.0; white.opacity=MaxRange(QuantumRange); if ((channel & OpacityChannel) != 0) { intensity=0.0; for (i=0; i <= (ssize_t) MaxMap; i++) { intensity+=histogram[i].opacity; if (intensity > black_point) break; } black.opacity=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].opacity; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.opacity=(MagickRealType) i; } black.index=0.0; white.index=MaxRange(QuantumRange); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { intensity=0.0; for (i=0; i <= (ssize_t) 
MaxMap; i++) { intensity+=histogram[i].index; if (intensity > black_point) break; } black.index=(MagickRealType) i; intensity=0.0; for (i=(ssize_t) MaxMap; i != 0; i--) { intensity+=histogram[i].index; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white.index=(MagickRealType) i; } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*sizeof(*stretch_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & RedChannel) != 0) { if (i < (ssize_t) black.red) stretch_map[i].red=(Quantum) 0; else if (i > (ssize_t) white.red) stretch_map[i].red=QuantumRange; else if (black.red != white.red) stretch_map[i].red=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.red)/(white.red-black.red))); } if ((channel & GreenChannel) != 0) { if (i < (ssize_t) black.green) stretch_map[i].green=0; else if (i > (ssize_t) white.green) stretch_map[i].green=QuantumRange; else if (black.green != white.green) stretch_map[i].green=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.green)/(white.green-black.green))); } if ((channel & BlueChannel) != 0) { if (i < (ssize_t) black.blue) stretch_map[i].blue=0; else if (i > (ssize_t) white.blue) stretch_map[i].blue= QuantumRange; else if (black.blue != white.blue) stretch_map[i].blue=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.blue)/(white.blue-black.blue))); } if ((channel & OpacityChannel) != 0) { if (i < (ssize_t) black.opacity) stretch_map[i].opacity=0; else if (i > (ssize_t) white.opacity) stretch_map[i].opacity=QuantumRange; else if (black.opacity != white.opacity) stretch_map[i].opacity=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.opacity)/(white.opacity-black.opacity))); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (i < (ssize_t) black.index) stretch_map[i].index=0; else if (i > (ssize_t) white.index) stretch_map[i].index=QuantumRange; else if (black.index != white.index) stretch_map[i].index=ScaleMapToQuantum((MagickRealType) (MaxMap* (i-black.index)/(white.index-black.index))); } } /* Stretch the image. */ if (((channel & OpacityChannel) != 0) || (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))) image->storage_class=DirectClass; if (image->storage_class == PseudoClass) { /* Stretch colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) image->colormap[i].red=stretch_map[ ScaleQuantumToMap(image->colormap[i].red)].red; } if ((channel & GreenChannel) != 0) { if (black.green != white.green) image->colormap[i].green=stretch_map[ ScaleQuantumToMap(image->colormap[i].green)].green; } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) image->colormap[i].blue=stretch_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) image->colormap[i].opacity=stretch_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } } /* Stretch image. 
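    Each pixel is remapped through the linear ramp built above, i.e.
    stretch_map[i] = ScaleMapToQuantum(MaxMap*(i-black)/(white-black))
    for black <= i <= white, with values clamped to 0 below the black
    point and to QuantumRange above the white point.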
*/ status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { if (black.red != white.red) SetPixelRed(q,stretch_map[ ScaleQuantumToMap(GetPixelRed(q))].red); } if ((channel & GreenChannel) != 0) { if (black.green != white.green) SetPixelGreen(q,stretch_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); } if ((channel & BlueChannel) != 0) { if (black.blue != white.blue) SetPixelBlue(q,stretch_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); } if ((channel & OpacityChannel) != 0) { if (black.opacity != white.opacity) SetPixelOpacity(q,stretch_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { if (black.index != white.index) SetPixelIndex(indexes+x,stretch_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(QuantumPixelPacket *) RelinquishMagickMemory(stretch_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
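%
%  A minimal usage sketch (hypothetical variable names; error handling
%  elided):
%
%      ExceptionInfo *exception = AcquireExceptionInfo();
%      Image *enhanced = EnhanceImage(image, exception);
%      if (enhanced != (Image *) NULL)
%        {
%          image = DestroyImage(image);
%          image = enhanced;
%        }
%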
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelOpacity(r)+pixel.opacity)/2.0; \ distance=QuantumScale*((double) GetPixelOpacity(r)-pixel.opacity); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(r); \ aggregate.green+=(weight)*GetPixelGreen(r); \ aggregate.blue+=(weight)*GetPixelBlue(r); \ aggregate.opacity+=(weight)*GetPixelOpacity(r); \ total_weight+=(weight); \ } \ r++; #define EnhanceImageTag "Enhance/Image" CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns < 5) || (image->rows < 5)) return((Image *) NULL); enhance_image=CloneImage(image,0,0,MagickTrue,exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass) == MagickFalse) { InheritException(exception,&enhance_image->exception); enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. */ status=MagickTrue; progress=0; (void) memset(&zero,0,sizeof(zero)); image_view=AcquireAuthenticCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict q; register ssize_t x; /* Read another scan line. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; MagickPixelPacket aggregate; PixelPacket pixel; register const PixelPacket *magick_restrict r; /* Compute weighted average of target pixel color components. 
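        Each of the 25 pixels in the 5x5 window contributes its color scaled
        by the kernel weight below, but only when its mean-weighted squared
        color distance from the center pixel is under the 0.069 threshold;
        neighbors across an edge are thus left out of the average.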
*/ aggregate=zero; total_weight=0.0; r=p+2*(image->columns+4)+2; pixel=(*r); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { SetPixelRed(q,(aggregate.red+(total_weight/2)-1)/total_weight); SetPixelGreen(q,(aggregate.green+(total_weight/2)-1)/total_weight); SetPixelBlue(q,(aggregate.blue+(total_weight/2)-1)/total_weight); SetPixelOpacity(q,(aggregate.opacity+(total_weight/2)-1)/ total_weight); } p++; q++; } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image) % MagickBooleanType EqualizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType EqualizeImage(Image *image) { return(EqualizeImageChannel(image,DefaultChannels)); } MagickExport MagickBooleanType EqualizeImageChannel(Image *image, const ChannelType channel) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket black, *histogram, intensity, *map, white; QuantumPixelPacket *equalize_map; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); #if defined(MAGICKCORE_OPENCL_SUPPORT) /* Call OpenCL version */ status=AccelerateEqualizeImage(image,channel,&image->exception); if (status != MagickFalse) return status; #endif /* Allocate and initialize histogram arrays. 
*/ equalize_map=(QuantumPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*equalize_map)); histogram=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*histogram)); map=(MagickPixelPacket *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*map)); if ((equalize_map == (QuantumPixelPacket *) NULL) || (histogram == (MagickPixelPacket *) NULL) || (map == (MagickPixelPacket *) NULL)) { if (map != (MagickPixelPacket *) NULL) map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (histogram != (MagickPixelPacket *) NULL) histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); if (equalize_map != (QuantumPixelPacket *) NULL) equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory( equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); if ((channel & SyncChannels) != 0) for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))].red++; p++; } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) histogram[ScaleQuantumToMap(GetPixelRed(p))].red++; if ((channel & GreenChannel) != 0) histogram[ScaleQuantumToMap(GetPixelGreen(p))].green++; if ((channel & BlueChannel) != 0) histogram[ScaleQuantumToMap(GetPixelBlue(p))].blue++; if ((channel & OpacityChannel) != 0) histogram[ScaleQuantumToMap(GetPixelOpacity(p))].opacity++; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) histogram[ScaleQuantumToMap(GetPixelIndex(indexes+x))].index++; p++; } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. 
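    The running sums below form the cumulative distribution function of the
    histogram; equalize_map[i] then rescales map[i] between the first (black)
    and last (white) cumulative values, so equal pixel populations occupy
    equal output intervals.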
*/ (void) memset(&intensity,0,sizeof(intensity)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { intensity.red+=histogram[i].red; map[i]=intensity; continue; } if ((channel & RedChannel) != 0) intensity.red+=histogram[i].red; if ((channel & GreenChannel) != 0) intensity.green+=histogram[i].green; if ((channel & BlueChannel) != 0) intensity.blue+=histogram[i].blue; if ((channel & OpacityChannel) != 0) intensity.opacity+=histogram[i].opacity; if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) intensity.index+=histogram[i].index; map[i]=intensity; } black=map[0]; white=map[(int) MaxMap]; (void) memset(equalize_map,0,(MaxMap+1)*sizeof(*equalize_map)); for (i=0; i <= (ssize_t) MaxMap; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) equalize_map[i].red=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].red-black.red))/(white.red-black.red))); if (((channel & GreenChannel) != 0) && (white.green != black.green)) equalize_map[i].green=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].green-black.green))/(white.green-black.green))); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) equalize_map[i].blue=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].blue-black.blue))/(white.blue-black.blue))); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) equalize_map[i].opacity=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].opacity-black.opacity))/(white.opacity-black.opacity))); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) equalize_map[i].index=ScaleMapToQuantum((MagickRealType) ((MaxMap* (map[i].index-black.index))/(white.index-black.index))); } histogram=(MagickPixelPacket *) RelinquishMagickMemory(histogram); map=(MagickPixelPacket *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { /* Equalize colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].red; image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].red; image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].red; } continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) image->colormap[i].red=equalize_map[ ScaleQuantumToMap(image->colormap[i].red)].red; if (((channel & GreenChannel) != 0) && (white.green != black.green)) image->colormap[i].green=equalize_map[ ScaleQuantumToMap(image->colormap[i].green)].green; if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) image->colormap[i].blue=equalize_map[ ScaleQuantumToMap(image->colormap[i].blue)].blue; if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) image->colormap[i].opacity=equalize_map[ ScaleQuantumToMap(image->colormap[i].opacity)].opacity; } } /* Equalize image. 
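    When SyncChannels is set, the combined intensity histogram was
    accumulated in the red field above, so the red component of
    equalize_map is applied to every channel below.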
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & SyncChannels) != 0) { if (white.red != black.red) { SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].red); SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].red); SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].red); if (image->colorspace == CMYKColorspace) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].red); } q++; continue; } if (((channel & RedChannel) != 0) && (white.red != black.red)) SetPixelRed(q,equalize_map[ ScaleQuantumToMap(GetPixelRed(q))].red); if (((channel & GreenChannel) != 0) && (white.green != black.green)) SetPixelGreen(q,equalize_map[ ScaleQuantumToMap(GetPixelGreen(q))].green); if (((channel & BlueChannel) != 0) && (white.blue != black.blue)) SetPixelBlue(q,equalize_map[ ScaleQuantumToMap(GetPixelBlue(q))].blue); if (((channel & OpacityChannel) != 0) && (white.opacity != black.opacity)) SetPixelOpacity(q,equalize_map[ ScaleQuantumToMap(GetPixelOpacity(q))].opacity); if ((((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) && (white.index != black.index)) SetPixelIndex(indexes+x,equalize_map[ ScaleQuantumToMap(GetPixelIndex(indexes+x))].index); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(QuantumPixelPacket *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. % % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const char *level) % MagickBooleanType GammaImageChannel(Image *image, % const ChannelType channel,const double gamma) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o level: the image gamma as a string (e.g. 1.6,1.2,1.0). % % o gamma: the image gamma. 
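%
%  A minimal usage sketch (assumes an existing Image; rho, sigma, and xi in
%  the geometry string supply the red, green, and blue gammas):
%
%      (void) GammaImage(image, "1.8");          adjusts all channels
%      (void) GammaImage(image, "1.6,1.2,1.0");  per-channel gammas
%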
% */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const char *level) { GeometryInfo geometry_info; MagickPixelPacket gamma; MagickStatusType flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (level == (char *) NULL) return(MagickFalse); flags=ParseGeometry(level,&geometry_info); gamma.red=geometry_info.rho; gamma.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) gamma.green=gamma.red; gamma.blue=geometry_info.xi; if ((flags & XiValue) == 0) gamma.blue=gamma.red; if ((gamma.red == 1.0) && (gamma.green == 1.0) && (gamma.blue == 1.0)) return(MagickTrue); if ((gamma.red == gamma.green) && (gamma.green == gamma.blue)) status=GammaImageChannel(image,(ChannelType) (RedChannel | GreenChannel | BlueChannel),(double) gamma.red); else { status=GammaImageChannel(image,RedChannel,(double) gamma.red); status&=GammaImageChannel(image,GreenChannel,(double) gamma.green); status&=GammaImageChannel(image,BlueChannel,(double) gamma.blue); } return(status != 0 ? MagickTrue : MagickFalse); } MagickExport MagickBooleanType GammaImageChannel(Image *image, const ChannelType channel,const double gamma) { #define GammaImageTag "Gamma/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; register ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ClampToQuantum((MagickRealType) ScaleMapToQuantum(( MagickRealType) (MaxMap*pow((double) i/MaxMap, PerceptibleReciprocal(gamma))))); if (image->storage_class == PseudoClass) { /* Gamma-correct colormap. 
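        gamma_map, built above as ScaleMapToQuantum(MaxMap*pow(i/MaxMap,
        PerceptibleReciprocal(gamma))), serves the non-HDRI path; the HDRI
        path applies gamma_pow() directly, which leaves negative HDRI values
        unchanged.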
*/ for (i=0; i < (ssize_t) image->colors; i++) { #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((channel & RedChannel) != 0) image->colormap[i].red=gamma_map[ScaleQuantumToMap( image->colormap[i].red)]; if ((channel & GreenChannel) != 0) image->colormap[i].green=gamma_map[ScaleQuantumToMap( image->colormap[i].green)]; if ((channel & BlueChannel) != 0) image->colormap[i].blue=gamma_map[ScaleQuantumToMap( image->colormap[i].blue)]; if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=gamma_map[ScaleQuantumToMap( image->colormap[i].opacity)]; else image->colormap[i].opacity=QuantumRange-gamma_map[ ScaleQuantumToMap((Quantum) (QuantumRange- image->colormap[i].opacity))]; } #else if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].red,PerceptibleReciprocal(gamma)); if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].green,PerceptibleReciprocal(gamma)); if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].blue,PerceptibleReciprocal(gamma)); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) image->colormap[i].opacity=QuantumRange*gamma_pow(QuantumScale* image->colormap[i].opacity,PerceptibleReciprocal(gamma)); else image->colormap[i].opacity=QuantumRange-QuantumRange*gamma_pow( QuantumScale*(QuantumRange-image->colormap[i].opacity),1.0/ gamma); } #endif } } /* Gamma-correct image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((channel & SyncChannels) != 0) { SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,gamma_map[ScaleQuantumToMap(GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,gamma_map[ScaleQuantumToMap(GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,gamma_map[ScaleQuantumToMap(GetPixelBlue(q))]); if ((channel & OpacityChannel) != 0) { if (image->matte == MagickFalse) SetPixelOpacity(q,gamma_map[ScaleQuantumToMap( GetPixelOpacity(q))]); else SetPixelAlpha(q,gamma_map[ScaleQuantumToMap((Quantum) GetPixelAlpha(q))]); } } #else if ((channel & SyncChannels) != 0) { SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q), PerceptibleReciprocal(gamma))); SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*GetPixelGreen(q), PerceptibleReciprocal(gamma))); SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q), PerceptibleReciprocal(gamma))); } else { if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange*gamma_pow(QuantumScale*GetPixelRed(q), PerceptibleReciprocal(gamma))); if ((channel & GreenChannel) != 0) 
            SetPixelGreen(q,QuantumRange*gamma_pow(QuantumScale*
              GetPixelGreen(q),PerceptibleReciprocal(gamma)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,QuantumRange*gamma_pow(QuantumScale*GetPixelBlue(q),
              PerceptibleReciprocal(gamma)));
          if ((channel & OpacityChannel) != 0)
            {
              if (image->matte == MagickFalse)
                SetPixelOpacity(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelOpacity(q),PerceptibleReciprocal(gamma)));
              else
                SetPixelAlpha(q,QuantumRange*gamma_pow(QuantumScale*
                  GetPixelAlpha(q),PerceptibleReciprocal(gamma)));
            }
        }
#endif
      q++;
    }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace))
      for (x=0; x < (ssize_t) image->columns; x++)
        SetPixelIndex(indexes+x,gamma_map[ScaleQuantumToMap(
          GetPixelIndex(indexes+x))]);
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the colors in the reference image to gray.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Grayscale image.
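    Rec709Luma is the default intensity method (see the switch below).
    The luminance variants first decode the sRGB transfer function, so
    they operate on linear RGB and the result is assigned a linear gray
    colorspace.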
*/ /* call opencl version */ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,&image->exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace)); return(SetImageColorspace(image,GRAYColorspace)); } #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, intensity, red; red=(MagickRealType) q->red; green=(MagickRealType) q->green; blue=(MagickRealType) q->blue; intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/(3.0*QuantumRange)); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(q,ClampToQuantum(intensity)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace)); return(SetImageColorspace(image,GRAYColorspace)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a 
l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image) % MagickBooleanType HaldClutImageChannel(Image *image, % const ChannelType channel,Image *hald_image) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o channel: the channel. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image) { return(HaldClutImageChannel(image,DefaultChannels,hald_image)); } MagickExport MagickBooleanType HaldClutImageChannel(Image *image, const ChannelType channel,const Image *hald_image) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { MagickRealType x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Hald clut image. 
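    The loop below recovers the CLUT level from the image size, then, for
    each pixel, blends two interpolated lookups from adjacent green rows
    (weighted by the fractional green coordinate) in each of two adjacent
    blue slices (weighted by the fractional blue coordinate); the red
    fraction is handled by the in-image interpolation itself.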
*/
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetMagickPixelPacket(hald_image,&zero);
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,hald_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      area,
      offset;

    HaldInfo
      point;

    MagickPixelPacket
      pixel,
      pixel1,
      pixel2,
      pixel3,
      pixel4;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    pixel1=zero;
    pixel2=zero;
    pixel3=zero;
    pixel4=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      point.x=QuantumScale*(level-1.0)*GetPixelRed(q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(q);
      offset=(double) (point.x+level*floor(point.y)+cube_size*floor(point.z));
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),
        floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      area=point.y;
      if (hald_image->interpolate == NearestNeighborInterpolatePixel)
        area=(point.y < 0.5) ? 0.0 : 1.0;
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,area,&pixel3);
      offset+=cube_size;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset,width),floor(offset/width),
        &pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolateMagickPixelPacket(image,hald_view,
        UndefinedInterpolatePixel,fmod(offset+level,width),
        floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      MagickPixelCompositeAreaBlend(&pixel1,pixel1.opacity,&pixel2,
        pixel2.opacity,area,&pixel4);
      area=point.z;
      if (hald_image->interpolate == NearestNeighborInterpolatePixel)
        area=(point.z < 0.5) ?
          0.0 : 1.0;
      MagickPixelCompositeAreaBlend(&pixel3,pixel3.opacity,&pixel4,
        pixel4.opacity,area,&pixel);
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(pixel.index));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  The white point specifies the lightest color
%  in the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() and LevelizeImageChannel(), below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const char *levels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o levels: Specify the levels where the black and white points have the
%      range of 0-QuantumRange, and gamma has the range 0-10 (e.g. 10x90%+2).
%      A '!' flag inverts the re-mapping.
%
*/
MagickExport MagickBooleanType LevelImage(Image *image,const char *levels)
{
  double
    black_point,
    gamma,
    white_point;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickStatusType
    flags;

  /*
    Parse levels.
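    In the geometry string, rho is the black point, sigma the white point,
    and xi the gamma; a trailing percent sign scales the points as
    percentages of QuantumRange, and a '!' flag (AspectValue) selects the
    inverse LevelizeImage() mapping instead.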
*/
  if (levels == (char *) NULL)
    return(MagickFalse);
  flags=ParseGeometry(levels,&geometry_info);
  black_point=geometry_info.rho;
  white_point=(double) QuantumRange;
  if ((flags & SigmaValue) != 0)
    white_point=geometry_info.sigma;
  gamma=1.0;
  if ((flags & XiValue) != 0)
    gamma=geometry_info.xi;
  if ((flags & PercentValue) != 0)
    {
      black_point*=(double) QuantumRange/100.0;
      white_point*=(double) QuantumRange/100.0;
    }
  if ((flags & SigmaValue) == 0)
    white_point=(double) QuantumRange-black_point;
  if ((flags & AspectValue) == 0)
    status=LevelImageChannel(image,DefaultChannels,black_point,white_point,
      gamma);
  else
    status=LevelizeImage(image,black_point,white_point,gamma);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() applies the normal level operation to the image, spreading
%  out the values between the black and white points over the entire range of
%  values.  Gamma correction is also applied after the values have been
%  mapped.
%
%  It is typically used to improve image contrast, or to provide a controlled
%  linear threshold for the image.  If the black and white points are set to
%  the minimum and maximum values found in the image, the image can be
%  normalized; or, by swapping the black and white values, the image can be
%  negated.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma)
%      MagickBooleanType LevelImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level which is to be mapped to zero (black).
%
%    o white_point: The level which is to be mapped to QuantumRange (white).
%
%    o gamma: adjust gamma by this factor before mapping values.
%      Use 1.0 for purely linear stretching of image color values.
%
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const MagickRealType pixel)
{
  double
    level_pixel,
    scale;

  scale=PerceptibleReciprocal(white_point-black_point);
  level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point),1.0/
    gamma);
  return(level_pixel);
}

MagickExport MagickBooleanType LevelImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelImageTag  "Level/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
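        LevelPixel() maps a value v to QuantumRange*pow(scale*(v-black_point),
        1/gamma) with scale=1/(white_point-black_point), so entries outside
        the black/white range clamp to 0 or QuantumRange via ClampToQuantum().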
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].red));
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=(Quantum) ClampToQuantum(LevelPixel(
          black_point,white_point,gamma,(MagickRealType)
          image->colormap[i].green));
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=(Quantum) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) image->colormap[i].blue));
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-(Quantum)
          ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) (QuantumRange-image->colormap[i].opacity))));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelRed(q))));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelGreen(q))));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (MagickRealType) GetPixelBlue(q))));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,ClampToQuantum(LevelPixel(black_point,white_point,
          gamma,(MagickRealType) GetPixelAlpha(q))));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,(MagickRealType) GetPixelIndex(indexes+x))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  (void) ClampImage(image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l i z e I m a g e C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelizeImageChannel() applies the reversed LevelImage() operation to just
%  the specific channels specified.  It compresses the full range of color
%  values, so that they lie between the given black and white points.  Gamma
%  is applied before the values are mapped.
%
%  LevelizeImageChannel() can be invoked by using a +level command-line
%  option, or by using a '!' flag on a -level or LevelImage() geometry string.
%
%  It can be used, for example, to de-contrast a grayscale image to the exact
%  levels specified.
%  Or, by using specific levels for each channel of an image, you can convert
%  a grayscale image to any linear color gradient, according to those levels.
%
%  The format of the LevelizeImageChannel method is:
%
%      MagickBooleanType LevelizeImageChannel(Image *image,
%        const ChannelType channel,const double black_point,
%        const double white_point,const double gamma)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma)
{
  MagickBooleanType
    status;

  status=LevelizeImageChannel(image,DefaultChannels,black_point,white_point,
    gamma);
  return(status);
}

MagickExport MagickBooleanType LevelizeImageChannel(Image *image,
  const ChannelType channel,const double black_point,const double white_point,
  const double gamma)
{
#define LevelizeImageTag  "Levelize/Image"
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=LevelizeValue(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=LevelizeValue(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=LevelizeValue(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=(Quantum) (QuantumRange-LevelizeValue(
          QuantumRange-image->colormap[i].opacity));
    }
  /*
    Level image.
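    LevelizeValue() is the inverse mapping: each quantum x becomes
    pow(QuantumScale*x,gamma)*(white_point-black_point)+black_point, which
    compresses the full input range into the black/white interval.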
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,LevelizeValue(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,LevelizeValue(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,LevelizeValue(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        SetPixelAlpha(q,LevelizeValue(GetPixelAlpha(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,LevelizeValue(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelColorsImage() maps the given colors to "black" and "white" values,
%  linearly spreading out the colors, and level values, on a channel by
%  channel basis, as per LevelImage().  The given colors allow you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true, the image values will be modified in
%  the reverse direction.  That is, any existing "black" and "white" colors
%  in the image will become the color values given, with all other values
%  compressed appropriately.  This effectively maps a grayscale gradient into
%  the given color gradient.
%
%  The format of the LevelColorsImageChannel method is:
%
%      MagickBooleanType LevelColorsImage(Image *image,
%        const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%      MagickBooleanType LevelColorsImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *black_color,
%        const MagickPixelPacket *white_color,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o black_color: The color to map black to/from.
%
%    o white_color: The color to map white to/from.
%
%    o invert: if true, map the colors to the given levels (levelize) rather
%      than from them (level).
%
*/
MagickExport MagickBooleanType LevelColorsImage(Image *image,
  const MagickPixelPacket *black_color,const MagickPixelPacket *white_color,
  const MagickBooleanType invert)
{
  MagickBooleanType
    status;

  status=LevelColorsImageChannel(image,DefaultChannels,black_color,white_color,
    invert);
  return(status);
}

MagickExport MagickBooleanType LevelColorsImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *black_color,
  const MagickPixelPacket *white_color,const MagickBooleanType invert)
{
  MagickStatusType
    status;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace);
  status=MagickTrue;
  if (invert == MagickFalse)
    {
      if ((channel & RedChannel) != 0)
        status&=LevelImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  else
    {
      if ((channel & RedChannel) != 0)
        status&=LevelizeImageChannel(image,RedChannel,black_color->red,
          white_color->red,(double) 1.0);
      if ((channel & GreenChannel) != 0)
        status&=LevelizeImageChannel(image,GreenChannel,black_color->green,
          white_color->green,(double) 1.0);
      if ((channel & BlueChannel) != 0)
        status&=LevelizeImageChannel(image,BlueChannel,black_color->blue,
          white_color->blue,(double) 1.0);
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        status&=LevelizeImageChannel(image,OpacityChannel,black_color->opacity,
          white_color->opacity,(double) 1.0);
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        status&=LevelizeImageChannel(image,IndexChannel,black_color->index,
          white_color->index,(double) 1.0);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i n e a r S t r e t c h I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LinearStretchImage() discards any pixels below the black point and above
%  the white point and levels the remaining pixels.
%
%  The format of the LinearStretchImage method is:
%
%      MagickBooleanType LinearStretchImage(Image *image,
%        const double black_point,const double white_point)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: the black point.
%
%    o white_point: the white point.
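%
%  A minimal usage sketch (hypothetical count; the points are cumulative
%  pixel counts, not quantum values, so this clips about 1% of the darkest
%  and lightest pixels before leveling):
%
%      double count = 0.01*image->columns*image->rows;
%      (void) LinearStretchImage(image, count, count);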
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    *histogram,
    intensity;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  exception=(&image->exception);
  histogram=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL,
    sizeof(*histogram));
  if (histogram == (MagickRealType *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      histogram[ScaleQuantumToMap(ClampToQuantum(GetPixelIntensity(image,
        p)))]++;
      p++;
    }
  }
  /*
    Find the histogram boundaries by locating the black and white point levels.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(MagickRealType *) RelinquishMagickMemory(histogram);
  status=LevelImageChannel(image,DefaultChannels,(double)
    ScaleMapToQuantum(black),(double) ScaleMapToQuantum(white),1.0);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chroma, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and
%      hue.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,Quantum *red,
  Quantum *green,Quantum *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
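    The hue term is shifted by fmod(percent_hue-100,200)/200, so a percent
    of 100 leaves hue unchanged while 0 or 200 rotates it by a half turn;
    chroma and luma are simply scaled by percent/100.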
*/ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,Quantum *red, Quantum *green,Quantum *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,Quantum *red, Quantum *green,Quantum *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,Quantum *red, Quantum *green,Quantum *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,Quantum *red, Quantum *green,Quantum *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,Quantum *red, Quantum *green,Quantum *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,Quantum *red, Quantum *green,Quantum *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; ExceptionInfo *exception; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { Quantum blue, green, red; /* Modulate image colormap. */ red=image->colormap[i].red; green=image->colormap[i].green; blue=image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: case LCHColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ /* call opencl version */ #if defined(MAGICKCORE_OPENCL_SUPPORT) status=AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,&image->exception); if (status != MagickFalse) return status; #endif status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum blue, green, red; red=GetPixelRed(q); green=GetPixelGreen(q); blue=GetPixelBlue(q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(q,red); SetPixelGreen(q,green); SetPixelBlue(q,blue); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImageChannel method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale) % MagickBooleanType NegateImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType grayscale) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. 
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale) { MagickBooleanType status; status=NegateImageChannel(image,DefaultChannels,grayscale); return(status); } MagickExport MagickBooleanType NegateImageChannel(Image *image, const ChannelType channel,const MagickBooleanType grayscale) { #define NegateImageTag "Negate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { /* Negate colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((channel & RedChannel) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((channel & GreenChannel) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((channel & BlueChannel) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Negate image. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if (grayscale != MagickFalse) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRed(q) != GetPixelGreen(q)) || (GetPixelGreen(q) != GetPixelBlue(q))) { q++; continue; } if ((channel & RedChannel) != 0) SetPixelRed(q,QuantumRange-GetPixelRed(q)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,QuantumRange-GetPixelGreen(q)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,QuantumRange-GetPixelBlue(q)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,QuantumRange-GetPixelOpacity(q)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); if (channel == DefaultChannels) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); } else for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x)); if ((channel & GreenChannel) != 0) SetPixelGreen(q+x,QuantumRange-GetPixelGreen(q+x)); if ((channel & BlueChannel) != 0) SetPixelBlue(q+x,QuantumRange-GetPixelBlue(q+x)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q+x,QuantumRange-GetPixelOpacity(q+x)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,QuantumRange-GetPixelIndex(indexes+x)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image) % MagickBooleanType NormalizeImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % */ MagickExport MagickBooleanType NormalizeImage(Image *image) { MagickBooleanType status; status=NormalizeImageChannel(image,DefaultChannels); return(status); } MagickExport MagickBooleanType NormalizeImageChannel(Image *image, const ChannelType channel) { double black_point, white_point; black_point=(double) image->columns*image->rows*0.0015; white_point=(double) image->columns*image->rows*0.9995; return(ContrastStretchImageChannel(image,channel,black_point,white_point)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S i g m o i d a l C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SigmoidalContrastImage() adjusts the contrast of an image with a non-linear % sigmoidal contrast algorithm. Increase the contrast of the image using a % sigmoidal transfer function without saturating highlights or shadows. 
% Contrast indicates how much to increase the contrast (0 is none; 3 is % typical; 20 is pushing it); mid-point indicates where midtones fall in the % resultant image (0 is white; 50% is middle-gray; 100% is black). Set % sharpen to MagickTrue to increase the image contrast otherwise the contrast % is reduced. % % The format of the SigmoidalContrastImage method is: % % MagickBooleanType SigmoidalContrastImage(Image *image, % const MagickBooleanType sharpen,const char *levels) % MagickBooleanType SigmoidalContrastImageChannel(Image *image, % const ChannelType channel,const MagickBooleanType sharpen, % const double contrast,const double midpoint) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o sharpen: Increase or decrease image contrast. % % o contrast: strength of the contrast, the larger the number the more % 'threshold-like' it becomes. % % o midpoint: midpoint of the function as a color value 0 to QuantumRange. % */ /* ImageMagick 7 has a version of this function which does not use LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? 
-1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const char *levels) { GeometryInfo geometry_info; MagickBooleanType status; MagickStatusType flags; flags=ParseGeometry(levels,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=1.0*QuantumRange/2.0; if ((flags & PercentValue) != 0) geometry_info.sigma=1.0*QuantumRange*geometry_info.sigma/100.0; status=SigmoidalContrastImageChannel(image,DefaultChannels,sharpen, geometry_info.rho,geometry_info.sigma); return(status); } MagickExport MagickBooleanType SigmoidalContrastImageChannel(Image *image, const ChannelType channel,const MagickBooleanType sharpen, const double contrast,const double midpoint) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; MagickRealType *sigmoidal_map; register ssize_t i; ssize_t y; /* Side effect: clamps values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Allocate and initialize sigmoidal maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); sigmoidal_map=(MagickRealType *) AcquireQuantumMemory(MaxMap+1UL, sizeof(*sigmoidal_map)); if (sigmoidal_map == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(sigmoidal_map,0,(MaxMap+1)*sizeof(*sigmoidal_map)); if (sharpen != MagickFalse) for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) (MaxMap*ScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); else for (i=0; i <= (ssize_t) MaxMap; i++) sigmoidal_map[i]=(MagickRealType) ScaleMapToQuantum((MagickRealType) ( MaxMap*InverseScaledSigmoidal(contrast,QuantumScale*midpoint,(double) i/ MaxMap))); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].red)]); if ((channel & GreenChannel) != 0) image->colormap[i].green=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].green)]); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].blue)]); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ClampToQuantum(sigmoidal_map[ ScaleQuantumToMap(image->colormap[i].opacity)]); } /* Sigmoidal-contrast enhance image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelRed(q))])); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelGreen(q))])); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelBlue(q))])); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelOpacity(q))])); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(indexes+x,ClampToQuantum(sigmoidal_map[ScaleQuantumToMap( GetPixelIndex(indexes+x))])); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sigmoidal_map=(MagickRealType *) RelinquishMagickMemory(sigmoidal_map); return(status); }
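/*
  Illustrative sketch (not part of the library): evaluates the scaled
  sigmoidal transfer function defined above -- the tanh branch of the
  Sigmoidal()/ScaledSigmoidal() macros -- at a few sample points, showing
  that it maps 0 to 0 and 1 to 1 exactly and steepens around the midpoint.
  Guarded out so this translation unit is unchanged; contrast=3.0 and
  midpoint=0.5 are example values, not library defaults.
*/
#if 0
#include <math.h>
#include <stdio.h>

static double ExampleScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double s0 = tanh((0.5*a)*(0.0-b));
  const double s1 = tanh((0.5*a)*(1.0-b));
  const double sx = tanh((0.5*a)*(x-b));
  return((sx-s0)/(s1-s0));  /* 0 at x=0 and 1 at x=1 by construction */
}

int main(void)
{
  double x;
  for (x=0.0; x <= 1.0; x+=0.25)
    (void) printf("x=%.2f -> %.4f\n",x,ExampleScaledSigmoidal(3.0,0.5,x));
  return(0);
}
#endif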
sections.c
/* Minimal OpenMP sections test: two independent assignments executed as
   separate sections of one parallel region. */
#include <omp.h>
#define N 10
int main (int argc, char * argv[]){
	double a[N], b[N], c[N];
	int i;
	for(i=0; i<N; i++)
		a[i] = 0;
	for(i=0; i<N; i++)
		b[i] = 0;
	for(i=0; i<N; i++)
		c[i] = 0;
	#pragma omp parallel
	#pragma omp sections
	{
		#pragma omp section
		c[0]=1;	/* may execute on one thread... */
		#pragma omp section
		c[1]=1;	/* ...while this executes on another */
	}
	return 0;
}
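/*
 * Illustrative extension (an assumption, not part of the original test): the
 * same sections construct with per-section thread reporting, handy for
 * checking that the two sections may indeed run on different threads.
 * Compile with e.g. `gcc -fopenmp sections.c`.
 */
#if 0
#include <stdio.h>

void report_sections(double *c)
{
	#pragma omp parallel
	#pragma omp sections
	{
		#pragma omp section
		{ c[0] = 1; printf("section 0 on thread %d\n", omp_get_thread_num()); }
		#pragma omp section
		{ c[1] = 1; printf("section 1 on thread %d\n", omp_get_thread_num()); }
	}
}
#endif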
tgreed.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>
#include <unistd.h>
#include <limits.h>

#define TIME_RES 1000000 /* timer resolution: microseconds per second */
#define FLT_MAX 3.402823466e+38F /* max float value */

double initial_time;
double clear_cache [30000000];
float** matrix_distances;
int * destino;  /* destino[i] = city visited immediately after city i */
int cities;
int nodo; // current city, initialised with rand()

void clearCache (){
	int i;
	for(i=0; i<30000000;i++)
		clear_cache[i]=i;
}

void start(){
	double time= omp_get_wtime();
	initial_time= time* TIME_RES;
}

double stop(){
	double time = omp_get_wtime();
	double final = time * TIME_RES;
	return final - initial_time; /* elapsed time in microseconds */
}

// allocates and initialises the distance matrix and the successor array
void initialize_matrix(){
	int i,j;
	matrix_distances = (float**) malloc(sizeof(float*) * cities);
	destino = (int*) malloc(sizeof(int) * cities);
	for (i=0;i<cities;i++){
		destino[i] = -1;
		matrix_distances[i] = malloc(sizeof(float) * cities);
	}
	for(i=0;i<cities;i++){
		for(j=0;j<cities;j++){
			matrix_distances[i][j] = 0;
		}
	}
}

// computes the pairwise Euclidean distances between cities from their (x,y) positions
void find_distances(int* x, int* y, int n){
	int i,j;
	for(i=0;i<n;i++){
		for(j=0;j<n;j++){
			if(i!=j){
				matrix_distances[i][j] = sqrt(pow(x[i]-x[j],2) + pow(y[i]-y[j],2));
			}
		}
		//printf("Distance value: %f", matrix_distances[i][j]);
	}
}

// main routine: repeatedly extends the tour from the start city to the
// nearest not-yet-visited city (greedy nearest neighbour).  The outer loop is
// inherently sequential, so only the inner nearest-city search is
// parallelised: each thread keeps a private best candidate and the results
// are merged in a critical section.  (The original version ran the whole
// while loop in every thread, racing on min/pos/nodo/iter/count.)
void greedy(int num_proc){
	int i,j,pos = -1;
	float min = FLT_MAX;
	float count = 0;
	int iter = 0;
	i = nodo; // start city, chosen with rand() in main
	while(iter < (cities-1)){
		#pragma omp parallel num_threads(num_proc)
		{
			float local_min = FLT_MAX;
			int local_pos = -1;
			#pragma omp for nowait
			for(j=0;j<cities;j++){
				if(matrix_distances[nodo][j] < local_min && (matrix_distances[nodo][j] != 0) && (destino[j] == -1)){
					local_min = matrix_distances[nodo][j];
					local_pos = j;
				}
			}
			#pragma omp critical
			{
				if(local_min < min){
					min = local_min;
					pos = local_pos;
				}
			}
		}
		destino[nodo] = pos;
		printf("Visiting city: %d \n", nodo);
		nodo = pos;
		iter++;
		count += min;
		min = FLT_MAX;
	}
	// close the tour: any city left unassigned points back to the start city
	for(j=0;j<cities;j++){
		if(destino[j]<0){
			destino[j] = i;
			count += matrix_distances[j][i];
		}
	}
	printf("Total distance: %f\n", count);
}

int main(int argc, char** argv){
	if(argc < 3){
		fprintf(stderr, "usage: %s <cities> <threads>\n", argv[0]);
		return 1;
	}
	srand((unsigned) time(NULL));
	cities = atoi(argv[1]);
	int num_proc = atoi(argv[2]);
	int* posX;
	int* posY;
	int j,i;
	nodo = rand() % cities;
	posX = (int*) malloc(sizeof(int) * cities);
	posY = (int*) malloc(sizeof(int) * cities);
	for(i = 0; i<cities;i++){
		posX[i] = rand() % 50;
		posY[i] = rand() % 50;
		//printf("%d ",posY[i]);
		//printf("%d ",posX[i]);
	}
	initialize_matrix();
	start();
	find_distances(posX,posY,cities);
	/*
	for(i=0;i<cities;i++){
		for(j=0;j<cities;j++){
			printf("%f ", matrix_distances[i][j]);
			if(j== cities-1)
				printf("\n");
		}
	}
	*/
	greedy(num_proc);
	double tempo = stop();
	printf("Took %f seconds\n", tempo / TIME_RES);
	free(posX);
	free(posY);
	return 0;
}
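/*
 * Alternative to the critical-section merge used in greedy() above (a sketch,
 * assuming an OpenMP 4.0+ compiler): a user-defined "argmin" reduction that
 * carries the city index along with the minimum distance, removing the need
 * for a critical section.  Uses the FLT_MAX macro and the destino /
 * matrix_distances globals defined above.
 */
#if 0
typedef struct { float dist; int idx; } minloc_t;

#pragma omp declare reduction(argmin : minloc_t : \
        omp_out = (omp_in.dist < omp_out.dist ? omp_in : omp_out)) \
        initializer(omp_priv = (minloc_t){ FLT_MAX, -1 })

// returns the index of the nearest unvisited city reachable from `from`
int nearest_unvisited(int from, int n){
	minloc_t best = { FLT_MAX, -1 };
	int j;
	#pragma omp parallel for reduction(argmin : best)
	for(j = 0; j < n; j++){
		if(destino[j] == -1 && matrix_distances[from][j] != 0 &&
		   matrix_distances[from][j] < best.dist){
			best.dist = matrix_distances[from][j];
			best.idx = j;
		}
	}
	return best.idx;
}
#endif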
omp_threadprivate.c
<ompts:test>
<ompts:testdescription>Test which checks the omp threadprivate directive by filling an array with random numbers in a parallelised region. Each thread generates one element of the array and saves it in a temporary threadprivate variable. In a second parallelised region the test verifies that the temporary variable still contains the former value by comparing it with the one in the array.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp threadprivate</ompts:directive>
<ompts:dependences>omp critical,omp_set_dynamic,omp_get_num_threads</ompts:dependences>
<ompts:testcode>
/*
 * Threadprivate is tested in 2 ways:
 * 1. The global variable declared as threadprivate should have a
 *    local copy for each thread; otherwise there is a race condition
 *    and a wrong result.
 * 2. Whether the value of the local copy is retained across the two
 *    adjacent parallel regions.
 */
#include "omp_testsuite.h"
#include <stdlib.h>
#include <stdio.h>

static int sum0 = 0;
<ompts:check>#pragma omp threadprivate(sum0)</ompts:check>

static int myvalue = 0;
<ompts:check>#pragma omp threadprivate(myvalue)</ompts:check>

int <ompts:testcode:functionname>omp_threadprivate</ompts:testcode:functionname>(FILE * logFile)
{
	int sum = 0;
	int known_sum;
	int i;
	int iter;
	int *data;
	int size;
	int failed = 0;
	int my_random;
	omp_set_dynamic(0);

#pragma omp parallel
	{
		sum0 = 0;
#pragma omp for
		for (i = 1; i <= LOOPCOUNT; i++)
		{
			sum0 = sum0 + i;
		} /* end of for */
#pragma omp critical
		{
			sum = sum + sum0;
		} /* end of critical */
	} /* end of parallel */
	known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
	if (known_sum != sum) {
		fprintf (logFile, " known_sum = %d, sum = %d\n", known_sum, sum);
	}

	/* the next parallel region is just used to get the number of threads */
	omp_set_dynamic(0);
#pragma omp parallel
	{
#pragma omp master
		{
			size = omp_get_num_threads();
			data = (int*) malloc(size * sizeof(int));
		}
	} /* end of parallel */

	srand(45);
	for (iter = 0; iter < 100; iter++) {
		my_random = rand(); /* the random number generator is called inside the serial region */

		/* the first parallel region is used to initialize myvalue and the array with my_random+rank */
#pragma omp parallel
		{
			int rank;
			rank = omp_get_thread_num ();
			myvalue = data[rank] = my_random + rank;
		}

		/* the second parallel region verifies that the value of "myvalue" is retained */
#pragma omp parallel reduction(+:failed)
		{
			int rank;
			rank = omp_get_thread_num ();
			failed = failed + (myvalue != data[rank]);
			if (myvalue != data[rank]) {
				fprintf (logFile, " myvalue = %d, data[rank]= %d\n", myvalue, data[rank]);
			}
		}
	}
	free (data);
	return (known_sum == sum) && !failed;
} /* end of check_threadprivate */
</ompts:testcode>
</ompts:test>
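/*
 * Minimal standalone sketch of the same threadprivate behaviour outside the
 * test-suite template (assumption: plain OpenMP, no omp_testsuite.h).  Each
 * thread's copy of `tp` must keep its value across the two adjacent parallel
 * regions, provided dynamic thread adjustment is disabled and the team size
 * does not change.
 */
#if 0
#include <stdio.h>
#include <omp.h>

static int tp = 0;
#pragma omp threadprivate(tp)

int main(void)
{
	omp_set_dynamic(0);
#pragma omp parallel
	{
		tp = omp_get_thread_num() + 100;  /* first region: set the per-thread copy */
	}
#pragma omp parallel
	{
		/* second region: the per-thread value must be retained */
		if (tp != omp_get_thread_num() + 100)
			printf("thread %d lost its threadprivate value\n",
			       omp_get_thread_num());
	}
	return 0;
}
#endif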
utils.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <vector>
#include <string>
#include <cstring>  // strrchr

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

#ifdef _WIN32
#define GLOG_NO_ABBREVIATED_SEVERITIES
#include <windows.h>
#else
#include <dirent.h>
#include <sys/types.h>
#endif

namespace PaddleSolution {
namespace utils {
    inline std::string path_join(const std::string& dir,
                                 const std::string& path) {
        std::string separator = "/";
        #ifdef _WIN32
        separator = "\\";
        #endif
        return dir + separator + path;
    }

    #ifndef _WIN32
    // scan a directory and get all files with input extensions
    inline std::vector<std::string> get_directory_images(
            const std::string& path, const std::string& exts) {
        std::vector<std::string> imgs;
        struct dirent *entry;
        DIR *dir = opendir(path.c_str());
        if (dir == NULL) {
            // opendir failed; there is no handle to close
            return imgs;
        }
        while ((entry = readdir(dir)) != NULL) {
            std::string item = entry->d_name;
            auto ext = strrchr(entry->d_name, '.');
            if (!ext || std::string(ext) == "." || std::string(ext) == "..") {
                continue;
            }
            if (exts.find(ext) != std::string::npos) {
                imgs.push_back(path_join(path, entry->d_name));
            }
        }
        closedir(dir);  // release the directory handle
        return imgs;
    }
    #else
    // scan a directory and get all files with input extensions
    inline std::vector<std::string> get_directory_images(
            const std::string& path, const std::string& exts) {
        std::string pattern(path);
        pattern.append("\\*");
        std::vector<std::string> imgs;
        WIN32_FIND_DATA data;
        HANDLE hFind;
        if ((hFind = FindFirstFile(pattern.c_str(), &data))
                != INVALID_HANDLE_VALUE) {
            do {
                auto fname = std::string(data.cFileName);
                auto pos = fname.rfind(".");
                auto ext = fname.substr(pos + 1);
                if (ext.size() > 1 && exts.find(ext) != std::string::npos) {
                    imgs.push_back(path + "\\" + data.cFileName);
                }
            } while (FindNextFile(hFind, &data) != 0);
            FindClose(hFind);
        }
        return imgs;
    }
    #endif

    // normalize to zero mean / unit std and repack HWC -> CHW
    // (the channel order itself is left unchanged)
    inline void normalize(cv::Mat& im, float* data, std::vector<float>& fmean,
                          std::vector<float>& fstd) {
        int rh = im.rows;
        int rw = im.cols;
        int rc = im.channels();
        double normf = static_cast<double>(1.0) / 255.0;
        #pragma omp parallel for
        for (int h = 0; h < rh; ++h) {
            const uchar* ptr = im.ptr<uchar>(h);
            int im_index = 0;
            for (int w = 0; w < rw; ++w) {
                for (int c = 0; c < rc; ++c) {
                    int top_index = (c * rh + h) * rw + w;
                    float pixel = static_cast<float>(ptr[im_index++]);
                    pixel = (pixel * normf - fmean[c]) / fstd[c];
                    data[top_index] = pixel;
                }
            }
        }
    }

    // flatten a cv::Mat into an HWC float buffer
    inline void flatten_mat(cv::Mat& im, float* data) {
        int rh = im.rows;
        int rw = im.cols;
        int rc = im.channels();
        #pragma omp parallel for
        for (int h = 0; h < rh; ++h) {
            const uchar* ptr = im.ptr<uchar>(h);
            int im_index = 0;
            int top_index = h * rw * rc;
            for (int w = 0; w < rw; ++w) {
                for (int c = 0; c < rc; ++c) {
                    float pixel = static_cast<float>(ptr[im_index++]);
                    data[top_index++] = pixel;
                }
            }
        }
    }

    // per-pixel argmax over the channel dimension: mask gets the winning
    // class id, scoremap the winning score scaled to 0..255
    inline void argmax(float* out, std::vector<int>& shape,
                       std::vector<uchar>& mask,
                       std::vector<uchar>& scoremap) {
        int out_img_len = shape[1] * shape[2];
        int blob_out_len = out_img_len * shape[0];
        // parallelize over pixels; max_value and label are per-iteration
        // locals, so no cross-thread reduction is needed (the previous
        // version raced on shared max_value/label through a misplaced
        // `omp for reduction` inside a plain parallel region)
        #pragma omp parallel for
        for (int i = 0; i < out_img_len; ++i) {
            float max_value = -1;
            int label = 0;
            for (int j = 0; j < shape[0]; ++j) {
                int index = i + j * out_img_len;
                if (index >= blob_out_len) {
                    continue;
                }
                float value = out[index];
                if (value > max_value) {
                    max_value = value;
                    label = j;
                }
            }
            if (label == 0) max_value = 0;
            mask[i] = uchar(label);
            scoremap[i] = uchar(max_value * 255);
        }
    }
}  // namespace utils
}  // namespace PaddleSolution
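// Illustrative usage sketch (assumptions: a segmentation-style network with a
// CHW float output, ImageNet-like mean/std values, and an OpenCV build where
// cv::imread is reachable through the headers above).  Shows how normalize()
// and argmax() are meant to be chained; not part of the original header.
#if 0
inline void example_pipeline(float* net_out, int num_classes, int h, int w) {
    cv::Mat im = cv::imread("input.jpg");                // HWC, BGR, 8-bit
    std::vector<float> mean = {0.485f, 0.456f, 0.406f};  // assumed values
    std::vector<float> stdv = {0.229f, 0.224f, 0.225f};  // assumed values
    std::vector<float> input(3 * im.rows * im.cols);
    PaddleSolution::utils::normalize(im, input.data(), mean, stdv);
    // ... run inference here to fill net_out (num_classes * h * w floats) ...
    std::vector<int> shape = {num_classes, h, w};        // CHW
    std::vector<uchar> mask(h * w), scoremap(h * w);
    PaddleSolution::utils::argmax(net_out, shape, mask, scoremap);
    // mask now holds the per-pixel class id, scoremap the scaled max score
}
#endif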
residualbased_simple_steady_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Michael Andre, https://github.com/msandre // #if !defined(KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME ) #define KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "solving_strategies/schemes/scheme.h" #include "includes/variables.h" #include "includes/cfd_variables.h" #include "containers/array_1d.h" #include "utilities/openmp_utils.h" #include "utilities/coordinate_transformation_utilities.h" #include "processes/process.h" namespace Kratos { ///@name Kratos Classes ///@{ template<class TSparseSpace, class TDenseSpace > class ResidualBasedSimpleSteadyScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedSimpleSteadyScheme); typedef Scheme<TSparseSpace, TDenseSpace> BaseType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef Element::GeometryType GeometryType; ///@} ///@name Life Cycle ///@{ ResidualBasedSimpleSteadyScheme(double VelocityRelaxationFactor, double PressureRelaxationFactor, unsigned int DomainSize) : Scheme<TSparseSpace, TDenseSpace>(), mVelocityRelaxationFactor(VelocityRelaxationFactor), mPressureRelaxationFactor(PressureRelaxationFactor), mRotationTool(DomainSize,DomainSize+1,SLIP) {} ResidualBasedSimpleSteadyScheme( double VelocityRelaxationFactor, double PressureRelaxationFactor, unsigned int DomainSize, Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mVelocityRelaxationFactor(VelocityRelaxationFactor), mPressureRelaxationFactor(PressureRelaxationFactor), mRotationTool(DomainSize,DomainSize+1,SLIP), mpTurbulenceModel(pTurbulenceModel) {} ~ResidualBasedSimpleSteadyScheme() override {} ///@} ///@name Operators ///@{ double GetVelocityRelaxationFactor() const { return mVelocityRelaxationFactor; } void SetVelocityRelaxationFactor(double factor) { mVelocityRelaxationFactor = factor; } double GetPressureRelaxationFactor() const { return mPressureRelaxationFactor; } void SetPressureRelaxationFactor(double factor) { mPressureRelaxationFactor = factor; } void Update(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb) override { KRATOS_TRY; mRotationTool.RotateVelocities(rModelPart); TSparseSpace::InplaceMult(rDx, mVelocityRelaxationFactor); mpDofUpdater->UpdateDofs(rDofSet,rDx); mRotationTool.RecoverVelocities(rModelPart); KRATOS_CATCH(""); } void CalculateSystemContributions( Element& rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY; rCurrentElement.InitializeNonLinearIteration(CurrentProcessInfo); rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); Matrix SteadyLHS; rCurrentElement.CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution, CurrentProcessInfo); rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo); if (SteadyLHS.size1() != 0) 
noalias(LHS_Contribution) += SteadyLHS; // apply slip condition mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry()); KRATOS_CATCH(""); } void CalculateSystemContributions( Condition& rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Condition::EquationIdVectorType& EquationId, const ProcessInfo& CurrentProcessInfo) override { KRATOS_TRY; rCurrentCondition.InitializeNonLinearIteration(CurrentProcessInfo); rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); Matrix SteadyLHS; rCurrentCondition.CalculateLocalVelocityContribution(SteadyLHS, RHS_Contribution, CurrentProcessInfo); rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo); if (SteadyLHS.size1() != 0) noalias(LHS_Contribution) += SteadyLHS; // apply slip condition mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition.GetGeometry()); KRATOS_CATCH(""); } void CalculateRHSContribution( Element& rCurrentElement, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; //TODO: This is very inefficient. Check why the RHS-only methods are not called. Matrix LHS_Contribution; CalculateSystemContributions( rCurrentElement, LHS_Contribution, rRHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void CalculateRHSContribution( Condition& rCurrentCondition, LocalSystemVectorType& rRHS_Contribution, Element::EquationIdVectorType& rEquationId, const ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY; //TODO: This is very inefficient. Check why the RHS-only methods are not called. 
Matrix LHS_Contribution; CalculateSystemContributions( rCurrentCondition, LHS_Contribution, rRHS_Contribution, rEquationId, rCurrentProcessInfo); KRATOS_CATCH(""); } void FinalizeNonLinIteration(ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb) override { if (mpTurbulenceModel) // If not null { mpTurbulenceModel->Execute(); } ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //if orthogonal subscales are computed if (CurrentProcessInfo[OSS_SWITCH] == 1.0) { KRATOS_INFO_IF("ResidualBasedSimpleSteadyScheme", rModelPart.GetCommunicator().MyPID() == 0) << "Computing OSS projections" << std::endl; const int number_of_nodes = rModelPart.NumberOfNodes(); #pragma omp parallel for for (int i = 0; i < number_of_nodes; i++) { ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i; noalias(it_node->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); it_node->FastGetSolutionStepValue(DIVPROJ) = 0.0; it_node->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } const int number_of_elements = rModelPart.NumberOfElements(); array_1d<double, 3 > output; #pragma omp parallel for private(output) for (int i = 0; i < number_of_elements; i++) { ModelPart::ElementIterator it_elem = rModelPart.ElementsBegin() + i; it_elem->Calculate(ADVPROJ,output,CurrentProcessInfo); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); #pragma omp parallel for for (int i = 0; i < number_of_nodes; i++) { ModelPart::NodeIterator it_node = rModelPart.NodesBegin() + i; if (it_node->FastGetSolutionStepValue(NODAL_AREA) == 0.0) it_node->FastGetSolutionStepValue(NODAL_AREA) = 1.0; const double area_inverse = 1.0 / it_node->FastGetSolutionStepValue(NODAL_AREA); it_node->FastGetSolutionStepValue(ADVPROJ) *= area_inverse; it_node->FastGetSolutionStepValue(DIVPROJ) *= area_inverse; } } } void FinalizeSolutionStep(ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb) override { LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode) { itNode->FastGetSolutionStepValue(REACTION_X,0) = 0.0; itNode->FastGetSolutionStepValue(REACTION_Y,0) = 0.0; itNode->FastGetSolutionStepValue(REACTION_Z,0) = 0.0; } for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem) { (*itElem)->InitializeNonLinearIteration(rCurrentProcessInfo); (*itElem)->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo); Matrix SteadyLHS; (*itElem)->CalculateLocalVelocityContribution(SteadyLHS,RHS_Contribution,rCurrentProcessInfo); GeometryType& rGeom = (*itElem)->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int Dimension = rGeom.WorkingSpaceDimension(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++]; rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++]; if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++]; index++; // skip pressure dof } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, rA, 
rDx, rb); } ///@} protected: ///@name Protected Operators ///@{ ///@} private: ///@name Member Variables ///@{ double mVelocityRelaxationFactor; double mPressureRelaxationFactor; CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool; Process::Pointer mpTurbulenceModel; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); ///@} }; ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_SIMPLE_STEADY_SCHEME defined */
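// Illustrative sketch (assumptions: the usual Kratos UblasSpace typedefs and
// header paths; nothing below is defined by this file).  Shows how the scheme
// is typically constructed with separate velocity/pressure under-relaxation
// factors for a 3D steady solve; 0.7/0.3 are common SIMPLE-style choices,
// not defaults of this class.
#if 0
#include "spaces/ublas_space.h"

namespace Kratos {
typedef UblasSpace<double, CompressedMatrix, Vector> SparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> LocalSpaceType;

inline ResidualBasedSimpleSteadyScheme<SparseSpaceType, LocalSpaceType>::Pointer
MakeSteadyScheme()
{
  return ResidualBasedSimpleSteadyScheme<SparseSpaceType, LocalSpaceType>::Pointer(
      new ResidualBasedSimpleSteadyScheme<SparseSpaceType, LocalSpaceType>(
          0.7,   // velocity relaxation factor
          0.3,   // pressure relaxation factor
          3));   // domain size (3D)
}
} // namespace Kratos
#endif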
heat_3d-a.pluto.c
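/*
 * Readability aid (a sketch, not used by the generated code below): the
 * plain, untiled loop nest that the Pluto-transformed kernel is equivalent
 * to, with the same double buffering and zero, non-periodic boundary
 * handling -- the `(idx >= N ? 0 : ...)` / `(idx < 0 ? 0 : ...)` guards in
 * the generated statements play the role of AT() here.
 */
#if 0
#define AT(b, x, y, z) (((x) < 0 || (x) >= n || (y) < 0 || (y) >= n || \
                         (z) < 0 || (z) >= n) ? 0.0 : A[b][x][y][z])
void heat3d_reference(double ****A, long n, long steps) {
  long t, i, j, k;
  for (t = 0; t < steps; t++)
    for (i = 0; i < n; i++)
      for (j = 0; j < n; j++)
        for (k = 0; k < n; k++)
          A[(t + 1) % 2][i][j][k] =
              0.125 * (AT(t % 2, i + 1, j, k) - 2.0 * AT(t % 2, i, j, k)
                       + AT(t % 2, i - 1, j, k))
            + 0.125 * (AT(t % 2, i, j + 1, k) - 2.0 * AT(t % 2, i, j, k)
                       + AT(t % 2, i, j - 1, k))
            + 0.125 * (AT(t % 2, i, j, k + 1) - 2.0 * AT(t % 2, i, j, k)
                       + AT(t % 2, i, j, k - 1))
            + AT(t % 2, i, j, k);
}
#undef AT
#endif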
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Discretized 3D heat equation stencil with non periodic boundary conditions * Adapted from Pochoir test bench */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> /* * N is the number of points * T is the number of timesteps */ #ifdef HAS_DECLS #include "decls.h" #else #define N 800L #define T 800L #endif #define NUM_FP_OPS 15 /* Define our arrays */ double total=0; double sum_err_sqr=0; int chtotal=0; /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char * argv[]) { long int t, i, j, k; const int BASE = 1024; long count=0; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0; // double A[2][N][N][N]; double ****A = (double ****)malloc(2 * sizeof (double ***)); int l; for (l = 0; l < 2; l++){ A[l] = (double ***) malloc(N * sizeof(double **)); for (i = 0; i < N; i++){ A[l][i] = (double **) malloc(N * sizeof(double *)); for (j = 0; j < N; j++) A[l][i][j] = (double *) malloc(N * sizeof (double)); } } printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T); /* Initialization */ srand(42); // seed with a constant value to verify results for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef TIME gettimeofday(&start, 0); #endif // #undef N // #define N 150L #undef T #define T 400L /* Copyright (C) 1991-2012 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. 
*/ /* We do support the IEC 559 math functionality, real and complex. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((N >= 1) && (T >= 1)) { for (t1=-1;t1<=T-1;t1++) { lbp=ceild(t1,2); ubp=floord(2*t1+N,4); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(0,ceild(t1-1,2));t3<=min(floord(2*T+N-2,4),floord(2*t1+N+2,4));t3++) { for (t4=max(max(0,ceild(t1-1011,1012)),ceild(4*t3-N-2022,2024));t4<=min(min(floord(2*T+N-2,2024),floord(2*t1+N+2,2024)),floord(4*t3+N+2,2024));t4++) { if ((t1 <= floord(2024*t4-N,2)) && (t2 <= 506*t4-1) && (t3 <= 506*t4-1) && (t4 >= ceild(N,2024))) { if (N%2 == 0) { for (t6=max(max(4*t2,2024*t4-N+1),-4*t1+4*t2+4048*t4-2*N-1);t6<=min(4*t2+3,-4*t1+4*t2+4048*t4-2*N+2);t6++) { for (t7=max(4*t3,2024*t4-N+1);t7<=4*t3+3;t7++) { A[0][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] = 0.125 * ((((-2024*t4+t6+N-1)+1) >= N ? 0 : A[1][(-2024*t4+t6+N-1)+1][(-2024*t4+t7+N-1)][(N-1)]) - 2.0 * A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] + (((-2024*t4+t6+N-1)-1) < 0 ? 0 : A[1][(-2024*t4+t6+N-1)-1][(-2024*t4+t7+N-1)][(N-1)])) + 0.125 * ((((-2024*t4+t7+N-1)+1) >= N ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)+1][(N-1)]) - 2.0 * A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] + (((-2024*t4+t7+N-1)-1) < 0 ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)+1]) - 2.0 * A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)-1])) + A[1][(-2024*t4+t6+N-1)][(-2024*t4+t7+N-1)][(N-1)];; } } } } if ((t1 <= floord(4*t3-N,2)) && (t2 <= t3-1) && (t3 >= ceild(N,4))) { if (N%2 == 0) { for (t6=max(max(4*t2,4*t3-N+1),-4*t1+4*t2+8*t3-2*N-1);t6<=min(4*t2+3,-4*t1+4*t2+8*t3-2*N+2);t6++) { lbv=max(2024*t4,4*t3-N+1); ubv=min(4*t3,2024*t4+2023); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] = 0.125 * ((((-4*t3+t6+N-1)+1) >= N ? 0 : A[1][(-4*t3+t6+N-1)+1][(N-1)][(-4*t3+t8+N-1)]) - 2.0 * A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] + (((-4*t3+t6+N-1)-1) < 0 ? 0 : A[1][(-4*t3+t6+N-1)-1][(N-1)][(-4*t3+t8+N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)+1][(-4*t3+t8+N-1)]) - 2.0 * A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)-1][(-4*t3+t8+N-1)])) + 0.125 * ((((-4*t3+t8+N-1)+1) >= N ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)+1]) - 2.0 * A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)] + (((-4*t3+t8+N-1)-1) < 0 ? 0 : A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)-1])) + A[1][(-4*t3+t6+N-1)][(N-1)][(-4*t3+t8+N-1)];; } } } } if ((t1 >= 0) && (2*t1 == 4*t2-N)) { for (t7=max(4*t3,2*t1+1);t7<=min(2*t1+N,4*t3+3);t7++) { lbv=max(2024*t4,2*t1+1); ubv=min(2*t1+N,2024*t4+2023); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N)%4 == 0) { A[0][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(N-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(N-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 
0 : A[1][(N-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(N-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(2*t3,T-2),1012*t4+1010)) && (2*t1 == 4*t2-N)) { for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N+1);t7++) { lbv=max(2024*t4,2*t1+2); ubv=min(2024*t4+2023,2*t1+N+1); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N)%4 == 0) { A[1][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((N-1)+1) >= N ? 0 : A[0][(N-1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((N-1)-1) < 0 ? 0 : A[0][(N-1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(N-1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(N-1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(N-1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } } if ((t1 == 2*t2) && (t1 <= min(floord(4*t3-N+3,2),floord(2024*t4-N+2023,2))) && (t1 >= max(ceild(4*t3-N+1,2),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=max(2*t1,2024*t4); ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=min(2*t1+2,2*t1+N);t6++) { for (t7=max(4*t3,2*t1+1);t7<=2*t1+N;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 == 2*t2) && (t1 <= floord(2024*t4-N+2023,2)) && (t1 >= max(ceild(4*t3-N+4,2),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=max(2*t1,2024*t4); ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 
0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=2*t1+2;t6++) { for (t7=max(4*t3,2*t1+1);t7<=4*t3+3;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 == 2*t2) && (t1 <= floord(4*t3-N+3,2)) && (t1 >= max(ceild(4*t3-N+1,2),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=max(2*t1,2024*t4); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=2*t1+2;t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 == 2*t2) && (t1 >= max(ceild(4*t3-N+4,2),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=max(2*t1,2024*t4); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][0][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][0][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][0][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][0][(-2*t1+t7)][(-2*t1+t8)];; } } } for (t6=2*t1+1;t6<=2*t1+2;t6++) { for (t7=max(4*t3,2*t1+1);t7<=4*t3+3;t7++) { lbv=max(2024*t4,2*t1+1); ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } } if ((t1 <= min(floord(2024*t4-N+2023,2),2*t2-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),2*t3),1012*t4))) { lbv=2*t1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 
0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(2024*t4-N+2023,2),2*t2-1),1012*t4-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),ceild(2024*t4-N+1,2)),2*t3))) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 
0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 
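/*
 * Annotation (editor's note): the loop machinery visible here is the
 * usual output of a polyhedral tiler.  floord()/ceild() are presumably
 * the standard floor/ceil integer-division helpers defined earlier in
 * this file, and min()/max() the usual two-argument macros.  lbv/ubv
 * hold the lower/upper bounds of the innermost loop over t8, which is
 * tiled with width 2024 (lbv = 2024*t4, ubv = 2024*t4 + 2023; the
 * companion constant 1012 is 2024/2, arising from the two-steps-per-
 * time-tile skewing).  The t7 loops cover tiles of width 4
 * (4*t3 .. 4*t3+3).  "#pragma ivdep" and "#pragma vector always" are
 * Intel-compiler hints asserting that the t8 loop carries no
 * dependences and should always be vectorized.  The doubled semicolons
 * ";;" are an artifact of the generator's statement macros; each is
 * merely an empty statement and is harmless.
 */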
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= 2*t2-1) && (t1 >= max(max(max(ceild(4*t2-N+1,2),ceild(2024*t4-N+2024,2)),2*t3),1012*t4))) { lbv=2*t1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(2*t2-1,1012*t4-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),ceild(2024*t4-N+2024,2)),2*t3))) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][0][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][0][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][0][(-2*t1+t8)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][0 +1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0 -1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][0][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][0][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][0][(-2*t1+t8)];; } for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=2*t1+1;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(4*t3-N+3,2),floord(2024*t4-N+2023,2)),2*t2-1)) && (t1 >= max(ceild(4*t3-N+1,2),1012*t4))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 
0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(2024*t4-N+2023,2),2*t2-1),2*t3-1)) && (t1 >= max(max(ceild(4*t2-N+1,2),ceild(4*t3-N+4,2)),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(floord(4*t3-N+3,2),2*t2-1)) && (t1 >= max(max(ceild(4*t3-N+1,2),ceild(2024*t4-N+2024,2)),1012*t4))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 
0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(2*t2-1,2*t3-1)) && (t1 >= max(max(max(ceild(4*t2-N+1,2),ceild(4*t3-N+4,2)),ceild(2024*t4-N+2024,2)),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][0] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][0])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][0]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][0 -1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][0];; lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+1; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
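/*
 * Illustrative sketch (editor's addition, kept inside a comment so it
 * cannot affect compilation): a minimal, un-tiled version of the kernel
 * that this generated code appears to compute, assuming A is a
 * double A[2][N][N][N] ping-pong buffer and out-of-domain neighbours
 * contribute zero, as the ternary guards suggest.  The tiled code above
 * and below computes the same values; the tiling merely reorders the
 * space-time iteration for cache locality and vectorization.
 *
 *   for (t = 0; t < T; t++)
 *     for (i = 0; i < N; i++)
 *       for (j = 0; j < N; j++)
 *         for (k = 0; k < N; k++)
 *           A[(t+1)%2][i][j][k] =
 *               0.125 * ((i+1 <  N ? A[t%2][i+1][j][k] : 0.0)
 *                        - 2.0 * A[t%2][i][j][k]
 *                        + (i-1 >= 0 ? A[t%2][i-1][j][k] : 0.0))
 *             + 0.125 * ((j+1 <  N ? A[t%2][i][j+1][k] : 0.0)
 *                        - 2.0 * A[t%2][i][j][k]
 *                        + (j-1 >= 0 ? A[t%2][i][j-1][k] : 0.0))
 *             + 0.125 * ((k+1 <  N ? A[t%2][i][j][k+1] : 0.0)
 *                        - 2.0 * A[t%2][i][j][k]
 *                        + (k-1 >= 0 ? A[t%2][i][j][k-1] : 0.0))
 *             + A[t%2][i][j][k];
 */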
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(min(floord(4*t3-N+3,2),floord(2024*t4-N+2023,2)),2*t2-1),1012*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+1,2)),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(min(floord(2024*t4-N+2023,2),2*t2-1),2*t3-1),1012*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+1,2)),ceild(4*t3-N+4,2)),ceild(2024*t4-N+1,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N-1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(N-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)] + (((N-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(N-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(floord(4*t3-N+3,2),2*t2-1),1012*t4-1)) && (t1 >= max(max(0,ceild(4*t3-N+1,2)),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=2*t1+N-1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(N-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(N-1)][(-2*t1+t8-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(N-1)][(-2*t1+t8-1)];; } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=2*t1+N;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((t1 <= min(min(2*t2-1,2*t3-1),1012*t4-1)) && (t1 >= max(max(max(0,ceild(4*t2-N+1,2)),ceild(4*t3-N+4,2)),ceild(2024*t4-N+2024,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[0][(-2*t1+4*t2)+1][(-2*t1+t7)][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)-1][(-2*t1+t7)][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t7)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)+1][(-2*t1+t8)]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t7)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)-1][(-2*t1+t8)])) + 0.125 * ((((-2*t1+t8)+1) >= N ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)+1]) - 2.0 * A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)] + (((-2*t1+t8)-1) < 0 ? 0 : A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)-1])) + A[0][(-2*t1+4*t2)][(-2*t1+t7)][(-2*t1+t8)];; A[0][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+4*t2-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+4*t2-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+4*t2-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } for (t6=4*t2+1;t6<=min(2*t1+N,4*t2+2);t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] = 0.125 * ((((-2*t1+t6-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)+1][(-2*t1+t7-1)][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t6-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)-1][(-2*t1+t7-1)][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t7-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)+1][(-2*t1+t8-1)]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t7-1)-1) < 0 ? 
0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)-1][(-2*t1+t8-1)])) + 0.125 * ((((-2*t1+t8-1)+1) >= N ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)+1]) - 2.0 * A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)] + (((-2*t1+t8-1)-1) < 0 ? 0 : A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)-1])) + A[1][(-2*t1+t6-1)][(-2*t1+t7-1)][(-2*t1+t8-1)];; } } } } if ((N >= 3) && (t1 <= min(min(2*t3,T-2),1012*t4+1010)) && (2*t1 == 4*t2-N+1)) { for (t6=2*t1+N;t6<=2*t1+N+1;t6++) { for (t7=max(4*t3,2*t1+2);t7<=min(4*t3+3,2*t1+N+1);t7++) { lbv=max(2024*t4,2*t1+2); ubv=min(2024*t4+2023,2*t1+N+1); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N+1)%4 == 0) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } } for (t7=max(4*t3,2*t1+3);t7<=4*t3+3;t7++) { lbv=max(2024*t4,2*t1+3); ubv=min(2024*t4+2023,2*t1+N+2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if ((2*t1+3*N+1)%4 == 0) { A[0][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(N-1)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(N-1)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(N-1)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(N-1)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } } if ((t1 <= min(min(min(floord(2024*t4-N+2021,2),2*t3),T-2),2*t2-1)) && (t1 >= max(max(ceild(4*t2-N+2,2),2*t3-1),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
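/*
 * Annotation (editor's note, interpretive): the remaining guarded
 * regions look like the clean-up cases produced by tiling, i.e. tiles
 * clipped against the faces of the space-time iteration domain
 * (conditions such as 2*t1 == 4*t2-N+1 or t1 <= T-2 select tiles that
 * touch a spatial face or the final time tile).  The parity tests, e.g.
 * t1%2 == 0 or (2*t1+3*N+1)%4 == 0, appear to pick which of the two
 * buffers A[0]/A[1] holds the current time step for that tile; the
 * loops that follow then apply the same 7-point update as the
 * steady-state tiles earlier in the file.
 */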
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(min(floord(2024*t4-N+2021,2),2*t3),T-2),2*t2-1),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(2024*t4-N-1,2)),2*t3-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),2*t2-1)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 
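/*
 * This block handles tiles whose j loop starts at an interior tile edge
 * (t7 = 4*t3) and runs to the skewed domain top (2*t1+N+1), with k starting
 * at the domain origin; the k == 0 face statements are kept, and the (N-1)
 * statements are top-face updates for the second fused step, which lags one
 * skewed index behind.
 */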
0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } lbv=2*t1+3; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),2*t2-1),1012*t4-2)) && (t1 >= max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
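/*
 * This block combines the two interior-edge variants above: j runs from
 * 4*t3 up to the domain top and k starts at 2024*t4, followed by both
 * (N-1) face epilogues for the second fused step.
 */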
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } lbv=2024*t4; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(floord(2024*t4-N+2021,2),T-2),2*t2-1),2*t3-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
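/*
 * This block covers tiles whose j range is a full interior tile
 * (t7 = 4*t3 .. 4*t3+3): neither j face touches the domain boundary, so
 * only the k == 0 face statements and the (N-1) k epilogue of the second
 * step remain.
 */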
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(min(floord(2024*t4-N+2021,2),T-2),2*t2-1),2*t3-2),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),ceild(2024*t4-N-1,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
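/*
 * As above, but with k also starting at an interior tile edge (2024*t4):
 * only the trailing (N-1) k epilogue of the second step is left.
 */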
0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(N-1)];; } } if ((t1 <= min(min(min(2*t3,T-2),2*t2-1),1012*t4+1010)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(2024*t4-N+2022,2)),2*t3-1),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 
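/*
 * From here the k upper bound is an interior tile edge (ubv = 2024*t4+2023)
 * rather than the domain top, so the (N-1) k epilogues drop out. This first
 * variant still starts j and k at the skewed domain origin and keeps the
 * j == 0 and k == 0 face statements.
 */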
0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((t1 <= min(min(min(2*t3,T-2),2*t2-1),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(2024*t4-N+2022,2)),2*t3-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=2*t1+2;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][0][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][0][(-2*t1+t8-2)];; } for (t7=2*t1+3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 
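/*
 * This block: k spans a full interior tile (2024*t4 .. 2024*t4+2023) while
 * j still starts at the domain origin, so the j == 0 face loop is kept but
 * all k face and epilogue statements are gone.
 */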
0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((t1 <= min(min(min(floord(4*t3-N+1,2),T-2),2*t2-1),1012*t4+1010)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)),ceild(2024*t4-N+2022,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 
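/*
 * This block pairs the interior k upper edge with a j range that reaches
 * the domain top: the k == 0 face statements remain, and the only epilogue
 * left is the (N-1) j-plane pass of the second step.
 */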
0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(floord(4*t3-N+1,2),T-2),2*t2-1),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N-1,2)),ceild(2024*t4-N+2022,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 
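/*
 * As above with k fully interior: j still reaches the domain top, so the
 * (N-1) j-plane epilogue is kept while all k boundary work drops out.
 */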
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[0][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(N-1)][(-2*t1+t8-3)];; } } if ((t1 <= min(min(min(T-2,2*t2-1),2*t3-2),1012*t4+1010)) && (t1 >= max(max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),ceild(2024*t4-N+2022,2)),1012*t4-1))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
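/*
 * This block: full interior j tile combined with the interior k upper edge;
 * only the k == 0 face statements survive.
 */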
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0 -1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][0];; lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((t1 <= min(min(min(T-2,2*t2-1),2*t3-2),1012*t4-2)) && (t1 >= max(max(ceild(4*t2-N+2,2),ceild(4*t3-N+2,2)),ceild(2024*t4-N+2022,2)))) { for (t6=4*t2+1;t6<=4*t2+2;t6++) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+t6-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t6-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
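/*
 * Steady-state case: both j and k span full interior tiles, so the body is
 * just the fused A[1]/A[0] update pair with no boundary statements at all.
 */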
0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+t6-2)][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[1][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * ((((-2*t1+4*t2+1)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)+1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+4*t2+1)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)-1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][(-2*t1+4*t2+1)][(-2*t1+t7-2)][(-2*t1+t8-2)];; A[0][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * ((((-2*t1+4*t2)+1) >= N ? 0 : A[1][(-2*t1+4*t2)+1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+4*t2)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)-1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][(-2*t1+4*t2)][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } if ((N == 1) && (t1 == 2*t2) && (t1 == 2*t3) && (t1 <= T-2)) { if (t1%2 == 0) { A[1][0][0][0] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][0][0]) - 2.0 * A[0][0][0][0] + ((0 -1) < 0 ? 0 : A[0][0 -1][0][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][0][0 +1][0]) - 2.0 * A[0][0][0][0] + ((0 -1) < 0 ? 0 : A[0][0][0 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][0][0][0 +1]) - 2.0 * A[0][0][0][0] + ((0 -1) < 0 ? 0 : A[0][0][0][0 -1])) + A[0][0][0][0];; } if (t1%2 == 0) { A[0][0][0][0] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][0]) - 2.0 * A[1][0][0][0] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][0])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][0]) - 2.0 * A[1][0][0][0] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0][0 +1]) - 2.0 * A[1][0][0][0] + ((0 -1) < 0 ? 0 : A[1][0][0][0 -1])) + A[1][0][0][0];; } } if ((N >= 2) && (t1 == 2*t2) && (t1 == 2*t3) && (t1 <= min(floord(2024*t4-N+2021,2),T-2)) && (t1 >= 1012*t4)) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
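/*
 * The (N == 1) case above collapses the tile to a single cell and shows the
 * fused pair at its simplest: every neighbour is out of range, so each step
 * reduces to A' = A + 3 * 0.125 * (-2.0*A) = 0.25 * A, applied once into
 * A[1] and once back into A[0]. The blocks from here on handle tiles that
 * contain the i == 0 face (t1 == 2*t2, with explicit i = 0 and i = 1 plane
 * updates) and, when t1 == 2*t3 as well, the j == 0 edge; the t1%2 == 0
 * guards appear to stem from a modulo condition in the generated schedule.
 */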
0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } if (t1%2 == 0) { A[1][1][1][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][0])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][1][0 +1]) - 2.0 * A[0][1][1][0] + ((0 -1) < 0 ? 0 : A[0][1][1][0 -1])) + A[0][1][1][0];; } lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][0][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(N-1)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][0][(N-1)+1]) - 2.0 * A[1][0][0][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][0][(N-1)-1])) + A[1][0][0][(N-1)];; } } if ((t1 == 2*t2) && (t1 == 2*t3) && (t1 <= min(min(floord(2024*t4-N+2021,2),T-2),1012*t4-2)) && (t1 >= ceild(2024*t4-N-1,2))) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 
0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][0][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(N-1)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(N-1)]) - 2.0 * A[1][0][0][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][0][(N-1)+1]) - 2.0 * A[1][0][0][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][0][(N-1)-1])) + A[1][0][0][(N-1)];; } } if ((t1 == 2*t2) && (t1 <= min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),2*t3-2)) && (t1 >= max(ceild(4*t3-N-1,2),1012*t4))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
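/*
 * Same i == 0 / i == 1 plane treatment, now for tiles whose j range runs
 * from an interior tile edge to the domain top; the A[0][0][...] statements
 * are the second fused step restricted to the i == 0 plane, including its
 * (N-1) epilogues.
 */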
0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } lbv=2*t1+3; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(min(floord(4*t3-N+1,2),floord(2024*t4-N+2021,2)),T-2),1012*t4-2)) && (t1 >= ceild(4*t3-N-1,2))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } lbv=2024*t4; ubv=2*t1+N+2; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(2024*t4-N+2021,2),T-2),2*t3-2)) && (t1 >= max(ceild(4*t3-N+2,2),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 
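/*
 * i == 0 face tile with a full interior j tile: only the k == 0 face
 * statements and the (N-1) k epilogue of the i == 0 plane step remain.
 */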
0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(min(floord(2024*t4-N+2021,2),T-2),2*t3-2),1012*t4-2)) && (t1 >= max(ceild(4*t3-N+2,2),ceild(2024*t4-N-1,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 
0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2*t1+N+1; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(N-1)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(N-1)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(N-1)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(N-1)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(N-1)] + (((N-1)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(N-1)-1])) + A[1][0][(-2*t1+t7-3)][(N-1)];; } } } if ((t1 == 2*t2) && (t1 == 2*t3) && (t1 <= T-2) && (t1 >= max(ceild(2024*t4-N+2022,2),1012*t4))) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 
0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } if (t1%2 == 0) { A[1][1][1][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][0])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][0]) - 2.0 * A[0][1][1][0] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][1][0 +1]) - 2.0 * A[0][1][1][0] + ((0 -1) < 0 ? 0 : A[0][1][1][0 -1])) + A[0][1][1][0];; } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 == 2*t3) && (t1 <= min(T-2,1012*t4-2)) && (t1 >= ceild(2024*t4-N+2022,2))) { for (t7=2*t1+2;t7<=2*t1+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][0][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][0][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][0][(-2*t1+t8-2)])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][0 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][1][0 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][0][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][0][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][0][(-2*t1+t8-2)-1])) + A[0][1][0][(-2*t1+t8-2)];; } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][1][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 
0 : A[0][1 +1][1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][1][(-2*t1+t8-2)])) + 0.125 * (((1 +1) >= N ? 0 : A[0][1][1 +1][(-2*t1+t8-2)]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1][1 -1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][1][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][1][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][1][(-2*t1+t8-2)-1])) + A[0][1][1][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][0][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][0][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][0][(-2*t1+t8-3)])) + 0.125 * (((0 +1) >= N ? 0 : A[1][0][0 +1][(-2*t1+t8-3)]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0][0 -1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][0][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][0][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][0][(-2*t1+t8-3)-1])) + A[1][0][0][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(floord(4*t3-N+1,2),T-2)) && (t1 >= max(max(ceild(4*t3-N-1,2),ceild(2024*t4-N+2022,2)),1012*t4))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 
0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(min(floord(4*t3-N+1,2),T-2),1012*t4-2)) && (t1 >= max(ceild(4*t3-N-1,2),ceild(2024*t4-N+2022,2)))) { for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=2*t1+N+1;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[0][0][(N-1)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(N-1)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(N-1)][(-2*t1+t8-3)])) + 0.125 * ((((N-1)+1) >= N ? 0 : A[1][0][(N-1)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((N-1)-1) < 0 ? 0 : A[1][0][(N-1)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(N-1)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 0 : A[1][0][(N-1)][(-2*t1+t8-3)-1])) + A[1][0][(N-1)][(-2*t1+t8-3)];; } } } if ((t1 == 2*t2) && (t1 <= min(T-2,2*t3-2)) && (t1 >= max(max(ceild(4*t3-N+2,2),ceild(2024*t4-N+2022,2)),1012*t4))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2*t1+2; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][0] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][0])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][0]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][0])) + 0.125 * (((0 +1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][0 +1]) - 2.0 * A[0][1][(-2*t1+t7-2)][0] + ((0 -1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][0 -1])) + A[0][1][(-2*t1+t7-2)][0];; } lbv=2*t1+3; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } } if ((t1 == 2*t2) && (t1 <= min(min(T-2,2*t3-2),1012*t4-2)) && (t1 >= max(ceild(4*t3-N+2,2),ceild(2024*t4-N+2022,2)))) { for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][0][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((0 +1) >= N ? 0 : A[0][0 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((0 -1) < 0 ? 0 : A[0][0 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][0][(-2*t1+t7-2)][(-2*t1+t8-2)];; } } } for (t7=4*t3;t7<=4*t3+3;t7++) { lbv=2024*t4; ubv=2024*t4+2023; #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { if (t1%2 == 0) { A[1][1][(-2*t1+t7-2)][(-2*t1+t8-2)] = 0.125 * (((1 +1) >= N ? 0 : A[0][1 +1][(-2*t1+t7-2)][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + ((1 -1) < 0 ? 0 : A[0][1 -1][(-2*t1+t7-2)][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t7-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)+1][(-2*t1+t8-2)]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t7-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)-1][(-2*t1+t8-2)])) + 0.125 * ((((-2*t1+t8-2)+1) >= N ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)+1]) - 2.0 * A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)] + (((-2*t1+t8-2)-1) < 0 ? 0 : A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)-1])) + A[0][1][(-2*t1+t7-2)][(-2*t1+t8-2)];; } if (t1%2 == 0) { A[0][0][(-2*t1+t7-3)][(-2*t1+t8-3)] = 0.125 * (((0 +1) >= N ? 0 : A[1][0 +1][(-2*t1+t7-3)][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + ((0 -1) < 0 ? 0 : A[1][0 -1][(-2*t1+t7-3)][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t7-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)+1][(-2*t1+t8-3)]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t7-3)-1) < 0 ? 0 : A[1][0][(-2*t1+t7-3)-1][(-2*t1+t8-3)])) + 0.125 * ((((-2*t1+t8-3)+1) >= N ? 0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)+1]) - 2.0 * A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)] + (((-2*t1+t8-3)-1) < 0 ? 
0 : A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)-1])) + A[1][0][(-2*t1+t7-3)][(-2*t1+t8-3)];; } } } } } } } } } /* End of CLooG code */ // #undef N // #define N 300L #undef T #define T 800L #ifdef TIME gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); printf("|Time taken: %7.5lfs\t", tdiff); printf("|MFLOPS: %f\n", ((((double)NUM_FP_OPS * N *N * N * (T-1)) / tdiff) / 1000000L)); #endif #ifdef VERIFY for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { total+= A[T%2][i][j][k] ; } } } printf("|sum: %e\t", total); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { sum_err_sqr += (A[T%2][i][j][k] - (total/N))*(A[T%2][i][j][k] - (total/N)); } } } printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr)); for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < N; k++) { chtotal += ((char *)A[T%2][i][j])[k]; } } } printf("|sum(rep(A)) = %d\n", chtotal); #endif for (l = 0; l < 2; l++){ for (i = 0; i < N; i++){ for (j = 0; j < N; j++) free(A[l][i][j]); // = (double *) malloc(N * sizeof (double)); free(A[l][i]); // = (double **) malloc(N * sizeof(double *)); } free(A[l]); // = (double ***) malloc(N * sizeof(double **)); } return 0; } // icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm // /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/ // /* @ begin PrimeRegTile (scalar_replacement=0; T1t5=4; T1t6=4; T1t7=4; T1t8=4; ) @*/ // /* @ end @*/
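/*
 * Hedged reference sketch (not part of the generated file above, and the name
 * heat3d_naive is ours): a naive, untiled version of the same 3-D heat update,
 * useful as a ground truth when validating the CLooG-tiled loops. It assumes A
 * is one contiguous buffer of 2*N*N*N doubles (the file above allocates A
 * piecewise via malloc) and uses the same zero-outside-the-domain boundary
 * handling that the ternary guards implement. After `steps` updates the latest
 * field sits in the A[steps % 2] half of the buffer.
 */
#include <stddef.h>

#define IDX(t, i, j, k) (((((size_t)(t) * N + (i)) * N + (j)) * N) + (k))

static void heat3d_naive(int steps, int N, double *A /* 2*N*N*N doubles */)
{
    for (int t = 0; t < steps; t++)
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++)
                for (int k = 0; k < N; k++) {
                    const double c = A[IDX(t % 2, i, j, k)];
                    /* second differences per dimension, zero-padded at the walls */
                    double di = (i + 1 < N ? A[IDX(t % 2, i + 1, j, k)] : 0.0) - 2.0 * c
                              + (i - 1 >= 0 ? A[IDX(t % 2, i - 1, j, k)] : 0.0);
                    double dj = (j + 1 < N ? A[IDX(t % 2, i, j + 1, k)] : 0.0) - 2.0 * c
                              + (j - 1 >= 0 ? A[IDX(t % 2, i, j - 1, k)] : 0.0);
                    double dk = (k + 1 < N ? A[IDX(t % 2, i, j, k + 1)] : 0.0) - 2.0 * c
                              + (k - 1 >= 0 ? A[IDX(t % 2, i, j, k - 1)] : 0.0);
                    A[IDX((t + 1) % 2, i, j, k)] = 0.125 * di + 0.125 * dj + 0.125 * dk + c;
                }
}
#undef IDX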
convolution_3x3_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4b-4a-inch/4a-64-outch/4b; kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { for (int i = 0; i < 4; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); const float* k40 = k4.row(p + i); const float* k50 = k5.row(p + i); const float* k60 = k6.row(p + i); const float* k70 = k7.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k40[k]; g00[5] = (__fp16)k50[k]; g00[6] = (__fp16)k60[k]; g00[7] = (__fp16)k70[k]; g00 += 8; } } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); 
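/* Layout note (descriptive comment, not in the original source): for each of
 * the 64 transformed positions k, the loops below gather one fp16 tap per
 * output channel (8 at a time in this branch, 4 in the tail branch further
 * down) for each input channel, walking input channels in groups of 4 -- the
 * "4b-4a-inch/4a-64-outch/4b" interleave named above, so the dot kernel can
 * load contiguous vectors. */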
for (int p = 0; p + 3 < inch; p += 4) { for (int i = 0; i < 4; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00 += 4; } } } } } static void conv3x3s1_winograd63_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 6; int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd63_transform_input_pack4_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; convolution_winograd_dot_pack4_fp16sa_neon(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { conv3x3s1_winograd63_transform_output_pack4_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd43_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd43 transform kernel Mat kernel_tm(6 * 6, inch, outch); const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 36-inch-outch // dst = 4b-4a-inch/4a-36-outch/4b; kernel_tm_pack4.create(2 * inch / 4, 36, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 
3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 36; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { for (int i = 0; i < 4; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); const float* k40 = k4.row(p + i); const float* k50 = k5.row(p + i); const float* k60 = k6.row(p + i); const float* k70 = k7.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k40[k]; g00[5] = (__fp16)k50[k]; g00[6] = (__fp16)k60[k]; g00[7] = (__fp16)k70[k]; g00 += 8; } } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 36; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { for (int i = 0; i < 4; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00 += 4; } } } } } static void conv3x3s1_winograd43_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 4n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 4; int h_tiles = outh / 4; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd43_transform_input_pack4_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; convolution_winograd_dot_pack4_fp16sa_neon(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { conv3x3s1_winograd43_transform_output_pack4_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd23_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd23 transform kernel Mat kernel_tm(4 * 4, inch, outch); const float ktm[4][3] = { {1.0f, 0.0f, 0.0f}, {1.0f / 2, 1.0f / 2, 1.0f / 2}, {1.0f / 2, -1.0f / 2, 1.0f / 2}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; 
q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[4][3]; for (int i = 0; i < 4; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 4; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 4; i++) { kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 16-inch-outch // dst = 4b-4a-inch/4a-16-outch/4b; kernel_tm_pack4.create(2 * inch / 4, 16, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 16; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { for (int i = 0; i < 4; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); const float* k40 = k4.row(p + i); const float* k50 = k5.row(p + i); const float* k60 = k6.row(p + i); const float* k70 = k7.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k40[k]; g00[5] = (__fp16)k50[k]; g00[6] = (__fp16)k60[k]; g00[7] = (__fp16)k70[k]; g00 += 8; } } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 16; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { for (int i = 0; i < 4; i++) { const float* k00 = k0.row(p + i); const float* k10 = k1.row(p + i); const float* k20 = k2.row(p + i); const float* k30 = k3.row(p + i); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00 += 4; } } } } } static void conv3x3s1_winograd23_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); // BEGIN transform input Mat bottom_blob_tm; { int w_tiles = outw / 2; int h_tiles = outh / 2; const int tiles = w_tiles * h_tiles; bottom_blob_tm.create(tiles, 16, inch, elemsize, elempack, opt.workspace_allocator); conv3x3s1_winograd23_transform_input_pack4_fp16sa_neon(bottom_blob_bordered, bottom_blob_tm, opt); } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot 
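/* Descriptive note (not in the original source): the transform/dot/transform
 * stages implement the standard Winograd convolution identity
 *     Y = A^T [ (G g G^T) (.) (B^T d B) ] A
 * with g the 3x3 kernel, d an input tile and (.) the element-wise product
 * (Lavin & Gray). Here, for F(2x2, 3x3), d is a 4x4 tile and Y a 2x2 output
 * tile; the 8x8 and 6x6 paths above are the F(6x6, 3x3) and F(4x4, 3x3)
 * variants. G g G^T was precomputed by the transform_kernel routine; the
 * "dot" below is the per-position multiply-accumulate across input channels
 * in the transformed domain. */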
Mat top_blob_tm; convolution_winograd_dot_pack4_fp16sa_neon(bottom_blob_tm, outch, kernel_tm, top_blob_tm, opt); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { conv3x3s1_winograd23_transform_output_pack4_fp16sa_neon(top_blob_tm, top_blob_bordered, bias, opt); } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x4_t _bias0 = bias ? vld1_f16(bias + p * 4) : vdup_n_f16((__fp16)0.f); out0.fill(_bias0); int q = 0; for (; q < inch; q++) { __fp16* outptr0 = out0.row<__fp16>(0); const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); // 16 * 9 float16x8_t _k00_01 = vld1q_f16(kptr); float16x8_t _k00_23 = vld1q_f16(kptr + 8); float16x8_t _k01_01 = vld1q_f16(kptr + 16); float16x8_t _k01_23 = vld1q_f16(kptr + 24); float16x8_t _k02_01 = vld1q_f16(kptr + 32); float16x8_t _k02_23 = vld1q_f16(kptr + 40); float16x8_t _k10_01 = vld1q_f16(kptr + 48); float16x8_t _k10_23 = vld1q_f16(kptr + 56); float16x8_t _k11_01 = vld1q_f16(kptr + 64); float16x8_t _k11_23 = vld1q_f16(kptr + 72); float16x8_t _k12_01 = vld1q_f16(kptr + 80); float16x8_t _k12_23 = vld1q_f16(kptr + 88); float16x8_t _k20_01 = vld1q_f16(kptr + 96); float16x8_t _k20_23 = vld1q_f16(kptr + 104); float16x8_t _k21_01 = vld1q_f16(kptr + 112); float16x8_t _k21_23 = vld1q_f16(kptr + 120); float16x8_t _k22_01 = vld1q_f16(kptr + 128); float16x8_t _k22_23 = vld1q_f16(kptr + 136); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%1] \n" // r00 r01 r02 r03 r04 r05 "ext v6.16b, %8.16b, %8.16b, #8 \n" "fmla v10.4h, %8.4h, v0.h[0] \n" "fmla v11.4h, %8.4h, v0.h[4] \n" "fmla v12.4h, %8.4h, v1.h[0] \n" "fmla v13.4h, %8.4h, v1.h[4] \n" "fmla v10.4h, v6.4h, v0.h[1] \n" "fmla v11.4h, v6.4h, v0.h[5] \n" "fmla v12.4h, v6.4h, v1.h[1] \n" "fmla v13.4h, v6.4h, v1.h[5] \n" "ext v7.16b, %9.16b, %9.16b, #8 \n" "fmla v10.4h, %9.4h, v0.h[2] \n" "fmla v11.4h, %9.4h, v0.h[6] \n" "fmla v12.4h, %9.4h, v1.h[2] \n" "fmla v13.4h, %9.4h, v1.h[6] \n" "fmla v10.4h, v7.4h, v0.h[3] \n" "fmla v11.4h, v7.4h, v0.h[7] \n" "fmla v12.4h, v7.4h, v1.h[3] \n" "fmla v13.4h, v7.4h, v1.h[7] \n" "ext v8.16b, %10.16b, %10.16b, #8 \n" "fmla v10.4h, %10.4h, v0.h[4] \n" "fmla v11.4h, %10.4h, v1.h[0] \n" "fmla v12.4h, %10.4h, v1.h[4] \n" "fmla v13.4h, %10.4h, v2.h[0] \n" "fmla v10.4h, v8.4h, v0.h[5] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "fmla v12.4h, v8.4h, v1.h[5] \n" "fmla v13.4h, v8.4h, v2.h[1] \n" "ext v9.16b, %11.16b, %11.16b, #8 \n" "fmla v10.4h, %11.4h, v0.h[6] \n" "fmla v11.4h, %11.4h, v1.h[2] \n" "fmla v12.4h, %11.4h, v1.h[6] \n" "fmla 
v13.4h, %11.4h, v2.h[2] \n" "fmla v10.4h, v9.4h, v0.h[7] \n" "fmla v11.4h, v9.4h, v1.h[3] \n" "fmla v12.4h, v9.4h, v1.h[7] \n" "fmla v13.4h, v9.4h, v2.h[3] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.8h, v4.8h, v5.8h}, [%2] \n" // r10 r11 r12 r13 r14 r15 "ext v6.16b, %12.16b, %12.16b, #8 \n" "fmla v10.4h, %12.4h, v1.h[0] \n" "fmla v11.4h, %12.4h, v1.h[4] \n" "fmla v12.4h, %12.4h, v2.h[0] \n" "fmla v13.4h, %12.4h, v2.h[4] \n" "fmla v10.4h, v6.4h, v1.h[1] \n" "fmla v11.4h, v6.4h, v1.h[5] \n" "fmla v12.4h, v6.4h, v2.h[1] \n" "fmla v13.4h, v6.4h, v2.h[5] \n" "ext v7.16b, %13.16b, %13.16b, #8 \n" "fmla v10.4h, %13.4h, v1.h[2] \n" "fmla v11.4h, %13.4h, v1.h[6] \n" "fmla v12.4h, %13.4h, v2.h[2] \n" "fmla v13.4h, %13.4h, v2.h[6] \n" "fmla v10.4h, v7.4h, v1.h[3] \n" "fmla v11.4h, v7.4h, v1.h[7] \n" "fmla v12.4h, v7.4h, v2.h[3] \n" "fmla v13.4h, v7.4h, v2.h[7] \n" "ext v8.16b, %14.16b, %14.16b, #8 \n" "fmla v10.4h, %14.4h, v3.h[0] \n" "fmla v11.4h, %14.4h, v3.h[4] \n" "fmla v12.4h, %14.4h, v4.h[0] \n" "fmla v13.4h, %14.4h, v4.h[4] \n" "fmla v10.4h, v8.4h, v3.h[1] \n" "fmla v11.4h, v8.4h, v3.h[5] \n" "fmla v12.4h, v8.4h, v4.h[1] \n" "fmla v13.4h, v8.4h, v4.h[5] \n" "ext v9.16b, %15.16b, %15.16b, #8 \n" "fmla v10.4h, %15.4h, v3.h[2] \n" "fmla v11.4h, %15.4h, v3.h[6] \n" "fmla v12.4h, %15.4h, v4.h[2] \n" "fmla v13.4h, %15.4h, v4.h[6] \n" "fmla v10.4h, v9.4h, v3.h[3] \n" "fmla v11.4h, v9.4h, v3.h[7] \n" "fmla v12.4h, v9.4h, v4.h[3] \n" "fmla v13.4h, v9.4h, v4.h[7] \n" "ext v6.16b, %16.16b, %16.16b, #8 \n" "fmla v10.4h, %16.4h, v3.h[4] \n" "fmla v11.4h, %16.4h, v4.h[0] \n" "fmla v12.4h, %16.4h, v4.h[4] \n" "fmla v13.4h, %16.4h, v5.h[0] \n" "fmla v10.4h, v6.4h, v3.h[5] \n" "fmla v11.4h, v6.4h, v4.h[1] \n" "fmla v12.4h, v6.4h, v4.h[5] \n" "fmla v13.4h, v6.4h, v5.h[1] \n" "ext v7.16b, %17.16b, %17.16b, #8 \n" "fmla v10.4h, %17.4h, v3.h[6] \n" "fmla v11.4h, %17.4h, v4.h[2] \n" "fmla v12.4h, %17.4h, v4.h[6] \n" "fmla v13.4h, %17.4h, v5.h[2] \n" "fmla v10.4h, v7.4h, v3.h[7] \n" "fmla v11.4h, v7.4h, v4.h[3] \n" "fmla v12.4h, v7.4h, v4.h[7] \n" "fmla v13.4h, v7.4h, v5.h[3] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.8h, v1.8h, v2.8h}, [%3] \n" // r20 r21 r22 r23 r24 r25 "ext v8.16b, %18.16b, %18.16b, #8 \n" "fmla v10.4h, %18.4h, v4.h[0] \n" "fmla v11.4h, %18.4h, v4.h[4] \n" "fmla v12.4h, %18.4h, v5.h[0] \n" "fmla v13.4h, %18.4h, v5.h[4] \n" "fmla v10.4h, v8.4h, v4.h[1] \n" "fmla v11.4h, v8.4h, v4.h[5] \n" "fmla v12.4h, v8.4h, v5.h[1] \n" "fmla v13.4h, v8.4h, v5.h[5] \n" "ext v9.16b, %19.16b, %19.16b, #8 \n" "fmla v10.4h, %19.4h, v4.h[2] \n" "fmla v11.4h, %19.4h, v4.h[6] \n" "fmla v12.4h, %19.4h, v5.h[2] \n" "fmla v13.4h, %19.4h, v5.h[6] \n" "fmla v10.4h, v9.4h, v4.h[3] \n" "fmla v11.4h, v9.4h, v4.h[7] \n" "fmla v12.4h, v9.4h, v5.h[3] \n" "fmla v13.4h, v9.4h, v5.h[7] \n" "ext v6.16b, %20.16b, %20.16b, #8 \n" "fmla v10.4h, %20.4h, v0.h[0] \n" "fmla v11.4h, %20.4h, v0.h[4] \n" "fmla v12.4h, %20.4h, v1.h[0] \n" "fmla v13.4h, %20.4h, v1.h[4] \n" "fmla v10.4h, v6.4h, v0.h[1] \n" "fmla v11.4h, v6.4h, v0.h[5] \n" "fmla v12.4h, v6.4h, v1.h[1] \n" "fmla v13.4h, v6.4h, v1.h[5] \n" "ext v7.16b, %21.16b, %21.16b, #8 \n" "fmla v10.4h, %21.4h, v0.h[2] \n" "fmla v11.4h, %21.4h, v0.h[6] \n" "fmla v12.4h, %21.4h, v1.h[2] \n" "fmla v13.4h, %21.4h, v1.h[6] \n" "fmla v10.4h, v7.4h, v0.h[3] \n" "fmla v11.4h, v7.4h, v0.h[7] \n" "fmla v12.4h, v7.4h, v1.h[3] \n" "fmla v13.4h, v7.4h, v1.h[7] \n" "ext v8.16b, %22.16b, %22.16b, #8 \n" "fmla v10.4h, %22.4h, v0.h[4] \n" "fmla v11.4h, %22.4h, v1.h[0] \n" "fmla v12.4h, %22.4h, v1.h[4] \n" 
"fmla v13.4h, %22.4h, v2.h[0] \n" "fmla v10.4h, v8.4h, v0.h[5] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "fmla v12.4h, v8.4h, v1.h[5] \n" "fmla v13.4h, v8.4h, v2.h[1] \n" "ext v9.16b, %23.16b, %23.16b, #8 \n" "fmla v10.4h, %23.4h, v0.h[6] \n" "fmla v11.4h, %23.4h, v1.h[2] \n" "fmla v12.4h, %23.4h, v1.h[6] \n" "fmla v13.4h, %23.4h, v2.h[2] \n" "fmla v10.4h, v9.4h, v0.h[7] \n" "fmla v11.4h, v9.4h, v1.h[3] \n" "fmla v12.4h, v9.4h, v1.h[7] \n" "fmla v13.4h, v9.4h, v2.h[3] \n" "ext v6.16b, %24.16b, %24.16b, #8 \n" "fmla v10.4h, %24.4h, v1.h[0] \n" "fmla v11.4h, %24.4h, v1.h[4] \n" "fmla v12.4h, %24.4h, v2.h[0] \n" "fmla v13.4h, %24.4h, v2.h[4] \n" "add %1, %1, #32 \n" "fmla v10.4h, v6.4h, v1.h[1] \n" "fmla v11.4h, v6.4h, v1.h[5] \n" "fmla v12.4h, v6.4h, v2.h[1] \n" "fmla v13.4h, v6.4h, v2.h[5] \n" "ext v7.16b, %25.16b, %25.16b, #8 \n" "fmla v10.4h, %25.4h, v1.h[2] \n" "fmla v11.4h, %25.4h, v1.h[6] \n" "fmla v12.4h, %25.4h, v2.h[2] \n" "fmla v13.4h, %25.4h, v2.h[6] \n" "add %2, %2, #32 \n" "fmla v10.4h, v7.4h, v1.h[3] \n" "fmla v11.4h, v7.4h, v1.h[7] \n" "fmla v12.4h, v7.4h, v2.h[3] \n" "fmla v13.4h, v7.4h, v2.h[7] \n" "add %3, %3, #32 \n" "st1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.8h, v1.8h}, [%1] \n" // r00 r01 r02 r03 "prfm pldl1keep, [%0, #128] \n" "ld1 {v12.4h, v13.4h}, [%0] \n" // sum0 sum1 "ext v4.16b, %8.16b, %8.16b, #8 \n" "fmul v10.4h, %8.4h, v0.h[0] \n" "fmul v11.4h, %8.4h, v0.h[4] \n" "fmla v12.4h, v4.4h, v0.h[1] \n" "fmla v13.4h, v4.4h, v0.h[5] \n" "ext v5.16b, %9.16b, %9.16b, #8 \n" "fmla v10.4h, %9.4h, v0.h[2] \n" "fmla v11.4h, %9.4h, v0.h[6] \n" "fmla v12.4h, v5.4h, v0.h[3] \n" "fmla v13.4h, v5.4h, v0.h[7] \n" "ext v6.16b, %10.16b, %10.16b, #8 \n" "fmla v10.4h, %10.4h, v0.h[4] \n" "fmla v11.4h, %10.4h, v1.h[0] \n" "fmla v12.4h, v6.4h, v0.h[5] \n" "fmla v13.4h, v6.4h, v1.h[1] \n" "ext v7.16b, %11.16b, %11.16b, #8 \n" "fmla v10.4h, %11.4h, v0.h[6] \n" "fmla v11.4h, %11.4h, v1.h[2] \n" "fmla v12.4h, v7.4h, v0.h[7] \n" "fmla v13.4h, v7.4h, v1.h[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v2.8h, v3.8h}, [%2] \n" // r10 r11 r12 r13 "ext v8.16b, %12.16b, %12.16b, #8 \n" "fmla v10.4h, %12.4h, v1.h[0] \n" "fmla v11.4h, %12.4h, v1.h[4] \n" "fmla v12.4h, v8.4h, v1.h[1] \n" "fmla v13.4h, v8.4h, v1.h[5] \n" "ext v9.16b, %13.16b, %13.16b, #8 \n" "fmla v10.4h, %13.4h, v1.h[2] \n" "fmla v11.4h, %13.4h, v1.h[6] \n" "fmla v12.4h, v9.4h, v1.h[3] \n" "fmla v13.4h, v9.4h, v1.h[7] \n" "ext v4.16b, %14.16b, %14.16b, #8 \n" "fmla v10.4h, %14.4h, v2.h[0] \n" "fmla v11.4h, %14.4h, v2.h[4] \n" "fmla v12.4h, v4.4h, v2.h[1] \n" "fmla v13.4h, v4.4h, v2.h[5] \n" "ext v5.16b, %15.16b, %15.16b, #8 \n" "fmla v10.4h, %15.4h, v2.h[2] \n" "fmla v11.4h, %15.4h, v2.h[6] \n" "fmla v12.4h, v5.4h, v2.h[3] \n" "fmla v13.4h, v5.4h, v2.h[7] \n" "ext v6.16b, %16.16b, %16.16b, #8 \n" "fmla v10.4h, %16.4h, v2.h[4] \n" "fmla v11.4h, %16.4h, v3.h[0] \n" "fmla 
v12.4h, v6.4h, v2.h[5] \n" "fmla v13.4h, v6.4h, v3.h[1] \n" "ext v7.16b, %17.16b, %17.16b, #8 \n" "fmla v10.4h, %17.4h, v2.h[6] \n" "fmla v11.4h, %17.4h, v3.h[2] \n" "fmla v12.4h, v7.4h, v2.h[7] \n" "fmla v13.4h, v7.4h, v3.h[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.8h, v1.8h}, [%3] \n" // r20 r21 r22 r23 "ext v8.16b, %18.16b, %18.16b, #8 \n" "fmla v10.4h, %18.4h, v3.h[0] \n" "fmla v11.4h, %18.4h, v3.h[4] \n" "fmla v12.4h, v8.4h, v3.h[1] \n" "fmla v13.4h, v8.4h, v3.h[5] \n" "ext v9.16b, %19.16b, %19.16b, #8 \n" "fmla v10.4h, %19.4h, v3.h[2] \n" "fmla v11.4h, %19.4h, v3.h[6] \n" "fmla v12.4h, v9.4h, v3.h[3] \n" "fmla v13.4h, v9.4h, v3.h[7] \n" "ext v4.16b, %20.16b, %20.16b, #8 \n" "fmla v10.4h, %20.4h, v0.h[0] \n" "fmla v11.4h, %20.4h, v0.h[4] \n" "fmla v12.4h, v4.4h, v0.h[1] \n" "fmla v13.4h, v4.4h, v0.h[5] \n" "ext v5.16b, %21.16b, %21.16b, #8 \n" "fmla v10.4h, %21.4h, v0.h[2] \n" "fmla v11.4h, %21.4h, v0.h[6] \n" "fmla v12.4h, v5.4h, v0.h[3] \n" "fmla v13.4h, v5.4h, v0.h[7] \n" "ext v6.16b, %22.16b, %22.16b, #8 \n" "fmla v10.4h, %22.4h, v0.h[4] \n" "fmla v11.4h, %22.4h, v1.h[0] \n" "fmla v12.4h, v6.4h, v0.h[5] \n" "fmla v13.4h, v6.4h, v1.h[1] \n" "ext v7.16b, %23.16b, %23.16b, #8 \n" "fmla v10.4h, %23.4h, v0.h[6] \n" "fmla v11.4h, %23.4h, v1.h[2] \n" "fmla v12.4h, v7.4h, v0.h[7] \n" "fmla v13.4h, v7.4h, v1.h[3] \n" "ext v8.16b, %24.16b, %24.16b, #8 \n" "fmla v10.4h, %24.4h, v1.h[0] \n" "fmla v11.4h, %24.4h, v1.h[4] \n" "fmla v12.4h, v8.4h, v1.h[1] \n" "fmla v13.4h, v8.4h, v1.h[5] \n" "ext v9.16b, %25.16b, %25.16b, #8 \n" "fmla v10.4h, %25.4h, v1.h[2] \n" "fmla v11.4h, %25.4h, v1.h[6] \n" "fmla v12.4h, v9.4h, v1.h[3] \n" "fmla v13.4h, v9.4h, v1.h[7] \n" "add %1, %1, #16 \n" "fadd v10.4h, v10.4h, v12.4h \n" "add %2, %2, #16 \n" "fadd v11.4h, v11.4h, v13.4h \n" "add %3, %3, #16 \n" "st1 {v10.4h, v11.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%1] \n" // r00 r01 r02 "prfm pldl1keep, [%0, #64] \n" "ld1 {v13.4h}, [%0] \n" // sum0 "ext v6.16b, %8.16b, %8.16b, #8 \n" "fmul v10.4h, %8.4h, v0.h[0] \n" "fmul v11.4h, v6.4h, v0.h[1] \n" "ext v7.16b, %9.16b, %9.16b, #8 \n" "fmul v12.4h, %9.4h, v0.h[2] \n" "fmla v13.4h, v7.4h, v0.h[3] \n" "ext v8.16b, %10.16b, %10.16b, #8 \n" "fmla v10.4h, %10.4h, v1.h[0] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "ext v9.16b, %11.16b, %11.16b, #8 \n" "fmla v12.4h, %11.4h, v1.h[2] \n" "fmla v13.4h, v9.4h, v1.h[3] \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v3.4h, v4.4h, v5.4h}, [%2] \n" // r10 r11 r12 "ext v6.16b, %12.16b, %12.16b, #8 \n" "fmla v10.4h, %12.4h, v2.h[0] \n" "fmla v11.4h, v6.4h, v2.h[1] \n" "ext v7.16b, %13.16b, %13.16b, #8 \n" "fmla v12.4h, %13.4h, v2.h[2] \n" "fmla v13.4h, v7.4h, v2.h[3] \n" "ext v8.16b, %14.16b, %14.16b, #8 \n" "fmla v10.4h, %14.4h, v3.h[0] \n" "fmla v11.4h, v8.4h, v3.h[1] \n" "ext v9.16b, %15.16b, %15.16b, #8 \n" "fmla v12.4h, %15.4h, v3.h[2] \n" "fmla v13.4h, v9.4h, v3.h[3] \n" "ext 
v6.16b, %16.16b, %16.16b, #8 \n" "fmla v10.4h, %16.4h, v4.h[0] \n" "fmla v11.4h, v6.4h, v4.h[1] \n" "ext v7.16b, %17.16b, %17.16b, #8 \n" "fmla v12.4h, %17.4h, v4.h[2] \n" "fmla v13.4h, v7.4h, v4.h[3] \n" "prfm pldl1keep, [%3, #192] \n" "ld1 {v0.4h, v1.4h, v2.4h}, [%3] \n" // r20 r21 r22 "ext v8.16b, %18.16b, %18.16b, #8 \n" "fmla v10.4h, %18.4h, v5.h[0] \n" "fmla v11.4h, v8.4h, v5.h[1] \n" "ext v9.16b, %19.16b, %19.16b, #8 \n" "fmla v12.4h, %19.4h, v5.h[2] \n" "fmla v13.4h, v9.4h, v5.h[3] \n" "ext v6.16b, %20.16b, %20.16b, #8 \n" "fmla v10.4h, %20.4h, v0.h[0] \n" "fmla v11.4h, v6.4h, v0.h[1] \n" "ext v7.16b, %21.16b, %21.16b, #8 \n" "fmla v12.4h, %21.4h, v0.h[2] \n" "fmla v13.4h, v7.4h, v0.h[3] \n" "ext v8.16b, %22.16b, %22.16b, #8 \n" "fmla v10.4h, %22.4h, v1.h[0] \n" "fmla v11.4h, v8.4h, v1.h[1] \n" "ext v9.16b, %23.16b, %23.16b, #8 \n" "fmla v12.4h, %23.4h, v1.h[2] \n" "fmla v13.4h, v9.4h, v1.h[3] \n" "ext v6.16b, %24.16b, %24.16b, #8 \n" "fmla v10.4h, %24.4h, v2.h[0] \n" "fmla v11.4h, v6.4h, v2.h[1] \n" "ext v7.16b, %25.16b, %25.16b, #8 \n" "fmla v12.4h, %25.4h, v2.h[2] \n" "fmla v13.4h, v7.4h, v2.h[3] \n" "fadd v10.4h, v10.4h, v11.4h \n" "add %1, %1, #8 \n" "fadd v12.4h, v12.4h, v13.4h \n" "add %2, %2, #8 \n" "fadd v10.4h, v10.4h, v12.4h \n" "add %3, %3, #8 \n" "st1 {v10.4h}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00_01), // %8 "w"(_k00_23), // %9 "w"(_k01_01), // %10 "w"(_k01_23), // %11 "w"(_k02_01), // %12 "w"(_k02_23), // %13 "w"(_k10_01), // %14 "w"(_k10_23), // %15 "w"(_k11_01), // %16 "w"(_k11_23), // %17 "w"(_k12_01), // %18 "w"(_k12_23), // %19 "w"(_k20_01), // %20 "w"(_k20_23), // %21 "w"(_k21_01), // %22 "w"(_k21_23), // %23 "w"(_k22_01), // %24 "w"(_k22_23) // %25 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13"); } r0 += 8; r1 += 8; r2 += 8; } } } }
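/*
 * Hedged sketch, not ncnn API (the name conv3x3s1_naive and the flat-buffer
 * layouts are our assumptions): a scalar 3x3 stride-1 valid convolution over
 * plain float buffers, usable as a ground truth for the pack4 fp16 NEON paths
 * above (compare within fp16 tolerance). src is assumed to be inch planes of
 * w*h floats, kernel outch*inch*9 floats in the order the transform routines
 * above read it, dst outch planes of (w-2)*(h-2) floats.
 */
#include <cstddef>

static void conv3x3s1_naive(const float* src, int w, int h, int inch,
                            const float* kernel, const float* bias,
                            float* dst, int outch)
{
    const int outw = w - 2;
    const int outh = h - 2;
    for (int p = 0; p < outch; p++)
        for (int y = 0; y < outh; y++)
            for (int x = 0; x < outw; x++) {
                float sum = bias ? bias[p] : 0.f;
                for (int q = 0; q < inch; q++) {
                    const float* k0 = kernel + ((std::size_t)p * inch + q) * 9;
                    const float* r = src + (std::size_t)q * w * h + y * w + x;
                    // 3x3 window anchored at (x, y), no padding needed for valid conv
                    for (int dy = 0; dy < 3; dy++)
                        for (int dx = 0; dx < 3; dx++)
                            sum += r[dy * w + dx] * k0[dy * 3 + dx];
                }
                dst[(std::size_t)p * outw * outh + y * outw + x] = sum;
            }
}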
dft_dft_solver.h
#ifndef _DFT_DFT_SOLVER_ #define _DFT_DFT_SOLVER_ #include <complex> #include "spectral/spectral.h" #include "blueprint.h" #include "equations.h" namespace spectral { /*! @brief Solver for periodic boundary conditions of the spectral equations. * @ingroup solvers */ template< size_t n> class DFT_DFT_Solver { public: typedef Matrix<double, TL_DFT> Matrix_Type; /*! @brief Construct a solver for periodic boundary conditions * * The constructor allocates storage for the solver * and initializes all fourier coefficients as well as * all low level solvers needed. * @param blueprint Contains all the necessary parameters. * @throw Message If your parameters are inconsistent. */ DFT_DFT_Solver( const Blueprint& blueprint); /*! @brief Prepare Solver for execution * * This function takes the fields and computes the missing * one according to the target parameter passed. * @param v Container with three non void matrices * @param t which Matrix is missing? */ void init( std::array< Matrix<double,TL_DFT>, n>& v, enum target t); /** * @brief Perform first initializing step * */ void first_step(); /** * @brief Perform second initializing step * * After that the step function can be used */ void second_step(); /*! @brief Perform a step by the 3 step Karniadakis scheme * * @attention At least one call of first_step() and second_step() is necessary * */ void step(){ step_<TL_ORDER3>();} /*! @brief Get the result You get the solution matrix of the current timestep. @param t The field you want @return A Read only reference to the field @attention The reference is only valid until the next call to the step() function! */ const Matrix<double, TL_DFT>& getField( enum target t) const; /*! @brief Get the result Use this function when you want to call step() without destroying the solution. @param m In exchange for the solution matrix you have to provide storage for further calculations. The field is swapped in. @param t The field you want. @attention The fields you get are not the ones of the current timestep. You get the fields that are not needed any more. This means the densities are 4 timesteps "old" whereas the potential is the one of the last timestep. */ void getField( Matrix<double, TL_DFT>& m, enum target t); const std::array<Matrix<double, TL_DFT>, n>& getDensity( )const{return dens;} const std::array<Matrix<double, TL_DFT>, n>& getPotential( )const{return phi;} /*! @brief Get the parameters of the solver. @return The parameters in use. @note You cannot change parameters once constructed. 
*/ const Blueprint& blueprint() const { return blue;} private: typedef std::complex<double> complex; //methods void init_coefficients( const Boundary& bound, const Physical& phys); void compute_cphi();//multiply cphi double dot( const Matrix_Type& m1, const Matrix_Type& m2); template< enum stepper S> void step_(); //members const size_t rows, cols; const size_t crows, ccols; const Blueprint blue; /////////////////fields////////////////////////////////// //GhostMatrix<double, TL_DFT> ghostdens, ghostphi; std::array< Matrix<double, TL_DFT>, n> dens, phi, nonlinear; /////////////////Complex (void) Matrices for fourier transforms/////////// std::array< Matrix< complex>, n> cdens, cphi; ///////////////////Solvers//////////////////////// Arakawa arakawa; Karniadakis<n, complex, TL_DFT> karniadakis; DFT_DFT dft_dft; /////////////////////Coefficients////////////////////// Matrix< std::array< double, n> > phi_coeff; std::array< Matrix< double>, n-1> gamma_coeff; }; template< size_t n> DFT_DFT_Solver<n>::DFT_DFT_Solver( const Blueprint& bp): rows( bp.algorithmic().ny ), cols( bp.algorithmic().nx ), crows( rows), ccols( cols/2+1), blue( bp), //fields dens( MatrixArray<double, TL_DFT,n>::construct( rows, cols)), phi( dens), nonlinear( dens), cdens( MatrixArray<complex, TL_NONE, n>::construct( crows, ccols)), cphi(cdens), //Solvers arakawa( bp.algorithmic().h), karniadakis(rows, cols, crows, ccols, bp.algorithmic().dt), dft_dft( rows, cols, FFTW_MEASURE), //Coefficients phi_coeff( crows, ccols), gamma_coeff( MatrixArray< double, TL_NONE, n-1>::construct( crows, ccols)) { bp.consistencyCheck(); if( bp.isEnabled( TL_GLOBAL)) { std::cerr << "WARNING: GLOBAL solver not implemented yet! \n\ Switch to local solver...\n"; } init_coefficients( bp.boundary(), bp.physical()); } template< size_t n> void DFT_DFT_Solver<n>::init_coefficients( const Boundary& bound, const Physical& phys) { Matrix< QuadMat< complex, n> > coeff( crows, ccols); double laplace; int ik; const complex dymin( 0, 2.*M_PI/bound.ly); const double kxmin2 = 2.*2.*M_PI*M_PI/(double)(bound.lx*bound.lx), kymin2 = 2.*2.*M_PI*M_PI/(double)(bound.ly*bound.ly); Equations e( phys, blue.isEnabled( TL_MHW)); Poisson p( phys); // dft_dft is not transposing so i is the y index by default for( unsigned i = 0; i<crows; i++) for( unsigned j = 0; j<ccols; j++) { ik = (i>rows/2) ? (i-rows) : i; //integer division rounded down laplace = - kxmin2*(double)(j*j) - kymin2*(double)(ik*ik); if( n == 2) { gamma_coeff[0](i,j) = p.gamma1_i( laplace); } else if( n == 3) { gamma_coeff[0](i,j) = p.gamma1_i( laplace); gamma_coeff[1](i,j) = p.gamma1_z( laplace); } if( rows%2 == 0 && i == rows/2) ik = 0; e( coeff( i,j), laplace, (double)ik*dymin); if( laplace == 0) continue; p( phi_coeff(i,j), laplace); } //for periodic bc the constant is undefined for( unsigned k=0; k<n; k++) phi_coeff(0,0)[k] = 0; karniadakis.init_coeff( coeff, (double)(rows*cols)); } template< size_t n> void DFT_DFT_Solver<n>::init( std::array< Matrix<double, TL_DFT>,n>& v, enum target t) { //fourier transform input into cdens for( unsigned k=0; k<n; k++) { #ifdef TL_DEBUG if( v[k].isVoid()) throw Message("You gave me a void Matrix!!", _ping_); #endif dft_dft.r2c( v[k], cdens[k]); } //don't forget to normalize coefficients!! for( unsigned k=0; k<n; k++) for( unsigned i=0; i<crows; i++) for( unsigned j=0; j<ccols;j++) cdens[k](i,j) /= (double)(rows*cols); switch( t) //which field must be computed? 
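/* Note (descriptive comment, not in the original source): per Fourier mode the
 * Poisson coefficients encode the linear relation
 *     cphi[0] = sum_k phi_coeff[k] * cdens[k],
 * so a missing species s is recovered below as
 *     cdens[s] = ( cphi[0] - sum_{k != s} phi_coeff[k]*cdens[k] ) / phi_coeff[s],
 * while TL_POTENTIAL evaluates the sum directly. */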
    {
        case( TL_ELECTRONS):
            //bring cdens and cphi in the right order
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>0; k--)
                swap_fields( cdens[k], cdens[k-1]);
            //now solve for cdens[0]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[0](i,j) = cphi[0](i,j)/phi_coeff(i,j)[0];
                    for( unsigned k=1; k<n; k++) //subtract all other species
                        cdens[0](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[0];
                }
            break;
        case( TL_IONS):
            //bring cdens and cphi in the right order
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>1; k--)
                swap_fields( cdens[k], cdens[k-1]);
            //solve for cdens[1]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[1](i,j) = cphi[0](i,j)/phi_coeff(i,j)[1];
                    for( unsigned k=0; k<n; k++) //subtract all other species
                        if( k != 1)
                            cdens[1](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[1];
                }
            break;
        case( TL_IMPURITIES):
            //bring cdens and cphi in the right order
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>2; k--) //i.e. never for n = 3
                swap_fields( cdens[k], cdens[k-1]);
            //solve for cdens[2]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[2](i,j) = cphi[0](i,j)/phi_coeff(i,j)[2];
                    for( unsigned k=0; k<n; k++) //subtract all other species
                        if( k != 2)
                            cdens[2](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[2];
                }
            break;
        case( TL_POTENTIAL):
            //solve for cphi
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cphi[0](i,j) = 0;
                    for( unsigned k=0; k<n; k++) //sum over all species
                        cphi[0](i,j) += cdens[k](i,j)*phi_coeff(i,j)[k];
                }
            break;
        case( TL_ALL):
            throw Message( "TL_ALL not treated yet!", _ping_);
    }
    //compute the remaining cphi[k]
    for( unsigned k=0; k<n-1; k++)
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
                cphi[k+1](i,j) = gamma_coeff[k](i,j)*cphi[0](i,j);
    //backtransform to x-space
    for( unsigned k=0; k<n; k++)
    {
        //set (0,0) mode 0 again
        cdens[k](0,0) = 0;
        cphi[k](0,0) = 0;
        dft_dft.c2r( cdens[k], dens[k]);
        dft_dft.c2r( cphi[k], phi[k]);
    }
    //now the densities and the potential are given in x-space
    //first_steps();
}

template< size_t n>
void DFT_DFT_Solver<n>::getField( Matrix<double, TL_DFT>& m, enum target t)
{
#ifdef TL_DEBUG
    if(m.isVoid())
        throw Message( "You may not swap in a void Matrix!\n", _ping_);
#endif
    switch( t)
    {
        case( TL_ELECTRONS):  swap_fields( m, nonlinear[0]); break;
        case( TL_IONS):       swap_fields( m, nonlinear[1]); break;
        case( TL_IMPURITIES): swap_fields( m, nonlinear[2]); break;
        case( TL_POTENTIAL):  swap_fields( m, cphi[0]); break;
        case( TL_ALL):        throw Message( "TL_ALL not allowed here", _ping_);
    }
}

template< size_t n>
const Matrix<double, TL_DFT>& DFT_DFT_Solver<n>::getField( enum target t) const
{
    Matrix<double, TL_DFT> const * m = 0;
    switch( t)
    {
        case( TL_ELECTRONS):  m = &dens[0]; break;
        case( TL_IONS):       m = &dens[1]; break;
        case( TL_IMPURITIES): m = &dens[2]; break;
        case( TL_POTENTIAL):  m = &phi[0]; break;
        case( TL_ALL):        throw Message( "TL_ALL not allowed here", _ping_);
    }
    return *m;
}

template< size_t n>
void DFT_DFT_Solver<n>::first_step()
{
    karniadakis.template invert_coeff<TL_EULER>( );
    step_<TL_EULER>();
}

template< size_t n>
void DFT_DFT_Solver<n>::second_step()
{
    karniadakis.template invert_coeff<TL_ORDER2>();
    step_<TL_ORDER2>();
    karniadakis.template invert_coeff<TL_ORDER3>();
}

template< size_t n>
void DFT_DFT_Solver<n>::compute_cphi()
{
    if( n==2)
    {
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
                cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j)
                             + phi_coeff(i,j)[1]*cdens[1](i,j);
        }
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
                cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j);
        }
    }
    else if( n==3)
    {
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
                cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j)
                             + phi_coeff(i,j)[1]*cdens[1](i,j)
                             + phi_coeff(i,j)[2]*cdens[2](i,j);
        }
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++){
            for( size_t j = 0; j < ccols; j++)
            {
                cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j);
                cphi[2](i,j) = gamma_coeff[1](i,j)*cphi[0](i,j);
            }
        }
    }
}

template< size_t n>
template< enum stepper S>
void DFT_DFT_Solver<n>::step_()
{
    //1. compute the nonlinearity
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
    {
        GhostMatrix<double, TL_DFT> ghostdens{ rows, cols, TL_PERIODIC, TL_PERIODIC};
        GhostMatrix<double, TL_DFT> ghostphi{ rows, cols, TL_PERIODIC, TL_PERIODIC};
        swap_fields( dens[k], ghostdens); //now dens[k] is void
        swap_fields( phi[k], ghostphi); //now phi[k] is void
        ghostdens.initGhostCells( );
        ghostphi.initGhostCells( );
        arakawa( ghostdens, ghostphi, nonlinear[k]);
        swap_fields( dens[k], ghostdens); //now ghostdens is void
        swap_fields( phi[k], ghostphi); //now ghostphi is void
    }
    //2. perform the explicit part of the Karniadakis step
    karniadakis.template step_i<S>( dens, nonlinear);
    //3. solve the linear equation
    //3.1. transform the densities to Fourier space
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
        dft_dft.r2c( dens[k], cdens[k]);
    //3.2. perform the implicit Karniadakis step and multiply the coefficients for phi
    karniadakis.step_ii( cdens);
    compute_cphi();
    //3.3. backtransform
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
    {
        dft_dft.c2r( cdens[k], dens[k]);
        dft_dft.c2r( cphi[k], phi[k]);
    }
}

} //namespace spectral

#endif //_DFT_DFT_SOLVER_
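/* Illustrative only: a minimal sketch of the intended call sequence for this
 * solver, inferred from the interface above.  The Blueprint construction and
 * the initialization of the input fields are assumptions, not part of this
 * file.
 *
 *     spectral::Blueprint bp = read_blueprint( "input.txt"); // hypothetical helper
 *     spectral::DFT_DFT_Solver<2> solver( bp);
 *     std::array< spectral::Matrix<double, spectral::TL_DFT>, 2> fields = ...; // fill with densities
 *     solver.init( fields, spectral::TL_POTENTIAL); // compute phi from the densities
 *     solver.first_step();  // Euler start-up step
 *     solver.second_step(); // second order step; step() is valid from now on
 *     for( unsigned i=0; i<steps; i++)
 *         solver.step();    // third order Karniadakis steps
 */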
quantize.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%           QQQ   U   U   AAA   N   N  TTTTT  IIIII   ZZZZZ  EEEEE            %
%          Q   Q  U   U  A   A  NN  N    T      I        ZZ  E                %
%          Q   Q  U   U  AAAAA  N N N    T      I      ZZZ   EEEEE            %
%           Q QQ  U   U  A   A  N  NN    T      I     ZZ     E                %
%            QQQQ  UUU   A   A  N   N    T    IIIII   ZZZZZ  EEEEE            %
%                                                                             %
%                                                                             %
%    MagickCore Methods to Reduce the Number of Unique Colors in an Image     %
%                                                                             %
%                           Software Design                                   %
%                                Cristy                                       %
%                              July 1992                                      %
%                                                                             %
%                                                                             %
%  Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    https://imagemagick.org/script/license.php                               %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Realism in computer graphics typically requires using 24 bits/pixel to
%  generate an image.  Yet many graphic display devices do not contain the
%  amount of memory necessary to match the spatial and color resolution of
%  the human eye.  The Quantize methods take a 24-bit image and reduce the
%  number of colors so it can be displayed on a raster device with fewer
%  bits per pixel.  In most instances, the quantized image closely
%  resembles the original reference image.
%
%  A reduction of colors in an image is also desirable for image
%  transmission and real-time animation.
%
%  QuantizeImage() takes standard RGB or monochrome images and quantizes
%  them down to some fixed number of colors.
%
%  For purposes of color allocation, an image is a set of n pixels, where
%  each pixel is a point in RGB space.  RGB space is a 3-dimensional
%  vector space, and each pixel, Pi, is defined by an ordered triple of
%  red, green, and blue coordinates, (Ri, Gi, Bi).
%
%  Each primary color component (red, green, or blue) represents an
%  intensity which varies linearly from 0 to a maximum value, Cmax, which
%  corresponds to full saturation of that color.  Color allocation is
%  defined over a domain consisting of the cube in RGB space with opposite
%  vertices at (0,0,0) and (Cmax, Cmax, Cmax).  QUANTIZE requires Cmax =
%  255.
%
%  The algorithm maps this domain onto a tree in which each node
%  represents a cube within that domain.  In the following discussion
%  these cubes are defined by the coordinates of two opposite vertices (the
%  vertex nearest the origin in RGB space and the vertex farthest from it).
%
%  The tree's root node represents the entire domain, (0,0,0) through
%  (Cmax,Cmax,Cmax).  Each lower level in the tree is generated by
%  subdividing one node's cube into eight smaller cubes of equal size.
%  This corresponds to bisecting the parent cube with planes passing
%  through the midpoints of each edge.
%
%  The basic algorithm operates in three phases: Classification,
%  Reduction, and Assignment.  Classification builds a color description
%  tree for the image.  Reduction collapses the tree until the number of
%  colors it represents is, at most, the number of colors desired in the
%  output image.  Assignment defines the output image's color map and sets
%  each pixel's color by reclassification in the reduced tree.
%  Our goal is to minimize the numerical discrepancies between the original
%  colors and the quantized colors (quantization error).
%
%  Classification begins by initializing a color description tree of
%  sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color description
%  tree in the classification phase for realistic values of Cmax.  If
%  color components in the input image are quantized to k-bit precision,
%  so that Cmax = 2^k-1, the tree would need k levels below the root node
%  to allow representing each possible input color in a leaf.  This becomes
%  prohibitive because the tree's total number of nodes is
%  1 + sum(i=1, k, 8^i).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  initializes data structures for nodes only as they are needed; (2)
%  chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree; initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth.  The combination of these sums
%    and n2 will ultimately characterize the mean color of a set of pixels
%    represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the node's center.  This represents the
%    quantization error for a node.
%
%  Reduction repeatedly prunes the tree until the number of nodes with n2
%  > 0 is less than or equal to the maximum number of colors allowed in
%  the output image.  On any given iteration over the tree, it selects
%  those nodes whose E count is minimal for pruning and merges their color
%  statistics upward.  It uses a pruning threshold, Ep, to govern node
%  selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image.  At the
%  beginning of reduction, n2 = 0 for all nodes except the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors within
%  the cubic volume which the node represents.
This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. */ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. 
*/ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info)); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither_method=image_info->dither == MagickFalse ? NoDitherMethod : RiemersmaDitherMethod; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
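%  A hedged sketch of the quantization pipeline implied by the three phases
%  described above (these internal helpers are declared earlier in this file;
%  the exact sequence inside QuantizeImage() may differ):
%
%      cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
%      if (ClassifyImageColors(cube_info,image,exception) != MagickFalse)
%        {
%          ReduceImageColors(image,cube_info);
%          status=AssignImageColors(image,cube_info,exception);
%        }
%      DestroyCubeInfo(cube_info);
%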
% */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ColorspaceType colorspace; ssize_t y; /* Allocate image colormap. */ colorspace=image->colorspace; if (cube_info->quantize_info->colorspace != UndefinedColorspace) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. */ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register Quantum *magick_restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. 
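        To keep this cheap, the run of identical pixels starting at x is
        measured first (the count loop below) so the tree descent and the
        closest-color search are performed once per run rather than once per
        pixel.  Each level of the descent consumes one bit of the channel
        values, most significant bit first.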
*/ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) || (cube_info->quantize_info->colorspace == GRAYColorspace))) { double intensity; /* Monochrome image. */ intensity=0.0; if ((image->colors > 1) && (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1))) intensity=(double) QuantumRange; image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (IssRGBCompatibleColorspace(colorspace) == MagickFalse)) (void) TransformImageColorspace(image,colorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the storage_class phase for realistic values of % Cmax. If colors components in the input image are quantized to k-bit % precision, so that Cmax= 2k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1,k,8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. 
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  initializes data structures for nodes only as they are needed; (2)
%  chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree; initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the node's center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue :
    MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,
        exception);
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
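        A run of identical pixels is measured first (the count loop below).
        During the descent the cube midpoint (mid) is tracked by adding or
        subtracting the halved bisect width per channel, so the squared
        pixel-to-midpoint distance can be accumulated as the node's
        quantization error.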
*/ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? 
bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != MagickFalse) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel((MagickRealType) OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. 
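%  A hedged usage sketch (pairing the clone with its destructor, as is done
%  elsewhere in this file):
%
%      QuantizeInfo *clone_info = CloneQuantizeInfo(quantize_info);
%      ...
%      clone_info = DestroyQuantizeInfo(clone_info);
%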
% */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double pixel; register double alpha, beta, distance; register DoublePixelPacket *magick_restrict q; register PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(double) (QuantumScale*p->alpha); beta=(double) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
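%  A minimal usage sketch (the call quantizes in place to the image's current
%  number of colors at full tree depth, dropping duplicate or unused entries):
%
%      (void) CompressImageColormap(image,exception);
%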
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
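        The mean is total_color divided by number_unique (alpha below is the
        reciprocal of number_unique).  When alpha is associated, the stored
        sums are premultiplied, so the straight color is recovered by also
        dividing by the mean alpha (the gamma factor).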
*/ q=image->colormap+image->colors; alpha=(double) ((MagickOffsetType) node_info->number_unique); alpha=PerceptibleReciprocal(alpha); if (cube_info->associate_alpha == MagickFalse) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); q->alpha=(double) OpaqueAlpha; } else { double opacity; opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha); q->alpha=(double) ClampToQuantum(opacity); if (q->alpha == OpaqueAlpha) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); } else { double gamma; gamma=(double) (QuantumScale*q->alpha); gamma=PerceptibleReciprocal(gamma); q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.blue); if (node_info->number_unique > cube_info->transparent_pixels) { cube_info->transparent_pixels=node_info->number_unique; cube_info->transparent_index=(ssize_t) image->colors; } } } node_info->color_number=image->colors++; } return(image->colors); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyCubeInfo() deallocates memory associated with an image. % % The format of the DestroyCubeInfo method is: % % DestroyCubeInfo(CubeInfo *cube_info) % % A description of each parameter follows: % % o cube_info: the address of a structure of type CubeInfo. % */ static void DestroyCubeInfo(CubeInfo *cube_info) { register Nodes *nodes; /* Release color cube tree storage. */ do { nodes=cube_info->node_queue->next; cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory( cube_info->node_queue->nodes); cube_info->node_queue=(Nodes *) RelinquishMagickMemory( cube_info->node_queue); cube_info->node_queue=nodes; } while (cube_info->node_queue != (Nodes *) NULL); if (cube_info->memory_info != (MemoryInfo *) NULL) cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info); cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info); cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo % structure. % % The format of the DestroyQuantizeInfo method is: % % QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. 
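%  Typical usage assigns the NULL return value back to the pointer, as
%  DestroyCubeInfo() does in this file:
%
%      quantize_info=DestroyQuantizeInfo(quantize_info);
%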
% */ MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); quantize_info->signature=(~MagickCoreSignature); quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info); return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DitherImage() distributes the difference between an original image and % the corresponding color reduced algorithm to neighboring pixels using % serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns % MagickTrue if the image is dithered otherwise MagickFalse. % % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o exception: return any errors or warnings in this structure. % */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { register ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) memset(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; const char *artifact; double amount; DoublePixelPacket **pixels; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. 
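    The scan is serpentine: u and v below reverse the traversal direction on
    odd rows.  Each pixel picks up 7/16 of the error from its scan-line
    predecessor and 5/16, 3/16 and 1/16 from the previous row; the 7, 5 and
    3 terms are scaled by the optional dither:diffusion-amount artifact.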
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; amount=1.0; artifact=GetImageArtifact(image,"dither:diffusion-amount"); if (artifact != (const char *) NULL) amount=StringToDoubleInterval(artifact,1.0); image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register Quantum *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7.0*amount*current[u-v].red/16; pixel.green+=7.0*amount*current[u-v].green/16; pixel.blue+=7.0*amount*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7.0*amount*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=previous[u+v].alpha/16; } pixel.red+=5.0*amount*previous[u].red/16; pixel.green+=5.0*amount*previous[u].green/16; pixel.blue+=5.0*amount*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5.0*amount*previous[u].alpha/16; if (x > 0) { pixel.red+=3.0*amount*previous[u-v].red/16; pixel.green+=3.0*amount*previous[u-v].green/16; pixel.blue+=3.0*amount*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3.0*amount*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
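        The cube cache (indexed by CacheOffset) memoizes the colormap index
        for each quantized color bucket, so repeated colors skip the tree
        walk and the sibling search above.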
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int, ExceptionInfo *); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction,ExceptionInfo *exception) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); 
Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { register Quantum *magick_restrict q; register ssize_t i; /* Distribute error. */ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=p->weights[i]*p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. 
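        As in the Floyd-Steinberg path, the cache memoizes the tree lookups;
        here the dither error travels along the Hilbert curve in a sliding
        queue of the last ErrorQueueLength errors, weighted by the
        exponentially decaying weights prepared in GetCubeInfo().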
*/ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. */ (void) memset(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initializes the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose an optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a small number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors.
% */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; double sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. */ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) memset(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. */ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither_method == NoDitherMethod) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)* length); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. % % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to a queue of nodes. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the storage_class the node resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. 
*/ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) memset(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. % The error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image. % % o normalized_mean_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_error: This value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in the image. % % The format of the GetImageQuantizeError method is: % % MagickBooleanType GetImageQuantizeError(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o exception: return any errors or warnings in this structure.
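% % In terms of the implementation below, where d ranges over the (alpha-weighted) per-channel differences between each pixel and its colormap entry and area = 3*columns*rows: % % mean_error_per_pixel = sum(|d|)/area % normalized_mean_error = QuantumScale^2*sum(d^2)/area % normalized_maximum_error = QuantumScale*max(|d|)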
% */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image, ExceptionInfo *exception) { CacheView *image_view; double alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; ssize_t index, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,exception); (void) memset(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) GetPixelIndex(image,p); if (image->alpha_trait == BlendPixelTrait) { alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*image->colormap[index].alpha); } distance=fabs((double) (alpha*GetPixelRed(image,p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) memset(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. 
% % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \ QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait == BlendPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneSubtree method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); /* Merge color statistics into parent. 
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.alpha+=node_info->total_color.alpha; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->alpha_trait != BlendPixelTrait) { if (SetImageGray(image,exception) != MagickFalse) (void) SetGrayscaleImage(image,exception); } if ((quantize_info->dither_method == NoDitherMethod) && (image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace, exception); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait == BlendPixelTrait) && (depth > 5)) depth--; if (SetImageGray(image,exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image if it contains more than the maximum, otherwise we can disable dithering to improve the performance. */ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); else cube_info->quantize_info->dither_method=NoDitherMethod; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. 
% % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixels' colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except the leaves of % the tree which represent colors present in the input image.
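% % Schematically, the loop above reduces a tree whose surviving nodes carry % E = {0.0, 0.0, 1.5, 2.0, 6.0} to 2 colors in two passes: the first pass % prunes the two nodes with E <= Ep = 0 and records Ep = 1.5, the second % prunes the E = 1.5 node, leaving 2 colors and ending the loop.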
% % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. 
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; register ssize_t i; size_t extent; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1)); colormap_index=(ssize_t *) AcquireQuantumMemory(extent, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } (void) memset(colormap_index,0,extent*sizeof(*colormap_index)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) { colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) 
image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); }
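/*
  Illustrative sketch, not part of the original source: driving the
  QuantizeImage() entry point above through the public MagickCore API.
  The filename "input.png" and the 16-color/Floyd-Steinberg settings are
  arbitrary choices; error handling is abbreviated to keep it short.
*/
#include <stdio.h>
#include <stdlib.h>
#include <MagickCore/MagickCore.h>

int main(void)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  QuantizeInfo
    *quantize_info;

  MagickCoreGenesis("quantize-demo",MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Reduce to a 16-color palette with Floyd-Steinberg dithering.
      */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->number_colors=16;
      quantize_info->dither_method=FloydSteinbergDitherMethod;
      (void) QuantizeImage(quantize_info,image,exception);
      (void) fprintf(stdout,"colors: %.20g\n",(double) image->colors);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}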
hmap_mput.c
// hmap_mput: put multiple keys #include <omp.h> #include "hmap_common.h" #include "hmap_aux.h" #include "hmap_get.h" #include "hmap_put.h" #include "fasthash.h" #include "hmap_update.h" #include "hmap_mput.h" #define mcr_set_key(keys, i, alt_keys, key_len) ( \ (keys != NULL) ? \ (keys[i]) : \ ((void *)((char *)alt_keys + (i*key_len))) \ ) #define mcr_set_val(vals, i, alt_vals, val_len) ( \ ( vals != NULL ) ? \ (vals[i]) : \ ((void *)((char *)alt_vals + (i*val_len))) \ ) #define mcr_set_key_len(key_lens, i, key_len) ( \ ( key_lens != NULL ) ? \ (key_lens[i]) : \ (key_len) \ ) static inline void * set_key( void **keys, uint32_t i, void *alt_keys, uint32_t key_len ) { void *key_i; if ( keys != NULL ) { key_i = keys[i]; } else { key_i = (void *)((char *)alt_keys + (i*key_len)); } return key_i; } //-------------------------------- static inline void * set_val( void **vals, uint32_t i, void *alt_vals, uint32_t val_len ) { void *val_i; if ( vals != NULL ) { val_i = vals[i]; } else { val_i = (void *)((char *)alt_vals + (i*val_len)); } return val_i; } //-------------------------------- static inline uint16_t set_key_len( uint16_t *key_lens, uint32_t i, uint32_t key_len ) { uint16_t key_len_i; if ( key_lens != NULL ) { key_len_i = key_lens[i]; } else { key_len_i = key_len; } return key_len_i; } //-------------------------------- int hmap_mput( hmap_t *ptr_hmap, hmap_multi_t *M, void **keys, // [nkeys] uint16_t *key_lens, // [nkeys] void *alt_keys, // either keys or alt_keys but not both uint32_t key_len, uint32_t nkeys, void **vals, // [nkeys] void *alt_vals, // either keys or alt_keys but not both uint32_t val_len ) { int status = 0; if ( nkeys == 0 ) { goto BYE; } //------------------------------------------------- if ( keys == NULL ) { if ( key_lens != NULL ) { go_BYE(-1); } if ( alt_keys == NULL ) { go_BYE(-1); } if ( key_len == 0 ) { go_BYE(-1); } } else { if ( key_lens == NULL ) { go_BYE(-1); } if ( alt_keys != NULL ) { go_BYE(-1); } if ( key_len != 0 ) { go_BYE(-1); } } //------------------------------------------------- if ( vals == NULL ) { if ( alt_vals == NULL ) { go_BYE(-1); } if ( val_len == 0 ) { go_BYE(-1); } } else { if ( alt_vals != NULL ) { go_BYE(-1); } if ( val_len != 0 ) { go_BYE(-1); } } //------------------------------------------------- uint32_t *idxs = M->idxs; uint32_t *hashes = M->hashes; uint32_t *locs = M->locs; int8_t *tids = M->tids; bool *exists = M->exists; bool *set = M->set; uint16_t *m_key_len = M->m_key_len; void **m_key = M->m_key; int nP = M->num_procs; #ifdef SEQUENTIAL nP = 1; #else if ( nP <= 0 ) { nP = omp_get_num_procs(); } #endif // fprintf(stderr, "Using %d cores \n", nP); uint64_t proc_divinfo = fast_div32_init(nP); uint32_t lb = 0, ub; register uint64_t hashkey = ptr_hmap->hashkey; register uint64_t divinfo = ptr_hmap->divinfo; for ( int iter = 0; ; iter++ ) { ub = lb + M->num_at_once; if ( ub > nkeys ) { ub = nkeys; } uint32_t niters = ub - lb; int chnk_sz = 16; // so that no false sharing on write bool do_sequential_loop = false; for ( int k = 0; k < M->num_at_once; k++ ) { set[k] = false; } #pragma omp parallel for num_threads(nP) schedule(dynamic, chnk_sz) for ( uint32_t j = 0; j < niters; j++ ) { uint32_t i = j + lb; //-------------------------------- m_key[j] = mcr_set_key(keys, i, alt_keys, key_len); m_key_len[j] = mcr_set_key_len(key_lens, i, key_len); dbg_t dbg; idxs[j] = UINT_MAX; // bad value // hashes[j] = murmurhash3(m_key[j], m_key_len[j], hashkey); hashes[j] = fasthash32(m_key[j], m_key_len[j], hashkey); locs[j] = fast_rem32(hashes[j], 
ptr_hmap->size, divinfo); dbg.hash = hashes[j]; dbg.probe_loc = locs[j]; int lstatus = hmap_get(ptr_hmap, m_key[j], m_key_len[j], NULL, exists+j, idxs+j, &dbg); // do not exist if bad status, this is an omp loop if ( lstatus != 0 ) { if ( status == 0 ) { status = lstatus; } } if ( !exists[j] ) { // new key=> insert in sequential loop tids[j] = -1; // assigned to nobody, done in sequential loop if ( do_sequential_loop == false ) { do_sequential_loop = true; } } else { tids[j] = fast_rem32(hashes[j], nP, proc_divinfo); #ifdef DEBUG uint8_t chk = hashes[j] % nP; if ( chk != tids[j] ) { WHEREAMI; if ( status == 0 ) { status = -1; } } #endif } set[j] = true; } cBYE(status); #ifdef DEBUG for ( uint32_t j = 0; j < niters; j++ ) { if ( !set[j] ) { go_BYE(-1); } if ( j > (uint32_t)M->num_at_once ) { go_BYE(-1); } if ( exists[j] ) { if ( idxs[j] >= ptr_hmap->size ) { go_BYE(-1); } } else { if ( idxs[j] != UINT_MAX ) { go_BYE(-1); } } } #endif //----------------------------------- #ifdef SEQUENTIAL uint32_t num_updates = 0, num_inserts = 0; int exp_num_updates = 0, exp_num_inserts = 0; for ( uint32_t j = 0; j < niters; j++ ) { if ( exists[j] ) { exp_num_updates++; } else { exp_num_inserts++; } } if ( ( exp_num_updates + exp_num_inserts ) != niters ) { go_BYE(-1); } #endif // This loop does all the updates that can be done in parallel #pragma omp parallel { int tid; #ifdef SEQUENTIAL tid = 0; #else tid = omp_get_thread_num(); #endif for ( uint32_t j = 0; j < niters; j++ ) { if ( tids[j] != tid ) { continue; } // not mine, skip uint32_t i = j + lb; //-------------------------------- void *val_i = mcr_set_val(vals, i, alt_vals, val_len); int lstatus = hmap_update(ptr_hmap, idxs[j], val_i); // do not exist if bad status, this is an omp loop if ( lstatus != 0 ) { if ( status == 0 ) { status = lstatus; } } #ifdef SEQUENTIAL num_updates++; #endif } } cBYE(status); // exit if any thread had a problem // This loop does sequential inserts if ( do_sequential_loop ) { dbg_t dbg; for ( uint32_t j = 0; j < niters; j++ ) { if ( exists[j] ) { continue; } uint32_t i = j + lb; void *val_i = mcr_set_val(vals, i, alt_vals, val_len); dbg.hash = hashes[j]; dbg.probe_loc = locs[j]; status = hmap_put(ptr_hmap, m_key[j], m_key_len[j], true, val_i, &dbg); cBYE(status); #ifdef SEQUENTIAL num_inserts++; #endif } } #ifdef SEQUENTIAL if ( ( num_updates + num_inserts ) != niters ) { go_BYE(-1); } #endif if ( ub >= nkeys ) { break; } lb += M->num_at_once; } BYE: return status; }
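/*
  Illustrative sketch, not part of the original source: the two
  mutually-exclusive key-passing conventions that hmap_mput() accepts.
  Either an array of pointers with per-key lengths (keys/key_lens), or a
  packed buffer of fixed-width keys (alt_keys/key_len).  demo_key() mirrors
  the dispatch in the mcr_set_key()/mcr_set_key_len() macros above; the
  sample data is made up.
*/
#include <stdint.h>
#include <stdio.h>

static const void *
demo_key(void **keys, uint32_t i, const void *alt_keys, uint32_t key_len)
{
  /* pointer-array mode wins when keys is non-NULL, as in mcr_set_key() */
  if ( keys != NULL ) { return keys[i]; }
  return (const void *)((const char *)alt_keys + (i * key_len));
}

int main(void)
{
  /* mode 1: variable-length keys, one pointer and one length per key */
  void *keys[2] = { "alpha", "bz" };
  uint16_t key_lens[2] = { 5, 2 };
  /* mode 2: packed fixed-width keys, stride key_len = 4 bytes */
  const char alt_keys[] = "aaaabbbb";
  for ( uint32_t i = 0; i < 2; i++ ) {
    printf("ptr  mode: %.*s\n", (int)key_lens[i],
        (const char *)demo_key(keys, i, NULL, 0));
    printf("flat mode: %.4s\n",
        (const char *)demo_key(NULL, i, alt_keys, 4));
  }
  return 0;
}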
ordering_op-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file ordering_op-inl.h * \brief Function definition of ordering operators */ #ifndef MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #define MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_ #include <mxnet/operator_util.h> #include <dmlc/optional.h> #include <mshadow/tensor.h> #include <algorithm> #include <vector> #include <string> #include <type_traits> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "./sort_op.h" #include "./indexing_op.h" #include "../../api/operator/op_utils.h" namespace mshadow { template<typename xpu, int src_dim, typename DType, int dst_dim> inline Tensor<xpu, dst_dim, DType> inplace_reshape(Tensor<xpu, src_dim, DType> src, Shape<dst_dim> target_shape) { CHECK_EQ(src.CheckContiguous(), true); return Tensor<xpu, dst_dim, DType>(src.dptr_, target_shape, src.stream_); } }; namespace mxnet { namespace op { // These enums are only visible within this header namespace topk_enum { enum TopKReturnType {kReturnValue, kReturnIndices, kReturnMask, kReturnBoth}; } // topk_enum struct TopKParam : public dmlc::Parameter<TopKParam> { dmlc::optional<int> axis; int k; int ret_typ; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(TopKParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose the top k indices." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(k).set_default(1) .describe("Number of top elements to select," " should be always smaller than or equal to the element number in the given axis." " A global sort is performed if set k < 1."); DMLC_DECLARE_FIELD(ret_typ).set_default(topk_enum::kReturnIndices) .add_enum("value", topk_enum::kReturnValue) .add_enum("indices", topk_enum::kReturnIndices) .add_enum("mask", topk_enum::kReturnMask) .add_enum("both", topk_enum::kReturnBoth) .describe("The return type.\n" " \"value\" means to return the top k values," " \"indices\" means to return the indices of the top k values," " \"mask\" means to return a mask array containing 0 and 1. 1 means the top k values." " \"both\" means to return a list of both values and indices of top k elements."); DMLC_DECLARE_FIELD(is_ascend).set_default(false) .describe("Whether to choose k largest or k smallest elements." 
" Top K largest elements will be chosen if set to false."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices when ret_typ is \"indices\" or \"both\". " "An error will be raised if the selected data type cannot precisely represent the " "indices."); } }; struct SortParam : public dmlc::Parameter<SortParam> { dmlc::optional<int> axis; bool is_ascend; DMLC_DECLARE_PARAMETER(SortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to choose sort the input tensor." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s; axis_s << axis; is_ascend_s << is_ascend; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); } }; struct ArgSortParam : public dmlc::Parameter<ArgSortParam> { dmlc::optional<int> axis; bool is_ascend; int dtype; DMLC_DECLARE_PARAMETER(ArgSortParam) { DMLC_DECLARE_FIELD(axis).set_default(dmlc::optional<int>(-1)) .describe("Axis along which to sort the input tensor." " If not given, the flattened array is used. Default is -1."); DMLC_DECLARE_FIELD(is_ascend).set_default(true) .describe("Whether to sort in ascending or descending order."); DMLC_DECLARE_FIELD(dtype) // TODO(srivrohi): remove support for real data type in mxnet-2.0 .add_enum("uint8", mshadow::kUint8) .add_enum("int32", mshadow::kInt32) .add_enum("int64", mshadow::kInt64) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(mshadow::kFloat32) .describe("DType of the output indices. It is only valid when ret_typ is \"indices\" or" " \"both\". An error will be raised if the selected data type cannot precisely " "represent the indices."); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, is_ascend_s, dtype_s; axis_s << axis; is_ascend_s << is_ascend; dtype_s << dtype; (*dict)["axis"] = axis_s.str(); (*dict)["is_ascend_s"] = is_ascend_s.str(); (*dict)["dtype"] = MXNetTypeWithBool2String(dtype); } }; inline void ParseTopKParam(const TShape& src_shape, const TopKParam& param, TShape *target_shape, size_t *batch_size, index_t *element_num, int *axis, index_t *k, bool *do_transpose, bool *is_ascend) { *do_transpose = false; *k = param.k; *is_ascend = param.is_ascend; // get batch_size, axis and element_num if (!static_cast<bool>(param.axis)) { // No axis given *axis = 0; *batch_size = 1; *element_num = src_shape.Size(); } else { *axis = param.axis.value(); if (*axis < 0) { *axis += src_shape.ndim(); } CHECK(*axis >= 0 && *axis < static_cast<int>(src_shape.ndim())) << "Invalid axis! 
axis should be between 0 and " << src_shape.ndim() << ", found axis=" << *axis; if (src_shape[*axis] != 0) { *batch_size = src_shape.Size() / src_shape[*axis]; } *element_num = src_shape[*axis]; if (*axis != src_shape.ndim() - 1) { *do_transpose = true; } } // get k if (param.k <= 0) { *k = *element_num; } // get target_shape if (!static_cast<bool>(param.axis)) { if (param.ret_typ != topk_enum::kReturnMask) { *target_shape = mshadow::Shape1(*k); } else { *target_shape = src_shape; } } else { *target_shape = src_shape; if (param.ret_typ != topk_enum::kReturnMask) { (*target_shape)[*axis] = *k; } } CHECK(*k >= 0 && *k <= *element_num) << "k must be smaller than " << *element_num << ", get k = " << *k; } using namespace mshadow; struct fill_ind_to_one { template<typename DType> MSHADOW_XINLINE static void Map(int i, const index_t* indices, DType* out) { out[indices[i]] = static_cast<DType>(1); } }; struct fill_ind { template<typename DType> MSHADOW_XINLINE static void Map(int i, const index_t* indices, const DType* val, int req, DType* out) { KERNEL_ASSIGN(out[indices[i]], req, val[i]); } }; template<typename DType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<cpu, 1, DType>& dat, const Tensor<cpu, 1, index_t>& ind, const Tensor<cpu, 1, char>& work, index_t K, index_t N, bool is_ascend, Stream<cpu> *s) { // Use full sort when K is relatively large. const bool full_sort(K*8 > N); // Batch size. const index_t M(work.size(0)/(sizeof(DType)*N)); const int omp_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < M; ++i) { // Tensor `work` stores the flattened source data, while `dat` stores the sorted result. DType *vals = reinterpret_cast<DType*>(work.dptr_); DType *sorted_vals = dat.dptr_+i*N; index_t *indices = ind.dptr_+i*N; if (is_ascend) { if (full_sort) { std::sort(indices, indices+N, [&](const index_t& i1, const index_t& i2){ return vals[i1] < vals[i2]; }); } else { std::partial_sort(indices, indices+K, indices+N, [&](const index_t& i1, const index_t& i2){ return vals[i1] < vals[i2]; }); } } else { if (full_sort) { std::sort(indices, indices+N, [&](const index_t& i1, const index_t& i2){ return vals[i1] > vals[i2]; }); } else { std::partial_sort(indices, indices+K, indices+N, [&](const index_t& i1, const index_t& i2){ return vals[i1] > vals[i2]; }); } } for (index_t j = 0; j < K; ++j) { sorted_vals[j] = vals[indices[j]]; } } } #ifdef __CUDACC__ template<typename DType> MSHADOW_XINLINE bool TopKCompare(DType val1, index_t ind1, DType val2, index_t ind2, bool is_ascend) { // Negative indices denote undefined values which are considered arbitrary small resp. large. return (ind2 < 0) || (ind1 >= 0 && ((is_ascend && val1 < val2) || (!is_ascend && val1 > val2))); } template<typename DType> MSHADOW_XINLINE void MergeTopK(index_t K, DType *val1, index_t *ind1, DType *val2, index_t *ind2, bool is_ascend) { // In-place merge of two sorted top-K lists into val1/ind1. First determine the intervals // [0,..,i1], [0,..i2] of the two lists that will be part of the merged list. index_t i1(K-1), i2(K-1); for (index_t i = 0; i < K; ++i) { if (TopKCompare(val1[i1], ind1[i1], val2[i2], ind2[i2], is_ascend)) { --i2; } else { --i1; } } // Now merge the lists from back to front. 
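// Invariant from the scan above: i1 + i2 == K - 2, so exactly K of the 2K
// candidates remain (val1[0..i1] and val2[0..i2]). Filling from the back,
// whichever remaining head compares worse is emitted first; val1/ind1 thus
// end up holding the merged top-K in sorted order with no extra storage.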
for (index_t i = K; i--;) { if (i2 < 0 || (i1 >= 0 && TopKCompare(val2[i2], ind2[i2], val1[i1], ind1[i1], is_ascend))) { val1[i] = val1[i1]; ind1[i] = ind1[i1]; --i1; } else { val1[i] = val2[i2]; ind1[i] = ind2[i2]; --i2; } } } template<typename DType> __global__ void PartialSortSmallK(index_t K, index_t N, DType *val, index_t *ind, bool is_ascend) { // Buffer for pairwise reduction. extern __shared__ index_t buff[]; // Start of buffer sections associated with this thread. const index_t offset(threadIdx.x*K); index_t *ind_buff = &buff[offset]; DType *val_buff = reinterpret_cast<DType*>(&buff[blockDim.x*K])+offset; // Initialize top-K values for this thread. for (index_t i = 0; i < K; ++i) { ind_buff[i] = -1; } // Range of values this thread cares about. Each thread block processes // a different batch item (i.e. a different set of ind/val where we // have to select the top-K elements). All threads within the same // block work on the same batch item. const index_t first(blockIdx.x*N+threadIdx.x), last((blockIdx.x+1)*N); // Select top-K from this range and store it sorted in the buffer. // We assume a small K, so linear insertion is o.k. for (index_t i = first; i < last; i += blockDim.x) { DType cur_val(val[i]); index_t cur_ind(ind[i]); for (index_t j = K; j-- && TopKCompare(cur_val, cur_ind, val_buff[j], ind_buff[j], is_ascend); ) { if (j+1 < K) { val_buff[j+1] = val_buff[j]; ind_buff[j+1] = ind_buff[j]; } val_buff[j] = cur_val; ind_buff[j] = cur_ind; } } // Recursive merge of sorted lists for this thread block. Note that blockDim.x is not // necessarily a power of two, therefore the additional checks for last_s. for (index_t s = (blockDim.x+1)/2, last_s = blockDim.x; last_s > 1; last_s = s, s = (s+1)/2) { __syncthreads(); if (threadIdx.x < s && threadIdx.x+s < last_s) { MergeTopK(K, val_buff, ind_buff, val_buff+s*K, ind_buff+s*K, is_ascend); } } // Final updates on master thread. if (threadIdx.x == 0) { for (index_t i = 0; i < K; ++i) { ind[blockIdx.x*N+i] = ind_buff[i]; val[blockIdx.x*N+i] = val_buff[i]; } } } template<typename DType> MSHADOW_FORCE_INLINE void TopKSort(const Tensor<gpu, 1, DType>& dat, const Tensor<gpu, 1, index_t>& ind, const Tensor<gpu, 1, char>& work, index_t K, index_t N, bool is_ascend, Stream<gpu> *s) { // Use full sort for all but very small K for which we // can do a partial sort entirely within shared memory. const bool full_sort(K > 5); // Batch size. const index_t M(dat.size(0)/N); if (full_sort) { // Divide workspace into two parts. The first one is needed to store batch ids. size_t alignment = std::max(sizeof(DType), sizeof(index_t)); size_t id_size = PadBytes(sizeof(index_t) * ind.size(0), alignment); Tensor<gpu, 1, index_t> batch_id(reinterpret_cast<index_t*>(work.dptr_), Shape1(ind.size(0)), s); Tensor<gpu, 1, char> sort_work(work.dptr_+id_size, Shape1(work.size(0)-id_size), s); mxnet::op::SortByKey(dat, ind, is_ascend, &sort_work); if (M > 1) { // Back to back sorting. Note that mxnet::op::SortByKey is a stable sort. batch_id = ind / N; mxnet::op::SortByKey(batch_id, dat, true, &sort_work); batch_id = ind / N; mxnet::op::SortByKey(batch_id, ind, true, &sort_work); } } else { const int nthreads(mshadow::cuda::kBaseThreadNum); PartialSortSmallK<<<M, nthreads, nthreads*K*(sizeof(index_t)+sizeof(DType)), mshadow::Stream<gpu>::GetStream(s)>>> (K, N, dat.dptr_, ind.dptr_, is_ascend); } } #endif /*! 
* \brief Implementation of the TopK operation * * * \param ctx the running context * \param resource temporary resource handler * \param src the Source blob * \param ret the destination blobs * \param param the topk parameters * \tparam xpu the device type. * \tparam DType type of the output value/mask. * \tparam IDType type of the output indices. */ template<typename xpu, typename DType, typename IDType> void TopKImpl(const RunContext &ctx, const Resource &resource, const std::vector<OpReqType>& req, const TBlob& src, const std::vector<TBlob>& ret, const TopKParam& param) { using namespace mshadow; using namespace mshadow::expr; // 0. If input shape is 0-shape, directly return if (src.Size() == 0) return; // 1. Parse and initialize information Stream<xpu> *s = ctx.get_stream<xpu>(); Tensor<xpu, 1, char> workspace; Tensor<xpu, 1, char> temp_workspace; Tensor<xpu, 1, DType> sorted_dat; Tensor<xpu, 1, index_t> indices, sel_indices; size_t batch_size = 0; index_t element_num = 0; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; index_t k = 0; size_t alignment = std::max(sizeof(DType), sizeof(index_t)); mxnet::TShape target_shape; ParseTopKParam(src.shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>()) << "'index_t' does not have a sufficient precision to represent " << "the indices of the input array. The total element_num is " << element_num << ", but the selected index_t can only represent " << mxnet::common::MaxIntegerValue<index_t>() << " elements"; Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s); // Temp space needed by the full sorts. size_t temp_size = std::max( mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()), mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size())); temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size())); // Additional temp space for gpu full sorts for batch ids. temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment); // Temp space for cpu sorts. 
temp_size = std::max(temp_size, sizeof(DType) * src.Size()); size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment) + PadBytes(sizeof(index_t) * src.Size(), alignment); if (param.ret_typ == topk_enum::kReturnMask) { workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment); } workspace = resource.get_space_typed<xpu, 1, char>(Shape1(workspace_size), s); char* workspace_curr_ptr = workspace.dptr_; sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s); // contain sorted dat workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment); indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr), Shape1(src.Size()), s); // indices in the original matrix workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment); if (param.ret_typ == topk_enum::kReturnMask) { sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr), Shape1(batch_size * k), s); workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment); CHECK_EQ(sel_indices.CheckContiguous(), true); } if (std::is_same<xpu, cpu>::value) { Tensor<xpu, 1, DType> flattened_data; if (do_transpose) { flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s); workspace_curr_ptr += sizeof(DType) * src.Size(); flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size())); CHECK_EQ(flattened_data.CheckContiguous(), true); } else { flattened_data = src.FlatTo1D<xpu, DType>(s); } // `temp_workspace` stores the flattened data temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_), Shape1(sizeof(DType)*src.Size()), s); CHECK_EQ(temp_workspace.CheckContiguous(), true); } else { if (do_transpose) { sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size())); } else { sorted_dat = reshape(dat, Shape1(src.Size())); } CHECK_EQ(sorted_dat.CheckContiguous(), true); temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space workspace_curr_ptr += temp_size; } mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, index_t{0}, index_t{1}, kWriteTo, indices.dptr_); CHECK_EQ(indices.CheckContiguous(), true); // 2. Perform inplace batch sort. // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat` // `temp_workspace` is used to store the flattened source data for CPU device, and it's used as // a temporary buffer for GPU device. TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s); // 3. Assign results to the ret blob // When returning indices, only update(modulo) required elements instead of full elements // to avoid redundant calculation. // Casting `ret_indices` from int to real_t can introduce a conversion error when the element_num // is large enough. 
if (param.ret_typ == topk_enum::kReturnMask) { Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s); ret_mask = scalar<DType>(0); sel_indices = reshape(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), Shape1(batch_size * k)); if (do_transpose) { mxnet::TShape src_shape = src.shape_.FlatTo3D(axis); CHECK_EQ(sel_indices.CheckContiguous(), true); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } if (req[0] == kNullOp) { return; } else if (req[0] == kWriteTo) { mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k, sel_indices.dptr_, ret_mask.dptr_); } else { LOG(FATAL) << "req=" << req[0] << " is not supported yet."; } } else if (param.ret_typ == topk_enum::kReturnIndices) { if (do_transpose) { Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s); ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)), element_num))); } else { Tensor<xpu, 2, IDType> ret_indices = ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } else { if (do_transpose) { Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s); Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s); ASSIGN_DISPATCH(ret_value, req[0], transpose( slice<2>(inplace_reshape(sorted_dat, Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)), 0, k), Shape3(0, 2, 1))); ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)), element_num))); } else { Tensor<xpu, 2, DType> ret_value = ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s); Tensor<xpu, 2, IDType> ret_indices = ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); ASSIGN_DISPATCH(ret_value, req[0], slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k)); ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } } template<typename xpu, typename DType> size_t TopKWorkspaceSize(const TBlob& src, const TopKParam& param, size_t *temp_size_ptr) { using namespace mshadow; using namespace mshadow::expr; size_t batch_size = 0; size_t temp_size; index_t element_num = 0; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; index_t k = 0; size_t alignment = std::max(sizeof(DType), sizeof(index_t)); mxnet::TShape target_shape; ParseTopKParam(src.shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); // Temp space needed by the full sorts. temp_size = std::max( mxnet::op::SortByKeyWorkspaceSize<index_t, DType, xpu>(src.Size()), mxnet::op::SortByKeyWorkspaceSize<DType, index_t, xpu>(src.Size())); temp_size = std::max(temp_size, mxnet::op::SortByKeyWorkspaceSize<index_t, index_t, xpu>(src.Size())); // Additional temp space for gpu full sorts for batch ids. temp_size += PadBytes(sizeof(index_t) * src.Size(), alignment); // Temp space for cpu sorts. 
temp_size = std::max(temp_size, sizeof(DType) * src.Size()); *temp_size_ptr = temp_size; size_t workspace_size = temp_size + PadBytes(sizeof(DType) * src.Size(), alignment) + PadBytes(sizeof(index_t) * src.Size(), alignment); if (param.ret_typ == topk_enum::kReturnMask) { workspace_size += PadBytes(sizeof(index_t) * batch_size * k, alignment); } return workspace_size; } template<typename xpu, typename DType, typename IDType> void TopKImplwithWorkspace(const RunContext &ctx, const std::vector<OpReqType>& req, const TBlob& src, const std::vector<TBlob>& ret, const TopKParam& param, char* workspace_curr_ptr, const size_t &temp_size, Stream<xpu>* s) { using namespace mshadow; using namespace mshadow::expr; // 0. If input shape is 0-shape, directly return if (src.Size() == 0) return; // 1. Parse and initialize information Tensor<xpu, 1, char> workspace; Tensor<xpu, 1, char> temp_workspace; Tensor<xpu, 1, DType> sorted_dat; Tensor<xpu, 1, index_t> indices, sel_indices; size_t batch_size = 0; index_t element_num = 0; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; index_t k = 0; size_t alignment = std::max(sizeof(DType), sizeof(index_t)); mxnet::TShape target_shape; ParseTopKParam(src.shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); CHECK_LE(element_num, mxnet::common::MaxIntegerValue<index_t>()) << "'index_t' does not have a sufficient precision to represent " << "the indices of the input array. The total element_num is " << element_num << ", but the selected index_t can only represent " << mxnet::common::MaxIntegerValue<index_t>() << " elements"; Tensor<xpu, 3, DType> dat = src.FlatTo3D<xpu, DType>(axis, axis, s); sorted_dat = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s); // contain sorted dat workspace_curr_ptr += PadBytes(sizeof(DType) * src.Size(), alignment); indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr), Shape1(src.Size()), s); // indices in the original matrix workspace_curr_ptr += PadBytes(sizeof(index_t) * src.Size(), alignment); if (param.ret_typ == topk_enum::kReturnMask) { sel_indices = Tensor<xpu, 1, index_t>(reinterpret_cast<index_t*>(workspace_curr_ptr), Shape1(batch_size * k), s); workspace_curr_ptr += PadBytes(sizeof(index_t) * batch_size * k, alignment); CHECK_EQ(sel_indices.CheckContiguous(), true); } if (std::is_same<xpu, cpu>::value) { Tensor<xpu, 1, DType> flattened_data; if (do_transpose) { flattened_data = Tensor<xpu, 1, DType>(reinterpret_cast<DType*>(workspace_curr_ptr), Shape1(src.Size()), s); workspace_curr_ptr += sizeof(DType) * src.Size(); flattened_data = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size())); CHECK_EQ(flattened_data.CheckContiguous(), true); } else { flattened_data = src.FlatTo1D<xpu, DType>(s); } // `temp_workspace` stores the flattened data temp_workspace = Tensor<xpu, 1, char>(reinterpret_cast<char*>(flattened_data.dptr_), Shape1(sizeof(DType)*src.Size()), s); CHECK_EQ(temp_workspace.CheckContiguous(), true); } else { if (do_transpose) { sorted_dat = reshape(transpose(dat, Shape3(0, 2, 1)), Shape1(src.Size())); } else { sorted_dat = reshape(dat, Shape1(src.Size())); } CHECK_EQ(sorted_dat.CheckContiguous(), true); temp_workspace = Tensor<xpu, 1, char>(workspace_curr_ptr, Shape1(temp_size), s); // temp space workspace_curr_ptr += temp_size; } mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size * element_num, 1, index_t{0}, index_t{1}, kWriteTo, 
indices.dptr_); CHECK_EQ(indices.CheckContiguous(), true); // 2. Perform inplace batch sort. // After sorting, each batch in `sorted_dat` will be sorted in the corresponding order // up to the k-th element and the `indices` will contain the corresponding index in `sorted_dat` // `temp_workspace` is used to store the flattened source data for CPU device, and it's used as // a temporary buffer for GPU device. TopKSort(sorted_dat, indices, temp_workspace, k, element_num, is_ascend, s); // 3. Assign results to the ret blob // When returning indices, only update(modulo) required elements instead of full elements // to avoid redundant calculation. // Casting `ret_indices` from int to real_t can introduce a conversion error when the element_num // is large enough. if (param.ret_typ == topk_enum::kReturnMask) { Tensor<xpu, 1, DType> ret_mask = ret[0].FlatTo1D<xpu, DType>(s); ret_mask = scalar<DType>(0); sel_indices = reshape(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), Shape1(batch_size * k)); if (do_transpose) { mxnet::TShape src_shape = src.shape_.FlatTo3D(axis); CHECK_EQ(sel_indices.CheckContiguous(), true); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } if (req[0] == kNullOp) { return; } else if (req[0] == kWriteTo) { mxnet_op::Kernel<fill_ind_to_one, xpu>::Launch(s, batch_size * k, sel_indices.dptr_, ret_mask.dptr_); } else { LOG(FATAL) << "req=" << req[0] << " is not supported yet."; } } else if (param.ret_typ == topk_enum::kReturnIndices) { if (do_transpose) { Tensor<xpu, 3, IDType> ret_indices = ret[0].FlatTo3D<xpu, IDType>(axis, axis, s); ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)), element_num))); } else { Tensor<xpu, 2, IDType> ret_indices = ret[0].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); ASSIGN_DISPATCH(ret_indices, req[0], tcast<IDType>(F<mshadow_op::mod>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } else { if (do_transpose) { Tensor<xpu, 3, DType> ret_value = ret[0].FlatTo3D<xpu, DType>(axis, axis, s); Tensor<xpu, 3, IDType> ret_indices = ret[1].FlatTo3D<xpu, IDType>(axis, axis, s); ASSIGN_DISPATCH(ret_value, req[0], transpose( slice<2>(inplace_reshape(sorted_dat, Shape3(ret_value.shape_[0], ret_value.shape_[2], element_num)), 0, k), Shape3(0, 2, 1))); ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(transpose( slice<2>(inplace_reshape(indices, Shape3(ret_indices.shape_[0], ret_indices.shape_[2], element_num)), 0, k), Shape3(0, 2, 1)), element_num))); } else { Tensor<xpu, 2, DType> ret_value = ret[0].get_with_shape<xpu, 2, DType>(Shape2(batch_size, k), s); Tensor<xpu, 2, IDType> ret_indices = ret[1].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); ASSIGN_DISPATCH(ret_value, req[0], slice<1>(inplace_reshape(sorted_dat, Shape2(batch_size, element_num)), 0, k)); ASSIGN_DISPATCH(ret_indices, req[1], tcast<IDType>(F<mshadow_op::mod>(slice<1>( inplace_reshape(indices, Shape2(batch_size, element_num)), 0, k), element_num))); } } } template<typename xpu> void TopK(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == 
topk_enum::kReturnBoth) { MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { MSHADOW_TYPE_SWITCH(param.dtype, IDType, { TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param); }) }); } else { MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { TopKImpl<xpu, DType, index_t>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, param); }); } } template<typename xpu> void Sort(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const SortParam& param = nnvm::get<SortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { TopKImpl<xpu, DType, index_t>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param); }); } template<typename xpu> void ArgSort(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.dtype = param.dtype; topk_param.ret_typ = topk_enum::kReturnIndices; MXNET_NO_FLOAT16_TYPE_SWITCH(inputs[0].type_flag_, DType, { MSHADOW_TYPE_SWITCH(param.dtype, IDType, { TopKImpl<xpu, DType, IDType>(ctx.run_ctx, ctx.requested[0], req, inputs[0], outputs, topk_param); }); }); } template<typename xpu, typename DType, typename IDType> void TopKBackwardImpl(const OpContext &ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs, const TopKParam& param) { CHECK_NE(req[0], kWriteInplace); using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.run_ctx.get_stream<xpu>(); CHECK(param.ret_typ == topk_enum::kReturnValue || param.ret_typ == topk_enum::kReturnBoth); size_t batch_size = 0; index_t element_num = 0; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; index_t k = 0; mxnet::TShape target_shape; ParseTopKParam(outputs[0].shape_, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); CHECK_LE(element_num, mxnet::common::MaxIntegerValue<IDType>()) << "'IDType' does not have a sufficient precision to represent " << "the indices of the input array. 
The total element_num is " << element_num << ", but the selected index_t can only represent " << mxnet::common::MaxIntegerValue<IDType>() << " elements"; Tensor<xpu, 1, index_t> workspace = ctx.requested[0].get_space_typed<xpu, 1, index_t>(Shape1(batch_size * k + batch_size), s); Tensor<xpu, 1, index_t> sel_indices = Tensor<xpu, 1, index_t>(workspace.dptr_, Shape1(batch_size * k), s); Tensor<xpu, 1, index_t> batch_shift = Tensor<xpu, 1, index_t>(workspace.dptr_ + batch_size * k, Shape1(batch_size), s); Tensor<xpu, 2, DType> out_grad = inputs[0].get_with_shape<xpu, 2, DType>(Shape2(inputs[0].shape_.Size(), 1), s); Tensor<xpu, 2, DType> in_grad = outputs[0].get_with_shape<xpu, 2, DType>(Shape2(outputs[0].shape_.Size(), 1), s); mxnet_op::Kernel<range_fwd, xpu>::Launch(s, batch_size, 1, index_t{0}, element_num, kWriteTo, batch_shift.dptr_); if (do_transpose) { Tensor<xpu, 1, IDType> indices = inputs[2].FlatTo1D<xpu, IDType>(s); mxnet::TShape src_shape = outputs[0].shape_.FlatTo3D(axis); sel_indices = reshape(transpose( broadcast_to(inplace_reshape(batch_shift, Shape3(src_shape[0], src_shape[2], 1)), mxnet::TShape(Shape3(src_shape[0], src_shape[2], k))), Shape3(0, 2, 1)), Shape1(batch_size * k)); sel_indices += tcast<index_t>(indices); sel_indices = transpose_indices(sel_indices, Shape3(src_shape[0], src_shape[2], src_shape[1]), Shape3(0, 2, 1)); } else { Tensor<xpu, 2, IDType> indices = inputs[2].get_with_shape<xpu, 2, IDType>(Shape2(batch_size, k), s); sel_indices = reshape(tcast<index_t>(indices) + broadcast_to(inplace_reshape(batch_shift, Shape2(batch_size, 1)), mxnet::TShape(Shape2(batch_size, k))), Shape1(batch_size * k)); } CHECK_EQ(sel_indices.CheckContiguous(), true); if (kWriteTo == req[0] || kAddTo == req[0]) { if (kWriteTo == req[0]) { in_grad = scalar<DType>(0); } mxnet_op::Kernel<fill_ind, xpu>::Launch(s, batch_size * k, sel_indices.dptr_, out_grad.dptr_, req[0], in_grad.dptr_); } else { LOG(FATAL) << "Not Implemented!"; } } template<typename xpu> void TopKBackward_(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnBoth) { MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { MSHADOW_TYPE_SWITCH(param.dtype, IDType, { TopKBackwardImpl<xpu, DType, IDType>(ctx, inputs, req, outputs, param); }); }); } else if (param.ret_typ == topk_enum::kReturnValue) { MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { TopKBackwardImpl<xpu, DType, index_t>(ctx, inputs, req, outputs, param); }); } else { LOG(FATAL) << "Not Implemented"; } } inline uint32_t TopKNumOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { return static_cast<uint32_t>(1); } else { return static_cast<uint32_t>(2); } } inline uint32_t TopKNumVisibleOutputs(const NodeAttrs& attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); if (param.ret_typ == topk_enum::kReturnBoth) { return static_cast<uint32_t>(2); } else { return static_cast<uint32_t>(1); } } inline bool TopKType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); size_t in_size = in_attrs->size(); size_t out_size = out_attrs->size(); CHECK_EQ(in_size, 1); CHECK(out_size == 1 || out_size == 2); // out_attr[0] -> stores value 
// out_attr[1] -> stores indices if (out_size > 1) { if (param.ret_typ == topk_enum::kReturnValue) { #if MXNET_USE_INT64_TENSOR_SIZE == 1 CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64)) #else CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) #endif << "Failed to set the type of ret_indices."; } else { CHECK(type_assign(&(*out_attrs)[1], param.dtype)) << "Failed to set the type of ret_indices."; } } if (param.ret_typ == topk_enum::kReturnIndices) { CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices."; } else { TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } return true; } inline bool TopKShapeImpl(const TopKParam& param, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { CHECK_EQ(out_attrs->size(), 1U); } else { CHECK_EQ(out_attrs->size(), 2U); } mxnet::TShape& in_shape = (*in_attrs)[0]; size_t batch_size = 0; index_t element_num = 0; // number of batches + the size of each batch int axis = 0; bool do_transpose = false; bool is_ascend = false; index_t k = 0; mxnet::TShape target_shape; ParseTopKParam(in_shape, param, &target_shape, &batch_size, &element_num, &axis, &k, &do_transpose, &is_ascend); if (param.ret_typ == topk_enum::kReturnIndices || param.ret_typ == topk_enum::kReturnMask) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); } else { SHAPE_ASSIGN_CHECK(*out_attrs, 0, target_shape); SHAPE_ASSIGN_CHECK(*out_attrs, 1, target_shape); } return true; } inline bool TopKShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const TopKParam& param = nnvm::get<TopKParam>(attrs.parsed); return TopKShapeImpl(param, in_attrs, out_attrs); } inline bool SortType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { int data_type = -1; size_t in_size = in_attrs->size(); size_t out_size = out_attrs->size(); CHECK_EQ(in_size, 1); CHECK_EQ(out_size, 2); #if MXNET_USE_INT64_TENSOR_SIZE == 1 CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt64)) #else CHECK(type_assign(&(*out_attrs)[1], mshadow::kInt32)) #endif << "Failed to set the type of ret_indices"; CHECK(type_assign(&data_type, (*in_attrs)[0])) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&data_type, (*out_attrs)[0])) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; CHECK(type_assign(&(*in_attrs)[0], data_type)) << "Incompatible dtype of input, in_attrs[0]=" << (*in_attrs)[0]; CHECK(type_assign(&(*out_attrs)[0], data_type)) << "Incompatible dtype of output, out_attrs[0]=" << (*out_attrs)[0]; if (data_type == -1) return false; return true; } inline bool SortShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const SortParam& param = nnvm::get<SortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnValue; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } inline bool ArgSortType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); CHECK(type_assign(&(*out_attrs)[0], param.dtype)) << "Failed to set the type of ret_indices."; return true; } inline bool ArgSortShape(const 
nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ArgSortParam& param = nnvm::get<ArgSortParam>(attrs.parsed); TopKParam topk_param; topk_param.axis = param.axis; topk_param.is_ascend = param.is_ascend; topk_param.k = 0; topk_param.ret_typ = topk_enum::kReturnIndices; return TopKShapeImpl(topk_param, in_attrs, out_attrs); } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ORDERING_OP_INL_H_
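For reference, the axis and batch bookkeeping that ParseTopKParam performs above can be exercised in isolation. The following is a minimal plain-C sketch, not part of the MXNet sources; parse_topk_sketch and all of its names are hypothetical.

/* Sketch of ParseTopKParam's bookkeeping: normalize a possibly negative axis,
 * derive the batch count and per-batch element count, and clamp k <= 0 to
 * "all elements", mirroring the header above. */
#include <assert.h>
#include <stddef.h>

static void parse_topk_sketch(const size_t *shape, int ndim, int axis, long k,
                              size_t *batch_size, size_t *element_num, long *k_out) {
    size_t total = 1;
    for (int i = 0; i < ndim; ++i) total *= shape[i];
    if (axis < 0) axis += ndim;                  /* negative axes count from the end */
    assert(axis >= 0 && axis < ndim);
    *element_num = shape[axis];                  /* elements sorted per batch item */
    *batch_size = (shape[axis] != 0) ? total / shape[axis] : 1;
    *k_out = (k <= 0) ? (long)*element_num : k;  /* k <= 0 selects everything */
}

int main(void) {
    size_t shape[3] = {2, 3, 4}, batch, elems;
    long k;
    /* axis = -1 resolves to the last axis; k = 0 resolves to element_num */
    parse_topk_sketch(shape, 3, -1, 0, &batch, &elems, &k);
    assert(batch == 6 && elems == 4 && k == 4);
    return 0;
}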
builtins-1.c
/* { dg-additional-options "-ffast-math" } */ #include <assert.h> #include <math.h> #define N 10 #define N2 14 #define c1 1.2345f #define c2 1.2345 #define DELTA 0.001 #define TEST_BIT_BUILTINS(T, S, S2) \ { \ T arguments[N2] \ = {0##S, 1##S, 2##S, 3##S, \ 111##S, 333##S, 444##S, 0x80000000##S, \ 0x0000ffff##S, 0xf0000000##S, 0xff000000##S, 0xffffffff##S}; \ int clrsb[N2] = {}; \ int clz[N2] = {}; \ int ctz[N2] = {}; \ int ffs[N2] = {}; \ int parity[N2] = {}; \ int popcount[N2] = {}; \ \ _Pragma ("omp target map(from:clrsb[:N2], clz[:N2], ctz[:N2], ffs[:N2], parity[:N2], popcount[:N2])") \ { \ for (unsigned i = 0; i < N2; i++) \ { \ clrsb[i] = __builtin_clrsb##S2 (arguments[i]); \ clz[i] = __builtin_clz##S2 (arguments[i]); \ ctz[i] = __builtin_ctz##S2 (arguments[i]); \ ffs[i] = __builtin_ffs##S2 (arguments[i]); \ parity[i] = __builtin_parity##S2 (arguments[i]); \ popcount[i] = __builtin_popcount##S2 (arguments[i]); \ } \ } \ \ for (unsigned i = 0; i < N2; i++) \ { \ assert (clrsb[i] == __builtin_clrsb##S2 (arguments[i])); \ if (arguments[i] != 0) \ { \ assert (clz[i] == __builtin_clz##S2 (arguments[i])); \ assert (ctz[i] == __builtin_ctz##S2 (arguments[i])); \ } \ assert (ffs[i] == __builtin_ffs##S2 (arguments[i])); \ assert (parity[i] == __builtin_parity##S2 (arguments[i])); \ assert (popcount[i] == __builtin_popcount##S2 (arguments[i])); \ } \ } #define ASSERT(v1, v2) assert (fabs (v1 - v2) < DELTA) int main () { float f[N] = {}; float d[N] = {}; /* 1) test direct mapping to HSA insns. */ #pragma omp target map(from: f[ : N], d[ : N]) { f[0] = sinf (c1); f[1] = cosf (c1); f[2] = exp2f (c1); f[3] = log2f (c1); f[4] = truncf (c1); f[5] = sqrtf (c1); d[0] = trunc (c2); d[1] = sqrt (c2); } ASSERT (f[0], sinf (c1)); ASSERT (f[1], cosf (c1)); ASSERT (f[2], exp2f (c1)); ASSERT (f[3], log2f (c1)); ASSERT (f[4], truncf (c1)); ASSERT (f[5], sqrtf (c1)); ASSERT (d[0], trunc (c2)); ASSERT (d[1], sqrt (c2)); /* 2) test bit builtins for unsigned int. */ TEST_BIT_BUILTINS (int, , ); /* 3) test bit builtins for unsigned long int. */ TEST_BIT_BUILTINS (long, l, l); /* 4) test bit builtins for unsigned long long int. */ TEST_BIT_BUILTINS (long long, ll, ll); return 0; }
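The asserts in TEST_BIT_BUILTINS compare results computed in the target region against host evaluations of the same builtins. A host-only C sketch of the concrete values those builtins produce (assuming a 32-bit int, as on the usual GCC targets; not part of the test above):

#include <assert.h>

int main(void) {
    assert(__builtin_popcount(0xffffffffu) == 32);  /* all 32 bits set */
    assert(__builtin_ctz(0x80000000u) == 31);       /* only the top bit set */
    assert(__builtin_clz(0x0000ffffu) == 16);       /* highest set bit is bit 15 */
    assert(__builtin_parity(0x00000003u) == 0);     /* two set bits: even parity */
    assert(__builtin_ffs(0) == 0);                  /* ffs of zero is defined as 0 */
    assert(__builtin_clrsb(0) == 31);               /* 31 redundant sign bits in a 32-bit 0 */
    return 0;
}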
blas.c
#include "blas.h" #include "utils.h" #include <math.h> #include <assert.h> #include <float.h> #include <stdio.h> #include <stdlib.h> #include <string.h> void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out) { int b,i,j,k; int in_c = out_c/(stride*stride); //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward); //printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride); for(b = 0; b < batch; ++b){ for(k = 0; k < out_c; ++k){ for(j = 0; j < out_h; ++j){ for(i = 0; i < out_w; ++i){ int in_index = i + out_w*(j + out_h*(k + out_c*b)); int c2 = k % in_c; int offset = k / in_c; int w2 = i*stride + offset % stride; int h2 = j*stride + offset / stride; int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b)); if(forward) out[out_index] = x[in_index]; // used by default for forward (i.e. forward = 0) else out[in_index] = x[out_index]; } } } } } void flatten(float *x, int size, int layers, int batch, int forward) { float* swap = (float*)xcalloc(size * layers * batch, sizeof(float)); int i,c,b; for(b = 0; b < batch; ++b){ for(c = 0; c < layers; ++c){ for(i = 0; i < size; ++i){ int i1 = b*layers*size + c*size + i; int i2 = b*layers*size + i*layers + c; if (forward) swap[i2] = x[i1]; else swap[i1] = x[i2]; } } } memcpy(x, swap, size*layers*batch*sizeof(float)); free(swap); } void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c) { int i; for(i = 0; i < n; ++i){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc) { int i; for(i = 0; i < n; ++i){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } static float relu(float src) { if (src > 0) return src; return 0; } void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; #pragma omp parallel for for (id = 0; id < size; ++id) { int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float sum = 1, max_val = -FLT_MAX; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[id] = in[id] * w; // [0 or c or (c, h ,w)] } else out[id] = in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = 
add_outputs*src_b + src_i; int out_index = id; float *add = layers_output[i]; if (weights) { const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)] } else out[out_index] += add[add_index]; } } } } void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; #pragma omp parallel for for (id = 0; id < size; ++id) { int src_id = id; int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float grad = 1, sum = 1, max_val = -FLT_MAX; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } /* grad = 0; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float delta_w = delta_in[id] * in[id]; const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum; } */ } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[src_i / step] += delta_in[id] * in[id] * grad; } else delta_out[id] += delta_in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = add_outputs*src_b + src_i; int out_index = id; float *layer_delta = layers_delta[i]; if (weights) { float *add = layers_output[i]; const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[weights_index] += delta_in[id] * add[add_index] * grad; } else layer_delta[add_index] += delta_in[id]; } } } } void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int minw = (w1 < 
w2) ? w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int i,j,k,b; for(b = 0; b < batch; ++b){ for(k = 0; k < minc; ++k){ for(j = 0; j < minh; ++j){ for(i = 0; i < minw; ++i){ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } } } } } void mean_cpu(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1./(batch * spatial); int i,j,k; for(i = 0; i < filters; ++i){ mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } } void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1./(batch * spatial - 1); int i,j,k; for(i = 0; i < filters; ++i){ variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += pow((x[index] - mean[i]), 2); } } variance[i] *= scale; } } void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { int b, f, i; for(b = 0; b < batch; ++b){ for(f = 0; f < filters; ++f){ for(i = 0; i < spatial; ++i){ int index = b*filters*spatial + f*spatial + i; x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .000001f)); } } } } void const_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] = ALPHA; } void mul_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX]; } void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA); } void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX]; } void scal_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA; } void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX) { int i; for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA; } void fill_cpu(int N, float ALPHA, float *X, int INCX) { int i; if (INCX == 1 && ALPHA == 0) { memset(X, 0, N * sizeof(float)); } else { for (i = 0; i < N; ++i) X[i*INCX] = ALPHA; } } void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ if(X) X[j*NX + i] += OUT[index]; ++index; } for(i = 0; i < NY; ++i){ if(Y) Y[j*NY + i] += OUT[index]; ++index; } } } void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ OUT[index++] = X[j*NX + i]; } for(i = 0; i < NY; ++i){ OUT[index++] = Y[j*NY + i]; } } } void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } void mult_add_into_cpu(int N, float *X, float *Y, float *Z) { int i; for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i]; } void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; float abs_val = fabs(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } void l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = fabs(diff); delta[i] = diff > 0 ? 
1 : -1; } } void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p) - (1-t)*log(1-p); delta[i] = t-p; } } void l2_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = diff * diff; delta[i] = diff; } } float dot_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; float dot = 0; for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY]; return dot; } void softmax(float *input, int n, float temp, float *output, int stride) { int i; float sum = 0; float largest = -FLT_MAX; for(i = 0; i < n; ++i){ if(input[i*stride] > largest) largest = input[i*stride]; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int g, b; for(b = 0; b < batch; ++b){ for(g = 0; g < groups; ++g){ softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride); } } } void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for (b = 0; b < batch; ++b) { for (k = 0; k < c; ++k) { for (j = 0; j < h*stride; ++j) { for (i = 0; i < w*stride; ++i) { int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if (forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } void constrain_cpu(int size, float ALPHA, float *X) { int i; for (i = 0; i < size; ++i) { X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i])); } } void fix_nan_and_inf_cpu(float *input, size_t size) { size_t i; for (i = 0; i < size; ++i) { float val = input[i]; if (isnan(val) || isinf(val)) input[i] = 1.0f / (float)(i + 1); // pseudo random value; i + 1 avoids dividing by zero at i == 0 } }
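The softmax routine above subtracts the running maximum inside exp so the exponentials stay finite for large logits. A standalone C sketch of that max-subtraction trick (temperature fixed to 1; independent of blas.c):

#include <assert.h>
#include <math.h>

int main(void) {
    float x[3] = {1000.0f, 1001.0f, 1002.0f};  /* naive expf(1000.0f) would overflow to inf */
    float out[3], sum = 0, largest = x[0];
    for (int i = 1; i < 3; ++i) if (x[i] > largest) largest = x[i];
    /* exp(x - max) is at most exp(0) = 1, so no overflow; the shift cancels
     * out after normalization and the probabilities are unchanged. */
    for (int i = 0; i < 3; ++i) { out[i] = expf(x[i] - largest); sum += out[i]; }
    for (int i = 0; i < 3; ++i) out[i] /= sum;
    assert(out[0] < out[1] && out[1] < out[2]);            /* ordering preserved */
    assert(fabsf(out[0] + out[1] + out[2] - 1.0f) < 1e-6f); /* sums to ~1 */
    return 0;
}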
bs_omp.c
#include <assert.h> #include <getopt.h> #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include "shared.hpp" #include "timer.h" #define DTYPE uint64_t /* * @brief creates a "test file" by filling a buffer with values */ void create_test_file(DTYPE *input, uint64_t nr_elements, DTYPE *querys, uint64_t n_querys) { uint64_t max = UINT64_MAX; uint64_t min = 0; srand(time(NULL)); input[0] = 1; for (uint64_t i = 1; i < nr_elements; i++) { input[i] = input[i - 1] + (rand() % 10) + 1; } for (uint64_t i = 0; i < n_querys; i++) { querys[i] = input[rand() % (nr_elements - 2)]; } } /** * @brief compute the output on the host */ uint64_t binarySearch(DTYPE *input, uint64_t input_size, DTYPE *querys, uint64_t n_querys) { uint64_t found = -1; uint64_t q, r, l, m; #pragma omp parallel for private(q, r, l, m) reduction(+ : found) for (q = 0; q < n_querys; q++) { l = 0; r = input_size; while (l <= r) { m = l + (r - l) / 2; // Check if x is present at mid if (input[m] == querys[q]) { found += m; break; } // If x greater, ignore left half if (input[m] < querys[q]) l = m + 1; // If x is smaller, ignore right half else r = m - 1; } } return found; } /** * @brief Main of the Host Application. */ int main(int argc, char **argv) { Timer timer; uint64_t input_size = atol(argv[1]); uint64_t n_querys = atol(argv[2]); printf("Vector size: %lu, num searches: %lu\n", input_size, n_querys); DTYPE *input = malloc((input_size) * sizeof(DTYPE)); DTYPE *querys = malloc((n_querys) * sizeof(DTYPE)); DTYPE result_host = -1; // Create an input file with arbitrary data. create_test_file(input, input_size, querys, n_querys); start(&timer, 0, 0); start_region(); result_host = binarySearch(input, input_size - 1, querys, n_querys); end_region(); stop(&timer, 0); int status = (result_host); if (status) { printf("[OK] Execution time: "); print(&timer, 0, 1); printf("ms.\n"); } else { printf("[ERROR]\n"); } free(input); free(querys); return status ? 0 : 1; }
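The query loop above folds per-query results into the shared accumulator found, which is only race-free with the reduction clause shown: each thread sums into a private copy that OpenMP combines with the original value at the end of the loop. A minimal self-contained sketch of that pattern (illustrative, not part of the benchmark):

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint64_t total = 0;
    uint64_t i;
    /* Without reduction, concurrent += on `total` would be a data race. */
    #pragma omp parallel for reduction(+ : total)
    for (i = 0; i < 1000000; i++) {
        total += i;  /* race-free: each update hits a thread-private copy */
    }
    /* Deterministic regardless of thread count: sum of 0..999999. */
    assert(total == 999999ULL * 1000000ULL / 2);
    return 0;
}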
DRACC_OMP_024_MxV_Missing_Enter_Data_yes.c
/* Matrix Vector multiplication with Matrix missing on Accelerator. Using the target enter data construct. */ #include <stdio.h> #include <stdbool.h> #include <stdlib.h> #define C 512 int *a; int *b; int *c; int init(){ for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ b[j+i*C]=1; } a[i]=1; c[i]=0; } return 0; } int Mult(){ #pragma omp target enter data map(to:a[0:C],c[0:C]) map(alloc:b[0:C*C]) device(0) #pragma omp target device(0) { #pragma omp teams distribute parallel for for(int i=0; i<C; i++){ for(int j=0; j<C; j++){ c[i]+=b[j+i*C]*a[j]; } } } #pragma omp target exit data map(from:c[0:C]) map(release:a[0:C],b[0:C*C]) device(0) return 0; } int check(){ bool test = false; for(int i=0; i<C; i++){ if(c[i]!=C){ test = true; } } printf("Memory Access Issue visible: %s\n",test ? "true" : "false"); return 0; } int main(){ a = malloc(C*sizeof(int)); b = malloc(C*C*sizeof(int)); c = malloc(C*sizeof(int)); init(); Mult(); check(); free(a); free(b); free(c); return 0; }
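This DRACC case is intentionally defective: b is only map(alloc:)-ed in the enter-data construct, so the device multiplies against uninitialized memory and check() is meant to flag the wrong c. For contrast, a sketch of the corrected mapping under the same shapes and names, with map(to:) so the device sees initialized data:

#include <stdlib.h>
#define C 512

int main(void) {
    int *a = malloc(C * sizeof(int));
    int *b = malloc(C * C * sizeof(int));
    int *c = malloc(C * sizeof(int));
    for (int i = 0; i < C; i++) {
        a[i] = 1; c[i] = 0;
        for (int j = 0; j < C; j++) b[j + i * C] = 1;
    }
    /* map(to:) for b replaces the defective map(alloc:), copying the matrix in */
    #pragma omp target enter data map(to: a[0:C], b[0:C*C], c[0:C]) device(0)
    #pragma omp target device(0)
    {
        #pragma omp teams distribute parallel for
        for (int i = 0; i < C; i++)
            for (int j = 0; j < C; j++)
                c[i] += b[j + i * C] * a[j];
    }
    #pragma omp target exit data map(from: c[0:C]) map(release: a[0:C], b[0:C*C]) device(0)
    /* every c[i] now equals C */
    free(a); free(b); free(c);
    return 0;
}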
nodal_two_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2018 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #define KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_elimination_builder_and_solver_continuity.h" #include "custom_strategies/builders_and_solvers/nodal_residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include <stdio.h> #include <math.h> #include <iostream> #include <fstream> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class NodalTwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalTwoStepVPStrategy); /// Counted pointer of NodalTwoStepVPStrategy //typedef boost::shared_ptr< NodalTwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; /// Node type (default is: Node<3>) typedef Node<3> NodeType; /// Geometry type (using with given NodeType) typedef Geometry<NodeType> GeometryType; typedef std::size_t SizeType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; typedef GeometryType::ShapeFunctionsGradientsType ShapeFunctionDerivativesArrayType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ NodalTwoStepVPStrategy(ModelPart &rModelPart, SolverSettingsType &rSolverConfig) : BaseType(rModelPart) { InitializeStrategy(rSolverConfig); } NodalTwoStepVPStrategy(ModelPart &rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer 
pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input? mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); /* typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new IncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); */ pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedEliminationBuilderAndSolverContinuity<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new NodalResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver >(pPressureLinearSolver)); */ this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); KRATOS_CATCH(""); } /// Destructor. 
virtual ~NodalTwoStepVPStrategy() {} int Check() override { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if (DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", ""); if (BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", ""); ModelPart &rModelPart = BaseType::GetModelPart(); if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize()); if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2) KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize()); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } for (ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) { ierr = itCond->Check(rCurrentProcessInfo); if (ierr != 0) break; } return ierr; KRATOS_CATCH(""); } double Solve() override { // Initialize BDF2 coefficients ModelPart &rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); double NormDp = 0.0; ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED]; // bool momentumAlreadyConverged=false; // bool continuityAlreadyConverged=false; unsigned int maxNonLinearIterations = mMaxPressureIter; std::cout << "\n Solve with nodally_integrated_two_step_vp strategy at t=" << currentTime << "s" << std::endl; if (timeIntervalChanged == true && currentTime > 10 * timeInterval) { maxNonLinearIterations *= 2; } if (currentTime < 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl; maxNonLinearIterations *= 3; } if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval) { if (BaseType::GetEchoLevel() > 1) std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl; maxNonLinearIterations *= 2; } bool momentumConverged = true; bool continuityConverged = false; bool fixedTimeStep = false; /* boost::timer solve_step_time; */ // this->UnactiveSliverElements(); //this is done in set_active_flag_mesher_process which is activated from fluid_pre_refining_mesher.py this->InitializeSolutionStep(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { if (BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "----- > iteration: " << it << std::endl; if (it == 0) { this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); } this->CalcNodalStrainsAndStresses(); momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); this->ComputeNodalVolume(); this->InitializeNonLinearIterations(); this->CalcNodalStrains(); if (fixedTimeStep == false) { 
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations); } // if((momentumConverged==true || it==maxNonLinearIterations-1) && momentumAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("momentumConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // momentumAlreadyConverged=true; // } // if((continuityConverged==true || it==maxNonLinearIterations-1) && continuityAlreadyConverged==false){ // std::ofstream myfile; // myfile.open ("continuityConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); // continuityAlreadyConverged=true; // } if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 1)) { //this->ComputeErrorL2NormCaseImposedG(); //this->ComputeErrorL2NormCasePoiseuille(); this->CalculateAccelerations(); // std::ofstream myfile; // myfile.open ("maxConvergedIteration.txt",std::ios::app); // myfile << currentTime << "\t" << it << "\n"; // myfile.close(); } if ((continuityConverged && momentumConverged) && it > 1) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); std::cout << "nodal V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." << std::endl; if (mReformDofSet) this->Clear(); /* std::cout << "solve_step_time : " << solve_step_time.elapsed() << std::endl; */ return NormDp; } void FinalizeSolutionStep() override { /* this->UpdateStressStrain(); */ } void Initialize() override { std::cout << " Initialize in nodal_two_step_v_p_strategy" << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); unsigned int neighbourNodes = neighb_nodes.size(); unsigned int sizeSDFNeigh = neighbourNodes * dimension; if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS)) { Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS); if (rNodalStress.size() != sizeStrains) { rNodalStress.resize(sizeStrains, false); } noalias(rNodalStress) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_DEVIATORIC_CAUCHY_STRESS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } else { std::cout << "THIS node does not have NODAL_VOLUME... 
" << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } else { std::cout << "THIS node does not have NODAL_MEAN_MESH_SIZE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } else { std::cout << "THIS node does not have NODAL_FREESURFACE_AREA... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS)) { Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); if (rNodalSFDneighbours.size() != sizeSDFNeigh) { rNodalSFDneighbours.resize(sizeSDFNeigh, false); } noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); } else { std::cout << "THIS node does not have NODAL_SFD_NEIGHBOURS... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE)) { Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); if (rSpatialDefRate.size() != sizeStrains) { rSpatialDefRate.resize(sizeStrains, false); } noalias(rSpatialDefRate) = ZeroVector(sizeStrains); } else { std::cout << "THIS node does not have NODAL_SPATIAL_DEF_RATE... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD)) { Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); if (rFgrad.size1() != dimension) { rFgrad.resize(dimension, dimension, false); } noalias(rFgrad) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD... " << itNode->X() << " " << itNode->Y() << std::endl; } if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL)) { Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); if (rFgradVel.size1() != dimension) { rFgradVel.resize(dimension, dimension, false); } noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } else { std::cout << "THIS node does not have NODAL_DEFORMATION_GRAD_VEL... 
" << itNode->X() << " " << itNode->Y() << std::endl; } this->AssignFluidMaterialToEachNode(itNode); } // } } void UnactiveSliverElements() { KRATOS_TRY; ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); MesherUtilities MesherUtils; double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart); double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size()); double ElementalVolume = 0; #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { unsigned int numNodes = itElem->GetGeometry().size(); if (numNodes == (dimension + 1)) { if (dimension == 2) { ElementalVolume = (itElem)->GetGeometry().Area(); } else if (dimension == 3) { ElementalVolume = (itElem)->GetGeometry().Volume(); } if (ElementalVolume < CriticalVolume) { // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl; (itElem)->Set(ACTIVE, false); } else { (itElem)->Set(ACTIVE, true); } } } } KRATOS_CATCH(""); } void AssignFluidMaterialToEachNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double currFirstLame = volumetricCoeff - 2.0 * deviatoricCoeff / 3.0; itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) = currFirstLame; itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT) = deviatoricCoeff; } void ComputeNodalVolume() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { Element::GeometryType &geometry = itElem->GetGeometry(); double elementalVolume = 0; if (dimension == 2) { elementalVolume = geometry.Area() / 3.0; } else if (dimension == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; } } // } } void InitializeSolutionStep() override { this->FillNodalSFDVector(); } void FillNodalSFDVector() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { // ModelPart::NodeIterator NodesBegin; // ModelPart::NodeIterator NodesEnd; // OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); // for 
(ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) // {
        for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
        {
            InitializeNodalVariablesForRemeshedDomain(itNode);
            SetNeighboursOrderToNode(itNode); // it assigns neighbours to inner nodes, filling NODAL_SFD_NEIGHBOURS_ORDER
        }
    }

    void SetNeighboursOrderToNode(ModelPart::NodeIterator itNode)
    {
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        unsigned int neighbourNodes = neighb_nodes.size() + 1; // +1 because the node itself must also be considered as a neighbour node

        Vector &rNodeOrderedNeighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
        if (rNodeOrderedNeighbours.size() != neighbourNodes)
            rNodeOrderedNeighbours.resize(neighbourNodes, false);
        noalias(rNodeOrderedNeighbours) = ZeroVector(neighbourNodes);
        rNodeOrderedNeighbours[0] = itNode->Id();

        if (neighbourNodes > 1)
        {
            for (unsigned int k = 0; k < neighbourNodes - 1; k++)
            {
                rNodeOrderedNeighbours[k + 1] = neighb_nodes[k].Id();
            }
        }
    }

    void InitializeNodalVariablesForRemeshedDomain(ModelPart::NodeIterator itNode)
    {
        ModelPart &rModelPart = BaseType::GetModelPart();
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
        unsigned int sizeStrains = 3 * (dimension - 1);
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        unsigned int neighbourNodes = neighb_nodes.size() + 1;
        unsigned int sizeSDFNeigh = neighbourNodes * dimension;

        if (itNode->SolutionStepsDataHas(NODAL_CAUCHY_STRESS))
        {
            Vector &rNodalStress = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS);
            if (rNodalStress.size() != sizeStrains)
                rNodalStress.resize(sizeStrains, false);
            noalias(rNodalStress) = ZeroVector(sizeStrains);
        }
        if (itNode->SolutionStepsDataHas(NODAL_DEVIATORIC_CAUCHY_STRESS))
        {
            Vector &rNodalDevStress = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS);
            if (rNodalDevStress.size() != sizeStrains)
                rNodalDevStress.resize(sizeStrains, false);
            noalias(rNodalDevStress) = ZeroVector(sizeStrains);
        }
        if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS_ORDER))
        {
            Vector &rNodalSFDneighboursOrder = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
            if (rNodalSFDneighboursOrder.size() != neighbourNodes)
                rNodalSFDneighboursOrder.resize(neighbourNodes, false);
            noalias(rNodalSFDneighboursOrder) = ZeroVector(neighbourNodes);
        }
        if (itNode->SolutionStepsDataHas(NODAL_SFD_NEIGHBOURS))
        {
            Vector &rNodalSFDneighbours = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
            if (rNodalSFDneighbours.size() != sizeSDFNeigh)
                rNodalSFDneighbours.resize(sizeSDFNeigh, false);
            noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh);
        }
        if (itNode->SolutionStepsDataHas(NODAL_SPATIAL_DEF_RATE))
        {
            Vector &rSpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE);
            if (rSpatialDefRate.size() != sizeStrains)
                rSpatialDefRate.resize(sizeStrains, false);
            noalias(rSpatialDefRate) = ZeroVector(sizeStrains);
        }
        if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD))
        {
            Matrix &rFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD);
            if (rFgrad.size1() != dimension)
                rFgrad.resize(dimension, dimension, false);
            noalias(rFgrad) = ZeroMatrix(dimension, dimension);
        }
        if (itNode->SolutionStepsDataHas(NODAL_DEFORMATION_GRAD_VEL))
        {
            Matrix &rFgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL);
            if (rFgradVel.size1() != dimension)
                rFgradVel.resize(dimension, dimension, false);
            noalias(rFgradVel) = ZeroMatrix(dimension,
dimension); } if (itNode->SolutionStepsDataHas(NODAL_VOLUME)) { itNode->FastGetSolutionStepValue(NODAL_VOLUME) = 0; } if (itNode->SolutionStepsDataHas(NODAL_MEAN_MESH_SIZE)) { itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_FREESURFACE_AREA)) { itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; } if (itNode->SolutionStepsDataHas(NODAL_VOLUMETRIC_DEF_RATE)) { itNode->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; } if (itNode->SolutionStepsDataHas(NODAL_EQUIVALENT_STRAIN_RATE)) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; } } void InitializeNonLinearIterations() { ModelPart &rModelPart = BaseType::GetModelPart(); ElementsArrayType &pElements = rModelPart.Elements(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif vector<unsigned int> element_partition; OpenMPUtils::CreatePartition(number_of_threads, pElements.size(), element_partition); // #pragma omp parallel // { int k = OpenMPUtils::ThisThread(); typename ElementsArrayType::iterator ElemBegin = pElements.begin() + element_partition[k]; typename ElementsArrayType::iterator ElemEnd = pElements.begin() + element_partition[k + 1]; for (typename ElementsArrayType::iterator itElem = ElemBegin; itElem != ElemEnd; itElem++) //MSI: To be parallelized { itElem->InitializeNonLinearIteration(rCurrentProcessInfo); } // } } void CalcNodalStrainsAndStresses() { ModelPart &rModelPart = BaseType::GetModelPart(); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); double theta = 0.5; if (nodalVolume > 0) { this->ComputeAndStoreNodalDeformationGradient(itNode, theta); this->CalcNodalStrainsAndStressesForNode(itNode); } else { // if nodalVolume==0 InitializeNodalVariablesForRemeshedDomain(itNode); } } // } } void CalcNodalStrainsAndStressesForNode(ModelPart::NodeIterator itNode) { ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); double currFirstLame = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); Matrix Fgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); Matrix FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); double detFgrad = 1.0; Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); if (dimension == 2) { MathUtils<double>::InvertMatrix2(Fgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(Fgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { 
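                // Yield-stress (Bingham-type) behaviour: the apparent viscosity is
                // regularized below with the Papanastasiou model,
                //   mu = mu0 + (tau_y / gamma_dot) * (1 - exp(-m * gamma_dot)),
                // where mu0 = DEVIATORIC_COEFFICIENT, tau_y = YIELD_SHEAR,
                // m = ADAPTIVE_EXPONENT and gamma_dot = sqrt(2 D:D) is the equivalent
                // strain rate built from the Voigt components (xx, yy, xy in 2D) of
                // NODAL_SPATIAL_DEF_RATE.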
itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2])); double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0); double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0); double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_xy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_xy; } else if (dimension == 3) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * 
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]); double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } double DefVol = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; double nodalSigmaTot_xx = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double nodalSigmaTot_yy = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double nodalSigmaTot_zz = currFirstLame * DefVol + 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2]; double nodalSigmaTot_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaTot_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaTot_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]; double nodalSigmaDev_xx = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] - DefVol / 3.0); double nodalSigmaDev_yy = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] - DefVol / 3.0); double nodalSigmaDev_zz = 2.0 * deviatoricCoeff * (itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] - DefVol / 3.0); double nodalSigmaDev_xy = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3]; double nodalSigmaDev_xz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4]; double nodalSigmaDev_yz = 2.0 * deviatoricCoeff * itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[0] = nodalSigmaTot_xx; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[1] = nodalSigmaTot_yy; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[2] = nodalSigmaTot_zz; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[3] = nodalSigmaTot_xy; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[4] = nodalSigmaTot_xz; itNode->GetSolutionStepValue(NODAL_CAUCHY_STRESS, 0)[5] = nodalSigmaTot_yz; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[0] = nodalSigmaDev_xx; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[1] = nodalSigmaDev_yy; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[2] = nodalSigmaDev_zz; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[3] = nodalSigmaDev_xy; 
itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[4] = nodalSigmaDev_xz; itNode->GetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS, 0)[5] = nodalSigmaDev_yz; } } void CalcNodalStrainsForNode(ModelPart::NodeIterator itNode) { /* std::cout << "Calc Nodal Strains " << std::endl; */ ModelPart &rModelPart = BaseType::GetModelPart(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); // Matrix Fgrad=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); // Matrix FgradVel=itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); // double detFgrad=1.0; // Matrix InvFgrad=ZeroMatrix(dimension,dimension); // Matrix SpatialVelocityGrad=ZeroMatrix(dimension,dimension); double detFgrad = 1.0; Matrix nodalFgrad = ZeroMatrix(dimension, dimension); Matrix FgradVel = ZeroMatrix(dimension, dimension); Matrix InvFgrad = ZeroMatrix(dimension, dimension); Matrix SpatialVelocityGrad = ZeroMatrix(dimension, dimension); nodalFgrad = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); FgradVel = itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); //Inverse if (dimension == 2) { MathUtils<double>::InvertMatrix2(nodalFgrad, InvFgrad, detFgrad); } else if (dimension == 3) { MathUtils<double>::InvertMatrix3(nodalFgrad, InvFgrad, detFgrad); } //it computes the spatial velocity gradient tensor --> [L_ij]=dF_ik*invF_kj SpatialVelocityGrad = prod(FgradVel, InvFgrad); if (dimension == 2) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt((2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2])); double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0]; double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1]; double DefVol = DefX + DefY; itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol; } else if (dimension == 3) { itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] = SpatialVelocityGrad(0, 0); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] = SpatialVelocityGrad(1, 1); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] = SpatialVelocityGrad(2, 2); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] = 0.5 * (SpatialVelocityGrad(1, 0) + SpatialVelocityGrad(0, 1)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] = 0.5 * (SpatialVelocityGrad(2, 0) + SpatialVelocityGrad(0, 2)); itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] = 0.5 * (SpatialVelocityGrad(2, 1) + SpatialVelocityGrad(1, 2)); itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = sqrt(2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1] + 2.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] * 
itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2] +
                 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[3] +
                 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[4] +
                 4.0 * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5] * itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[5]);

            double DefX = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[0];
            double DefY = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[1];
            double DefZ = itNode->GetSolutionStepValue(NODAL_SPATIAL_DEF_RATE)[2];
            double DefVol = DefX + DefY + DefZ;

            itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = DefVol;
        }
    }

    void CalcNodalStrains()
    {
        /* std::cout << "Calc Nodal Strains " << std::endl; */
        ModelPart &rModelPart = BaseType::GetModelPart();

        // #pragma omp parallel
        // {
        ModelPart::NodeIterator NodesBegin;
        ModelPart::NodeIterator NodesEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

        for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
        {
            double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
            double theta = 1.0;
            if (nodalVolume > 0)
            {
                this->ComputeAndStoreNodalDeformationGradient(itNode, theta);
                this->CalcNodalStrainsForNode(itNode);
            }
            else
            { // if nodalVolume==0
                InitializeNodalVariablesForRemeshedDomain(itNode);
            }
        }
        // }
        /* std::cout << "Calc Nodal Strains And Stresses DONE " << std::endl; */
    }

    void ComputeAndStoreNodalDeformationGradient(ModelPart::NodeIterator itNode, double theta)
    {
        KRATOS_TRY;

        ModelPart &rModelPart = BaseType::GetModelPart();
        const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
        Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
        Vector rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS);
        /* unsigned int idThisNode=nodalSFDneighboursId[0]; */
        const unsigned int neighSize = nodalSFDneighboursId.size();
        Matrix Fgrad = ZeroMatrix(dimension, dimension);
        Matrix FgradVel = ZeroMatrix(dimension, dimension);
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);

        if (dimension == 2)
        {
            double dNdXi = rNodalSFDneigh[0];
            double dNdYi = rNodalSFDneigh[1];

            Fgrad(0, 0) += dNdXi * itNode->X();
            Fgrad(0, 1) += dNdYi * itNode->X();
            Fgrad(1, 0) += dNdXi * itNode->Y();
            Fgrad(1, 1) += dNdYi * itNode->Y();

            double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta);
            double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta);

            FgradVel(0, 0) += dNdXi * VelocityX;
            FgradVel(0, 1) += dNdYi * VelocityX;
            FgradVel(1, 0) += dNdXi * VelocityY;
            FgradVel(1, 1) += dNdYi * VelocityY;

            unsigned int firstRow = 2;

            if (neighSize > 0)
            {
                for (unsigned int i = 0; i < neighSize - 1; i++) // neigh_nodes has one cell less than nodalSFDneighboursId because the latter also stores the considered node ID at the beginning
                {
                    dNdXi = rNodalSFDneigh[firstRow];
                    dNdYi = rNodalSFDneigh[firstRow + 1];
                    unsigned int neigh_nodes_id = neighb_nodes[i].Id();
                    unsigned int other_neigh_nodes_id = nodalSFDneighboursId[i + 1];
                    if (neigh_nodes_id != other_neigh_nodes_id)
                    {
                        std::cout << "node (x,y)=(" << itNode->X() << "," << itNode->Y() << ") with neigh_nodes_id " << neigh_nodes_id << " different than other_neigh_nodes_id " << other_neigh_nodes_id <<
std::endl; } Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; firstRow += 2; } } } else { double dNdXi = rNodalSFDneigh[0]; double dNdYi = rNodalSFDneigh[1]; double dNdZi = rNodalSFDneigh[2]; double VelocityX = itNode->FastGetSolutionStepValue(VELOCITY_X, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); double VelocityY = itNode->FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); double VelocityZ = itNode->FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + itNode->FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * itNode->X(); Fgrad(0, 1) += dNdYi * itNode->X(); Fgrad(0, 2) += dNdZi * itNode->X(); Fgrad(1, 0) += dNdXi * itNode->Y(); Fgrad(1, 1) += dNdYi * itNode->Y(); Fgrad(1, 2) += dNdZi * itNode->Y(); Fgrad(2, 0) += dNdXi * itNode->Z(); Fgrad(2, 1) += dNdYi * itNode->Z(); Fgrad(2, 2) += dNdZi * itNode->Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; unsigned int firstRow = 3; if (neighSize > 0) { for (unsigned int i = 0; i < neighSize - 1; i++) { dNdXi = rNodalSFDneigh[firstRow]; dNdYi = rNodalSFDneigh[firstRow + 1]; dNdZi = rNodalSFDneigh[firstRow + 2]; VelocityX = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_X, 1) * (1 - theta); VelocityY = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Y, 1) * (1 - theta); VelocityZ = neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 0) * theta + neighb_nodes[i].FastGetSolutionStepValue(VELOCITY_Z, 1) * (1 - theta); Fgrad(0, 0) += dNdXi * neighb_nodes[i].X(); Fgrad(0, 1) += dNdYi * neighb_nodes[i].X(); Fgrad(0, 2) += dNdZi * neighb_nodes[i].X(); Fgrad(1, 0) += dNdXi * neighb_nodes[i].Y(); Fgrad(1, 1) += dNdYi * neighb_nodes[i].Y(); Fgrad(1, 2) += dNdZi * neighb_nodes[i].Y(); Fgrad(2, 0) += dNdXi * neighb_nodes[i].Z(); Fgrad(2, 1) += dNdYi * neighb_nodes[i].Z(); Fgrad(2, 2) += dNdZi * neighb_nodes[i].Z(); FgradVel(0, 0) += dNdXi * VelocityX; FgradVel(0, 1) += dNdYi * VelocityX; FgradVel(0, 2) += dNdZi * VelocityX; FgradVel(1, 0) += dNdXi * VelocityY; FgradVel(1, 1) += dNdYi * VelocityY; FgradVel(1, 2) += dNdZi * VelocityY; FgradVel(2, 0) += dNdXi * VelocityZ; FgradVel(2, 1) += dNdYi * VelocityZ; FgradVel(2, 2) += dNdZi * VelocityZ; firstRow += 3; } } } itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD) = Fgrad; itNode->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL) = FgradVel; KRATOS_CATCH(""); } void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel) { KRATOS_TRY; /* this->CalculateDisplacements(); */ 
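        // Node coordinates are moved to the updated displacement field and the nodal
        // assembly variables (volumes, SFD neighbour data, deformation gradients) are
        // reset, since the nodal neighbourhoods change once the mesh is updated;
        // boundary normals are then recomputed on the new configuration.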
this->CalculateDisplacementsAndResetNodalVariables(); BaseType::MoveMesh(); BoundaryNormalsCalculationUtilities BoundaryComputation; BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); KRATOS_CATCH(""); } void CalculatePressureVelocity() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; } else { double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0); double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1); double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval; } } } void CalculatePressureAcceleration() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0; } else { double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1); double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0); CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval; } } } void CalculateAccelerations() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1); if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID))) { UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs); } else if ((i)->Is(RIGID)) { array_1d<double, 3> Zeros(3, 0.0); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros; (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros; } else { (i)->FastGetSolutionStepValue(NODAL_VOLUME) = 0.0; (i)->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0.0; (i)->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; (i)->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0.0; (i)->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0; 
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0; if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)) { array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration; (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME]; } } } } inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity, Vector &BDFcoeffs) { /* noalias(PreviousAcceleration)=CurrentAcceleration; */ noalias(CurrentAcceleration) = -BDFcoeffs[1] * (CurrentVelocity - PreviousVelocity) - PreviousAcceleration; // std::cout<<"rBDFCoeffs[0] is "<<rBDFCoeffs[0]<<std::endl;//3/(2*delta_t) // std::cout<<"rBDFCoeffs[1] is "<<rBDFCoeffs[1]<<std::endl;//-2/(delta_t) // std::cout<<"rBDFCoeffs[2] is "<<rBDFCoeffs[2]<<std::endl;//1/(2*delta_t) } void CalculateDisplacements() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double TimeStep = rCurrentProcessInfo[DELTA_TIME]; for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1); /* if( i->IsFixed(DISPLACEMENT_X) == false ) */ CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0]; /* if( i->IsFixed(DISPLACEMENT_Y) == false ) */ CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1]; /* if( i->IsFixed(DISPLACEMENT_Z) == false ) */ CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2]; } } void CalculateDisplacementsAndResetNodalVariables() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double TimeStep = rCurrentProcessInfo[DELTA_TIME]; const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); unsigned int sizeStrains = 3 * (dimension - 1); // #pragma omp parallel // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator i = NodesBegin; i != NodesEnd; ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0); array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1); CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0]; CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1]; if (dimension == 3) { CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2]; } ///// reset Nodal variables ////// Vector &rNodalSFDneighbours = 
i->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); unsigned int sizeSDFNeigh = rNodalSFDneighbours.size(); // unsigned int neighbourNodes=i->GetValue(NEIGHBOUR_NODES).size()+1; // unsigned int sizeSDFNeigh=neighbourNodes*dimension; i->FastGetSolutionStepValue(NODAL_VOLUME) = 0; i->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE) = 0; i->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA) = 0; i->FastGetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) = 0; i->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE) = 0; noalias(rNodalSFDneighbours) = ZeroVector(sizeSDFNeigh); Vector &rSpatialDefRate = i->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); noalias(rSpatialDefRate) = ZeroVector(sizeStrains); Matrix &rFgrad = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD); noalias(rFgrad) = ZeroMatrix(dimension, dimension); Matrix &rFgradVel = i->FastGetSolutionStepValue(NODAL_DEFORMATION_GRAD_VEL); noalias(rFgradVel) = ZeroMatrix(dimension, dimension); } // } } void UpdatePressureAccelerations() { this->CalculateAccelerations(); this->CalculatePressureVelocity(); this->CalculatePressureAcceleration(); } void Clear() override { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "NodalTwoStepVPStrategy"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream &rOStream) const override { rOStream << "NodalTwoStepVPStrategy"; } /// Print object's data. void PrintData(std::ostream &rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables. 
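     *
     * With Rho = OldDt/Dt, the BDF2 coefficients computed below are
     *   BDFcoeffs[0] =  (Rho*Rho + 2*Rho) / (Dt*(Rho*Rho + Rho)),     for step n+1
     *   BDFcoeffs[1] = -(Rho*Rho + 2*Rho + 1) / (Dt*(Rho*Rho + Rho)), for step n
     *   BDFcoeffs[2] =  1 / (Dt*(Rho*Rho + Rho)),                     for step n-1
     * which for a constant time step (Rho = 1) reduce to the familiar BDF2 values
     * 3/(2*Dt), -4/(2*Dt) and 1/(2*Dt). For mTimeOrder == 1 the backward Euler
     * pair 1/Dt and -1/Dt is used instead.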
*/ void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo) { KRATOS_TRY; if (mTimeOrder == 2) { //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) } else if (mTimeOrder == 1) { double Dt = rCurrentProcessInfo[DELTA_TIME]; double TimeCoeff = 1.0 / Dt; Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(2, false); BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt) BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt) } KRATOS_CATCH(""); } bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep) { ModelPart &rModelPart = BaseType::GetModelPart(); int Rank = rModelPart.GetCommunicator().MyPID(); bool ConvergedMomentum = false; double NormDv = 0; fixedTimeStep = false; // build momentum system and solve for fractional step velocity increment rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1); if (it == 0) { mpMomentumStrategy->InitializeSolutionStep(); /* this->SetNeighboursVelocityId(); */ } NormDv = mpMomentumStrategy->Solve(); if (BaseType::GetEchoLevel() > 1 && Rank == 0) std::cout << "-------------- s o l v e d ! ------------------" << std::endl; double DvErrorNorm = 0; ConvergedMomentum = this->CheckVelocityConvergence(NormDv, DvErrorNorm); unsigned int iterationForCheck = 3; KRATOS_INFO("TwoStepVPStrategy") << "iteration(" << it << ") Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl; // Check convergence if (it == maxIt - 1) { std::cout << " iteration(" << it << ") Final Velocity error: " << DvErrorNorm << " velTol: " << mVelocityTolerance << std::endl; fixedTimeStep = this->FixTimeStepMomentum(DvErrorNorm); } else if (it > iterationForCheck) { fixedTimeStep = this->CheckMomentumConvergence(DvErrorNorm); } // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // double currentTime = rCurrentProcessInfo[TIME]; // double tolerance=0.0000000001; // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){ // std::ofstream myfile; // myfile.open ("velocityConvergenceAt025s.txt",std::ios::app); // myfile << it << "\t" << DvErrorNorm << "\n"; // myfile.close(); // } // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){ // std::ofstream myfile; // myfile.open ("velocityConvergenceAt05s.txt",std::ios::app); // myfile << it << "\t" << DvErrorNorm << "\n"; // myfile.close(); // } // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){ // std::ofstream myfile; // myfile.open ("velocityConvergenceAt075s.txt",std::ios::app); // myfile << it << "\t" << DvErrorNorm << "\n"; // myfile.close(); // } // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){ // std::ofstream myfile; // myfile.open ("velocityConvergenceAt100s.txt",std::ios::app); // myfile << it << "\t" << DvErrorNorm << "\n"; // myfile.close(); // } if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Momentum equations did not reach the convergence tolerance." 
<< std::endl; return ConvergedMomentum; } bool SolveContinuityIteration(unsigned int it, unsigned int maxIt) { ModelPart &rModelPart = BaseType::GetModelPart(); int Rank = rModelPart.GetCommunicator().MyPID(); bool ConvergedContinuity = false; double NormDp = 0; // 2. Pressure solution rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5); if (it == 0) { mpPressureStrategy->InitializeSolutionStep(); } NormDp = mpPressureStrategy->Solve(); if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "The norm of pressure is: " << NormDp << std::endl; double DpErrorNorm = 0; ConvergedContinuity = this->CheckPressureConvergence(NormDp, DpErrorNorm); // Check convergence if (it == maxIt - 1) { std::cout << " iteration(" << it << ") Final Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl; ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm); } else { std::cout << " iteration(" << it << ") Pressure error: " << DpErrorNorm << " presTol: " << mPressureTolerance << std::endl; } // ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // double currentTime = rCurrentProcessInfo[TIME]; // double tolerance=0.0000000001; // if(currentTime>(0.25-tolerance) && currentTime<(0.25+tolerance)){ // std::ofstream myfile; // myfile.open ("pressureConvergenceAt025s.txt",std::ios::app); // myfile << it << "\t" << DpErrorNorm << "\n"; // myfile.close(); // } // else if(currentTime>(0.5-tolerance) && currentTime<(0.5+tolerance)){ // std::ofstream myfile; // myfile.open ("pressureConvergenceAt05s.txt",std::ios::app); // myfile << it << "\t" << DpErrorNorm << "\n"; // myfile.close(); // } // else if(currentTime>(0.75-tolerance) && currentTime<(0.75+tolerance)){ // std::ofstream myfile; // myfile.open ("pressureConvergenceAt075s.txt",std::ios::app); // myfile << it << "\t" << DpErrorNorm << "\n"; // myfile.close(); // } // else if(currentTime>(1.0-tolerance) && currentTime<(1.0+tolerance)){ // std::ofstream myfile; // myfile.open ("pressureConvergenceAt100s.txt",std::ios::app); // myfile << it << "\t" << DpErrorNorm << "\n"; // myfile.close(); // } if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Continuity equation did not reach the convergence tolerance." 
<< std::endl; return ConvergedContinuity; } bool CheckVelocityConvergence(const double NormDv, double &errorNormDv) { ModelPart &rModelPart = BaseType::GetModelPart(); double NormV = 0.00; errorNormDv = 0; #pragma omp parallel reduction(+ \ : NormV) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY); double NormVelNode = 0; for (unsigned int d = 0; d < 3; ++d) { NormVelNode += Vel[d] * Vel[d]; NormV += Vel[d] * Vel[d]; } } } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); if (NormV == 0.0) NormV = 1.00; errorNormDv = NormDv / NormV; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << "The norm of velocity increment is: " << NormDv << std::endl; std::cout << "The norm of velocity is: " << NormV << std::endl; std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl; } /* else{ */ /* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */ /* } */ if (errorNormDv < mVelocityTolerance) { return true; } else { return false; } } void ComputeErrorL2NormCaseImposedG() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double currentTime = rCurrentProcessInfo[TIME]; double sumErrorL2Velocity = 0; double sumErrorL2VelocityX = 0; double sumErrorL2VelocityY = 0; double sumErrorL2Pressure = 0; double sumErrorL2TauXX = 0; double sumErrorL2TauYY = 0; double sumErrorL2TauXY = 0; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double posX = itNode->X(); const double posY = itNode->Y(); const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X); const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y); const double pressure = itNode->FastGetSolutionStepValue(PRESSURE); const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0]; const double tauYY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1]; const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2]; double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3)); double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3)); double expectedPressure = -posX * (1.0 - posX); double expectedTauXX = 2.0 * (-4.0 * (1 - posX) * posX * (-1.0 + 2.0 * posX) * posY * (1.0 - 3.0 * posY + 2.0 * pow(posY, 2))); double expectedTauYY = 2.0 * (4.0 * posX * (1.0 - 3.0 * posX + 2.0 * pow(posX, 2)) * (1 - posY) * posY * (-1.0 + 2.0 * posY)); double expectedTauXY = (2.0 * (1.0 - 6.0 * posY + 6.0 * pow(posY, 2)) * (1 - posX) * (1 - posX) * pow(posX, 2) - 2.0 * (1.0 - 6.0 * posX + 6.0 * pow(posX, 2)) * (1 - posY) * (1 - posY) * pow(posY, 2)); double nodalErrorVelocityX = velX - expectedVelocityX; double nodalErrorVelocityY = velY - expectedVelocityY; double nodalErrorPressure = pressure 
- expectedPressure; double nodalErrorTauXX = tauXX - expectedTauXX; double nodalErrorTauYY = tauYY - expectedTauYY; double nodalErrorTauXY = tauXY - expectedTauXY; sumErrorL2Velocity += (pow(nodalErrorVelocityX, 2) + pow(nodalErrorVelocityY, 2)) * nodalArea; sumErrorL2VelocityX += pow(nodalErrorVelocityX, 2) * nodalArea; sumErrorL2VelocityY += pow(nodalErrorVelocityY, 2) * nodalArea; sumErrorL2Pressure += pow(nodalErrorPressure, 2) * nodalArea; sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * nodalArea; sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * nodalArea; sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * nodalArea; // itNode->FastGetSolutionStepValue(NODAL_ERROR_XX)=nodalErrorTauXX; } } double errorL2Velocity = sqrt(sumErrorL2Velocity); double errorL2VelocityX = sqrt(sumErrorL2VelocityX); double errorL2VelocityY = sqrt(sumErrorL2VelocityY); double errorL2Pressure = sqrt(sumErrorL2Pressure); double errorL2TauXX = sqrt(sumErrorL2TauXX); double errorL2TauYY = sqrt(sumErrorL2TauYY); double errorL2TauXY = sqrt(sumErrorL2TauXY); std::ofstream myfileVelocity; myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n"; myfileVelocity.close(); std::ofstream myfileVelocityX; myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app); myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n"; myfileVelocityX.close(); std::ofstream myfileVelocityY; myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app); myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n"; myfileVelocityY.close(); std::ofstream myfilePressure; myfilePressure.open("errorL2PressureFile.txt", std::ios::app); myfilePressure << currentTime << "\t" << errorL2Pressure << "\n"; myfilePressure.close(); std::ofstream myfileTauXX; myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app); myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n"; myfileTauXX.close(); std::ofstream myfileTauYY; myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app); myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n"; myfileTauYY.close(); std::ofstream myfileTauXY; myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app); myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n"; myfileTauXY.close(); } void ComputeErrorL2NormCasePoiseuille() { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); const double currentTime = rCurrentProcessInfo[TIME]; double sumErrorL2VelocityTheta = 0; double sumErrorL2TauTheta = 0; double r_in = 0.2; double R_out = 0.5; double kappa = r_in / R_out; double omega = 0.5; double viscosity = 100.0; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double posX = itNode->X(); const double posY = itNode->Y(); const double rPos = sqrt(pow(posX, 2) + pow(posY, 2)); const double cosalfa = posX / rPos; const double sinalfa = posY / rPos; const double sin2alfa = 2.0 * cosalfa * sinalfa; const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2); const double nodalArea = itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double velX = itNode->FastGetSolutionStepValue(VELOCITY_X); const double velY = itNode->FastGetSolutionStepValue(VELOCITY_Y); const double tauXX = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0]; const double tauYY = 
itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1]; const double tauXY = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2]; double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out); double computedVelocityTheta = sqrt(pow(velX, 2) + pow(velY, 2)); double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta; double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2); double computedTauTheta = +(tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa; double nodalErrorTauTheta = computedTauTheta - expectedTauTheta; itNode->FastGetSolutionStepValue(NODAL_ERROR_XX) = computedVelocityTheta; // if(posY>-0.01 && posY<0.01){ // std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl; // std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl; // std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl; // std::cout<<"\n "; // } // if(posX>-0.01 && posX<0.01){ // std::cout<<"expectedTauTheta "<<expectedTauTheta<<" computedTauTheta "<<computedTauTheta <<std::endl; // std::cout<<"tauXX "<<tauXX<<" tauYY "<<tauYY<<" tauXY "<<tauXY <<std::endl; // std::cout<<"posX "<<posX <<" posY "<<posY <<std::endl; // std::cout<<"\n "; // } sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * nodalArea; sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * nodalArea; } } double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta); double errorL2TauTheta = sqrt(sumErrorL2TauTheta); std::ofstream myfileVelocity; myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app); myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n"; myfileVelocity.close(); } bool CheckPressureConvergence(const double NormDp, double &errorNormDp) { ModelPart &rModelPart = BaseType::GetModelPart(); double NormP = 0.00; errorNormDp = 0; // #pragma omp parallel reduction(+:NormP) // { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } // } BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; errorNormDp = NormDp / NormP; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << " The norm of pressure increment is: " << NormDp << std::endl; std::cout << " The norm of pressure is: " << NormP << std::endl; std::cout << " Pressure error: " << errorNormDp << std::endl; } /* else{ */ /* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */ /* } */ return (errorNormDp < mPressureTolerance); } bool FixTimeStepMomentum(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.005; bool fixedTimeStep = false; if (currentTime < 3 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime >
timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval. Error norm: " << DvErrorNorm << std::endl; minTolerance = 0.05; if (DvErrorNorm > minTolerance) { std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS. Error norm: " << DvErrorNorm << std::endl; fixedTimeStep = true; // #pragma omp parallel // { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } // } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool CheckMomentumConvergence(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.99999; bool fixedTimeStep = false; if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true); std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.99999" << std::endl; std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl; fixedTimeStep = true; #pragma omp parallel { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1); itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1); itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1); } } } else { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); } return fixedTimeStep; } bool FixTimeStepContinuity(const double DvErrorNorm) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double currentTime = rCurrentProcessInfo[TIME]; double timeInterval = rCurrentProcessInfo[DELTA_TIME]; double minTolerance = 0.01; bool fixedTimeStep = false; if (currentTime < 3 * timeInterval) { minTolerance = 10; } if ((DvErrorNorm > minTolerance || (DvErrorNorm < 0) || (DvErrorNorm != DvErrorNorm)) && DvErrorNorm != 0 && (DvErrorNorm != 1 || currentTime > timeInterval)) { fixedTimeStep = true; rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, true); } else { rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); } return fixedTimeStep; } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} // private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxPressureIter; unsigned int
mDomainSize; unsigned int mTimeOrder; bool mReformDofSet; // Fractional step index. /* 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity */ // unsigned int mStepId; /// Scheme for the solution of the momentum equation StrategyPointerType mpMomentumStrategy; /// Scheme for the solution of the mass equation StrategyPointerType mpPressureStrategy; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ void InitializeStrategy(SolverSettingsType &rSolverConfig) { KRATOS_TRY; mTimeOrder = rSolverConfig.GetTimeOrder(); // Check that input parameters are reasonable and sufficient. this->Check(); //ModelPart& rModelPart = this->GetModelPart(); mDomainSize = rSolverConfig.GetDomainSize(); mReformDofSet = rSolverConfig.GetReformDofSet(); BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel()); // Initialize strategies for each step bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy); if (HaveVelStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance); /* rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter); */ } else { KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", ""); } bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy); if (HavePressStrategy) { rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance); rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter); } else { KRATOS_THROW_ERROR(std::runtime_error, "NodalTwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", ""); } // Check input parameters this->Check(); KRATOS_CATCH(""); } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. NodalTwoStepVPStrategy &operator=(NodalTwoStepVPStrategy const &rOther) {} /// Copy constructor. NodalTwoStepVPStrategy(NodalTwoStepVPStrategy const &rOther) {} ///@} }; /// Class NodalTwoStepVPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_NODAL_TWO_STEP_V_P_STRATEGY_H
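/*
 * Illustrative sketch (not part of the Kratos header above; the names here
 * are hypothetical): the ComputeErrorL2NormCase* members accumulate a
 * discrete, area-weighted L2 error norm,
 *     errorL2 = sqrt( sum_i (computed_i - expected_i)^2 * area_i ),
 * node by node. A minimal standalone version of that accumulation, with
 * plain arrays in place of Kratos nodes:
 */
#include <math.h>
#include <stddef.h>

/* computed[], expected[]: nodal values; area[]: nodal weights (NODAL_VOLUME). */
static double error_l2_norm(const double *computed, const double *expected,
                            const double *area, size_t n_nodes)
{
    double sum = 0.0;
    for (size_t i = 0; i < n_nodes; i++)
    {
        const double nodal_error = computed[i] - expected[i];
        sum += nodal_error * nodal_error * area[i];
    }
    return sqrt(sum);
}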
GB_binop__times_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_uint32 // A.*B function (eWiseMult): GB_AemultB__times_uint32 // A*D function (colscale): GB_AxD__times_uint32 // D*A function (rowscale): GB_DxB__times_uint32 // C+=B function (dense accum): GB_Cdense_accumB__times_uint32 // C+=b function (dense accum): GB_Cdense_accumb__times_uint32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_uint32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_uint32 // C=scalar+B GB_bind1st__times_uint32 // C=scalar+B' GB_bind1st_tran__times_uint32 // C=A+scalar GB_bind2nd__times_uint32 // C=A'+scalar GB_bind2nd_tran__times_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__times_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__times_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__times_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__times_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__times_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__times_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool 
Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info 
GB_bind1st_tran__times_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
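/*
 * Illustrative sketch (not part of the generated file above): the bind2nd
 * kernel applies z = x * y with the scalar bound to the second argument and
 * skips entries absent from the bitmap, as in GB_bind2nd__times_uint32.
 * A minimal standalone version with the GBB bitmap test spelled out
 * (Ab == NULL means the matrix is not bitmap and every entry is present):
 */
#include <stdint.h>

static void bind2nd_times_uint32 (uint32_t *Cx, const uint32_t *Ax,
    uint32_t y, const int8_t *Ab, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry p is not present */
        Cx [p] = Ax [p] * y ;                   /* cij = aij * y */
    }
}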
2626.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #define EXTRALARGE_DATASET #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(2) { #pragma omp for schedule(static, 8) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. */ #pragma omp for schedule(static, 8) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for schedule(static, 8) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } /* Calculate the m * m correlation matrix. */ #pragma omp for schedule(static, 8) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. 
All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
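/*
 * Illustrative sketch (not part of the benchmark above): the same
 * correlation computation without the PolyBench macros, on a small
 * hypothetical m x n problem. One deliberate difference: init_array()
 * above fixes float_n at 1.2, whereas here float_n is the number of
 * observations, the statistically meaningful normalization. After each
 * column is centered by its mean and scaled by sqrt(n)*stddev,
 * symmat[j1][j2] is simply the dot product of columns j1 and j2, i.e.
 * the Pearson correlation coefficient.
 */
#include <math.h>

enum { SM = 4, SN = 8 };   /* hypothetical sizes: SM variables, SN samples */

static void small_correlation(double data[SN][SM], double symmat[SM][SM])
{
  int i, j, j1, j2;
  double mean[SM], stddev[SM];
  double float_n = (double) SN;
  double eps = 0.1;

  for (j = 0; j < SM; j++)
    {
      mean[j] = 0.0;
      for (i = 0; i < SN; i++)
        mean[j] += data[i][j];
      mean[j] /= float_n;
      stddev[j] = 0.0;
      for (i = 0; i < SN; i++)
        stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
      stddev[j] = sqrt(stddev[j] / float_n);
      stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; /* guard zero-divide */
    }
  /* Center and reduce the column vectors. */
  for (i = 0; i < SN; i++)
    for (j = 0; j < SM; j++)
      data[i][j] = (data[i][j] - mean[j]) / (sqrt(float_n) * stddev[j]);
  /* Correlation matrix; unlike the kernel above, the last diagonal entry
     is covered inside the loop (its inner loop body is simply empty). */
  for (j1 = 0; j1 < SM; j1++)
    {
      symmat[j1][j1] = 1.0;
      for (j2 = j1 + 1; j2 < SM; j2++)
        {
          symmat[j1][j2] = 0.0;
          for (i = 0; i < SN; i++)
            symmat[j1][j2] += data[i][j1] * data[i][j2];
          symmat[j2][j1] = symmat[j1][j2];
        }
    }
}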
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriately. % % The format of the AutoGammaImage method is: % % MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoGammaImage(Image *image, ExceptionInfo *exception) { double gamma, log_mean, mean, sans; MagickStatusType status; ssize_t i; log_mean=log(0.5); if (image->channel_mask == DefaultChannels) { /* Apply gamma correction equally across all given channels. 
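      The gamma below is chosen so that the mean intensity lands at
      mid-gray: gamma = log(mean*QuantumScale)/log(0.5) satisfies
      pow(mean*QuantumScale, 1/gamma) == 0.5, the 1/gamma power being
      what LevelImage() applies to each normalized pixel.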
*/ (void) GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception)); } /* Auto-gamma each channel separately. */ status=MagickTrue; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ChannelType channel_mask; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i)); status=GetImageMean(image,&mean,&sans,exception); gamma=log(mean*QuantumScale)/log_mean; status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception); (void) SetImageChannelMask(image,channel_mask); if (status == MagickFalse) break; } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoLevelImage() adjusts the levels of a particular image channel by % scaling the minimum and maximum values to the full quantum range. % % The format of the LevelImage method is: % % MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AutoLevelImage(Image *image, ExceptionInfo *exception) { return(MinMaxStretchImage(image,0.0,0.0,1.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B r i g h t n e s s C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BrightnessContrastImage() changes the brightness and/or contrast of an % image. It converts the brightness and contrast parameters into slope and % intercept and calls a polynomical function to apply to the image. % % The format of the BrightnessContrastImage method is: % % MagickBooleanType BrightnessContrastImage(Image *image, % const double brightness,const double contrast,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o brightness: the brightness percent (-100 .. 100). % % o contrast: the contrast percent (-100 .. 100). % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType BrightnessContrastImage(Image *image, const double brightness,const double contrast,ExceptionInfo *exception) { #define BrightnessContastImageTag "BrightnessContast/Image" double alpha, coefficients[2], intercept, slope; MagickBooleanType status; /* Compute slope and intercept. 
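      Brightness b and contrast c (each in -100 .. 100) define a linear
      transfer on the QuantumScale-normalized pixel u: u' = slope*u +
      intercept, with slope = tan(MagickPI*(c/100+1)/4) clamped at 0 and
      intercept = b/100 + ((100-b)/200)*(1-slope). For example, b = 0 and
      c = 0 give slope = tan(MagickPI/4) = 1 and intercept = 0, i.e. the
      identity transfer.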
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); alpha=contrast; slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0)); if (slope < 0.0) slope=0.0; intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope); coefficients[0]=slope; coefficients[1]=intercept; status=FunctionImage(image,PolynomialFunction,2,coefficients,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C L A H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CLAHEImage() is a variant of adaptive histogram equalization in which the % contrast amplification is limited, so as to reduce this problem of noise % amplification. % % Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in % "Graphics Gems IV", Academic Press, 1994. % % The format of the CLAHEImage method is: % % MagickBooleanType CLAHEImage(Image *image,const size_t width, % const size_t height,const size_t number_bins,const double clip_limit, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the tile divisions to use in horizontal direction. % % o height: the height of the tile divisions to use in vertical direction. % % o number_bins: number of bins for histogram ("dynamic range"). % % o clip_limit: contrast limit for localised changes in contrast. A limit % less than 1 results in standard non-contrast limited AHE. % % o exception: return any errors or warnings in this structure. % */ typedef struct _RangeInfo { unsigned short min, max; } RangeInfo; static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins, size_t *histogram) { #define NumberCLAHEGrays (65536) ssize_t i; size_t cumulative_excess, previous_excess, step; ssize_t excess; /* Compute total number of excess pixels. */ cumulative_excess=0; for (i=0; i < (ssize_t) number_bins; i++) { excess=(ssize_t) histogram[i]-(ssize_t) clip_limit; if (excess > 0) cumulative_excess+=excess; } /* Clip histogram and redistribute excess pixels across all bins. */ step=cumulative_excess/number_bins; excess=(ssize_t) (clip_limit-step); for (i=0; i < (ssize_t) number_bins; i++) { if ((double) histogram[i] > clip_limit) histogram[i]=(size_t) clip_limit; else if ((ssize_t) histogram[i] > excess) { cumulative_excess-=histogram[i]-excess; histogram[i]=(size_t) clip_limit; } else { cumulative_excess-=step; histogram[i]+=step; } } /* Redistribute remaining excess. */ do { size_t *p; size_t *q; previous_excess=cumulative_excess; p=histogram; q=histogram+number_bins; while ((cumulative_excess != 0) && (p < q)) { step=number_bins/cumulative_excess; if (step < 1) step=1; for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step) if ((double) *p < clip_limit) { (*p)++; cumulative_excess--; } p++; } } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess)); } static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const size_t number_bins, const unsigned short *lut,const unsigned short *pixels,size_t *histogram) { const unsigned short *p; ssize_t i; /* Classify the pixels into a gray histogram. 
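      Each 16-bit intensity is mapped through the lut into one of
      number_bins buckets. The loop walks one tile row at a time; between
      rows the read pointer advances by the full padded clahe_info->width,
      so only the tile_info->width pixels belonging to this tile are
      counted.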
*/ for (i=0; i < (ssize_t) number_bins; i++) histogram[i]=0L; p=pixels; for (i=0; i < (ssize_t) tile_info->height; i++) { const unsigned short *q; q=p+tile_info->width; while (p < q) histogram[lut[*p++]]++; q+=clahe_info->width; p=q-tile_info->width; } } static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12, const size_t *Q22,const size_t *Q11,const size_t *Q21, const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels) { ssize_t y; unsigned short intensity; /* Bilinear interpolate four tiles to eliminate boundary artifacts. */ for (y=(ssize_t) tile->height; y > 0; y--) { ssize_t x; for (x=(ssize_t) tile->width; x > 0; x--) { intensity=lut[*pixels]; *pixels++=(unsigned short) (PerceptibleReciprocal((double) tile->width* tile->height)*(y*((double) x*Q12[intensity]+(tile->width-x)* Q22[intensity])+(tile->height-y)*((double) x*Q11[intensity]+ (tile->width-x)*Q21[intensity]))); } pixels+=(clahe_info->width-tile->width); } } static void GenerateCLAHELut(const RangeInfo *range_info, const size_t number_bins,unsigned short *lut) { ssize_t i; unsigned short delta; /* Scale input image [intensity min,max] to [0,number_bins-1]. */ delta=(unsigned short) ((range_info->max-range_info->min)/number_bins+1); for (i=(ssize_t) range_info->min; i <= (ssize_t) range_info->max; i++) lut[i]=(unsigned short) ((i-range_info->min)/delta); } static void MapCLAHEHistogram(const RangeInfo *range_info, const size_t number_bins,const size_t number_pixels,size_t *histogram) { double scale, sum; ssize_t i; /* Rescale histogram to range [min-intensity .. max-intensity]. */ scale=(double) (range_info->max-range_info->min)/number_pixels; sum=0.0; for (i=0; i < (ssize_t) number_bins; i++) { sum+=histogram[i]; histogram[i]=(size_t) (range_info->min+scale*sum); if (histogram[i] > range_info->max) histogram[i]=range_info->max; } } static MagickBooleanType CLAHE(const RectangleInfo *clahe_info, const RectangleInfo *tile_info,const RangeInfo *range_info, const size_t number_bins,const double clip_limit,unsigned short *pixels) { MemoryInfo *tile_cache; unsigned short *p; size_t limit, *tiles; ssize_t y; unsigned short *lut; /* Constrast limited adapted histogram equalization. */ if (clip_limit == 1.0) return(MagickTrue); tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y, number_bins*sizeof(*tiles)); if (tile_cache == (MemoryInfo *) NULL) return(MagickFalse); lut=(unsigned short *) AcquireQuantumMemory(NumberCLAHEGrays,sizeof(*lut)); if (lut == (unsigned short *) NULL) { tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickFalse); } tiles=(size_t *) GetVirtualMemoryBlob(tile_cache); limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins); if (limit < 1UL) limit=1UL; /* Generate greylevel mappings for each tile. */ GenerateCLAHELut(range_info,number_bins,lut); p=pixels; for (y=0; y < (ssize_t) clahe_info->y; y++) { ssize_t x; for (x=0; x < (ssize_t) clahe_info->x; x++) { size_t *histogram; histogram=tiles+(number_bins*(y*clahe_info->x+x)); GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram); ClipCLAHEHistogram((double) limit,number_bins,histogram); MapCLAHEHistogram(range_info,number_bins,tile_info->width* tile_info->height,histogram); p+=tile_info->width; } p+=clahe_info->width*(tile_info->height-1); } /* Interpolate greylevel mappings to get CLAHE image. 
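      Each output pixel blends the four surrounding tile mappings with the
      bilinear weights used by InterpolateCLAHE() above:
      new = (y*(x*Q12 + (w-x)*Q22) + (h-y)*(x*Q11 + (w-x)*Q21)) / (w*h),
      where x and y count down from the tile size (w,h); pixels nearer a
      tile's center therefore take more of that tile's mapping, which
      removes blocking artifacts at tile boundaries.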
*/ p=pixels; for (y=0; y <= (ssize_t) clahe_info->y; y++) { OffsetInfo offset; RectangleInfo tile; ssize_t x; tile.height=tile_info->height; tile.y=y-1; offset.y=tile.y+1; if (y == 0) { /* Top row. */ tile.height=tile_info->height >> 1; tile.y=0; offset.y=0; } else if (y == (ssize_t) clahe_info->y) { /* Bottom row. */ tile.height=(tile_info->height+1) >> 1; tile.y=clahe_info->y-1; offset.y=tile.y; } for (x=0; x <= (ssize_t) clahe_info->x; x++) { tile.width=tile_info->width; tile.x=x-1; offset.x=tile.x+1; if (x == 0) { /* Left column. */ tile.width=tile_info->width >> 1; tile.x=0; offset.x=0; } else if (x == (ssize_t) clahe_info->x) { /* Right column. */ tile.width=(tile_info->width+1) >> 1; tile.x=clahe_info->x-1; offset.x=tile.x; } InterpolateCLAHE(clahe_info, tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */ tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */ tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */ tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */ &tile,lut,p); p+=tile.width; } p+=clahe_info->width*(tile.height-1); } lut=(unsigned short *) RelinquishMagickMemory(lut); tile_cache=RelinquishVirtualMemory(tile_cache); return(MagickTrue); } MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width, const size_t height,const size_t number_bins,const double clip_limit, ExceptionInfo *exception) { #define CLAHEImageTag "CLAHE/Image" CacheView *image_view; ColorspaceType colorspace; MagickBooleanType status; MagickOffsetType progress; MemoryInfo *pixel_cache; RangeInfo range_info; RectangleInfo clahe_info, tile_info; size_t n; ssize_t y; unsigned short *pixels; /* Configure CLAHE parameters. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); range_info.min=0; range_info.max=NumberCLAHEGrays-1; tile_info.width=width; if (tile_info.width == 0) tile_info.width=image->columns >> 3; tile_info.height=height; if (tile_info.height == 0) tile_info.height=image->rows >> 3; tile_info.x=0; if ((image->columns % tile_info.width) != 0) tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width); tile_info.y=0; if ((image->rows % tile_info.height) != 0) tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height); clahe_info.width=image->columns+tile_info.x; clahe_info.height=image->rows+tile_info.y; clahe_info.x=(ssize_t) clahe_info.width/tile_info.width; clahe_info.y=(ssize_t) clahe_info.height/tile_info.height; pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height* sizeof(*pixels)); if (pixel_cache == (MemoryInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache); colorspace=image->colorspace; if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse) { pixel_cache=RelinquishVirtualMemory(pixel_cache); return(MagickFalse); } /* Initialize CLAHE pixels. 
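      The working buffer is padded so that its dimensions are exact
      multiples of the tile size: tile_info.x and tile_info.y hold the
      padding, and the virtual cache view below starts reading at
      (-(tile_info.x >> 1), -(tile_info.y >> 1)) so that the padding is
      split evenly around the image.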
*/ image_view=AcquireVirtualCacheView(image,exception); progress=0; status=MagickTrue; n=0; for (y=0; y < (ssize_t) clahe_info.height; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y- (tile_info.y >> 1),clahe_info.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) clahe_info.width; x++) { pixels[n++]=ScaleQuantumToShort(p[0]); p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ? (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels); if (status == MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); /* Push CLAHE pixels to CLAHE image. */ image_view=AcquireAuthenticCacheView(image,exception); n=clahe_info.width*(tile_info.y >> 1); for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } n+=tile_info.x >> 1; for (x=0; x < (ssize_t) image->columns; x++) { q[0]=ScaleShortToQuantum(pixels[n++]); q+=GetPixelChannels(image); } n+=(clahe_info.width-image->columns-(tile_info.x >> 1)); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,CLAHEImageTag,progress,2* GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); pixel_cache=RelinquishVirtualMemory(pixel_cache); if (TransformImageColorspace(image,colorspace,exception) == MagickFalse) status=MagickFalse; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClutImage() replaces each color value in the given image, by using it as an % index to lookup a replacement color value in a Color Look UP Table in the % form of an image. The values are extracted along a diagonal of the CLUT % image so either a horizontal or vertial gradient image can be used. % % Typically this is used to either re-color a gray-scale image according to a % color gradient in the CLUT image, or to perform a freeform histogram % (level) adjustment according to the (typically gray-scale) gradient in the % CLUT image. % % When the 'channel' mask includes the matte/alpha transparency channel but % one image has no such channel it is assumed that that image is a simple % gray-scale image that will effect the alpha channel values, either for % gray-scale coloring (with transparent or semi-transparent colors), or % a histogram adjustment of existing alpha channel values. If both images % have matte channels, direct and normal indexing is applied, which is rarely % used. 
% % The format of the ClutImage method is: % % MagickBooleanType ClutImage(Image *image,Image *clut_image, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o clut_image: the color lookup table image for replacement color values. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ClutImageTag "Clut/Image" CacheView *clut_view, *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo *clut_map; ssize_t i; ssize_t adjust, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(clut_image != (Image *) NULL); assert(clut_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsGrayColorspace(clut_image->colorspace) == MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map)); if (clut_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Clut image. */ status=MagickTrue; progress=0; adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1); clut_view=AcquireVirtualCacheView(clut_image,exception); for (i=0; i <= (ssize_t) MaxMap; i++) { GetPixelInfo(clut_image,clut_map+i); status=InterpolatePixelInfo(clut_image,clut_view,method, (double) i*(clut_image->columns-adjust)/MaxMap,(double) i* (clut_image->rows-adjust)/MaxMap,clut_map+i,exception); if (status == MagickFalse) break; } clut_view=DestroyCacheView(clut_view); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { PixelTrait traits; GetPixelInfoPixel(image,q,&pixel); traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.red))].red; traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.green))].green; traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.blue))].blue; traits=GetPixelChannelTraits(image,BlackPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.black))].black; traits=GetPixelChannelTraits(image,AlphaPixelChannel); if ((traits & UpdatePixelTrait) != 0) pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum( pixel.alpha))].alpha; SetPixelViaPixelInfo(image,&pixel,q); 
q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map); if ((clut_image->alpha_trait != UndefinedPixelTrait) && ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)) (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r D e c i s i o n L i s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorDecisionListImage() accepts a lightweight Color Correction Collection % (CCC) file which solely contains one or more color corrections and applies % the correction to the image. Here is a sample CCC file: % % <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2"> % <ColorCorrection id="cc03345"> % <SOPNode> % <Slope> 0.9 1.2 0.5 </Slope> % <Offset> 0.4 -0.5 0.6 </Offset> % <Power> 1.0 0.8 1.5 </Power> % </SOPNode> % <SATNode> % <Saturation> 0.85 </Saturation> % </SATNode> % </ColorCorrection> % </ColorCorrectionCollection> % % which includes the slop, offset, and power for each of the RGB channels % as well as the saturation. % % The format of the ColorDecisionListImage method is: % % MagickBooleanType ColorDecisionListImage(Image *image, % const char *color_correction_collection,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_correction_collection: the color correction collection in XML. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. 
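      Each map entry applies the ASC CDL slope-offset-power transfer to a
      normalized value u in [0,1], per RGB channel: u' = pow(slope*u +
      offset, power). Saturation is applied afterwards against the
      Rec. 709 luma (see below).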
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; (void) GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); 
(void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer function to colormap. */ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*(cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma); image->colormap[i].green=luma+color_correction.saturation*(cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma); image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma); } /* Apply transfer function to image.
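      With luma = 0.21267*R + 0.71526*G + 0.07217*B, each channel q becomes
      luma + saturation*(cdl(q) - luma): saturation 1.0 keeps the corrected
      color, 0.0 collapses it to the corrected gray.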
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+ 0.07217f*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q); SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q); SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag, progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastImage() enhances the intensity differences between the lighter and % darker elements of the image. Set sharpen to a MagickTrue to increase the % image contrast otherwise the contrast is reduced. % % The format of the ContrastImage method is: % % MagickBooleanType ContrastImage(Image *image, % const MagickBooleanType sharpen,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o sharpen: Increase or decrease image contrast. % % o exception: return any errors or warnings in this structure. % */ static void Contrast(const int sign,double *red,double *green,double *blue) { double brightness, hue, saturation; /* Enhance contrast: dark color become darker, light color become lighter. 
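      The adjustment acts on the HSB brightness b:
      b += 0.5*sign*(0.5*(sin(MagickPI*(b-0.5))+1.0) - b), a sinusoidal
      step that pushes b away from the 0.5 midpoint when sharpen is set
      (sign = 1) and toward it otherwise (sign = -1), then clamps the
      result to [0,1].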
*/ assert(red != (double *) NULL); assert(green != (double *) NULL); assert(blue != (double *) NULL); hue=0.0; saturation=0.0; brightness=0.0; ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)- brightness); if (brightness > 1.0) brightness=1.0; else if (brightness < 0.0) brightness=0.0; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } MagickExport MagickBooleanType ContrastImage(Image *image, const MagickBooleanType sharpen,ExceptionInfo *exception) { #define ContrastImageTag "Contrast/Image" CacheView *image_view; int sign; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); sign=sharpen != MagickFalse ? 1 : -1; if (image->storage_class == PseudoClass) { /* Contrast enhance colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; Contrast(sign,&red,&green,&blue); image->colormap[i].red=(MagickRealType) red; image->colormap[i].green=(MagickRealType) green; image->colormap[i].blue=(MagickRealType) blue; } } /* Contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double blue, green, red; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); Contrast(sign,&red,&green,&blue); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n t r a s t S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ContrastStretchImage() is a simple image enhancement technique that attempts % to improve the contrast in an image by 'stretching' the range of intensity % values it contains to span a desired range of values. It differs from the % more sophisticated histogram equalization in that it can only apply a % linear scaling function to the image pixel values. As a result the % 'enhancement' is less harsh. 
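%
% In effect each channel value u between the black and white points maps
% linearly to u' = (u - black)/(white - black)*QuantumRange, with values
% below the black point clamped to 0 and values above the white point
% clamped to QuantumRange.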
% % The format of the ContrastStretchImage method is: % % MagickBooleanType ContrastStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point, expressed as a cumulative pixel count in % the range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.). % % o white_point: the white point, expressed the same way. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ContrastStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color))) #define ContrastStretchImageTag "ContrastStretch/Image" CacheView *image_view; double *black, *histogram, *stretch_map, *white; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate histogram and stretch map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace,exception); black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black)); white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*stretch_map)); if ((black == (double *) NULL) || (white == (double *) NULL) || (histogram == (double *) NULL) || (stretch_map == (double *) NULL)) { if (stretch_map != (double *) NULL) stretch_map=(double *) RelinquishMagickMemory(stretch_map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (white != (double *) NULL) white=(double *) RelinquishMagickMemory(white); if (black != (double *) NULL) black=(double *) RelinquishMagickMemory(black); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; pixel=GetPixelIntensity(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { if (image->channel_mask != DefaultChannels) pixel=(double) p[i]; histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(pixel))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black/white levels.
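(For each channel the cumulative count below is walked up from intensity 0 until it exceeds black_point, and down from MaxMap until it exceeds the pixel total minus white_point; the crossing indices become that channel's black and white levels.)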
*/ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; ssize_t j; black[i]=0.0; white[i]=MaxRange(QuantumRange); intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > black_point) break; } black[i]=(double) j; intensity=0.0; for (j=(ssize_t) MaxMap; j != 0; j--) { intensity+=histogram[GetPixelChannels(image)*j+i]; if (intensity > ((double) image->columns*image->rows-white_point)) break; } white[i]=(double) j; } histogram=(double *) RelinquishMagickMemory(histogram); /* Stretch the histogram to create the stretched image mapping. */ (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*stretch_map)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t j; for (j=0; j <= (ssize_t) MaxMap; j++) { double gamma; gamma=PerceptibleReciprocal(white[i]-black[i]); if (j < (ssize_t) black[i]) stretch_map[GetPixelChannels(image)*j+i]=0.0; else if (j > (ssize_t) white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange; else if (black[i] != white[i]) stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum( (double) (MaxMap*gamma*(j-black[i]))); } } if (image->storage_class == PseudoClass) { ssize_t j; /* Stretch-contrast colormap. */ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,RedPixelChannel); image->colormap[j].red=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,GreenPixelChannel); image->colormap[j].green=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,BluePixelChannel); image->colormap[j].blue=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { i=GetPixelChannelOffset(image,AlphaPixelChannel); image->colormap[j].alpha=stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i]; } } } /* Stretch-contrast image. 
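(Each updatable channel sample is remapped through stretch_map below; channels whose black and white levels coincide are skipped, since no stretch is defined for them.)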
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (black[j] == white[j]) continue; q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ContrastStretchImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); stretch_map=(double *) RelinquishMagickMemory(stretch_map); white=(double *) RelinquishMagickMemory(white); black=(double *) RelinquishMagickMemory(black); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E n h a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EnhanceImage() applies a digital filter that improves the quality of a % noisy image. % % The format of the EnhanceImage method is: % % Image *EnhanceImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
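% % For example, a minimal sketch (it assumes image and exception were acquired % elsewhere; EnhanceImage() returns a new image and leaves the original % intact): % % Image *enhanced=EnhanceImage(image,exception); % % if (enhanced != (Image *) NULL) % { % image=DestroyImage(image); % image=enhanced; % }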
% */ MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception) { #define EnhanceImageTag "Enhance/Image" #define EnhancePixel(weight) \ mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \ distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \ distance_squared=(4.0+mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \ distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \ distance_squared+=(7.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \ distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \ distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \ distance_squared+=(5.0-mean)*distance*distance; \ mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \ distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \ distance_squared+=(5.0-mean)*distance*distance; \ if (distance_squared < 0.069) \ { \ aggregate.red+=(weight)*GetPixelRed(image,r); \ aggregate.green+=(weight)*GetPixelGreen(image,r); \ aggregate.blue+=(weight)*GetPixelBlue(image,r); \ aggregate.black+=(weight)*GetPixelBlack(image,r); \ aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \ total_weight+=(weight); \ } \ r+=GetPixelChannels(image); CacheView *enhance_view, *image_view; Image *enhance_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize enhanced image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); enhance_image=CloneImage(image,0,0,MagickTrue, exception); if (enhance_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse) { enhance_image=DestroyImage(enhance_image); return((Image *) NULL); } /* Enhance image. 
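(A 5x5 weighted window is scanned around each pixel; neighbors whose normalized color distance to the center pixel falls below a fixed threshold contribute to a weighted mean that replaces the center.)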
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); enhance_view=AcquireAuthenticCacheView(enhance_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,enhance_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception); q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2); GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { double distance, distance_squared, mean, total_weight; PixelInfo aggregate; const Quantum *magick_restrict r; GetPixelInfo(image,&aggregate); total_weight=0.0; GetPixelInfoPixel(image,p+center,&pixel); r=p; EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); r=p+GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+2*GetPixelChannels(image)*(image->columns+4); EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0); EnhancePixel(40.0); EnhancePixel(10.0); r=p+3*GetPixelChannels(image)*(image->columns+4); EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0); EnhancePixel(20.0); EnhancePixel(8.0); r=p+4*GetPixelChannels(image)*(image->columns+4); EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0); EnhancePixel(8.0); EnhancePixel(5.0); if (total_weight > MagickEpsilon) { pixel.red=((aggregate.red+total_weight/2.0)/total_weight); pixel.green=((aggregate.green+total_weight/2.0)/total_weight); pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight); pixel.black=((aggregate.black+total_weight/2.0)/total_weight); pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight); } SetPixelViaPixelInfo(enhance_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(enhance_image); } if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } enhance_view=DestroyCacheView(enhance_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) enhance_image=DestroyImage(enhance_image); return(enhance_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E q u a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EqualizeImage() applies a histogram equalization to the image. % % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
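% % For example, a minimal sketch (it assumes image and exception were acquired % elsewhere): % % if (EqualizeImage(image,exception) == MagickFalse) % CatchException(exception);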
% */ MagickExport MagickBooleanType EqualizeImage(Image *image, ExceptionInfo *exception) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; double black[CompositePixelChannel+1], *equalize_map, *histogram, *map, white[CompositePixelChannel+1]; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*equalize_map)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels* sizeof(*histogram)); map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map)); if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) || (map == (double *) NULL)) { if (map != (double *) NULL) map=(double *) RelinquishMagickMemory(map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (equalize_map != (double *) NULL) equalize_map=(double *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. */ status=MagickTrue; (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity=(double) p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity=GetPixelIntensity(image,p); histogram[GetPixelChannels(image)*ScaleQuantumToMap( ClampToQuantum(intensity))+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; ssize_t j; intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; map[GetPixelChannels(image)*j+i]=intensity; } } (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*equalize_map)); (void) memset(black,0,sizeof(black)); (void) memset(white,0,sizeof(white)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t j; black[i]=map[i]; white[i]=map[GetPixelChannels(image)*MaxMap+i]; if (black[i] != white[i]) for (j=0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum((double) ((MaxMap*(map[ GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i]))); } histogram=(double *) RelinquishMagickMemory(histogram); map=(double *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { ssize_t j; /* Equalize colormap.
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel = GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. */ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. Specify % individual gamma levels for the red, green, and blue channels, or adjust % all three with the gamma parameter. Values typically range from 0.8 to 2.3. % % You can also reduce the influence of a particular channel with a gamma % value of 0. 
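% % For example, a minimal sketch that brightens mid-tones and then gamma-adjusts % only the red channel by temporarily narrowing the channel mask (it assumes % image and exception were acquired elsewhere): % % ChannelType mask; % % (void) GammaImage(image,2.2,exception); % mask=SetImageChannelMask(image,RedChannel); % (void) GammaImage(image,1.2,exception); % (void) SetImageChannelMask(image,mask);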
% % The format of the GammaImage method is: % % MagickBooleanType GammaImage(Image *image,const double gamma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gamma: the image gamma. % % o exception: return any errors or warnings in this structure. % */ static inline double gamma_pow(const double value,const double gamma) { return(value < 0.0 ? value : pow(value,gamma)); } MagickExport MagickBooleanType GammaImage(Image *image,const double gamma, ExceptionInfo *exception) { #define GammaImageTag "Gamma/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; Quantum *gamma_map; ssize_t i; ssize_t y; /* Allocate and initialize gamma maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (gamma == 1.0) return(MagickTrue); gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map)); if (gamma_map == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map)); if (gamma != 0.0) for (i=0; i <= (ssize_t) MaxMap; i++) gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/ MaxMap,PerceptibleReciprocal(gamma)))); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Gamma-correct colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].red))]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].green))]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].blue))]; if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap( ClampToQuantum(image->colormap[i].alpha))]; } /* Gamma-correct image.
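(gamma_map is a lookup table over the MaxMap+1 possible map indices, gamma_map[i]=ScaleMapToQuantum(MaxMap*pow(i/MaxMap,1/gamma)), so the per-pixel work below reduces to a table lookup.)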
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType) q[j]))]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GammaImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map); if (image->gamma != 0.0) image->gamma*=gamma; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GrayscaleImage() converts the image to grayscale. % % The format of the GrayscaleImage method is: % % MagickBooleanType GrayscaleImage(Image *image, % const PixelIntensityMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the pixel intensity method. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GrayscaleImage(Image *image, const PixelIntensityMethod method,ExceptionInfo *exception) { #define GrayscaleImageTag "Grayscale/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse) { image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } #endif /* Grayscale image.
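(Each pixel's red, green, and blue samples are collapsed to a single intensity by the selected method; Rec709Luma is the default, and the luminance variants first decode sRGB gamma so the weights apply to linear values.)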
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType blue, green, red, intensity; red=(MagickRealType) GetPixelRed(image,q); green=(MagickRealType) GetPixelGreen(image,q); blue=(MagickRealType) GetPixelBlue(image,q); intensity=0.0; switch (method) { case AveragePixelIntensityMethod: { intensity=(red+green+blue)/3.0; break; } case BrightnessPixelIntensityMethod: { intensity=MagickMax(MagickMax(red,green),blue); break; } case LightnessPixelIntensityMethod: { intensity=(MagickMin(MagickMin(red,green),blue)+ MagickMax(MagickMax(red,green),blue))/2.0; break; } case MSPixelIntensityMethod: { intensity=(MagickRealType) (((double) red*red+green*green+ blue*blue)/3.0); break; } case Rec601LumaPixelIntensityMethod: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec601LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.298839*red+0.586811*green+0.114350*blue; break; } case Rec709LumaPixelIntensityMethod: default: { if (image->colorspace == RGBColorspace) { red=EncodePixelGamma(red); green=EncodePixelGamma(green); blue=EncodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case Rec709LuminancePixelIntensityMethod: { if (image->colorspace == sRGBColorspace) { red=DecodePixelGamma(red); green=DecodePixelGamma(green); blue=DecodePixelGamma(blue); } intensity=0.212656*red+0.715158*green+0.072186*blue; break; } case RMSPixelIntensityMethod: { intensity=(MagickRealType) (sqrt((double) red*red+green*green+ blue*blue)/sqrt(3.0)); break; } } SetPixelGray(image,ClampToQuantum(intensity),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); image->intensity=method; image->type=GrayscaleType; if ((method == Rec601LuminancePixelIntensityMethod) || (method == Rec709LuminancePixelIntensityMethod)) return(SetImageColorspace(image,LinearGRAYColorspace,exception)); return(SetImageColorspace(image,GRAYColorspace,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H a l d C l u t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % HaldClutImage() applies a Hald color lookup table to the image. A Hald % color lookup table is a 3-dimensional color cube mapped to 2 dimensions. % Create it with the HALD coder. 
You can apply any color transformation to % the Hald image and then use this method to apply the transform to the % image. % % The format of the HaldClutImage method is: % % MagickBooleanType HaldClutImage(Image *image,Image *hald_image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image, which is replaced by indexed CLUT values % % o hald_image: the color lookup table image for replacement color values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType HaldClutImage(Image *image, const Image *hald_image,ExceptionInfo *exception) { #define HaldClutImageTag "Clut/Image" typedef struct _HaldInfo { double x, y, z; } HaldInfo; CacheView *hald_view, *image_view; double width; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; size_t cube_size, length, level; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(hald_image != (Image *) NULL); assert(hald_image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); /* Hald clut image. */ status=MagickTrue; progress=0; length=(size_t) MagickMin((MagickRealType) hald_image->columns, (MagickRealType) hald_image->rows); for (level=2; (level*level*level) < length; level++) ; level*=level; cube_size=level*level; width=(double) hald_image->columns; GetPixelInfo(hald_image,&zero); hald_view=AcquireVirtualCacheView(hald_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double area, offset; HaldInfo point; PixelInfo pixel, pixel1, pixel2, pixel3, pixel4; point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q); point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q); point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q); offset=point.x+level*floor(point.y)+cube_size*floor(point.z); point.x-=floor(point.x); point.y-=floor(point.y); point.z-=floor(point.z); pixel1=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; pixel2=zero; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel3=zero; area=point.y; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.y < 0.5) ? 
0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, area,&pixel3); offset+=cube_size; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset,width),floor(offset/width),&pixel1,exception); if (status == MagickFalse) break; status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate, fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception); if (status == MagickFalse) break; pixel4=zero; CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha, area,&pixel4); pixel=zero; area=point.z; if (hald_image->interpolate == NearestInterpolatePixel) area=(point.z < 0.5) ? 0.0 : 1.0; CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha, area,&pixel); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,ClampToQuantum(pixel.red),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,ClampToQuantum(pixel.green),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,ClampToQuantum(pixel.blue),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,ClampToQuantum(pixel.black),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } hald_view=DestroyCacheView(hald_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImage() adjusts the levels of a particular image channel by % scaling the colors falling between specified white and black points to % the full available quantum range. % % The parameters provided represent the black and white points. The black % point specifies the darkest color in the image. Colors darker than the % black point are set to zero. The white point specifies the lightest color % in the image. Colors brighter than the white point are set to the maximum % quantum value. % % If a '!' flag is given, map black and white colors to the given levels % rather than mapping those levels to black and white. See % LevelizeImage() below. % % Gamma specifies a gamma correction to apply to the image. % % The format of the LevelImage method is: % % MagickBooleanType LevelImage(Image *image,const double black_point, % const double white_point,const double gamma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % % o exception: return any errors or warnings in this structure.
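% % For example, a minimal sketch that maps 10% gray to black, 90% gray to % white, and applies a mid-tone gamma of 1.5 (it assumes image and exception % were acquired elsewhere): % % (void) LevelImage(image,0.10*QuantumRange,0.90*QuantumRange,1.5, % exception);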
% */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const double pixel) { double level_pixel, scale; scale=PerceptibleReciprocal(white_point-black_point); level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point), PerceptibleReciprocal(gamma)); return(level_pixel); } MagickExport MagickBooleanType LevelImage(Image *image,const double black_point, const double white_point,const double gamma,ExceptionInfo *exception) { #define LevelImageTag "Level/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].alpha)); } /* Level image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma, (double) q[j])); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImage() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. Gamma is % applied before the values are mapped. 
% % LevelizeImage() can be called by using the +level command-line option, or % by using a '!' on a -level or LevelImage() geometry string. % % It can be used to de-contrast a greyscale image to the exact levels % specified, or, by using specific levels for each channel, to convert a % greyscale image into any linear color gradient according to those levels. % % The format of the LevelizeImage method is: % % MagickBooleanType LevelizeImage(Image *image,const double black_point, % const double white_point,const double gamma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: The level to map zero (black) to. % % o white_point: The level to map QuantumRange (white) to. % % o gamma: adjust gamma by this factor before mapping values. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelizeImage(Image *image, const double black_point,const double white_point,const double gamma, ExceptionInfo *exception) { #define LevelizeImageTag "Levelize/Image" #define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \ (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Levelize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) LevelizeValue( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) LevelizeValue( image->colormap[i].alpha); } /* Levelize image.
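(Each updatable channel sample is passed through LevelizeValue(), which applies gamma and then compresses the full quantum range into the interval [black_point,white_point].)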
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=LevelizeValue(q[j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColors() maps the given colors to "black" and "white" values, % linearly spreading out the colors, and level values on a channel-by-channel % basis, as per LevelImage(). The given colors allow you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set to MagickTrue, the image values are modified % in the reverse direction. That is, any existing "black" and "white" colors % in the image become the given color values, with all other values % compressed appropriately. This effectively maps a greyscale gradient into % the given color gradient. % % The format of the LevelImageColors method is: % % MagickBooleanType LevelImageColors(Image *image, % const PixelInfo *black_color,const PixelInfo *white_color, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_color: The color to map black to/from % % o white_color: The color to map white to/from % % o invert: if MagickTrue, map to the given colors (levelize) rather than % from them (level) % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelImageColors(Image *image, const PixelInfo *black_color,const PixelInfo *white_color, const MagickBooleanType invert,ExceptionInfo *exception) { ChannelType channel_mask; MagickStatusType status; /* Allocate and initialize levels map.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelizeImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelizeImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelizeImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. 
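% % For example, a minimal sketch that saturates roughly 2% of the darkest and % 2% of the lightest pixels (it assumes image and exception were acquired % elsewhere; unlike ContrastStretchImage(), here both points are pixel counts % measured inward from their own end of the histogram): % % double pixels=(double) image->columns*image->rows; % % (void) LinearStretchImage(image,0.02*pixels,0.02*pixels,exception);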
% % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LinearStretchImage(Image *image, const double black_point,const double white_point,ExceptionInfo *exception) { #define LinearStretchImageTag "LinearStretch/Image" CacheView *image_view; double *histogram, intensity; MagickBooleanType status; ssize_t black, white, y; /* Allocate histogram and linear map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Form histogram. */ (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { intensity=GetPixelIntensity(image,p); histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Find the histogram boundaries by locating the black and white point levels. */ intensity=0.0; for (black=0; black < (ssize_t) MaxMap; black++) { intensity+=histogram[black]; if (intensity >= black_point) break; } intensity=0.0; for (white=(ssize_t) MaxMap; white != 0; white--) { intensity+=histogram[white]; if (intensity >= white_point) break; } histogram=(double *) RelinquishMagickMemory(histogram); status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black), (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d u l a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModulateImage() lets you control the brightness, saturation, and hue % of an image. Modulate represents the brightness, saturation, and hue % as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the % modulation is lightness, saturation, and hue. For HWB, use blackness, % whiteness, and hue; for HCL, use chroma, luma, and hue. % % The format of the ModulateImage method is: % % MagickBooleanType ModulateImage(Image *image,const char *modulate, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulate: Define the percent change in brightness, saturation, and hue. % % o exception: return any errors or warnings in this structure. % */ static inline void ModulateHCL(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue.
*/ ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHCLp(const double percent_hue, const double percent_chroma,const double percent_luma,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma); hue+=fmod((percent_hue-100.0),200.0)/200.0; chroma*=0.01*percent_chroma; luma*=0.01*percent_luma; ConvertHCLpToRGB(hue,chroma,luma,red,green,blue); } static inline void ModulateHSB(const double percent_hue, const double percent_saturation,const double percent_brightness,double *red, double *green,double *blue) { double brightness, hue, saturation; /* Increase or decrease color brightness, saturation, or hue. */ ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; brightness*=0.01*percent_brightness; ConvertHSBToRGB(hue,saturation,brightness,red,green,blue); } static inline void ModulateHSI(const double percent_hue, const double percent_saturation,const double percent_intensity,double *red, double *green,double *blue) { double intensity, hue, saturation; /* Increase or decrease color intensity, saturation, or hue. */ ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; intensity*=0.01*percent_intensity; ConvertHSIToRGB(hue,saturation,intensity,red,green,blue); } static inline void ModulateHSL(const double percent_hue, const double percent_saturation,const double percent_lightness,double *red, double *green,double *blue) { double hue, lightness, saturation; /* Increase or decrease color lightness, saturation, or hue. */ ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; lightness*=0.01*percent_lightness; ConvertHSLToRGB(hue,saturation,lightness,red,green,blue); } static inline void ModulateHSV(const double percent_hue, const double percent_saturation,const double percent_value,double *red, double *green,double *blue) { double hue, saturation, value; /* Increase or decrease color value, saturation, or hue. */ ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value); hue+=fmod((percent_hue-100.0),200.0)/200.0; saturation*=0.01*percent_saturation; value*=0.01*percent_value; ConvertHSVToRGB(hue,saturation,value,red,green,blue); } static inline void ModulateHWB(const double percent_hue, const double percent_whiteness,const double percent_blackness,double *red, double *green,double *blue) { double blackness, hue, whiteness; /* Increase or decrease color blackness, whiteness, or hue. */ ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness); hue+=fmod((percent_hue-100.0),200.0)/200.0; blackness*=0.01*percent_blackness; whiteness*=0.01*percent_whiteness; ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue); } static inline void ModulateLCHab(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. 
*/ ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHabToRGB(luma,chroma,hue,red,green,blue); } static inline void ModulateLCHuv(const double percent_luma, const double percent_chroma,const double percent_hue,double *red, double *green,double *blue) { double hue, luma, chroma; /* Increase or decrease color luma, chroma, or hue. */ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. */ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
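(The colorspace switch below selects the cylindrical color model used for the adjustment; HSL is the default when no modulate:colorspace artifact is set.)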
*/
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
        percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          /* handled here as in the colormap switch above */
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     N e g a t e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NegateImage() negates the colors in the reference image.  The grayscale
%  option means that only grayscale values within the image are negated.
%
%  The format of the NegateImage method is:
%
%      MagickBooleanType NegateImage(Image *image,
%        const MagickBooleanType grayscale,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
%    o exception: return any errors or warnings in this structure.
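%
%  For example (an illustrative sketch; "image" and "exception" are assumed
%  to already exist), negate only the gray pixels of an image:
%
%      status=NegateImage(image,MagickTrue,exception);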
% */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. */ if (grayscale != MagickFalse) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; if (IsPixelGray(image,q) == MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,NegateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
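    Non-grayscale path: negate every channel that carries the update trait,
    for every pixel.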
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     N o r m a l i z e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The NormalizeImage() method enhances the contrast of a color image by
%  mapping the darkest 0.15 percent of all pixels to black and the brightest
%  0.05 percent to white (the 0.0015 and 0.9995 fractions passed to
%  ContrastStretchImage() below).
%
%  The format of the NormalizeImage method is:
%
%      MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    black_point,
    white_point;

  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImage(image,black_point,white_point,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
%  sigmoidal contrast algorithm.  Increase the contrast of the image using a
%  sigmoidal transfer function without saturating highlights or shadows.
%  Contrast indicates how much to increase the contrast (0 is none; 3 is
%  typical; 20 is pushing it); mid-point indicates where midtones fall in the
%  resultant image (0 is white; 50% is middle-gray; 100% is black).  Set
%  sharpen to MagickTrue to increase the image contrast otherwise the contrast
%  is reduced.
%
%  The format of the SigmoidalContrastImage method is:
%
%      MagickBooleanType SigmoidalContrastImage(Image *image,
%        const MagickBooleanType sharpen,const double contrast,
%        const double midpoint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o contrast: strength of the contrast, the larger the number the more
%      'threshold-like' it becomes.
%
%    o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
%    o exception: return any errors or warnings in this structure.
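%
%  For example (an illustrative sketch), a typical contrast boost centered
%  on middle gray:
%
%      status=SigmoidalContrastImage(image,MagickTrue,3.0,0.5*QuantumRange,
%        exception);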
% */ /* ImageMagick 6 has a version of this function which uses LUTs. */ /* Sigmoidal function Sigmoidal with inflexion point moved to b and "slope constant" set to a. The first version, based on the hyperbolic tangent tanh, when combined with the scaling step, is an exact arithmetic clone of the sigmoid function based on the logistic curve. The equivalence is based on the identity 1/(1+exp(-t)) = (1+tanh(t/2))/2 (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled sigmoidal derivation is invariant under affine transformations of the ordinate. The tanh version is almost certainly more accurate and cheaper. The 0.5 factor in the argument is to clone the legacy ImageMagick behavior. The reason for making the define depend on atanh even though it only uses tanh has to do with the construction of the inverse of the scaled sigmoidal. */ #if defined(MAGICKCORE_HAVE_ATANH) #define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) ) #else #define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) ) #endif /* Scaled sigmoidal function: ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) / ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) ) See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by zero. This is fixed below by exiting immediately when contrast is small, leaving the image (or colormap) unmodified. This appears to be safe because the series expansion of the logistic sigmoidal function around x=b is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh). */ #define ScaledSigmoidal(a,b,x) ( \ (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \ (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) ) /* Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even when creating a LUT from in gamut values, hence the branching. In addition, HDRI may have out of gamut values. InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal: It is only a right inverse. This is unavoidable. */ static inline double InverseScaledSigmoidal(const double a,const double b, const double x) { const double sig0=Sigmoidal(a,b,0.0); const double sig1=Sigmoidal(a,b,1.0); const double argument=(sig1-sig0)*x+sig0; const double clamped= ( #if defined(MAGICKCORE_HAVE_ATANH) argument < -1+MagickEpsilon ? -1+MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b+(2.0/a)*atanh(clamped)); #else argument < MagickEpsilon ? MagickEpsilon : ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument ) ); return(b-log(1.0/clamped-1.0)/a); #endif } MagickExport MagickBooleanType SigmoidalContrastImage(Image *image, const MagickBooleanType sharpen,const double contrast,const double midpoint, ExceptionInfo *exception) { #define SigmoidalContrastImageTag "SigmoidalContrast/Image" #define ScaledSig(x) ( ClampToQuantum(QuantumRange* \ ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) #define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \ InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) ) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Convenience macros. 
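    ScaledSig() and InverseScaledSig(), defined above, wrap the scaled
    sigmoidal and its right inverse as Quantum-to-Quantum transforms.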
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); /* Side effect: may clamp values unless contrast<MagickEpsilon, in which case nothing is done. */ if (contrast < MagickEpsilon) return(MagickTrue); /* Sigmoidal-contrast enhance colormap. */ if (image->storage_class == PseudoClass) { ssize_t i; if( sharpen != MagickFalse ) for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) ScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) ScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) ScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) ScaledSig( image->colormap[i].alpha); } else for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(MagickRealType) InverseScaledSig( image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(MagickRealType) InverseScaledSig( image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(MagickRealType) InverseScaledSig( image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(MagickRealType) InverseScaledSig( image->colormap[i].alpha); } } /* Sigmoidal-contrast enhance image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if( sharpen != MagickFalse ) q[i]=ScaledSig(q[i]); else q[i]=InverseScaledSig(q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e B a l a n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteBalanceImage() applies white balancing to an image according to a % grayworld assumption in the LAB colorspace. 
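%  Under the grayworld assumption the a and b chroma channels of a neutral
%  scene average to zero, so the routine estimates the mean a/b offsets and
%  subtracts a luminance-weighted multiple of them from every pixel.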
% % The format of the WhiteBalanceImage method is: % % MagickBooleanType WhiteBalanceImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image to auto-level % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType WhiteBalanceImage(Image *image, ExceptionInfo *exception) { #define WhiteBalanceImageTag "WhiteBalance/Image" CacheView *image_view; const char *artifact; double a_mean, b_mean; MagickOffsetType progress; MagickStatusType status; ssize_t y; /* White balance image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=TransformImageColorspace(image,LabColorspace,exception); a_mean=0.0; b_mean=0.0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { a_mean+=QuantumScale*GetPixela(image,p)-0.5; b_mean+=QuantumScale*GetPixelb(image,p)-0.5; p+=GetPixelChannels(image); } } a_mean/=((double) image->columns*image->rows); b_mean/=((double) image->columns*image->rows); progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double a, b; /* Scale the chroma distance shifted according to amount of luminance. */ a=(double) GetPixela(image,q)-1.1*GetPixelL(image,q)*a_mean; b=(double) GetPixelb(image,q)-1.1*GetPixelL(image,q)*b_mean; SetPixela(image,ClampToQuantum(a),q); SetPixelb(image,ClampToQuantum(b),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,WhiteBalanceImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); artifact=GetImageArtifact(image,"white-balance:vibrance"); if (artifact != (const char *) NULL) { ChannelType channel_mask; double black_point; GeometryInfo geometry_info; MagickStatusType flags; /* Level the a & b channels. */ flags=ParseGeometry(artifact,&geometry_info); black_point=geometry_info.rho; if ((flags & PercentValue) != 0) black_point*=(double) (QuantumRange/100.0); channel_mask=SetImageChannelMask(image,(ChannelType) (aChannel | bChannel)); status&=LevelImage(image,black_point,(double) QuantumRange-black_point, 1.0,exception); (void) SetImageChannelMask(image,channel_mask); } status&=TransformImageColorspace(image,sRGBColorspace,exception); return(status != 0 ? MagickTrue : MagickFalse); }
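/*
  Illustrative usage sketch (an assumption-laden example, not part of the
  original sources): the adjustments above compose naturally, e.g.

    (void) ModulateImage(image,"100,120,100",exception);
    (void) SigmoidalContrastImage(image,MagickTrue,3.0,0.5*QuantumRange,
      exception);
    (void) WhiteBalanceImage(image,exception);

  where "image" and "exception" are assumed to come from the usual
  MagickCore setup (e.g. ReadImage()/AcquireExceptionInfo()).
*/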
csr.c
/*!
 * \file
 *
 * \brief Various routines for dealing with CSR matrices
 *
 * \author George Karypis
 * \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
 */

#include <GKlib.h>

#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocates memory for a CSR matrix and initializes it
    \returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat;

  mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}


/*************************************************************************/
/*! Initializes the matrix
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = mat->ncols = -1;
}


/*************************************************************************/
/*! Frees all the memory allocated for the matrix.
    \param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat == NULL)
    return;
  gk_csr_FreeContents(*mat);
  gk_free((void **)mat, LTERM);
}


/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}


/*************************************************************************/
/*! Returns a copy of a matrix.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix.
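    The copy is deep for every allocated field, so it should eventually be
    released with gk_csr_Free().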
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
    \param mat is the original matrix.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
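    The rows appear in the output in the order specified by \c rind, and the
    same row may be listed (and thus copied) more than once.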
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  for (nnz=0, i=0; i<nrows; i++)
    nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
    \param mat is the original matrix.
    \param part is the partitioning vector of the rows.
    \param pid is the partition ID that will be extracted.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}


/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
    color array.
    \param mat is the original matrix.
    \param color is an array of size equal to the number of non-zeros in
           the matrix (row-wise structure). The matrix is split into as
           many parts as the number of colors. For meaningful results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of matrices for each supplied color number.
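    An illustrative cleanup sketch (assuming \c mat, \c color, an integer
    \c i, and the number of colors \c ncolors are in scope):
    \code
    gk_csr_t **smats = gk_csr_Split(mat, color);
    for (i=0; i<ncolors; i++)
      gk_csr_Free(&smats[i]);
    gk_free((void **)&smats, LTERM);
    \endcode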
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  ncolors = gk_imax(rowptr[nrows], color)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }

  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}


/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it in the matrix's
    forward structure.
    \param filename is the file that stores the data.
    \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
           GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
           specifying the type of the input format.
           The GK_CSR_FMT_CSR does not contain a header line, whereas the
           GK_CSR_FMT_BINROW is a binary format written by gk_csr_Write()
           using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or not. It only applies when GK_CSR_FMT_CSR is used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices starts from 1 or 0, respectively. If they start from 1,
           they are automatically decremented during input so that they
           will start from 0. It only applies when GK_CSR_FMT_CSR is used.
    \returns the matrix that was read.
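    An illustrative sketch (assuming a 0-based, value-carrying CSR file
    named "matrix.csr"):
    \code
    gk_csr_t *mat = gk_csr_Read("matrix.csr", GK_CSR_FMT_CSR, 1, 0);
    gk_csr_Free(&mat);
    \endcode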
*/ /**************************************************************************/ gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering) { ssize_t i, k, l; size_t nfields, nrows, ncols, nnz, fmt, ncon; size_t lnlen; ssize_t *rowptr; int *rowind, ival; float *rowval=NULL, fval; int readsizes, readwgts; char *line=NULL, *head, *tail, fmtstr[256]; FILE *fpin; gk_csr_t *mat=NULL; if (!gk_fexists(filename)) gk_errexit(SIGERR, "File %s does not exist!\n", filename); if (format == GK_CSR_FMT_BINROW) { mat = gk_csr_Create(); fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin"); if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename); if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename); mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr"); if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1) gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename); mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind"); if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows]) gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename); if (readvals == 1) { mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval"); if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows]) gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename); } gk_fclose(fpin); return mat; } if (format == GK_CSR_FMT_BINCOL) { mat = gk_csr_Create(); fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin"); if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename); if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename); mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr"); if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1) gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename); mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind"); if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols]) gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename); if (readvals) { mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval"); if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols]) gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename); } gk_fclose(fpin); return mat; } if (format == GK_CSR_FMT_CLUTO) { fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin"); do { if (gk_getline(&line, &lnlen, fpin) <= 0) gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename); } while (line[0] == '%'); if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3) gk_errexit(SIGERR, "Header line must contain 3 integers.\n"); readsizes = 0; readwgts = 0; readvals = 1; numbering = 1; } else if (format == GK_CSR_FMT_METIS) { fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin"); do { if (gk_getline(&line, &lnlen, fpin) <= 0) gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename); } while (line[0] == '%'); fmt = ncon = 0; nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon); if (nfields < 2) gk_errexit(SIGERR, "Header line must contain 
at least 2 integers (#vtxs and #edges).\n"); ncols = nrows; nnz *= 2; if (fmt > 111) gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt); sprintf(fmtstr, "%03zu", fmt%1000); readsizes = (fmtstr[0] == '1'); readwgts = (fmtstr[1] == '1'); readvals = (fmtstr[2] == '1'); numbering = 1; ncon = (ncon == 0 ? 1 : ncon); } else { readsizes = 0; readwgts = 0; gk_getfilestats(filename, &nrows, &nnz, NULL, NULL); if (readvals == 1 && nnz%2 == 1) gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals); if (readvals == 1) nnz = nnz/2; fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin"); } mat = gk_csr_Create(); mat->nrows = nrows; rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr"); rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind"); if (readvals != 2) rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval"); if (readsizes) mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes"); if (readwgts) mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts"); /*---------------------------------------------------------------------- * Read the sparse matrix file *---------------------------------------------------------------------*/ numbering = (numbering ? - 1 : 0); for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) { do { if (gk_getline(&line, &lnlen, fpin) == -1) gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i); } while (line[0] == '%'); head = line; tail = NULL; /* Read vertex sizes */ if (readsizes) { #ifdef __MSC__ mat->rsizes[i] = (float)strtod(head, &tail); #else mat->rsizes[i] = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1); if (mat->rsizes[i] < 0) errexit("The size for vertex %zd must be >= 0\n", i+1); head = tail; } /* Read vertex weights */ if (readwgts) { for (l=0; l<ncon; l++) { #ifdef __MSC__ mat->rwgts[i*ncon+l] = (float)strtod(head, &tail); #else mat->rwgts[i*ncon+l] = strtof(head, &tail); #endif if (tail == head) errexit("The line for vertex %zd does not have enough weights " "for the %d constraints.\n", i+1, ncon); if (mat->rwgts[i*ncon+l] < 0) errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l); head = tail; } } /* Read the rest of the row */ while (1) { ival = (int)strtol(head, &tail, 0); if (tail == head) break; head = tail; if ((rowind[k] = ival + numbering) < 0) gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i); ncols = gk_max(rowind[k], ncols); if (readvals == 1) { #ifdef __MSC__ fval = (float)strtod(head, &tail); #else fval = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k); head = tail; rowval[k] = fval; } k++; } rowptr[i+1] = k; } if (format == GK_CSR_FMT_METIS) { ASSERT(ncols+1 == mat->nrows); mat->ncols = mat->nrows; } else { mat->ncols = ncols+1; } if (k != nnz) gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in " "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k); gk_fclose(fpin); gk_free((void **)&line, LTERM); return mat; } /**************************************************************************/ /*! Writes the row-based structure of a matrix into a file. \param mat is the matrix to be written, \param filename is the name of the output file. \param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL. 
    \param writevals is either 1 or 0 indicating if the values will be
           written or not. This is only applicable when GK_CSR_FMT_CSR
           is used.
    \param numbering is either 1 or 0 indicating if the internal 0-based
           numbering will be shifted by one or not during output. This is
           only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  FILE *fpout;

  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

    gk_fclose(fpout);
    return;
  }

  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

    gk_fclose(fpout);
    return;
  }

  if (filename)
    fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
  else
    fpout = stdout;

  if (format == GK_CSR_FMT_CLUTO) {
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  for (i=0; i<mat->nrows; i++) {
    for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
      fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
      if (writevals)
        fprintf(fpout, " %f", mat->rowval[j]);
    }
    fprintf(fpout, "\n");
  }
  if (filename)
    gk_fclose(fpout);
}


/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning takes place by
    analyzing the row structure of the matrix; rows/columns are removed,
    but the pruning does not affect the numbering of the remaining
    rows/columns.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param minf is the minimum number of rows (columns) that a column (row)
           must be present in, in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row)
           may be present in, in order to be kept.
    \returns the pruned matrix consisting only of its row-based structure.
           The input matrix is not modified.
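    An illustrative sketch: keep only the columns that appear in between
    5 and 500 rows:
    \code
    gk_csr_t *pmat = gk_csr_Prune(mat, GK_CSR_COL, 5, 500);
    \endcode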
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
  ssize_t i, j, nnz;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind, *collen;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");

  switch (what) {
    case GK_CSR_COL:
      collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          ASSERT(rowind[j] < ncols);
          collen[rowind[j]]++;
        }
      }
      for (i=0; i<ncols; i++)
        collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (collen[rowind[j]]) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&collen, LTERM);
      break;

    case GK_CSR_ROW:
      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the highest weight entries whose
    sum accounts for a certain fraction of the overall weight of the
    row/column.
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param norm indicates the norm that will be used to aggregate the
           weights and possible values are 1 or 2,
    \param fraction is the fraction of the overall norm that will be
           retained by the kept entries.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
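    An illustrative sketch: keep each row's heaviest entries until they
    account for 90% of the row's aggregated (norm=2) weight:
    \code
    gk_csr_t *fmat = gk_csr_LowFilter(mat, GK_CSR_ROW, 2, 0.9);
    \endcode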
*/ /**************************************************************************/ gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction) { ssize_t i, j, nnz; int nrows, ncols, ncand, maxlen=0; ssize_t *rowptr, *colptr, *nrowptr; int *rowind, *colind, *nrowind; float *rowval, *colval, *nrowval, rsum, tsum; gk_csr_t *nmat; gk_fkv_t *cand; nmat = gk_csr_Create(); nrows = nmat->nrows = mat->nrows; ncols = nmat->ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; colptr = mat->colptr; colind = mat->colind; colval = mat->colval; nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval"); switch (what) { case GK_CSR_COL: if (mat->colptr == NULL) gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n"); gk_zcopy(nrows+1, rowptr, nrowptr); for (i=0; i<ncols; i++) maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]); #pragma omp parallel private(i, j, ncand, rsum, tsum, cand) { cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand"); #pragma omp for schedule(static) for (i=0; i<ncols; i++) { for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) { cand[ncand].val = colind[j]; cand[ncand].key = colval[j]; tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]); } gk_fkvsortd(ncand, cand); for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) { rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key); nrowind[nrowptr[cand[j].val]] = i; nrowval[nrowptr[cand[j].val]] = cand[j].key; nrowptr[cand[j].val]++; } } gk_free((void **)&cand, LTERM); } /* compact the nrowind/nrowval */ for (nnz=0, i=0; i<nrows; i++) { for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) { nrowind[nnz] = nrowind[j]; nrowval[nnz] = nrowval[j]; } nrowptr[i] = nnz; } SHIFTCSR(i, nrows, nrowptr); break; case GK_CSR_ROW: if (mat->rowptr == NULL) gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n"); for (i=0; i<nrows; i++) maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]); #pragma omp parallel private(i, j, ncand, rsum, tsum, cand) { cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand"); #pragma omp for schedule(static) for (i=0; i<nrows; i++) { for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) { cand[ncand].val = rowind[j]; cand[ncand].key = rowval[j]; tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]); } gk_fkvsortd(ncand, cand); for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) { rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key); nrowind[rowptr[i]+j] = cand[j].val; nrowval[rowptr[i]+j] = cand[j].key; } nrowptr[i+1] = rowptr[i]+j; } gk_free((void **)&cand, LTERM); } /* compact nrowind/nrowval */ nrowptr[0] = nnz = 0; for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) { nrowind[nnz] = nrowind[j]; nrowval[nnz] = nrowval[j]; } nrowptr[i+1] = nnz; } break; default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown prunning type of %d\n", what); return NULL; } return nmat; } /*************************************************************************/ /*! Eliminates certain entries from the rows/columns of the matrix. The filtering takes place by keeping only the highest weight top-K entries along each row/column and those entries whose weight is greater than a specified value. 
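    An illustrative sketch: keep the 10 strongest entries of each row plus
    any further entry with weight of at least 0.5:
    \code
    gk_csr_t *fmat = gk_csr_TopKPlusFilter(mat, GK_CSR_ROW, 10, 0.5);
    \endcode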
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param topk is the number of the highest weight entries to keep.
    \param keepval is the weight above which an entry will be kept. This is
           used to select additional entries past the first topk.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");

      gk_zcopy(nrows+1, rowptr, nrowptr);
      for (i=0; i<ncols; i++) {
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
    filtering takes place by keeping only the terms whose contribution to
    the total length of the document is greater than a user-supplied
    multiple over the average. This routine assumes that the vectors are
    normalized to be unit length.
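    An illustrative sketch: with unit 1-norm rows, keep the entries weighing
    more than twice the row average:
    \code
    gk_csr_t *fmat = gk_csr_ZScoreFilter(mat, GK_CSR_ROW, 2.0);
    \endcode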
    \param mat the matrix to be pruned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be pruned,
    \param zscore is the multiplicative factor over the average contribution
           to the length of the document.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
  ssize_t i, j, nnz;
  int nrows;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  float *rowval, *nrowval, avgwgt;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      gk_errexit(SIGERR, "This has not been implemented yet.\n");
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        avgwgt = zscore/(rowptr[i+1]-rowptr[i]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] > avgwgt) {
            nrowind[nnz] = rowind[j];
            nrowval[nnz] = rowval[j];
            nnz++;
          }
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
    As a result of the compaction, the column numbers are renumbered.
    The compaction operation is done in place and only affects the
    row-based representation of the matrix. The new columns are ordered
    in decreasing frequency.
    \param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
  ssize_t i;
  int nrows, ncols, nncols;
  ssize_t *rowptr;
  int *rowind, *colmap;
  gk_ikv_t *clens;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");

  clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");
  for (i=0; i<ncols; i++) {
    clens[i].key = 0;
    clens[i].val = i;
  }

  for (i=0; i<rowptr[nrows]; i++)
    clens[rowind[i]].key++;
  gk_ikvsortd(ncols, clens);

  for (nncols=0, i=0; i<ncols; i++) {
    if (clens[i].key > 0)
      colmap[clens[i].val] = nncols++;
    else
      break;
  }

  for (i=0; i<rowptr[nrows]; i++)
    rowind[i] = colmap[rowind[i]];

  mat->ncols = nncols;

  gk_free((void **)&colmap, &clens, LTERM);
}


/*************************************************************************/
/*! Sorts the indices in increasing order
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
           indices to sort.
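    For example, sorting the column indices within each row:
    \code
    gk_csr_SortIndices(mat, GK_CSR_ROW);
    \endcode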
*/ /**************************************************************************/ void gk_csr_SortIndices(gk_csr_t *mat, int what) { int n, nn=0; ssize_t *ptr; int *ind; float *val; switch (what) { case GK_CSR_ROW: if (!mat->rowptr) gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n"); n = mat->nrows; ptr = mat->rowptr; ind = mat->rowind; val = mat->rowval; break; case GK_CSR_COL: if (!mat->colptr) gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n"); n = mat->ncols; ptr = mat->colptr; ind = mat->colind; val = mat->colval; break; default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; } #pragma omp parallel if (n > 100) { ssize_t i, j, k; gk_ikv_t *cand; float *tval; #pragma omp single for (i=0; i<n; i++) nn = gk_max(nn, ptr[i+1]-ptr[i]); cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand"); tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval"); #pragma omp for schedule(static) for (i=0; i<n; i++) { for (k=0, j=ptr[i]; j<ptr[i+1]; j++) { if (j > ptr[i] && ind[j] < ind[j-1]) k = 1; /* an inversion */ cand[j-ptr[i]].val = j-ptr[i]; cand[j-ptr[i]].key = ind[j]; tval[j-ptr[i]] = val[j]; } if (k) { gk_ikvsorti(ptr[i+1]-ptr[i], cand); for (j=ptr[i]; j<ptr[i+1]; j++) { ind[j] = cand[j-ptr[i]].key; val[j] = tval[cand[j-ptr[i]].val]; } } } gk_free((void **)&cand, &tval, LTERM); } } /*************************************************************************/ /*! Creates a row/column index from the column/row data. \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index will be created. */ /**************************************************************************/ void gk_csr_CreateIndex(gk_csr_t *mat, int what) { /* 'f' stands for forward, 'r' stands for reverse */ ssize_t i, j, k, nf, nr; ssize_t *fptr, *rptr; int *find, *rind; float *fval, *rval; switch (what) { case GK_CSR_COL: nf = mat->nrows; fptr = mat->rowptr; find = mat->rowind; fval = mat->rowval; if (mat->colptr) gk_free((void **)&mat->colptr, LTERM); if (mat->colind) gk_free((void **)&mat->colind, LTERM); if (mat->colval) gk_free((void **)&mat->colval, LTERM); nr = mat->ncols; rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr"); rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind"); rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL); break; case GK_CSR_ROW: nf = mat->ncols; fptr = mat->colptr; find = mat->colind; fval = mat->colval; if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM); if (mat->rowind) gk_free((void **)&mat->rowind, LTERM); if (mat->rowval) gk_free((void **)&mat->rowval, LTERM); nr = mat->nrows; rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr"); rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind"); rval = mat->rowval = (fval ? 
gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL); break; default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; } for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rptr[find[j]]++; } MAKECSR(i, nr, rptr); if (rptr[nr] > 6*nr) { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rind[rptr[find[j]]++] = i; } SHIFTCSR(i, nr, rptr); if (fval) { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rval[rptr[find[j]]++] = fval[j]; } SHIFTCSR(i, nr, rptr); } } else { if (fval) { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) { k = find[j]; rind[rptr[k]] = i; rval[rptr[k]++] = fval[j]; } } } else { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rind[rptr[find[j]]++] = i; } } SHIFTCSR(i, nr, rptr); } } /*************************************************************************/ /*! Normalizes the rows/columns of the matrix to be unit length. \param mat the matrix itself, \param what indicates what will be normalized and is obtained by specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL. \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm */ /**************************************************************************/ void gk_csr_Normalize(gk_csr_t *mat, int what, int norm) { ssize_t i, j; int n; ssize_t *ptr; float *val, sum; if (what & GK_CSR_ROW && mat->rowval) { n = mat->nrows; ptr = mat->rowptr; val = mat->rowval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j, sum) schedule(static) for (i = 0; i < n; i++) { for (sum = 0.0, j = ptr[i]; j < ptr[i + 1]; j++) { if (norm == 2) sum += val[j] * val[j]; else if (norm == 1) sum += val[j]; /* assume val[j] > 0 */ } if (sum > 0) { if (norm == 2) sum = 1.0 / sqrt(sum); else if (norm == 1) sum = 1.0 / sum; for (j = ptr[i]; j < ptr[i + 1]; j++) val[j] *= sum; } } } } if (what & GK_CSR_COL && mat->colval) { n = mat->ncols; ptr = mat->colptr; val = mat->colval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j, sum) schedule(static) for (i = 0; i < n; i++) { for (sum = 0.0, j = ptr[i]; j < ptr[i + 1]; j++) if (norm == 2) sum += val[j] * val[j]; else if (norm == 1) sum += val[j]; if (sum > 0) { if (norm == 2) sum = 1.0 / sqrt(sum); else if (norm == 1) sum = 1.0 / sum; for (j = ptr[i]; j < ptr[i + 1]; j++) val[j] *= sum; } } } } } /*************************************************************************/ /*! Applies different row scaling methods. \param mat the matrix itself, \param type indicates the type of row scaling. Possible values are: GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2. */ /**************************************************************************/ void gk_csr_Scale(gk_csr_t *mat, int type) { ssize_t i, j; int nrows, ncols, nnzcols, bgfreq; ssize_t *rowptr; int *rowind, *collen; float *rowval, *cscale, maxtf; nrows = mat->nrows; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; switch (type) { case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j, maxtf) schedule(static) for (i=0; i<nrows; i++) { maxtf = fabs(rowval[rowptr[i]]); for (j=rowptr[i]; j<rowptr[i+1]; j++) maxtf = (maxtf < fabs(rowval[j]) ? 
                     fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        double logscale = 1.0/log(2.0);

        #pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
        }
#ifdef XXX
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
              //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ?
                       log(1.0*nrows/collen[i]) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
        printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}


/*************************************************************************/
/*! Computes the sums of the rows/columns.
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which sums
           to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rsums)
        gk_free((void **)&mat->rsums, LTERM);

      sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->csums)
        gk_free((void **)&mat->csums, LTERM);

      sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    default:
      gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
      return;
  }

  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the squares of the norms of the rows/columns.
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms)
        gk_free((void **)&mat->rnorms, LTERM);

      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms)
        gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}


/*************************************************************************/
/*!
    Computes the similarity between two rows/columns.
    \param mat the matrix itself. The routine assumes that the indices
           are sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
           objects between which the similarity will be computed,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN.
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exist.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exist.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* merge the two sorted index lists; the tails of both lists must also
         be accumulated into the norms, hence the ||. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1], val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1], val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}


/*************************************************************************/
/*! Finds the nsim most similar rows (neighbors) to the query using the
    specified similarity measure.
    \param mat the matrix itself,
    \param nqterms is the number of columns in the query,
    \param qind is the list of query columns,
    \param qval is the list of corresponding query weights,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN,
    \param nsim is the maximum number of requested most similar rows.
           If -1 is provided, then everything is returned unsorted.
    \param minsim is the minimum similarity of the requested most similar rows,
    \param hits is the result set. This array should be at least of length nsim.
    \param i_marker is an array of size equal to the number of rows whose
           values are initialized to -1. If NULL is provided, then this array
           is allocated and freed internally.
    \param i_cand is an array of size equal to the number of rows. If NULL is
           provided, then this array is allocated and freed internally.
    \returns the number of identified most similar rows, which can be smaller
           than the requested nsim when there are not sufficiently many
           neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind, float *qval,
        int simtype, int nsim, float minsim, gk_fkv_t *hits, int *i_marker,
        gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >=
        minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
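
/*************************************************************************/
/* Usage sketch (an illustrative addition, not part of the original
   library): shows how the routines above are typically chained for a
   top-k query. It assumes a populated gk_csr_t matrix (e.g. from
   gk_csr_Read()); the query terms, weights, and k below are made-up
   values. Rows are normalized to unit 2-norm first, so the GK_CSR_COS
   scores accumulated by gk_csr_GetSimilarRows() rank rows by cosine
   similarity to the query. */
#ifdef GK_CSR_USAGE_EXAMPLE
static void gk_csr_example_topk(gk_csr_t *mat)
{
  int i, nhits;
  int qind[2]   = {3, 17};    /* hypothetical query column ids */
  float qval[2] = {0.5, 0.5}; /* corresponding query weights */
  gk_fkv_t hits[10];

  gk_csr_Normalize(mat, GK_CSR_ROW, 2);  /* unit-length rows */
  gk_csr_CreateIndex(mat, GK_CSR_COL);   /* the query walks the column index */

  nhits = gk_csr_GetSimilarRows(mat, 2, qind, qval, GK_CSR_COS,
              10, 0.1, hits, NULL, NULL);
  for (i=0; i<nhits; i++)
    printf("row %d: score %.4f\n", hits[i].val, hits[i].key);
}
#endif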
conv3x3x3.h
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//     * Neither the name of the <organization> nor the
//       names of its contributors may be used to endorse or promote products
//       derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef OCTREE_CONV3X3X3_H
#define OCTREE_CONV3X3X3_H

#include "core.h"

#define INV_FILTER_TRUE true
#define INV_FILTER_FALSE false

#define ADD_BIAS_TRUE true
#define ADD_BIAS_FALSE false

/// Get 0-padded data from an octree for given dense subscript indices.
/// @deprecated
///
/// @param grid_in
/// @param n
/// @param d
/// @param h
/// @param w
/// @param f
/// @return 0 if the subscript indices are out of bounds, otherwise the data.
OCTREE_FUNCTION
inline ot_data_t conv3x3x3_get_padded_data(const octree* grid_in, int n, int d, int h, int w, int f) {
  const bool in_vol = d >= 0 && h >= 0 && w >= 0 &&
      d < 8 * grid_in->grid_depth && h < 8 * grid_in->grid_height && w < 8 * grid_in->grid_width;

  if(in_vol) {
    const int grid_idx = octree_grid_idx(grid_in, n, d / 8, h / 8, w / 8);
    // printf("%d,%d,%d => %d, %d\n", d, h, w, grid_idx, bit_idx);
    const ot_tree_t* tree = octree_get_tree(grid_in, grid_idx);
    const int bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
    /* ot_data_t* data_in = grid_in->data_ptrs[grid_idx]; */
    ot_data_t* data_in = octree_get_data(grid_in, grid_idx);
    const int data_idx = tree_data_idx(tree, bit_idx, grid_in->feature_size);
    return (data_in + data_idx)[f];
  }
  else {
    return 0;
  }
}

/// Applies a 3x3x3 convolution at a given position in the grid grid_in.
/// @deprecated
///
/// @tparam inv_filter if filter weights should be transposed
/// @param n
/// @param ds
/// @param hs
/// @param ws
/// @param grid_in
/// @param weights convolution weights.
/// @param channels_out number of output channels after convolution.
/// @param factor scaling factor of weights.
/// @param out convolution result
template <bool inv_filter>
OCTREE_FUNCTION
inline void conv3x3x3_point(int n, int ds, int hs, int ws, const octree* grid_in,
    const ot_data_t* weights, int channels_out, float factor, ot_data_t* out) {

  const int channels_in = grid_in->feature_size;

  for(int kd = 0; kd < 3; ++kd) {
    for(int kh = 0; kh < 3; ++kh) {
      for(int kw = 0; kw < 3; ++kw) {
        int d, h, w;
        d = ds - 1 + kd;
        h = hs - 1 + kh;
        w = ws - 1 + kw;

        int k_idx;
        if(inv_filter) {
          k_idx = ((2 - kd) * 3 + (2 - kh)) * 3 + (2 - kw);
        }
        else {
          k_idx = (kd * 3 + kh) * 3 + kw;
        }

        for(int ci = 0; ci < channels_in; ++ci) {
          ot_data_t in = factor * conv3x3x3_get_padded_data(grid_in, n, d, h, w, ci);
          for(int co = 0; co < channels_out; ++co) {
            int weights_idx;
            if(inv_filter) {
              weights_idx = (ci * channels_out + co) * 3 * 3 * 3 + k_idx;
            }
            else {
              weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx;
            }
            out[co] += weights[weights_idx] * in;
          }
        }
      }
    }
  }
}

/// Performs a 3x3x3 convolution along an octree cell surface.
/// @deprecated
///
/// @tparam inv_filter if filter weights should be transposed.
/// @tparam add_bias if bias term should be added after convolution.
/// @param bd1 start index of surface in shallow octree.
/// @param bd2 end index of surface in shallow octree.
/// @param bh1 start index of surface in shallow octree.
/// @param bh2 end index of surface in shallow octree.
/// @param bw1 start index of surface in shallow octree.
/// @param bw2 end index of surface in shallow octree.
/// @param n batch index.
/// @param ds subscript index of tensor corresponding to shallow octree to locate surface.
/// @param hs subscript index of tensor corresponding to shallow octree to locate surface.
/// @param ws subscript index of tensor corresponding to shallow octree to locate surface.
/// @param grid_in
/// @param weights convolution weights.
/// @param bias bias value.
/// @param channels_out number of output channels after conv.
/// @param factor scaling factor for convolution weights.
/// @param out convolution result
template <bool inv_filter, bool add_bias>
OCTREE_FUNCTION
inline void conv3x3x3_border(const int bd1, const int bd2, const int bh1, const int bh2,
    const int bw1, const int bw2, const int n, const int ds, const int hs, const int ws,
    const octree* grid_in, const ot_data_t* weights, const ot_data_t* bias,
    int channels_out, float factor, ot_data_t* out) {

  // weights \in R^{channels_out \times channels_in \times kd=3 \times kh=3 \times kw=3}
  // out \in R^{channels_out}

  int d, h, w;

  // Be careful with the start and end indices, so that every border point is
  // convolved only once.
  d = ds + bd1;
  for(h = hs + bh1; h < hs + bh2; ++h) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out);
    }
  }
  d = ds + bd2 - 1;
  for(h = hs + bh1; h < hs + bh2; ++h) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out);
    }
  }

  h = hs + bh1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out);
    }
  }
  h = hs + bh2 - 1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out);
    }
  }

  w = ws + bw1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) {
      conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out);
    }
  }
  w = ws + bw2 - 1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) {
      conv3x3x3_point<inv_filter>(n, d, h, w, grid_in, weights, channels_out, factor, out);
    }
  }

  if(add_bias) {
    ot_data_t bfactor = factor * (2 * (bh2 - bh1) * (bw2 - bw1) +
        2 * (bd2 - bd1 - 2) * (bw2 - bw1) +
        2 * (bd2 - bd1 - 2) * (bh2 - bh1 - 2));
    for(int co = 0; co < channels_out; ++co) {
      out[co] += bfactor * bias[co];
    }
  }
}

/// Performs the convolution on the constant part of the octree cell.
/// @deprecated
///
/// @tparam inv_filter if filter weights should be transposed.
/// @tparam add_bias if bias term should be added after convolution.
/// @param in_data constant input cell data.
/// @param weights convolution weights.
/// @param bias bias term.
/// @param channels_in number of input channels for convolution.
/// @param channels_out number of output channels for convolution.
/// @param factor scaling factor for conv weights.
/// @param out convolution result
template <bool inv_filter, bool add_bias>
OCTREE_FUNCTION
inline void conv3x3x3_const(const ot_data_t* in_data, const ot_data_t* weights,
    const ot_data_t* bias, int channels_in, int channels_out, float factor, ot_data_t* out) {

  for(int kd = 0; kd < 3; ++kd) {
    for(int kh = 0; kh < 3; ++kh) {
      for(int kw = 0; kw < 3; ++kw) {
        int k_idx = (kd * 3 + kh) * 3 + kw;

        for(int ci = 0; ci < channels_in; ++ci) {
          ot_data_t in = factor * in_data[ci];
          for(int co = 0; co < channels_out; ++co) {
            int weights_idx;
            if(inv_filter) {
              weights_idx = (ci * channels_out + co) * 3 * 3 * 3 + k_idx;
            }
            else {
              weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx;
            }
            out[co] += weights[weights_idx] * in;
          }
        }
      }
    }
  }

  if(add_bias) {
    for(int co = 0; co < channels_out; ++co) {
      out[co] += factor * bias[co];
      // printf("  b %f * %f => %f\n", factor, bias[co], out[co]);
    }
  }
}

/// Backward function for the convolution of a single point in the grid-octree
/// data structure.
/// Computes the gradient w.r.t. the weights.
/// @deprecated
///
/// @param n
/// @param ds subscript index of corresponding tensor.
/// @param hs subscript index of corresponding tensor.
/// @param ws subscript index of corresponding tensor.
/// @param grid_in input to the convolution operation.
/// @param grad_out gradient of the successive operation.
/// @param channels_out number of output channels of the convolution operation.
/// @param scale scale factor for the gradient update.
/// @param grad_weights resulting gradient of convolution weights.
OCTREE_FUNCTION
inline void conv3x3x3_point_wbwd(int n, int ds, int hs, int ws, const octree* grid_in,
    const ot_data_t* grad_out, int channels_out, ot_data_t scale, ot_data_t* grad_weights) {

  const int channels_in = grid_in->feature_size;

  for(int kd = 0; kd < 3; ++kd) {
    for(int kh = 0; kh < 3; ++kh) {
      for(int kw = 0; kw < 3; ++kw) {
        int d, h, w;
        d = ds - 1 + kd;
        h = hs - 1 + kh;
        w = ws - 1 + kw;

        int k_idx = (kd * 3 + kh) * 3 + kw;

        for(int ci = 0; ci < channels_in; ++ci) {
          ot_data_t in = conv3x3x3_get_padded_data(grid_in, n, d, h, w, ci);
          for(int co = 0; co < channels_out; ++co) {
            int weights_idx;
            weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx;
            ot_data_t val = scale * grad_out[co] * in;
#if defined(__CUDA_ARCH__)
            atomicAdd(grad_weights + weights_idx, val);
#elif defined(_OPENMP)
            #pragma omp atomic
            grad_weights[weights_idx] += val;
#else
            grad_weights[weights_idx] += val;
#endif
          }
        }
      }
    }
  }
}

/// Backward function for the convolution along an octree cell surface.
/// Computes the gradient w.r.t. the weights.
/// @deprecated
///
/// @param bd1 start index of surface in shallow octree.
/// @param bd2 end index of surface in shallow octree.
/// @param bh1 start index of surface in shallow octree.
/// @param bh2 end index of surface in shallow octree.
/// @param bw1 start index of surface in shallow octree.
/// @param bw2 end index of surface in shallow octree.
/// @param n batch index.
/// @param ds subscript index of tensor corresponding to shallow octree to locate surface.
/// @param hs subscript index of tensor corresponding to shallow octree to locate surface.
/// @param ws subscript index of tensor corresponding to shallow octree to locate surface.
/// @param grid_in octree input to the convolution.
/// @param grad_out gradient of the successive operation.
/// @param channels_out number of output channels of the convolution.
/// @param scale scale factor of the gradient update.
/// @param grad_weights resulting gradient weights.
/// @param grad_bias resulting gradient bias.
OCTREE_FUNCTION
inline void conv3x3x3_border_wbwd(const int bd1, const int bd2, const int bh1, const int bh2,
    const int bw1, const int bw2, const int n, const int ds, const int hs, const int ws,
    const octree* grid_in, const ot_data_t* grad_out, int channels_out, ot_data_t scale,
    ot_data_t* grad_weights, ot_data_t* grad_bias) {

  int d, h, w;

  // Be careful with the start and end indices, so that every border point is
  // convolved only once.
  d = ds + bd1;
  for(h = hs + bh1; h < hs + bh2; ++h) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  d = ds + bd2 - 1;
  for(h = hs + bh1; h < hs + bh2; ++h) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }

  h = hs + bh1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  h = hs + bh2 - 1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(w = ws + bw1; w < ws + bw2; ++w) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }

  w = ws + bw1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }
  w = ws + bw2 - 1;
  for(d = ds + bd1 + 1; d < ds + bd2 - 1; ++d) {
    for(h = hs + bh1 + 1; h < hs + bh2 - 1; ++h) {
      conv3x3x3_point_wbwd(n, d, h, w, grid_in, grad_out, channels_out, scale, grad_weights);
    }
  }

  ot_data_t factor = scale * (2 * (bh2 - bh1) * (bw2 - bw1) +
      2 * (bd2 - bd1 - 2) * (bw2 - bw1) +
      2 * (bd2 - bd1 - 2) * (bh2 - bh1 - 2));
  for(int co = 0; co < channels_out; ++co) {
    ot_data_t val = factor * grad_out[co];
#if defined(__CUDA_ARCH__)
    atomicAdd(grad_bias + co, val);
#elif defined(_OPENMP)
    #pragma omp atomic
    grad_bias[co] += val;
#else
    grad_bias[co] += val;
#endif
  }
}

/// Backward function for the constant convolution.
/// @deprecated
///
/// @param in_data constant input cell data.
/// @param grad_out the gradient of the successive operation.
/// @param channels_in the number of input channels.
/// @param channels_out number of output channels for convolution.
/// @param factor scale factor of the gradient update.
/// @param grad_weights resulting gradient weights.
/// @param grad_bias resulting gradient bias.
OCTREE_FUNCTION
inline void conv3x3x3_const_wbwd(const ot_data_t* in_data, const ot_data_t* grad_out,
    int channels_in, int channels_out, float factor, ot_data_t* grad_weights,
    ot_data_t* grad_bias) {

  for(int kd = 0; kd < 3; ++kd) {
    for(int kh = 0; kh < 3; ++kh) {
      for(int kw = 0; kw < 3; ++kw) {
        int k_idx = (kd * 3 + kh) * 3 + kw;

        for(int ci = 0; ci < channels_in; ++ci) {
          ot_data_t in = factor * in_data[ci];
          for(int co = 0; co < channels_out; ++co) {
            int weights_idx;
            weights_idx = (co * channels_in + ci) * 3 * 3 * 3 + k_idx;
            ot_data_t val = grad_out[co] * in;
#if defined(__CUDA_ARCH__)
            atomicAdd(grad_weights + weights_idx, val);
#elif defined(_OPENMP)
            #pragma omp atomic
            grad_weights[weights_idx] += val;
#else
            grad_weights[weights_idx] += val;
#endif
          }
        }
      }
    }
  }

  for(int co = 0; co < channels_out; ++co) {
    ot_data_t val = factor * grad_out[co];
#if defined(__CUDA_ARCH__)
    atomicAdd(grad_bias + co, val);
#elif defined(_OPENMP)
    #pragma omp atomic
    grad_bias[co] += val;
#else
    grad_bias[co] += val;
#endif
  }
}

#endif
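
/* Usage sketch (an illustrative addition, not part of the original header):
   accumulates the summed 3x3x3 convolution response over all positions of one
   coarsest-level leaf cell by combining the two deprecated kernels above. For
   an 8x8x8 cell that stores a single constant value, the 6x6x6 interior
   positions see only that constant (handled by conv3x3x3_const with factor
   216), while the remaining 296 surface positions are convolved point-wise
   (conv3x3x3_border); note 216 + 296 = 512 = 8^3, so with ADD_BIAS_TRUE in
   both calls every position contributes exactly one bias term. `out` must be
   zero-initialized and of size channels_out; the grid position and batch
   index below are made-up values. */
#ifdef OCTREE_CONV3X3X3_EXAMPLE
inline void conv3x3x3_cell_example(const octree* grid, const ot_data_t* weights,
    const ot_data_t* bias, int channels_out, ot_data_t* out) {
  // constant data of the shallow-octree cell at grid position (0,0,0), batch 0
  const int grid_idx = octree_grid_idx(grid, 0, 0, 0, 0);
  const ot_data_t* cell_data = octree_get_data(grid, grid_idx);

  // interior: 6*6*6 positions whose full 3x3x3 neighborhood is the constant
  conv3x3x3_const<INV_FILTER_FALSE, ADD_BIAS_TRUE>(
      cell_data, weights, bias, grid->feature_size, channels_out, 6*6*6, out);
  // surface shell of the 8^3 cell, convolved point by point
  conv3x3x3_border<INV_FILTER_FALSE, ADD_BIAS_TRUE>(
      0, 8, 0, 8, 0, 8, 0, 0, 0, 0, grid, weights, bias, channels_out, 1.f, out);
}
#endif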