diff --git a/llava_next/include/python3.10/bltinmodule.h b/llava_next/include/python3.10/bltinmodule.h new file mode 100644 index 0000000000000000000000000000000000000000..868c9e6443bfc1d1d48fb0806af1bf21490fc44c --- /dev/null +++ b/llava_next/include/python3.10/bltinmodule.h @@ -0,0 +1,14 @@ +#ifndef Py_BLTINMODULE_H +#define Py_BLTINMODULE_H +#ifdef __cplusplus +extern "C" { +#endif + +PyAPI_DATA(PyTypeObject) PyFilter_Type; +PyAPI_DATA(PyTypeObject) PyMap_Type; +PyAPI_DATA(PyTypeObject) PyZip_Type; + +#ifdef __cplusplus +} +#endif +#endif /* !Py_BLTINMODULE_H */ diff --git a/llava_next/include/python3.10/fileobject.h b/llava_next/include/python3.10/fileobject.h new file mode 100644 index 0000000000000000000000000000000000000000..6ec2994aa859b6fbcd71d12fd283733ea1d049ab --- /dev/null +++ b/llava_next/include/python3.10/fileobject.h @@ -0,0 +1,49 @@ +/* File object interface (what's left of it -- see io.py) */ + +#ifndef Py_FILEOBJECT_H +#define Py_FILEOBJECT_H +#ifdef __cplusplus +extern "C" { +#endif + +#define PY_STDIOTEXTMODE "b" + +PyAPI_FUNC(PyObject *) PyFile_FromFd(int, const char *, const char *, int, + const char *, const char *, + const char *, int); +PyAPI_FUNC(PyObject *) PyFile_GetLine(PyObject *, int); +PyAPI_FUNC(int) PyFile_WriteObject(PyObject *, PyObject *, int); +PyAPI_FUNC(int) PyFile_WriteString(const char *, PyObject *); +PyAPI_FUNC(int) PyObject_AsFileDescriptor(PyObject *); + +/* The default encoding used by the platform file system APIs + If non-NULL, this is different than the default encoding for strings +*/ +PyAPI_DATA(const char *) Py_FileSystemDefaultEncoding; +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +PyAPI_DATA(const char *) Py_FileSystemDefaultEncodeErrors; +#endif +PyAPI_DATA(int) Py_HasFileSystemDefaultEncoding; + +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03070000 +PyAPI_DATA(int) Py_UTF8Mode; +#endif + +/* A routine to check if a file descriptor can be select()-ed. 
*/ +#ifdef _MSC_VER + /* On Windows, any socket fd can be select()-ed, no matter how high */ + #define _PyIsSelectable_fd(FD) (1) +#else + #define _PyIsSelectable_fd(FD) ((unsigned int)(FD) < (unsigned int)FD_SETSIZE) +#endif + +#ifndef Py_LIMITED_API +# define Py_CPYTHON_FILEOBJECT_H +# include "cpython/fileobject.h" +# undef Py_CPYTHON_FILEOBJECT_H +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_FILEOBJECT_H */ diff --git a/llava_next/include/python3.10/opcode.h b/llava_next/include/python3.10/opcode.h new file mode 100644 index 0000000000000000000000000000000000000000..52039754bd88ea552614486b9ff1e965706d214a --- /dev/null +++ b/llava_next/include/python3.10/opcode.h @@ -0,0 +1,172 @@ +/* Auto-generated by Tools/scripts/generate_opcode_h.py from Lib/opcode.py */ +#ifndef Py_OPCODE_H +#define Py_OPCODE_H +#ifdef __cplusplus +extern "C" { +#endif + + + /* Instruction opcodes for compiled code */ +#define POP_TOP 1 +#define ROT_TWO 2 +#define ROT_THREE 3 +#define DUP_TOP 4 +#define DUP_TOP_TWO 5 +#define ROT_FOUR 6 +#define NOP 9 +#define UNARY_POSITIVE 10 +#define UNARY_NEGATIVE 11 +#define UNARY_NOT 12 +#define UNARY_INVERT 15 +#define BINARY_MATRIX_MULTIPLY 16 +#define INPLACE_MATRIX_MULTIPLY 17 +#define BINARY_POWER 19 +#define BINARY_MULTIPLY 20 +#define BINARY_MODULO 22 +#define BINARY_ADD 23 +#define BINARY_SUBTRACT 24 +#define BINARY_SUBSCR 25 +#define BINARY_FLOOR_DIVIDE 26 +#define BINARY_TRUE_DIVIDE 27 +#define INPLACE_FLOOR_DIVIDE 28 +#define INPLACE_TRUE_DIVIDE 29 +#define GET_LEN 30 +#define MATCH_MAPPING 31 +#define MATCH_SEQUENCE 32 +#define MATCH_KEYS 33 +#define COPY_DICT_WITHOUT_KEYS 34 +#define WITH_EXCEPT_START 49 +#define GET_AITER 50 +#define GET_ANEXT 51 +#define BEFORE_ASYNC_WITH 52 +#define END_ASYNC_FOR 54 +#define INPLACE_ADD 55 +#define INPLACE_SUBTRACT 56 +#define INPLACE_MULTIPLY 57 +#define INPLACE_MODULO 59 +#define STORE_SUBSCR 60 +#define DELETE_SUBSCR 61 +#define BINARY_LSHIFT 62 +#define BINARY_RSHIFT 63 +#define 
BINARY_AND 64 +#define BINARY_XOR 65 +#define BINARY_OR 66 +#define INPLACE_POWER 67 +#define GET_ITER 68 +#define GET_YIELD_FROM_ITER 69 +#define PRINT_EXPR 70 +#define LOAD_BUILD_CLASS 71 +#define YIELD_FROM 72 +#define GET_AWAITABLE 73 +#define LOAD_ASSERTION_ERROR 74 +#define INPLACE_LSHIFT 75 +#define INPLACE_RSHIFT 76 +#define INPLACE_AND 77 +#define INPLACE_XOR 78 +#define INPLACE_OR 79 +#define LIST_TO_TUPLE 82 +#define RETURN_VALUE 83 +#define IMPORT_STAR 84 +#define SETUP_ANNOTATIONS 85 +#define YIELD_VALUE 86 +#define POP_BLOCK 87 +#define POP_EXCEPT 89 +#define HAVE_ARGUMENT 90 +#define STORE_NAME 90 +#define DELETE_NAME 91 +#define UNPACK_SEQUENCE 92 +#define FOR_ITER 93 +#define UNPACK_EX 94 +#define STORE_ATTR 95 +#define DELETE_ATTR 96 +#define STORE_GLOBAL 97 +#define DELETE_GLOBAL 98 +#define ROT_N 99 +#define LOAD_CONST 100 +#define LOAD_NAME 101 +#define BUILD_TUPLE 102 +#define BUILD_LIST 103 +#define BUILD_SET 104 +#define BUILD_MAP 105 +#define LOAD_ATTR 106 +#define COMPARE_OP 107 +#define IMPORT_NAME 108 +#define IMPORT_FROM 109 +#define JUMP_FORWARD 110 +#define JUMP_IF_FALSE_OR_POP 111 +#define JUMP_IF_TRUE_OR_POP 112 +#define JUMP_ABSOLUTE 113 +#define POP_JUMP_IF_FALSE 114 +#define POP_JUMP_IF_TRUE 115 +#define LOAD_GLOBAL 116 +#define IS_OP 117 +#define CONTAINS_OP 118 +#define RERAISE 119 +#define JUMP_IF_NOT_EXC_MATCH 121 +#define SETUP_FINALLY 122 +#define LOAD_FAST 124 +#define STORE_FAST 125 +#define DELETE_FAST 126 +#define GEN_START 129 +#define RAISE_VARARGS 130 +#define CALL_FUNCTION 131 +#define MAKE_FUNCTION 132 +#define BUILD_SLICE 133 +#define LOAD_CLOSURE 135 +#define LOAD_DEREF 136 +#define STORE_DEREF 137 +#define DELETE_DEREF 138 +#define CALL_FUNCTION_KW 141 +#define CALL_FUNCTION_EX 142 +#define SETUP_WITH 143 +#define EXTENDED_ARG 144 +#define LIST_APPEND 145 +#define SET_ADD 146 +#define MAP_ADD 147 +#define LOAD_CLASSDEREF 148 +#define MATCH_CLASS 152 +#define SETUP_ASYNC_WITH 154 +#define FORMAT_VALUE 155 
+#define BUILD_CONST_KEY_MAP 156 +#define BUILD_STRING 157 +#define LOAD_METHOD 160 +#define CALL_METHOD 161 +#define LIST_EXTEND 162 +#define SET_UPDATE 163 +#define DICT_MERGE 164 +#define DICT_UPDATE 165 +#ifdef NEED_OPCODE_JUMP_TABLES +static uint32_t _PyOpcode_RelativeJump[8] = { + 0U, + 0U, + 536870912U, + 67125248U, + 67141632U, + 0U, + 0U, + 0U, +}; +static uint32_t _PyOpcode_Jump[8] = { + 0U, + 0U, + 536870912U, + 101695488U, + 67141632U, + 0U, + 0U, + 0U, +}; +#endif /* OPCODE_TABLES */ + +/* EXCEPT_HANDLER is a special, implicit block type which is created when + entering an except handler. It is not an opcode but we define it here + as we want it to be available to both frameobject.c and ceval.c, while + remaining private.*/ +#define EXCEPT_HANDLER 257 + +#define HAS_ARG(op) ((op) >= HAVE_ARGUMENT) + +#ifdef __cplusplus +} +#endif +#endif /* !Py_OPCODE_H */ diff --git a/llava_next/include/python3.10/patchlevel.h b/llava_next/include/python3.10/patchlevel.h new file mode 100644 index 0000000000000000000000000000000000000000..161e088a02b55a213f65d7dee095ec58f23da185 --- /dev/null +++ b/llava_next/include/python3.10/patchlevel.h @@ -0,0 +1,35 @@ + +/* Python version identification scheme. + + When the major or minor version changes, the VERSION variable in + configure.ac must also be changed. + + There is also (independent) API version information in modsupport.h. 
+*/ + +/* Values for PY_RELEASE_LEVEL */ +#define PY_RELEASE_LEVEL_ALPHA 0xA +#define PY_RELEASE_LEVEL_BETA 0xB +#define PY_RELEASE_LEVEL_GAMMA 0xC /* For release candidates */ +#define PY_RELEASE_LEVEL_FINAL 0xF /* Serial should be 0 here */ + /* Higher for patch releases */ + +/* Version parsed out into numeric values */ +/*--start constants--*/ +#define PY_MAJOR_VERSION 3 +#define PY_MINOR_VERSION 10 +#define PY_MICRO_VERSION 16 +#define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_FINAL +#define PY_RELEASE_SERIAL 0 + +/* Version as a string */ +#define PY_VERSION "3.10.16" +/*--end constants--*/ + +/* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. + Use this for numeric comparisons, e.g. #if PY_VERSION_HEX >= ... */ +#define PY_VERSION_HEX ((PY_MAJOR_VERSION << 24) | \ + (PY_MINOR_VERSION << 16) | \ + (PY_MICRO_VERSION << 8) | \ + (PY_RELEASE_LEVEL << 4) | \ + (PY_RELEASE_SERIAL << 0)) diff --git a/llava_next/include/python3.10/pylifecycle.h b/llava_next/include/python3.10/pylifecycle.h new file mode 100644 index 0000000000000000000000000000000000000000..2df7fe6e3c83d61e9e7bde66d46661f97431d417 --- /dev/null +++ b/llava_next/include/python3.10/pylifecycle.h @@ -0,0 +1,74 @@ + +/* Interfaces to configure, query, create & destroy the Python runtime */ + +#ifndef Py_PYLIFECYCLE_H +#define Py_PYLIFECYCLE_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Initialization and finalization */ +PyAPI_FUNC(void) Py_Initialize(void); +PyAPI_FUNC(void) Py_InitializeEx(int); +PyAPI_FUNC(void) Py_Finalize(void); +#if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x03060000 +PyAPI_FUNC(int) Py_FinalizeEx(void); +#endif +PyAPI_FUNC(int) Py_IsInitialized(void); + +/* Subinterpreter support */ +PyAPI_FUNC(PyThreadState *) Py_NewInterpreter(void); +PyAPI_FUNC(void) Py_EndInterpreter(PyThreadState *); + + +/* Py_PyAtExit is for the atexit module, Py_AtExit is for low-level + * exit functions. 
+ */ +PyAPI_FUNC(int) Py_AtExit(void (*func)(void)); + +PyAPI_FUNC(void) _Py_NO_RETURN Py_Exit(int); + +/* Bootstrap __main__ (defined in Modules/main.c) */ +PyAPI_FUNC(int) Py_Main(int argc, wchar_t **argv); +PyAPI_FUNC(int) Py_BytesMain(int argc, char **argv); + +/* In pathconfig.c */ +PyAPI_FUNC(void) Py_SetProgramName(const wchar_t *); +PyAPI_FUNC(wchar_t *) Py_GetProgramName(void); + +PyAPI_FUNC(void) Py_SetPythonHome(const wchar_t *); +PyAPI_FUNC(wchar_t *) Py_GetPythonHome(void); + +PyAPI_FUNC(wchar_t *) Py_GetProgramFullPath(void); + +PyAPI_FUNC(wchar_t *) Py_GetPrefix(void); +PyAPI_FUNC(wchar_t *) Py_GetExecPrefix(void); +PyAPI_FUNC(wchar_t *) Py_GetPath(void); +PyAPI_FUNC(void) Py_SetPath(const wchar_t *); +#ifdef MS_WINDOWS +int _Py_CheckPython3(void); +#endif + +/* In their own files */ +PyAPI_FUNC(const char *) Py_GetVersion(void); +PyAPI_FUNC(const char *) Py_GetPlatform(void); +PyAPI_FUNC(const char *) Py_GetCopyright(void); +PyAPI_FUNC(const char *) Py_GetCompiler(void); +PyAPI_FUNC(const char *) Py_GetBuildInfo(void); + +/* Signals */ +typedef void (*PyOS_sighandler_t)(int); +PyAPI_FUNC(PyOS_sighandler_t) PyOS_getsig(int); +PyAPI_FUNC(PyOS_sighandler_t) PyOS_setsig(int, PyOS_sighandler_t); + +#ifndef Py_LIMITED_API +# define Py_CPYTHON_PYLIFECYCLE_H +# include "cpython/pylifecycle.h" +# undef Py_CPYTHON_PYLIFECYCLE_H +#endif + +#ifdef __cplusplus +} +#endif +#endif /* !Py_PYLIFECYCLE_H */ diff --git a/llava_next/include/python3.10/structmember.h b/llava_next/include/python3.10/structmember.h new file mode 100644 index 0000000000000000000000000000000000000000..93b7aff8091848fd8550da9bd6a0843ec82727dd --- /dev/null +++ b/llava_next/include/python3.10/structmember.h @@ -0,0 +1,75 @@ +#ifndef Py_STRUCTMEMBER_H +#define Py_STRUCTMEMBER_H +#ifdef __cplusplus +extern "C" { +#endif + + +/* Interface to map C struct members to Python object attributes */ + +#include /* For offsetof */ + +/* An array of PyMemberDef structures defines the name, type and 
offset + of selected members of a C structure. These can be read by + PyMember_GetOne() and set by PyMember_SetOne() (except if their READONLY + flag is set). The array must be terminated with an entry whose name + pointer is NULL. */ + +typedef struct PyMemberDef { + const char *name; + int type; + Py_ssize_t offset; + int flags; + const char *doc; +} PyMemberDef; + +/* Types */ +#define T_SHORT 0 +#define T_INT 1 +#define T_LONG 2 +#define T_FLOAT 3 +#define T_DOUBLE 4 +#define T_STRING 5 +#define T_OBJECT 6 +/* XXX the ordering here is weird for binary compatibility */ +#define T_CHAR 7 /* 1-character string */ +#define T_BYTE 8 /* 8-bit signed int */ +/* unsigned variants: */ +#define T_UBYTE 9 +#define T_USHORT 10 +#define T_UINT 11 +#define T_ULONG 12 + +/* Added by Jack: strings contained in the structure */ +#define T_STRING_INPLACE 13 + +/* Added by Lillo: bools contained in the structure (assumed char) */ +#define T_BOOL 14 + +#define T_OBJECT_EX 16 /* Like T_OBJECT, but raises AttributeError + when the value is NULL, instead of + converting to None. 
*/ +#define T_LONGLONG 17 +#define T_ULONGLONG 18 + +#define T_PYSSIZET 19 /* Py_ssize_t */ +#define T_NONE 20 /* Value is always None */ + + +/* Flags */ +#define READONLY 1 +#define READ_RESTRICTED 2 +#define PY_WRITE_RESTRICTED 4 +#define RESTRICTED (READ_RESTRICTED | PY_WRITE_RESTRICTED) + +#define PY_AUDIT_READ READ_RESTRICTED + +/* Current API, use this */ +PyAPI_FUNC(PyObject *) PyMember_GetOne(const char *, struct PyMemberDef *); +PyAPI_FUNC(int) PyMember_SetOne(char *, struct PyMemberDef *, PyObject *); + + +#ifdef __cplusplus +} +#endif +#endif /* !Py_STRUCTMEMBER_H */ diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h new file mode 100644 index 0000000000000000000000000000000000000000..f98de71a83b6101cf5cb542eee360a22bf1930e3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/AutogradState.h @@ -0,0 +1,72 @@ +#pragma once + +#include + +namespace c10 { + +// Structure used to pack all the thread local boolean +// flags used by autograd +struct C10_API AutogradState { + static AutogradState& get_tls_state(); + static void set_tls_state(AutogradState state); + + AutogradState( + bool grad_mode, + bool inference_mode, + bool fw_grad_mode, + bool multithreading_enabled) + : grad_mode_(grad_mode), + inference_mode_(inference_mode), + fw_grad_mode_(fw_grad_mode), + multithreading_enabled_(multithreading_enabled), + view_replay_enabled_(false) {} + + void set_grad_mode(bool enabled) { + grad_mode_ = enabled; + } + + void set_fw_grad_mode(bool enabled) { + fw_grad_mode_ = enabled; + } + + void set_inference_mode(bool enabled) { + inference_mode_ = enabled; + } + + void set_multithreading_enabled(bool multithreading_enabled) { + multithreading_enabled_ = multithreading_enabled; + } + + void set_view_replay_enabled(bool view_replay_enabled) { + view_replay_enabled_ = view_replay_enabled; + } + + bool get_grad_mode() const { 
+ return grad_mode_; + } + + bool get_fw_grad_mode() const { + return fw_grad_mode_; + } + + bool get_inference_mode() const { + return inference_mode_; + } + + bool get_multithreading_enabled() const { + return multithreading_enabled_; + } + + bool get_view_replay_enabled() const { + return view_replay_enabled_; + } + + private: + bool grad_mode_ : 1; + bool inference_mode_ : 1; + bool fw_grad_mode_ : 1; + bool multithreading_enabled_ : 1; + bool view_replay_enabled_ : 1; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/Backend.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..8ecaa7be7377414337fe3d019e37face208e20eb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Backend.h @@ -0,0 +1,387 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10 { + +/** + * This legacy enum class defines the set of backends supported by old school, + * code generated Type-based ATen. A "backend" in this sense roughly + * corresponds to the cartesian product of (device type, layout), but restricted + * only to combinations which we actually have kernels for. Backend does NOT + * include dtype. + * + * The reason we are sunsetting this enum class is because it doesn't allow for + * open registration; e.g., if you want to add SparseXLA, you'd have to + * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is + * the replacement for Backend which supports open registration. + * + * NB: The concept of 'Backend' here disagrees with the notion of backend + * exposed to users in torch.backends. Backend here is something like "CPU" + * or "SparseCUDA"; backend in torch.backends is something like "MKL" or + * "CUDNN". 
+ */ +enum class Backend { + CPU, + CUDA, + HIP, + VE, + FPGA, + IPU, + XPU, + SparseCPU, + SparseCUDA, + SparseCsrCPU, + SparseCsrCUDA, + SparseHIP, + SparseVE, + SparseXPU, + SparsePrivateUse1, + SparseCsrHIP, + SparseCsrVE, + SparseCsrXPU, + SparseCsrPrivateUse1, + MAIA, + XLA, + Vulkan, + Metal, + Meta, + QuantizedCPU, + QuantizedCUDA, + QuantizedXPU, + QuantizedPrivateUse1, + Undefined, + MkldnnCPU, + MPS, + HPU, + Lazy, + MTIA, + PrivateUse1, + NumOptions +}; + +inline Backend dispatchKeyToBackend(DispatchKey t) { + if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) { + return Backend::CPU; + } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) { + return Backend::CUDA; + } else if (t == DispatchKey::HIP) { + return Backend::HIP; + } else if (t == DispatchKey::VE) { + return Backend::VE; + } else if (t == DispatchKey::FPGA) { + return Backend::FPGA; + } else if (t == DispatchKey::MAIA) { + return Backend::MAIA; + } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) { + return Backend::XLA; + } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) { + return Backend::Lazy; + } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) { + return Backend::MPS; + } else if (t == DispatchKey::Vulkan) { + return Backend::Vulkan; + } else if (t == DispatchKey::Metal) { + return Backend::Metal; + } else if (t == DispatchKey::Meta) { + return Backend::Meta; + } else if (t == DispatchKey::SparseCPU) { + return Backend::SparseCPU; + } else if (t == DispatchKey::SparseCUDA) { + return Backend::SparseCUDA; + } else if (t == DispatchKey::SparseHIP) { + return Backend::SparseHIP; + } else if (t == DispatchKey::SparseVE) { + return Backend::SparseVE; + } else if (t == DispatchKey::SparsePrivateUse1) { + return Backend::SparsePrivateUse1; + } else if (t == DispatchKey::SparseCsrCPU) { + return Backend::SparseCsrCPU; + } else if (t == DispatchKey::SparseCsrCUDA) { + return Backend::SparseCsrCUDA; + } else if (t == 
DispatchKey::SparseCsrHIP) { + return Backend::SparseCsrHIP; + } else if (t == DispatchKey::SparseCsrVE) { + return Backend::SparseCsrVE; + } else if (t == DispatchKey::SparseCsrPrivateUse1) { + return Backend::SparseCsrPrivateUse1; + } else if (t == DispatchKey::MkldnnCPU) { + return Backend::MkldnnCPU; + } else if (t == DispatchKey::QuantizedCPU) { + return Backend::QuantizedCPU; + } else if (t == DispatchKey::QuantizedCUDA) { + return Backend::QuantizedCUDA; + } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) { + return Backend::IPU; + } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) { + return Backend::XPU; + } else if (t == DispatchKey::SparseXPU) { + return Backend::SparseXPU; + } else if (t == DispatchKey::SparseCsrXPU) { + return Backend::SparseCsrXPU; + } else if (t == DispatchKey::QuantizedXPU) { + return Backend::QuantizedXPU; + } else if (t == DispatchKey::QuantizedPrivateUse1) { + return Backend::QuantizedPrivateUse1; + } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) { + return Backend::HPU; + } else if (t == DispatchKey::MTIA || t == DispatchKey::AutogradMTIA) { + return Backend::MTIA; + } else if ( + t == DispatchKey::PrivateUse1 || t == DispatchKey::AutogradPrivateUse1) { + return Backend::PrivateUse1; + } else if (t == DispatchKey::Undefined) { + return Backend::Undefined; + } else { + TORCH_CHECK(false, "Unrecognized tensor type ID: ", t); + } +} + +inline DispatchKey backendToDispatchKey(Backend b) { + switch (b) { + case Backend::CPU: + return DispatchKey::CPU; + case Backend::CUDA: + return DispatchKey::CUDA; + case Backend::HIP: + return DispatchKey::HIP; + case Backend::VE: + return DispatchKey::VE; + case Backend::FPGA: + return DispatchKey::FPGA; + case Backend::MAIA: + return DispatchKey::MAIA; + case Backend::XLA: + return DispatchKey::XLA; + case Backend::Lazy: + return DispatchKey::Lazy; + case Backend::IPU: + return DispatchKey::IPU; + case Backend::XPU: + return DispatchKey::XPU; + 
case Backend::SparseXPU: + return DispatchKey::SparseXPU; + case Backend::SparseCsrXPU: + return DispatchKey::SparseCsrXPU; + case Backend::SparseCPU: + return DispatchKey::SparseCPU; + case Backend::SparseCUDA: + return DispatchKey::SparseCUDA; + case Backend::SparseHIP: + return DispatchKey::SparseHIP; + case Backend::SparseVE: + return DispatchKey::SparseVE; + case Backend::SparsePrivateUse1: + return DispatchKey::SparsePrivateUse1; + case Backend::SparseCsrCPU: + return DispatchKey::SparseCsrCPU; + case Backend::SparseCsrCUDA: + return DispatchKey::SparseCsrCUDA; + case Backend::SparseCsrHIP: + return DispatchKey::SparseCsrHIP; + case Backend::SparseCsrVE: + return DispatchKey::SparseCsrVE; + case Backend::SparseCsrPrivateUse1: + return DispatchKey::SparseCsrPrivateUse1; + case Backend::MkldnnCPU: + return DispatchKey::MkldnnCPU; + case Backend::Vulkan: + return DispatchKey::Vulkan; + case Backend::Metal: + return DispatchKey::Metal; + case Backend::Meta: + return DispatchKey::Meta; + case Backend::QuantizedCPU: + return DispatchKey::QuantizedCPU; + case Backend::QuantizedCUDA: + return DispatchKey::QuantizedCUDA; + case Backend::QuantizedPrivateUse1: + return DispatchKey::QuantizedPrivateUse1; + case Backend::Undefined: + return DispatchKey::Undefined; + case Backend::MPS: + return DispatchKey::MPS; + case Backend::HPU: + return DispatchKey::HPU; + case Backend::MTIA: + return DispatchKey::MTIA; + case Backend::PrivateUse1: + return DispatchKey::PrivateUse1; + default: + throw std::runtime_error("Unknown backend"); + } +} + +inline DeviceType backendToDeviceType(Backend b) { + switch (b) { + case Backend::CPU: + case Backend::MkldnnCPU: + case Backend::SparseCPU: + case Backend::SparseCsrCPU: + case Backend::QuantizedCPU: + return DeviceType::CPU; + case Backend::CUDA: + case Backend::SparseCUDA: + case Backend::QuantizedCUDA: + case Backend::SparseCsrCUDA: + return DeviceType::CUDA; + case Backend::HIP: + return DeviceType::HIP; + case Backend::VE: + return 
DeviceType::VE; + case Backend::FPGA: + return DeviceType::FPGA; + case Backend::MAIA: + return DeviceType::MAIA; + case Backend::XLA: + return DeviceType::XLA; + case Backend::Lazy: + return DeviceType::Lazy; + case Backend::SparseHIP: + return DeviceType::HIP; + case Backend::SparseVE: + return DeviceType::VE; + case Backend::SparseCsrHIP: + return DeviceType::HIP; + case Backend::SparseCsrVE: + return DeviceType::VE; + case Backend::IPU: + return DeviceType::IPU; + case Backend::XPU: + case Backend::SparseXPU: + case Backend::SparseCsrXPU: + case Backend::QuantizedXPU: + return DeviceType::XPU; + case Backend::Vulkan: + return DeviceType::Vulkan; + case Backend::Metal: + return DeviceType::Metal; + case Backend::Meta: + return DeviceType::Meta; + case Backend::MPS: + return DeviceType::MPS; + case Backend::HPU: + return DeviceType::HPU; + case Backend::MTIA: + return DeviceType::MTIA; + case Backend::PrivateUse1: + case Backend::SparsePrivateUse1: + case Backend::SparseCsrPrivateUse1: + case Backend::QuantizedPrivateUse1: + return DeviceType::PrivateUse1; + case Backend::Undefined: + TORCH_CHECK(false, "Undefined backend is not a valid device type"); + default: + TORCH_CHECK(false, "Unknown backend"); + } +} + +inline const char* toString(Backend b) { + switch (b) { + case Backend::CPU: + return "CPU"; + case Backend::CUDA: + return "CUDA"; + case Backend::HIP: + return "HIP"; + case Backend::VE: + return "VE"; + case Backend::FPGA: + return "FPGA"; + case Backend::XPU: + return "XPU"; + case Backend::IPU: + return "IPU"; + case Backend::MAIA: + return "MAIA"; + case Backend::XLA: + return "XLA"; + case Backend::Lazy: + return "Lazy"; + case Backend::MPS: + return "MPS"; + case Backend::SparseCPU: + return "SparseCPU"; + case Backend::SparseCUDA: + return "SparseCUDA"; + case Backend::SparseHIP: + return "SparseHIP"; + case Backend::SparseVE: + return "SparseVE"; + case Backend::SparseXPU: + return "SparseXPU"; + case Backend::SparsePrivateUse1: + return 
"SparsePrivateUse1"; + case Backend::SparseCsrCPU: + return "SparseCsrCPU"; + case Backend::SparseCsrCUDA: + return "SparseCsrCUDA"; + case Backend::SparseCsrHIP: + return "SparseCsrHIP"; + case Backend::SparseCsrVE: + return "SparseCsrVE"; + case Backend::SparseCsrXPU: + return "SparseCsrXPU"; + case Backend::SparseCsrPrivateUse1: + return "SparseCsrPrivateUse1"; + case Backend::MkldnnCPU: + return "MkldnnCPU"; + case Backend::Vulkan: + return "Vulkan"; + case Backend::Metal: + return "Metal"; + case Backend::Meta: + return "Meta"; + case Backend::QuantizedCPU: + return "QuantizedCPU"; + case Backend::QuantizedCUDA: + return "QuantizedCUDA"; + case Backend::QuantizedXPU: + return "QuantizedXPU"; + case Backend::QuantizedPrivateUse1: + return "QuantizedPrivateUse1"; + case Backend::HPU: + return "HPU"; + case Backend::MTIA: + return "MTIA"; + case Backend::PrivateUse1: + return "PrivateUseOne"; + default: + return "UNKNOWN_BACKEND"; + } +} + +inline bool isSparse(Backend b) { + switch (b) { + case Backend::SparseXPU: + case Backend::SparseCPU: + case Backend::SparseCUDA: + case Backend::SparseHIP: + case Backend::SparseVE: + case Backend::SparsePrivateUse1: + return true; + default: + return false; + } +} + +inline bool isSparseCsr(Backend b) { + switch (b) { + case Backend::SparseCsrXPU: + case Backend::SparseCsrCPU: + case Backend::SparseCsrCUDA: + case Backend::SparseCsrHIP: + case Backend::SparseCsrVE: + case Backend::SparseCsrPrivateUse1: + return true; + default: + return false; + } +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..98debb9db50ddffbca6ff4fc567e895d482d27c9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include +#include +#include + +#include 
+#include +#include + +// TODO: rename to c10 +C10_DECLARE_bool(caffe2_report_cpu_memory_usage); + +namespace c10 { + +using MemoryDeleter = void (*)(void*); + +// A helper function that is basically doing nothing. +C10_API void NoDelete(void*); + +// A simple struct that is used to report C10's memory allocation, +// deallocation status and out-of-memory events to the profiler +class C10_API ProfiledCPUMemoryReporter { + public: + ProfiledCPUMemoryReporter() = default; + void New(void* ptr, size_t nbytes); + void OutOfMemory(size_t nbytes); + void Delete(void* ptr); + + private: + std::mutex mutex_; + std::unordered_map size_table_; + size_t allocated_ = 0; + size_t log_cnt_ = 0; +}; + +C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter(); + +// Get the CPU Allocator. +C10_API at::Allocator* GetCPUAllocator(); +// Sets the CPU allocator to the given allocator: the caller gives away the +// ownership of the pointer. +C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0); + +// Get the Default CPU Allocator +C10_API at::Allocator* GetDefaultCPUAllocator(); + +// Get the Default Mobile CPU Allocator +C10_API at::Allocator* GetDefaultMobileCPUAllocator(); + +// The CPUCachingAllocator is experimental and might disappear in the future. +// The only place that uses it is in StaticRuntime. 
+// Set the CPU Caching Allocator +C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0); +// Get the CPU Caching Allocator +C10_API Allocator* GetCPUCachingAllocator(); + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h new file mode 100644 index 0000000000000000000000000000000000000000..a5fbd1f3e1f3849b4585a623c37e2a5cf2a0a924 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include + +namespace c10 { + +/** + * Represent a function pointer as a C++ type. + * This allows using the function pointer as a type + * in a template and calling it from inside the template + * allows the compiler to inline the call because it + * knows the function pointer at compile time. + * + * Example 1: + * int add(int a, int b) {return a + b;} + * using Add = TORCH_FN_TYPE(add); + * template struct Executor { + * int execute(int a, int b) { + * return Func::func_ptr()(a, b); + * } + * }; + * Executor executor; + * EXPECT_EQ(3, executor.execute(1, 2)); + * + * Example 2: + * int add(int a, int b) {return a + b;} + * template int execute(Func, int a, int b) { + * return Func::func_ptr()(a, b); + * } + * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); + */ +template +struct CompileTimeFunctionPointer final { + static_assert( + guts::is_function_type::value, + "TORCH_FN can only wrap function types."); + using FuncType = FuncType_; + + static constexpr FuncType* func_ptr() { + return func_ptr_; + } +}; + +template +struct is_compile_time_function_pointer : std::false_type {}; +template +struct is_compile_time_function_pointer< + CompileTimeFunctionPointer> : std::true_type {}; + +} // namespace c10 + +#define TORCH_FN_TYPE(func) \ + ::c10::CompileTimeFunctionPointer< \ + std::remove_pointer_t>, \ + func> 
+#define TORCH_FN(func) TORCH_FN_TYPE(func)() diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h new file mode 100644 index 0000000000000000000000000000000000000000..36f41b6251c0cc64f680499c08799b1201456f3e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h @@ -0,0 +1,129 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +template +bool _compute_contiguous(ArrayRef sizes, ArrayRef strides, T numel) { + bool is_contiguous = true; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(numel, 0))) { + return is_contiguous; + } + T z = 1; + // NB: make sure we do signed arithmetic + for (int64_t d = int64_t(sizes.size()) - 1; d >= 0; d--) { + const auto& size_d = sizes[d]; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) { + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(strides[d], z))) { + z *= size_d; + } else { + is_contiguous = false; + break; + } + } + } + return is_contiguous; +} + +template +bool _compute_channels_last_contiguous_2d( + ArrayRef sizes, + ArrayRef strides) { + // Please don't combine these code, constant array is used here to let + // compiler fully unroll the loop to get better performance + switch (sizes.size()) { + case 4: { + T expected = 1; + for (auto& d : {1, 3, 2, 0}) { + const auto& size_d = sizes[d]; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) { + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) { + return false; + } + expected *= size_d; + } + } + return true; + } + // NOLINTNEXTLINE(bugprone-branch-clone) + case 3: + // TODO dim == 3 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template +bool _compute_channels_last_contiguous_3d( + ArrayRef sizes, + ArrayRef strides) { + // Please don't combine these code, constant array is used here to let + // compiler fully unroll the loop 
to get better performance + switch (sizes.size()) { + case 5: { + T expected = 1; + for (auto& d : {1, 4, 3, 2, 0}) { + const auto& size_d = sizes[d]; + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) { + if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) { + return false; + } + expected *= size_d; + } + } + return true; + } + // NOLINTNEXTLINE(bugprone-branch-clone) + case 4: + // TODO dim == 4 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template +bool _compute_non_overlapping_and_dense( + ArrayRef sizes, + ArrayRef strides) { + auto dim = sizes.size(); + if (dim == 1) { + return sizes[0] < 2 || strides[0] == 1; + } + SmallVector perm; + perm.resize(dim); + for (const auto i : c10::irange(dim)) { + perm[i] = i; + } + // Sort by strides, leaving 0 and 1 sized dims at the end of the array + std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) { + if (sizes[a] < 2) { + return false; + } else if (sizes[b] < 2) { + return true; + } + return strides[a] < strides[b]; + }); + T require_stride = 1; + for (const auto i : c10::irange(dim)) { + const auto& size_perm_i = sizes[perm[i]]; + if (size_perm_i < 2) { + return true; + } + if (strides[perm[i]] != require_stride) { + return false; + } + require_stride *= size_perm_i; + } + return true; +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h new file mode 100644 index 0000000000000000000000000000000000000000..78eb0dc9f090a28ba10c3120ae9979b9af9f387a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/CopyBytes.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +using CopyBytesFunction = void (*)( + size_t nbytes, + const void* src, + Device src_device, + void* dst, + Device dst_device); + +struct C10_API _CopyBytesFunctionRegisterer { + 
_CopyBytesFunctionRegisterer( + DeviceType from, + DeviceType to, + CopyBytesFunction func_sync, + CopyBytesFunction func_async = nullptr); +}; + +#define REGISTER_COPY_BYTES_FUNCTION(from, to, ...) \ + namespace { \ + static _CopyBytesFunctionRegisterer C10_ANONYMOUS_VARIABLE( \ + g_copy_function)(from, to, __VA_ARGS__); \ + } + +/* + * WARNING: Implementations for this function are currently registered from + * ATen and caffe2, not yet from c10. Don't use this if not either ATen + * or caffe2 is present as well. + * We can't move them yet, because the CUDA implementations aren't unified yet + * between ATen and caffe2. + * We're planning to move the implementations into c10/backend/xxx + * to make c10 self contained again. + */ +C10_API void CopyBytes( + size_t nbytes, + const void* src, + Device src_device, + void* dst, + Device dst_device, + bool async); +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h new file mode 100644 index 0000000000000000000000000000000000000000..8f23051dc682395ba92b2fcc4043162abaa8ec47 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultDtype.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace caffe2 { +class TypeMeta; +} // namespace caffe2 + +namespace c10 { +C10_API void set_default_dtype(caffe2::TypeMeta dtype); +C10_API const caffe2::TypeMeta get_default_dtype(); +C10_API ScalarType get_default_dtype_as_scalartype(); +C10_API const caffe2::TypeMeta get_default_complex_dtype(); +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..284af1388ef648df356cf13f2737b784fc269a73 --- /dev/null +++ 
b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +struct TensorOptions; + +/// Like TensorOptions, but all fields are guaranteed to be filled. +struct DefaultTensorOptions { + DefaultTensorOptions() = default; + + caffe2::TypeMeta dtype() const noexcept { + return dtype_; + } + Device device() const noexcept { + return device_; + } + Layout layout() const noexcept { + return layout_; + } + bool requires_grad() const noexcept { + return requires_grad_; + } + + // Defined in TensorOptions.h + inline DefaultTensorOptions& merge(const TensorOptions& options); + + private: + caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make(); // 64-bit + Device device_ = at::kCPU; // 32-bit + Layout layout_ = at::kStrided; // 8-bit + bool requires_grad_ = false; // 8-bit +}; + +inline const DefaultTensorOptions& getDefaultTensorOptions() { + static const auto options = DefaultTensorOptions(); + return options; +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/Device.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Device.h new file mode 100644 index 0000000000000000000000000000000000000000..cbe9129852adecc986f6b541be2c1f53aac08b40 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Device.h @@ -0,0 +1,216 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace c10 { + +/// An index representing a specific device; e.g., the 1 in GPU 1. +/// A DeviceIndex is not independently meaningful without knowing +/// the DeviceType it is associated; try to use Device rather than +/// DeviceIndex directly. +using DeviceIndex = int8_t; + +/// Represents a compute device on which a tensor is located. A device is +/// uniquely identified by a type, which specifies the type of machine it is +/// (e.g. 
CPU or CUDA GPU), and a device index or ordinal, which identifies the +/// specific compute device when there is more than one of a certain type. The +/// device index is optional, and in its defaulted state represents (abstractly) +/// "the current device". Further, there are two constraints on the value of the +/// device index, if one is explicitly stored: +/// 1. A negative index represents the current device, a non-negative index +/// represents a specific, concrete device, +/// 2. When the device type is CPU, the device index must be zero. +struct C10_API Device final { + using Type = DeviceType; + + /// Constructs a new `Device` from a `DeviceType` and an optional device + /// index. + /* implicit */ Device(DeviceType type, DeviceIndex index = -1) + : type_(type), index_(index) { + validate(); + } + + /// Constructs a `Device` from a string description, for convenience. + /// The string supplied must follow the following schema: + /// `(cpu|cuda)[:]` + /// where `cpu` or `cuda` specifies the device type, and + /// `:` optionally specifies a device index. + /* implicit */ Device(const std::string& device_string); + + /// Returns true if the type and index of this `Device` matches that of + /// `other`. + bool operator==(const Device& other) const noexcept { + return this->type_ == other.type_ && this->index_ == other.index_; + } + + /// Returns true if the type or index of this `Device` differs from that of + /// `other`. + bool operator!=(const Device& other) const noexcept { + return !(*this == other); + } + + /// Sets the device index. + void set_index(DeviceIndex index) { + index_ = index; + } + + /// Returns the type of device this is. + DeviceType type() const noexcept { + return type_; + } + + /// Returns the optional index. + DeviceIndex index() const noexcept { + return index_; + } + + /// Returns true if the device has a non-default index. 
+ bool has_index() const noexcept { + return index_ != -1; + } + + /// Return true if the device is of CUDA type. + bool is_cuda() const noexcept { + return type_ == DeviceType::CUDA; + } + + /// Return true if the device is of PrivateUse1 type. + bool is_privateuseone() const noexcept { + return type_ == DeviceType::PrivateUse1; + } + + /// Return true if the device is of MPS type. + bool is_mps() const noexcept { + return type_ == DeviceType::MPS; + } + + /// Return true if the device is of HIP type. + bool is_hip() const noexcept { + return type_ == DeviceType::HIP; + } + + /// Return true if the device is of VE type. + bool is_ve() const noexcept { + return type_ == DeviceType::VE; + } + + /// Return true if the device is of XPU type. + bool is_xpu() const noexcept { + return type_ == DeviceType::XPU; + } + + /// Return true if the device is of IPU type. + bool is_ipu() const noexcept { + return type_ == DeviceType::IPU; + } + + /// Return true if the device is of XLA type. + bool is_xla() const noexcept { + return type_ == DeviceType::XLA; + } + + /// Return true if the device is of MTIA type. + bool is_mtia() const noexcept { + return type_ == DeviceType::MTIA; + } + + /// Return true if the device is of HPU type. + bool is_hpu() const noexcept { + return type_ == DeviceType::HPU; + } + + /// Return true if the device is of Lazy type. + bool is_lazy() const noexcept { + return type_ == DeviceType::Lazy; + } + + /// Return true if the device is of Vulkan type. + bool is_vulkan() const noexcept { + return type_ == DeviceType::Vulkan; + } + + /// Return true if the device is of Metal type. + bool is_metal() const noexcept { + return type_ == DeviceType::Metal; + } + + /// Return true if the device is of MAIA type. + bool is_maia() const noexcept { + return type_ == DeviceType::MAIA; + } + + /// Return true if the device is of META type. + bool is_meta() const noexcept { + return type_ == DeviceType::Meta; + } + + /// Return true if the device is of CPU type. 
+ bool is_cpu() const noexcept { + return type_ == DeviceType::CPU; + } + + /// Return true if the device supports arbitrary strides. + bool supports_as_strided() const noexcept { + return type_ != DeviceType::IPU && type_ != DeviceType::XLA && + type_ != DeviceType::Lazy && type_ != DeviceType::MTIA; + } + + /// Same string as returned from operator<<. + std::string str() const; + + private: + DeviceType type_; + DeviceIndex index_ = -1; + void validate() { + // Removing these checks in release builds noticeably improves + // performance in micro-benchmarks. + // This is safe to do, because backends that use the DeviceIndex + // have a later check when we actually try to switch to that device. + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + index_ >= -1, + "Device index must be -1 or non-negative, got ", + static_cast(index_)); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + !is_cpu() || index_ <= 0, + "CPU device index must be -1 or zero, got ", + static_cast(index_)); + } +}; + +C10_API std::ostream& operator<<(std::ostream& stream, const Device& device); + +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(c10::Device d) const noexcept { + // Are you here because this static assert failed? Make sure you ensure + // that the bitmasking code below is updated accordingly! + static_assert(sizeof(c10::DeviceType) == 1, "DeviceType is not 8-bit"); + static_assert(sizeof(c10::DeviceIndex) == 1, "DeviceIndex is not 8-bit"); + // Note [Hazard when concatenating signed integers] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // We must first convert to a same-sized unsigned type, before promoting to + // the result type, to prevent sign extension when any of the values is -1. + // If sign extension occurs, you'll clobber all of the values in the MSB + // half of the resulting integer. 
+ // + // Technically, by C/C++ integer promotion rules, we only need one of the + // uint32_t casts to the result type, but we put in both for explicitness's + // sake. + uint32_t bits = static_cast(static_cast(d.type())) + << 16 | + static_cast(static_cast(d.index())); + return std::hash{}(bits); + } +}; +} // namespace std diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h new file mode 100644 index 0000000000000000000000000000000000000000..e187f5a669db5fdd75074c6045dcc6506fc304bd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DeviceArray.h @@ -0,0 +1,28 @@ +#include +#include +#include +#include +#include + +namespace c10 { + +template +class DeviceArray { + public: + DeviceArray(c10::Allocator& allocator, size_t size) + : data_ptr_(allocator.allocate(size * sizeof(T))) { + static_assert(std::is_trivial::value, "T must be a trivial type"); + TORCH_INTERNAL_ASSERT( + 0 == (reinterpret_cast(data_ptr_.get()) % alignof(T)), + "c10::DeviceArray: Allocated memory is not aligned for this data type"); + } + + T* get() { + return static_cast(data_ptr_.get()); + } + + private: + c10::DataPtr data_ptr_; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h new file mode 100644 index 0000000000000000000000000000000000000000..71277ebfd891ed4a7a3dfa00eafbf07a4030228f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h @@ -0,0 +1,746 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +// Semantically, each value of BackendComponent identifies a "backend" for our +// dispatch. Some functionalities that we may dispatch to are allowed to +// register different handlers for each backend. 
The BackendComponent is then +// used to figure out which backend implementation to dispatch to. + +// In implementation terms, the backend component identifies a specific "bit" in +// a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom +// ~12 "BackendComponent" bits, while the remaining upper bits are assigned to +// functionalities. When we encounter a functionality bit that is known to be +// customizable per-backend, then we also look at the lower BackendComponent +// bits and take the highest bit to determine which backend's implementation to +// use. + +// WARNING! If you add a new backend component to the end of this list, +// make sure you register it before Meta. +// Meta must be at the end so that meta key in tls triggers meta kernels. +// (But you shouldn't: private use keys should have higher precedence than all +// built-in keys) + +// If you add a new (non-privateuse) backend here, +// make sure to add an Autograd fallthrough kernel +// in aten/src/ATen/core/VariableFallbackKernel.cpp + +#define C10_FORALL_BACKEND_COMPONENTS(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(MTIA, extra) \ + _(PrivateUse1, extra) \ + _(PrivateUse2, extra) \ + _(PrivateUse3, extra) \ + _(Meta, extra) + +// WARNING! If we add a new per-backend functionality key that has higher +// priority than Autograd, then make sure you update EndOfRuntimeBackendKeys + +#define C10_FORALL_FUNCTIONALITY_KEYS(_) \ + _(Dense, ) \ + _(Quantized, Quantized) \ + _(Sparse, Sparse) \ + _(SparseCsr, SparseCsr) \ + _(NestedTensor, NestedTensor) \ + _(AutogradFunctionality, Autograd) + +enum class BackendComponent : uint8_t { + + // A "backend" is colloquially used to refer to handlers for dispatch + // which actually implement the numerics of an operation in question. 
+ // + // Due to the nature of the enum, these backends are specified in + // an ordered way, but for most backends this order is not semantically + // meaningful (e.g., it's valid to reorder these backends without changing + // semantics). The only situation when backend ordering is meaningful + // is when the backend participates in multiple dispatch with another + // backend; e.g., CPU and CUDA (cuda must have higher priority). + + // These keys don't correspond to individual kernels. + // Instead, they represent the backends that are allowed to override specific + // pieces of functionality: + // - dense kernels (e.g. DispatchKey::CPU) + // - sparse kernels (e.g. DispatchKey::SparseCPU) + // - quantized kernels (e.g. DispatchKey::QuantizedCPU) + // - autograd kernels (e.g. DispatchKey::AutogradCPU) + // We reserve space in the runtime operator table for this full cross product + // of + // [backends in this enum] x [keys below that are explicitly marked as having + // per-backend functionality] + // + // A meta tensor is a tensor without any data associated with it. (They + // have also colloquially been referred to as tensors on the "null" device). + // A meta tensor can be used to dry run operators without actually doing any + // computation, e.g., add on two meta tensors would give you another meta + // tensor with the output shape and dtype, but wouldn't actually add anything. + + InvalidBit = 0, +#define DEFINE_BACKEND_COMPONENT(n, _) n##Bit, + C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused) +#undef DEFINE_BACKEND_COMPONENT + + // Define an alias to represent end of backend dispatch keys. + // If you add new backend keys after PrivateUse3, please also update it here. + EndOfBackendKeys = MetaBit, +}; + +// Semantically, a dispatch key identifies a possible "level" in our +// dispatch, for which a handler may be registered. Each handler corresponds +// to a type of functionality. 
+// +// In implementation terms, the dispatch key identifies a specific "bit" in a +// DispatchKeySet. Higher bit indexes get handled by dispatching first (because +// we "count leading zeros" when we extract the highest priority dispatch +// key.) +// +// Note [DispatchKey Classification] +// This enum actually contains several types of keys, which are explained +// in more detail further down: +// (1) non-customizable backends (e.g. FPGA) +// (2) non-customizable functionalities (e.g. Functionalize) +// (3) functionalized that are customizable per backend (e.g. Dense, Sparse, +// AutogradFunctionality) (4) per-backend instances of customizable +// functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g. +// CompositeImplicitAutograd) +// +// Of the categories above, it's important to note: +// (a) which keys are assigned individual bits in a DispatchKeySet +// (b) which keys are assigned individual slots in the runtime operator table +// ("Runtime keys") +// +// (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet. +// (1), (2) and (4) all get their own dedicated slots in the runtime operator +// table. + +// See Note [DispatchKeySet Internal Representation] for more details. +// +// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py +enum class DispatchKey : uint16_t { + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // This is not a "real" functionality, but it exists to give us a "nullopt" + // element we can return for cases when a DispatchKeySet contains no elements. + // You can think a more semantically accurate definition of DispatchKey is: + // + // using DispatchKey = optional + // + // and Undefined == nullopt. We didn't actually represent + // it this way because optional would take two + // words, when DispatchKey fits in eight bits. 
+ + Undefined = 0, + + // Define an alias for Undefined to represent CatchAll (long term + // this will get eliminated, but for now it's convenient) + CatchAll = Undefined, + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ // + // Every value in the enum (up to EndOfFunctionalityKeys) + // corresponds to an individual "functionality" that can be dispatched to. + // This is represented in the DispatchKeySet by assigning each of these enum + // values + // to each of the remaining (64 - len(BackendComponent)) bits. + // + // Most of these functionalities have a single handler assigned to them, + // making them "runtime keys". + // That map to a single slot in the runtime operator table. + // + // A few functionalities are allowed to be customizable per backend. + // See [Note: Per-Backend Functionality Dispatch Keys] for details. + + // See [Note: Per-Backend Functionality Dispatch Keys] + Dense, + + // Below are non-extensible backends. + // These are backends that currently don't have their own overrides for + // Autograd/Sparse/Quantized kernels, + // and we therefore don't waste space in the runtime operator table allocating + // space for them. + // If any of these backends ever need to customize, e.g., Autograd, then we'll + // need to add a DispatchKey::*Bit for them. 
+ + // TODO: put this in BackendComponents + FPGA, // Xilinx support lives out of tree at + // https://gitlab.com/pytorch-complex/vitis_kernels + + // TODO: put this in BackendComponents + // MAIA backend lives out of tree + // - test/cpp_extensions/maia_extension.cpp + // - test/test_torch.py + // - aten/src/ATen/test/extension_backend_test.cpp + MAIA, + + Vulkan, // TODO: put this in BackendComponents + Metal, // TODO: put this in BackendComponents + + // See [Note: Per-Backend Functionality Dispatch Keys] + Quantized, + + // This backend is to support custom RNGs; it lets you go + // to a different kernel if you pass in a generator that is not a + // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this + // key: + // 1) set it as a second parameter of at::Generator constructor call in + // the user-defined PRNG class. + // 2) use it as a dispatch key while registering custom kernels + // (templatized kernels specialized for user-defined PRNG class) + // intended for out of tree use; tested by aten/src/ATen/test/rng_test.cpp + CustomRNGKeyId, + + // TODO: Make Mkldnn a functionality key, so we can give it Meta + // support + // Here are backends which specify more specialized operators + // based on the layout of the tensor. Note that the sparse backends + // are one case where ordering matters: sparse multi-dispatches with + // the corresponding dense tensors, and must be handled before them. + MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp + // NB: not to be confused with MKLDNN, which is Caffe2 only + + // See [Note: Per-Backend Functionality Dispatch Keys] + Sparse, + + SparseCsr, + + NestedTensor, + + // In some situations, it is not immediately obvious what the correct + // backend for function is, because the function in question doesn't + // have any "tensor" arguments. In this case, a BackendSelect function + // can be registered to implement the custom determination of the + // correct backend. 
+ BackendSelect, + + Python, + + // Out-of-core key for Fake Tensor in torchdistx. + // See https://pytorch.org/torchdistx/latest/fake_tensor.html + // TODO: delete this in favor of Python-implemented fake tensor + Fake, + // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key + // is to insert code after the "autograd subsystem" runs, so this key should + // be directly after ADInplaceOrView and all of the autograd keys. + FuncTorchDynamicLayerBackMode, + + // Alias and mutation removal. + // If some backends want to opt into only alias removal or only mutation + // removal, + // we can consider adding separate keys dedicated to those individual passes. + // See Note [Functionalization Pass In Core] for details. + Functionalize, + + // The named dispatch key is set for any tensors with named dimensions. + // Although we have a dispatch key for named tensors, for historical reasons, + // this dispatch key doesn't do any of the substantive functionality for named + // tensor (though, hypothetically, it could!) At the moment, it's just + // responsible for letting us give good error messages when operations + // don't support named tensors. + // + // NB: If you ever consider moving named tensor functionality into + // this dispatch key, note that it might be necessary add another dispatch + // key that triggers before composite operators, in case a composite operator + // has named dimension propagation that doesn't match that of its + // constituent parts. 
+ // TODO: delete this once torchdim lands in functorch + Named, + + // The Conjugate dispatch key is set for any tensors that need to perform + // conjugation + // This is implemented at a dispatch level right before any backends run + Conjugate, + + // The Negative dispatch key is set for any tensors that need to perform + // negation + // This is implemented at a dispatch level right before any backends run + Negative, + + ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp + + // Note [ADInplaceOrView key] + // ADInplaceOrView key is used by inplace or view ops to register a kernel + // that does additional setup for future autograd computation. + // + // 1. For inplace ops this kernel does version bump + // 2. For view ops this kernel does `as_view` setup where we properly setup + // DifferentiableViewMeta on the view tensors. + // + // For other ops it's fallthrough kernel since there's no extra + // work to do. + // + // Note [Dream: skip VariableType kernel when requires_grad=false] + // + // In an ideal world where we can skip VariableType kernel for inputs + // with requires_grad=false, instead of a fallthrough kernel, we'll + // register a kernel shown below to all functional ops as well: + // torch::Tensor my_functional_op(...) { + // { + // // Note for every op in VariableType, you need to go through + // // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the + // // key to TLS excluded set. If you don't go through it at all, + // // inplace/view ops called through `at::` inside your backend + // // kernel will dispatch to ADInplaceOrView kernels and do a lot + // // of extra work. + // at::AutoDispatchBelowADInplaceOrView guard; + // at::redispatch::my_functional_op(...); + // } + // } + // But this work is currently blocked since it adds an extra dispatch + // for all ops and it's non-trivial overhead at model level(a few percents). 
+ // Thus our current approach takes advantage of the fact every kernel go + // through VariableType kernel first and pulls the + // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops + // up to the `VariableType` kernel. Thus we only add the extra dispatch + // to view/inplace ops to minimize its perf impact to real models. + ADInplaceOrView, + // Note [Alias Dispatch Key : Autograd] + // All backends are oblivious to autograd; autograd is handled as a + // layer which happens on top of all backends. It inspects the autograd + // metadata of all inputs, determines what autograd metadata should be + // constructed by the output, and otherwise defers to the backend to + // actually do the numeric computation. Autograd contains + // the bulk of this logic. + + // Autograd is now an alias dispatch key which by default maps to all + // backend-specific autograd keys. + // Backend-specific allow backends to override the default kernel registered + // to Autograd key as needed. + // For example, XLA wants to define autograd for einsum directly. + // Registering a custom autograd implementation at the XLA key won't work + // because we process Autograd before XLA. This key has higher priority and + // gets processed first. You generally should NOT redispatch after handling + // autograd here (since that would result in execution of the Autograd + // operator, which you're trying to skip). In AutogradXLA implementations, + // you are responsible for handling autograd yourself, or deferring to other + // operators which support autograd. + + // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and + // reserved user-defined backends. All other in-tree backends share the + // AutogradOther key. We can add specific autograd key for those backends + // upon request. 
+ AutogradOther, + + // See [Note: Per-Backend Functionality Dispatch Keys] + AutogradFunctionality, + + // NestedTensor is an example of something that isn't a "real backend" + // (because it mostly consists of redispatching kernels) + // but it would like to override autograd functionality in C++. + // We can handle cases like this by adding an extra functionality key + // exclusively for handling autograd for NestedTensor. + // lives out of tree at + // https://github.com/pytorch/nestedtensor + AutogradNestedTensor, + + Tracer, + + // TODO: make Autocast a functionality key + // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed + // and inputs are saved for backward in the post-autocast type. + AutocastCPU, + AutocastXPU, + AutocastIPU, + AutocastHPU, + AutocastXLA, + // AutocastXLA is only being used for TPUs. XLA GPUs continue to use + // AutocastCUDA. + AutocastCUDA, + AutocastPrivateUse1, + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // There are a number of alternative modes which may want to handle before + // autograd; for example, error checking, tracing, profiling or vmap. They + // go here. + + FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype] + + // Dispatch key for BatchedTensorImpl wrapping a nested tensor. + BatchedNestedTensor, + + FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype] + + // This is the dispatch key for BatchedTensorImpl, which is used to implement + // batching rules for vmap. + Batched, + + // When we are inside a vmap, all tensors dispatch on this key. + // See Note: [DispatchKey::VmapMode usage] for more details. + VmapMode, + + FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype] + + // Out-of-core key for Deferred Module Initialization in torchdistx. 
+ // See https://pytorch.org/torchdistx/latest/deferred_init.html + DeferredInit, + + // Used by Python key logic to know the set of tls on entry to the dispatcher + // This kernel assumes it is the top-most non-functorch-related DispatchKey. + // If you add a key above, make sure to update the fallback implementation for + // this. + PythonTLSSnapshot, + + // This key should be at the very top of the dispatcher + FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype] + + // TESTING: This is intended to be a generic testing tensor type id. + // Don't use it for anything real; its only acceptable use is within a single + // process test. Use it by creating a TensorImpl with this DispatchKey, and + // then registering operators to operate on this type id. See + // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example. + TESTING_ONLY_GenericWrapper, + + // TESTING: This is intended to be a generic testing tensor type id. + // Don't use it for anything real; its only acceptable use is within a ingle + // process test. Use it by toggling the mode on and off via + // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators + // to operate on this type id. See + // aten/src/ATen/core/dispatch/backend_fallback_test.cpp + // for a usage example + TESTING_ONLY_GenericMode, + + // This key is used for pre-dispatch tracing in make_fx. + // It has lower priority than the PythonDispatcher key + // because we use the PythonDispatcher to intercept the key from python, + // and avoid having to implement it in C++. + PreDispatch, + + // This is a bypass that allows you to skip running the C++ dispatcher + // entirely + PythonDispatcher, + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + EndOfFunctionalityKeys, // End of functionality keys. 
+ +// ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ // +// Here are backends which you think of as traditionally specifying +// how to implement operations on some device. + +#define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n, + +#define DEFINE_PER_BACKEND_KEYS(fullname, prefix) \ + StartOf##fullname##Backends, \ + C10_FORALL_BACKEND_COMPONENTS( \ + DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \ + EndOf##fullname##Backends = prefix##Meta, + + C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS) + +#undef DEFINE_PER_BACKEND_KEYS +#undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND + + EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends, + + // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // Note [Alias Dispatch Keys] + // Alias dispatch keys are synthetic dispatch keys which map to multiple + // runtime dispatch keys. Alisa keys have precedence, but they are always + // lower precedence than runtime keys. You can register a kernel to an + // alias key, the kernel might be populated to the mapped runtime keys + // during dispatch table computation. + // If a runtime dispatch key has multiple kernels from alias keys, which + // kernel wins is done based on the precedence of alias keys (but runtime + // keys always have precedence over alias keys). + // Alias keys won't be directly called during runtime. 
+ + // See Note [Alias Dispatch Key : Autograd] + Autograd, + CompositeImplicitAutograd, // registered at + // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp + + // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from + // all + // other alias keysets + // and so precedence order doesn't matter + FuncTorchBatchedDecomposition, // registered at + // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp + // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is + // disjoint from all other alias keysets + CompositeImplicitAutogradNestedTensor, // registered at + // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp + CompositeExplicitAutograd, // registered at + // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp + // See Note [CompositeExplicitAutogradNonFunctional Key] + CompositeExplicitAutogradNonFunctional, // registered at + // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp + + // Define an alias key to represent end of alias dispatch keys. + // If you add new alias keys after Autograd, please also update it here. + StartOfAliasKeys = Autograd, + EndOfAliasKeys = CompositeExplicitAutogradNonFunctional, // + + // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // The aliases exist for backwards compatibility reasons, they shouldn't + // be used + CPUTensorId = CPU, + CUDATensorId = CUDA, + DefaultBackend = CompositeExplicitAutograd, + PrivateUse1_PreAutograd = AutogradPrivateUse1, + PrivateUse2_PreAutograd = AutogradPrivateUse2, + PrivateUse3_PreAutograd = AutogradPrivateUse3, + Autocast = AutocastCUDA, +}; + +// Note [Private use DispatchKey] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Private use tensor IDs are preallocated tensor type IDs for use in user +// applications. 
Similar to private use fields in HTTP, they can be used +// by end users for experimental or private applications, without needing +// to "standardize" the tensor ID (which would be done by submitting a PR +// to PyTorch to add your type ID). +// +// Private use tensor IDs are appropriate to use if you want to experiment +// with adding a new tensor type (without having to patch PyTorch first) or +// have a private, non-distributed application that needs to make use of a +// new tensor type. Private use tensor IDs are NOT appropriate to use for +// libraries intended to be distributed to further users: please contact +// the PyTorch developers to get a type ID registered in this case. +// +// We provide two classes of private user tensor id: regular DispatchKeys +// and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend" +// DispatchKeys; if you were adding support for a new type of accelerator, you +// would use a backend DispatchKey, and ideally automatically reuse +// AutogradOther definitions already defined in PyTorch. AutogradPrivateUse +// DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for +// tensors that compose multiple internal tensors, and for cases when the +// built-in autograd formulas for operators are not appropriate. + +static_assert( + (static_cast(BackendComponent::EndOfBackendKeys) + + static_cast(DispatchKey::EndOfFunctionalityKeys)) <= 64, + "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)" + " both map to backend and functionality bits" + " into a 64-bit bitmask; you must have less than 64 total entries between them"); + +// Check if a DispatchKey is an alias mapping to other runtime keys. 
+constexpr bool isAliasDispatchKey(DispatchKey k) { + return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys; +} + +// [Note: Per-Backend Functionality Dispatch Keys] +// Check if a DispatchKey is a per-backend functionality key +// Any functionalities that can be customized per-backend should be added here. +// These keys correspond to functionalities that can be customized individually +// per backend. While they only take up one bit in the `DispatchKeySet` bitset, +// they map to (# backends) slots in the operator table. +// Each of these keys also has a separate set of "runtime keys" in the dispatch +// key enum, per backend, which *do* map to the individual operator table slots. +// For example, the "Sparse" key maps to an individual bit in the +// DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc all map to individual +// slots in the runtime operator table. + +constexpr bool isPerBackendFunctionalityKey(DispatchKey k) { + if (k == DispatchKey::Dense || k == DispatchKey::Quantized || + k == DispatchKey::Sparse || k == DispatchKey::SparseCsr || + k == DispatchKey::AutogradFunctionality || + k == DispatchKey::NestedTensor) { + return true; + } else { + return false; + } +} + +// Note that this includes Undefined in the total count. +// BUT EndOfFunctionalityKeys is its own (placeholder) key. +// e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3. +// In the above example, there are 3 total functionality keys. +constexpr uint8_t num_functionality_keys = + static_cast(DispatchKey::EndOfFunctionalityKeys); + +constexpr uint8_t num_backends = + static_cast(BackendComponent::EndOfBackendKeys); + +// Note [No More Than 16 Backends] +// Search for this note to find places in the code where the "no more than 16 +// backends" invariant is baked in. +static_assert( + static_cast(BackendComponent::EndOfBackendKeys) <= 16, + "BackendComponent currently only supports <= 16 backends. 
If we really need to extend this, \ +there are a few places where this invariant is baked in"); + +constexpr uint8_t numPerBackendFunctionalityKeys() { + uint8_t count = 0; + for (uint8_t k = 0; k <= num_functionality_keys; ++k) { + if (isPerBackendFunctionalityKey(static_cast(k))) + ++count; + } + return count; +} + +#if defined(C10_MOBILE_TRIM_DISPATCH_KEYS) +// See [Note: Trimmed Mobile Dispatch Keys] +constexpr uint16_t num_runtime_entries = 8; +#else +constexpr uint16_t num_runtime_entries = num_functionality_keys + + (numPerBackendFunctionalityKeys() * (num_backends - 1)); +#endif + +// See Note [No More Than 16 Backends] +constexpr uint16_t full_backend_mask = + (static_cast(1) << num_backends) - 1; + +C10_API const char* toString(DispatchKey); +C10_API const char* toString(BackendComponent); +C10_API std::ostream& operator<<(std::ostream&, DispatchKey); +C10_API std::ostream& operator<<(std::ostream&, BackendComponent); + +C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k); + +// Parses a string into a dispatch key. +// If the string cannot be correctly parsed, throws an exception. +C10_API c10::DispatchKey parseDispatchKey(const std::string& k); + +// These are some convenience identifiers for dispatch keys which are +// shorter to type than their long counterparts. Note that some of these +// dispatch keys directly correspond to DeviceType; and most APIs that +// accept DispatchKey also accept DeviceType; e.g., +// torch::dispatch(torch::kCPU, ...) is also valid. +constexpr DispatchKey kAutograd = DispatchKey::Autograd; + +// See Note [The Ordering of Per-Backend Dispatch Keys Matters!] +// This function relies on the invariant that the dispatch keys between +// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend +// in the same order as `BackendComponent`. 
+constexpr BackendComponent toBackendComponent(DispatchKey k) { + if (k >= DispatchKey::StartOfDenseBackends && + k <= DispatchKey::EndOfDenseBackends) { + return static_cast( + static_cast(k) - + static_cast(DispatchKey::StartOfDenseBackends)); + } else if ( + k >= DispatchKey::StartOfQuantizedBackends && + k <= DispatchKey::EndOfQuantizedBackends) { + return static_cast( + static_cast(k) - + static_cast(DispatchKey::StartOfQuantizedBackends)); + } else if ( + k >= DispatchKey::StartOfSparseBackends && + k <= DispatchKey::EndOfSparseBackends) { + return static_cast( + static_cast(k) - + static_cast(DispatchKey::StartOfSparseBackends)); + } else if ( + k >= DispatchKey::StartOfSparseCsrBackends && + k <= DispatchKey::EndOfSparseCsrBackends) { + return static_cast( + static_cast(k) - + static_cast(DispatchKey::StartOfSparseCsrBackends)); + } else if ( + k >= DispatchKey::StartOfNestedTensorBackends && + k <= DispatchKey::EndOfNestedTensorBackends) { + return static_cast( + static_cast(k) - + static_cast(DispatchKey::StartOfNestedTensorBackends)); + } else if ( + k >= DispatchKey::StartOfAutogradFunctionalityBackends && + k <= DispatchKey::EndOfAutogradFunctionalityBackends) { + return static_cast( + static_cast(k) - + static_cast( + DispatchKey::StartOfAutogradFunctionalityBackends)); + } else { + return BackendComponent::InvalidBit; + } +} + +constexpr DispatchKey toFunctionalityKey(DispatchKey k) { + if (k <= DispatchKey::EndOfFunctionalityKeys) { + return k; + } else if (k <= DispatchKey::EndOfDenseBackends) { + return DispatchKey::Dense; + } else if (k <= DispatchKey::EndOfQuantizedBackends) { + return DispatchKey::Quantized; + } else if (k <= DispatchKey::EndOfSparseBackends) { + return DispatchKey::Sparse; + } else if (k <= DispatchKey::EndOfSparseCsrBackends) { + return DispatchKey::SparseCsr; + } else if (k <= DispatchKey::EndOfNestedTensorBackends) { + return DispatchKey::NestedTensor; + } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) { + 
return DispatchKey::AutogradFunctionality; + } else { + return DispatchKey::Undefined; + } +} + +BackendComponent toBackendComponent(DeviceType device_type); + +// Given (DispatchKey::Dense, BackendComponent::CUDABit), returns +// DispatchKey::CUDA. +// See Note [The Ordering of Per-Backend Dispatch Keys Matters!] +// This function relies on the invariant that the dispatch keys between +// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend +// in the same order as `BackendComponent`. +constexpr DispatchKey toRuntimePerBackendFunctionalityKey( + DispatchKey functionality_k, + BackendComponent backend_k) { + if (functionality_k == DispatchKey::Dense) { + return static_cast( + static_cast(DispatchKey::StartOfDenseBackends) + + static_cast(backend_k)); + } + if (functionality_k == DispatchKey::Sparse) { + return static_cast( + static_cast(DispatchKey::StartOfSparseBackends) + + static_cast(backend_k)); + } + if (functionality_k == DispatchKey::SparseCsr) { + return static_cast( + static_cast(DispatchKey::StartOfSparseCsrBackends) + + static_cast(backend_k)); + } + if (functionality_k == DispatchKey::Quantized) { + return static_cast( + static_cast(DispatchKey::StartOfQuantizedBackends) + + static_cast(backend_k)); + } + if (functionality_k == DispatchKey::NestedTensor) { + return static_cast( + static_cast(DispatchKey::StartOfNestedTensorBackends) + + static_cast(backend_k)); + } + if (functionality_k == DispatchKey::AutogradFunctionality) { + return static_cast( + static_cast( + DispatchKey::StartOfAutogradFunctionalityBackends) + + static_cast(backend_k)); + } + return DispatchKey::Undefined; +} + +} // namespace c10 + +namespace torch { +// Expose the constant, but not the TYPE (DispatchKey is an implementation +// detail!) 
+// NOLINTNEXTLINE(misc-unused-using-decls) +using c10::kAutograd; +} // namespace torch + +// NB: You really shouldn't use this instance; this enum is guaranteed +// to be pretty small so a regular array should be acceptable. +namespace std { +template <> +struct hash { + typedef size_t result_type; + typedef c10::DispatchKey argument_type; + + size_t operator()(c10::DispatchKey x) const { + return static_cast(x); + } +}; +} // namespace std diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h new file mode 100644 index 0000000000000000000000000000000000000000..0a845776a263b68ce731d15350a8b9a040b98f6b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/DynamicCast.h @@ -0,0 +1,125 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +// Dynamic type casting utils: +// - fetch_and_cast +// - cast_and_store +// +// fetch_and_cast fetch a value with dynamic type specified by a ScalarType +// from a void pointer and cast it to a static type. +// +// cast_and_store casts a static typed value into dynamic type specified +// by a ScalarType, and store it into a void pointer. +// +// NOTE: +// +// Dynamic casting allows us to support type promotion without blowing up +// the combination space: For example, without dynamic cast, in order to +// implement `add_` with type promotion, we would need something like +// +// AT_DISPATCH_ALL_TYPES(output.dtype(), +// AT_DISPATCH_ALL_TYPES(input1.dtype(), +// AT_DISPATCH_ALL_TYPES(input2.dtype(), +// [](arg0_t a, arg1_t b) -> out_t { return a + b; } +// ) +// ) +// ) +// +// If we support N dtypes, the above code would generate the a+b kernel for +// all the N * N * N different supported types, the compilation time and +// binary size would become horrible. +// +// Dynamic casting might sounds like a bad idea in terms of performance. 
+// Especially if you ever do it in a loop, you are going to do a billion tests. +// But in practice it is not as bad as it might look: +// +// - on CPU, this is a branch that always has the same outcome, therefore +// hopefully the branch predictor could do the job pretty well +// - on GPU, these branches will not diverge, so we could still have the same +// warp executing the same line of code +// - Most kernels, like `add`, are bandwidth bound, adding a few clock cycles to +// check an integer does not hurt the performance much because the ALUs would +// wait for load instructions anyway. +// +// For the discussion and benchmark, refer to: +// - https://github.com/pytorch/pytorch/pull/28343 +// - https://github.com/pytorch/pytorch/pull/28344 +// - https://github.com/pytorch/pytorch/pull/28345 +// + +#ifdef C10_HOST_DEVICE +#define ERROR_UNSUPPORTED_CAST CUDA_KERNEL_ASSERT(false); +#else +#define ERROR_UNSUPPORTED_CAST TORCH_CHECK(false, "Unexpected scalar type"); +#endif + +// Fetch a value with dynamic type src_type from ptr, and cast it to static type +// dest_t. +#define FETCH_AND_CAST_CASE(type, scalartype) \ + case ScalarType::scalartype: \ + return c10::convert(c10::load(ptr)); + +template +C10_HOST_DEVICE inline dest_t fetch_and_cast( + const ScalarType src_type, + const void* ptr) { + switch (src_type) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(FETCH_AND_CAST_CASE) + FETCH_AND_CAST_CASE(uint16_t, UInt16) + FETCH_AND_CAST_CASE(uint32_t, UInt32) + FETCH_AND_CAST_CASE(uint64_t, UInt64) + default: + ERROR_UNSUPPORTED_CAST + } + return dest_t(0); // just to avoid compiler warning +} + +// Cast a value with static type src_t into dynamic dest_type, and store it to +// ptr. 
+#define CAST_AND_STORE_CASE(type, scalartype) \ + case ScalarType::scalartype: \ + *(type*)ptr = c10::convert(value); \ + return; +template +C10_HOST_DEVICE inline void cast_and_store( + const ScalarType dest_type, + void* ptr, + src_t value) { + switch (dest_type) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(CAST_AND_STORE_CASE) + CAST_AND_STORE_CASE(uint16_t, UInt16) + CAST_AND_STORE_CASE(uint32_t, UInt32) + CAST_AND_STORE_CASE(uint64_t, UInt64) + default:; + } + ERROR_UNSUPPORTED_CAST +} + +#define DEFINE_UNCASTABLE(T, scalartype_) \ + template <> \ + C10_HOST_DEVICE inline T fetch_and_cast( \ + const ScalarType src_type, const void* ptr) { \ + CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \ + return c10::load(ptr); \ + } \ + template <> \ + C10_HOST_DEVICE inline void cast_and_store( \ + const ScalarType dest_type, void* ptr, T value) { \ + CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \ + *(T*)ptr = value; \ + } + +AT_FORALL_QINT_TYPES(DEFINE_UNCASTABLE) + +#undef FETCH_AND_CAST_CASE +#undef CAST_AND_STORE_CASE +#undef DEFINE_UNCASTABLE +#undef ERROR_UNSUPPORTED_CAST + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h new file mode 100644 index 0000000000000000000000000000000000000000..d60add2cd2b0620b96ee7427752a76d41b2dd819 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/GradMode.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +namespace c10 { + +struct C10_API GradMode { + static bool is_enabled(); + static void set_enabled(bool enabled); +}; + +// A RAII, thread local (!) guard that enables or disables grad mode upon +// construction, and sets it back to the original value upon destruction. 
+struct C10_API AutoGradMode { + AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) { + GradMode::set_enabled(enabled); + } + ~AutoGradMode() { + GradMode::set_enabled(prev_mode); + } + bool prev_mode; +}; + +// A RAII, thread local (!) guard that stops future operations from building +// gradients. +struct C10_API NoGradGuard : public AutoGradMode { + NoGradGuard() : AutoGradMode(/*enabled=*/false) {} +}; + +// A RAII, thread local (!) guard that enables or disables forward grad mode +// upon construction, and sets it back to the original value upon destruction. +struct C10_API AutoFwGradMode { + AutoFwGradMode(bool enabled) + : prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) { + AutogradState::get_tls_state().set_fw_grad_mode(enabled); + } + ~AutoFwGradMode() { + AutogradState::get_tls_state().set_fw_grad_mode(prev_mode); + } + bool prev_mode; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h new file mode 100644 index 0000000000000000000000000000000000000000..52541886c0aea90474bceae551ebff2681bc9f0a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/InferenceMode.h @@ -0,0 +1,86 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +// A RAII, thread local (!) guard that enables or disables inference mode upon +// construction, and sets it back to the original value upon destruction. +struct C10_API InferenceMode { + // Note [Expected TLS state in InferenceMode]: + // InferenceMode: ADInplaceOrView not in + // raw_local_dispatch_key_set.included(), + // Autograd in raw_local_dispatch_key_set.excluded() + // GradMode is disabled. 
+ // NormalMode: ADInplaceOrView in raw_local_dispatch_key_set.included(), + // Autograd not in raw_local_dispatch_key_set.excluded() + // GradMode is enabled by default unless toggled manually + // through other APIs, e.g. NoGradGuard. + // + // Invariant: + // - ADInplaceOrView is never in the excluded set + // - Autograd is never in the included set + // - Setting InferenceMode will set GradMode accordingly, but not vice versa. + // + // 1. Why do we put ADInplaceOrView in included set outside InferenceMode? + // + // Inplace update to inference tensor outside InferenceMode is not + // allowed. See Note [Inplace update inference tensor] for more details. + // Without going through ADInplaceOrView kernel, we cannot throw error + // for `inference_tensor.add_(1)` case. + // + // 2. Why not put ADInplaceOrView in the excluded set inside InferenceMode? + // + // For example: + // torch::Tensor a = torch::ones({1, 2, 3}).set_requires_grad(true); + // torch::Tensor k = a + 2; + // { + // c10::InferenceMode guard(true); + // k.add_(2); + // } + // `k.add_(2)` still need to go through ADInplaceOrView kernel so that it's + // prepared for future autograd. + // + // 3. Why does setting InferenceMode also set GradMode? + // + // This is required since InferenceMode is a faster and more restrictive + // version of NoGradGuard. All runtime checks using GradMode::is_enabled() + // are applicable to InferenceMode as well, e.g. + // `tensorTypeInCurrentExecutionContext` in interpreter.cpp. + InferenceMode(bool enabled = true) + : prev_mode(AutogradState::get_tls_state()), + prev_keyset(c10::impl::tls_local_dispatch_key_set()) { + // Enabling inference mode means disabling grad modes + // And disabling inference mode means enabling grad modes + AutogradState::set_tls_state(AutogradState( + /* grad_mode */ !enabled, + /* inference_mode */ enabled, + /* fw_grad_mode */ !enabled, + /* multithreading_enabled*/ !enabled)); + DispatchKeySet included = enabled + ? 
prev_keyset.included_.remove(c10::DispatchKey::ADInplaceOrView) + : prev_keyset.included_.add(c10::DispatchKey::ADInplaceOrView); + DispatchKeySet excluded = enabled + ? (prev_keyset.excluded_ | c10::autograd_dispatch_keyset) + : (prev_keyset.excluded_ - c10::autograd_dispatch_keyset); + c10::impl::PODLocalDispatchKeySet cur_keyset{}; + cur_keyset.set_included(included); + cur_keyset.set_excluded(excluded); + c10::impl::_force_tls_local_dispatch_key_set(cur_keyset); + } + + ~InferenceMode() { + AutogradState::set_tls_state(prev_mode); + c10::impl::_force_tls_local_dispatch_key_set(prev_keyset); + } + static bool is_enabled(); + + private: + AutogradState prev_mode; + c10::impl::LocalDispatchKeySet prev_keyset; +}; +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..edc08bb1016c91a6fabdcbb0236fd46e5ff421f1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h @@ -0,0 +1,290 @@ +#pragma once + +#include +#include + +#include +#include +#include + +// Memory format is not the property of a Tensor. It is the way to tell an +// operator how the result should be organized in memory and nothing more. That +// means memory format should never be used as return value for any tensor state +// interrogation functions (internally and externally). +// +// Possible options are: +// Preserve: +// If any of the input tensors is in channels_last format, operator output +// should be in channels_last format +// +// Contiguous: +// Regardless of input tensors format, the output should be contiguous +// Tensor. +// +// ChannelsLast: +// Regardless of input tensors format, the output should be in channels_last +// format. 
+ +namespace c10 { +enum class MemoryFormat : int8_t { + Contiguous, + Preserve, + ChannelsLast, + ChannelsLast3d, + NumOptions +}; + +// If you are seeing this, it means that this call site was not checked if +// the memory format could be preserved, and it was switched to old default +// behaviour of contiguous +#define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format() + +inline MemoryFormat get_contiguous_memory_format() { + return MemoryFormat::Contiguous; +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::MemoryFormat memory_format) { + switch (memory_format) { + case MemoryFormat::Preserve: + return stream << "Preserve"; + case MemoryFormat::Contiguous: + return stream << "Contiguous"; + case MemoryFormat::ChannelsLast: + return stream << "ChannelsLast"; + case MemoryFormat::ChannelsLast3d: + return stream << "ChannelsLast3d"; + default: + TORCH_CHECK(false, "Unknown memory format ", memory_format); + } +} + +// Note: Hardcoded the channel last stride indices here to get better +// performance +template +inline std::vector get_channels_last_strides_2d(ArrayRef sizes) { + std::vector strides(sizes.size()); + switch (sizes.size()) { + case 4: + strides[1] = 1; + strides[3] = sizes[1]; + strides[2] = strides[3] * sizes[3]; + strides[0] = strides[2] * sizes[2]; + return strides; + case 3: + strides[0] = 1; + strides[2] = sizes[0]; + strides[1] = strides[2] * sizes[2]; + return strides; + default: + TORCH_INTERNAL_ASSERT( + false, "ChannelsLast2d doesn't support size ", sizes.size()); + } +} + +inline std::vector get_channels_last_strides_2d(IntArrayRef sizes) { + return get_channels_last_strides_2d(sizes); +} + +template +std::vector get_channels_last_strides_3d(ArrayRef sizes) { + std::vector strides(sizes.size()); + switch (sizes.size()) { + case 5: + strides[1] = 1; + strides[4] = sizes[1]; + strides[3] = strides[4] * sizes[4]; + strides[2] = strides[3] * sizes[3]; + strides[0] = strides[2] * sizes[2]; + return strides; + 
case 4: + strides[0] = 1; + strides[3] = sizes[0]; + strides[2] = strides[3] * sizes[3]; + strides[1] = strides[2] * sizes[2]; + return strides; + default: + TORCH_INTERNAL_ASSERT( + false, "ChannelsLast3d doesn't support size ", sizes.size()); + } +} + +inline std::vector get_channels_last_strides_3d(IntArrayRef sizes) { + return get_channels_last_strides_3d(sizes); +} + +// NOTE: +// Below are Helper functions for is_channels_last_strides_xd. +// 1. Please do not combine these helper functions, each helper function handles +// exactly one case of sizes + memory_format, by doing this, the strides indices +// will be a constant array and we can access it using constant index number, +// the compiler will fully unroll the loop on strides indices to gain a better +// performance. +// 2. No error check in helper function, caller ensures the correctness of the +// input +// 3. All helper functions have similar comments, only 1st helper function is +// commented here. +template +inline bool is_channels_last_strides_2d_s4( + const ArrayRef sizes, + const ArrayRef strides) { + T min = 0; + // special case for trivial C dimension. default to NCHW + if (strides[1] == 0) { + return false; + } + // loop strides indices + for (auto& d : {1, 3, 2, 0}) { + if (sizes[d] == 0) { + return false; + } + if (strides[d] < min) { + return false; + } + // Fallback to NCHW as default layout for ambiguous cases + // This is the flaw of implicit memory_format from strides. + // N111 tensor with identical strides for size 1 dimension; + // Two cases could lead us here: + // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1]) + // b. N11W contiguous Tensor sliced on the W-dimension. + // ([N,1,1,1]@[W,W,W,W]) + if (d == 0 && min == strides[1]) { + return false; + } + // This is necessary to: + // 1. distinguish the memory_format of N1H1; + // [H, 1, 1, 1] channels_last stride + // [H, H, 1, 1] contiguous stride + // 2. 
permutation of 1C1W: + // [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3) + // [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last + min = strides[d]; + if (sizes[d] > 1) { + min *= sizes[d]; + } + } + return true; +} + +template +inline bool is_channels_last_strides_3d_s5( + const ArrayRef sizes, + const ArrayRef strides) { + T min = 0; + if (strides[1] == 0) { + return false; + } + for (auto& d : {1, 4, 3, 2, 0}) { + if (sizes[d] == 0) { + return false; + } + if (strides[d] < min) { + return false; + } + if (d == 0 && min == strides[1]) { + return false; + } + min = strides[d]; + if (sizes[d] > 1) { + min *= sizes[d]; + } + } + return true; +} + +// Note [Ambiguous is_channels_last_strides_xd] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// The flaw of carrying memory_format implicitly through strides is very hard +// to WAR properly. issue #24090 +// Without the history of permutation, we can't infer the memory_format of a +// tensor from the snapshot of its size & stride +// e.g. +// +// 1. We can NOT specify the memory_format of N111 tensor through strides in a +// meaningful way; +// +// 2. Two path that ended up with identical size/stride +// N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W] +// NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C] +// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer +// the memory_format of the original tensor. +// +// Due to the limitations, our temporary WAR `is_channels_last_strides` does the +// best effort to infer whether the original memory_format of a tensor is +// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered +// by their importance): +// 1. Ensure that normal shape manipulation does not accidentally change the +// MemoryFormat of an existing tensor. +// 2. 
Allows user to mark MemoryFormat::ChannelsLast to tensors; +// +// The function does so via checking strides of the tensor, including strides of +// size-1 dimensions. Although conventionally PyTorch implies no restriction on +// trivial stride (stride for size-1 dimension). +// +// Note that this approach is a compromise. We did not solve the problem +// completely. Many cases we will not be able to infer the correct memory +// format. +// The implementation of `is_channels_last_strides` is to serve the objectives: +// MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental +// conversion); Best effort to maintain the ChannelsLast flag. +// +// Due to the fact that this is not a bulletproof solution, through testing +// (aten/src/ATen/test/memory_format_test.cpp) +// a. we ensure that the common tasks are supported; +// a. we identify corner cases where the implementation compromises on. +// +// By the time accumulated permutation is enabled to replace implicit +// memory_format through strides, we should be updating our tests and fix the +// issues in our tests. +// +// We use Channels Last 2d as an example above. +// This is a general problem for all the is_channels_last_strides_xd +// implementation. Please check the helper functions +// (is_channels_last_strides_*d_s*) for more details. 
+ +template +inline bool is_channels_last_strides_2d( + const ArrayRef sizes, + const ArrayRef strides) { + switch (sizes.size()) { + case 4: + return is_channels_last_strides_2d_s4(sizes, strides); + // NOLINTNEXTLINE(bugprone-branch-clone) + case 3: + // TODO dim == 3 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template +inline bool is_channels_last_strides_3d( + const ArrayRef sizes, + const ArrayRef strides) { + switch (sizes.size()) { + case 5: + return is_channels_last_strides_3d_s5(sizes, strides); + // NOLINTNEXTLINE(bugprone-branch-clone) + case 4: + // TODO dim == 4 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +inline bool is_channels_last_strides_2d( + const IntArrayRef sizes, + const IntArrayRef strides) { + return is_channels_last_strides_2d(sizes, strides); +} + +inline bool is_channels_last_strides_3d( + const IntArrayRef sizes, + const IntArrayRef strides) { + return is_channels_last_strides_3d(sizes, strides); +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h new file mode 100644 index 0000000000000000000000000000000000000000..c8743e6d55b558ef3e4c201448a319885bfe52fe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/OptionalRef.h @@ -0,0 +1,31 @@ +#pragma once + +namespace c10 { + +template +class OptionalRef { + public: + OptionalRef() : data_(nullptr) {} + OptionalRef(const T* data) : data_(data) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_); + } + OptionalRef(const T& data) : data_(&data) {} + + bool has_value() const { + return data_ != nullptr; + } + + const T& get() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(data_); + return *data_; + } + + operator bool() const { + return has_value(); + } + + private: + const T* data_; +}; + +} // namespace c10 diff --git 
a/parrot/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h new file mode 100644 index 0000000000000000000000000000000000000000..8861f568bd972746c533de79d8efae2875424653 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/PyHandleCache.h @@ -0,0 +1,76 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10 { + +// A PyHandleCache represents a cached pointer from a C++ object to +// a Python object that represents that object analogously in Python. +// Upon a cache hit, the relevant object can be retrieved after a test +// and then a memory load. Two conditions must hold to be able to use this +// class: +// +// - This must truly be a cache; e.g., the caller must be able to produce +// the object some other way if the cache hit misses. +// +// - This must truly be a handle; e.g., the Python object referenced by +// this class must have static lifetime. This means we don't have to +// maintain strong ownership or deallocate the object when the C++ object +// dies. Static lifetime is a good idea in conjunction with the cache, +// since if you are producing a fresh object on miss you won't be +// maintaining object identity. If you need bidirectional ownership, +// you will want to factor out the pattern in TensorImpl with +// resurrection. +// +// This cache is expected to not improve perf under torchdeploy, as one +// interpreter will fill up the cache, and all the interpreters will be +// unable to use the slot. A potential improvement is to have multiple +// slots (one per interpreter), which will work in deployment scenarios +// where there a stable, fixed number of interpreters. You can also store +// the relevant state in the Python library, rather than in the non-Python +// library (although in many cases, this is not convenient, as there may +// not be a way to conveniently index based on the object.) 
+class PyHandleCache { + public: + PyHandleCache() : pyinterpreter_(nullptr) {} + + // Attempt to fetch the pointer from the cache, if the PyInterpreter + // matches. If it doesn't exist, or the cache entry is not valid, + // use slow_accessor to get the real pointer value and return that + // (possibly writing it to the cache, if the cache entry is + // available.) + template + PyObject* ptr_or(impl::PyInterpreter* self_interpreter, F slow_accessor) + const { + // Note [Memory ordering on Python interpreter tag] + impl::PyInterpreter* interpreter = + pyinterpreter_.load(std::memory_order_acquire); + if (C10_LIKELY(interpreter == self_interpreter)) { + return data_; + } else if (interpreter == nullptr) { + auto* r = slow_accessor(); + impl::PyInterpreter* expected = nullptr; + // attempt to claim this cache entry with the specified interpreter tag + if (pyinterpreter_.compare_exchange_strong( + expected, self_interpreter, std::memory_order_acq_rel)) { + data_ = r; + } + // This shouldn't be possible, as you should be GIL protected + TORCH_INTERNAL_ASSERT(expected != self_interpreter); + return r; + } else { + return slow_accessor(); + } + } + + private: + mutable std::atomic pyinterpreter_; + mutable PyObject* data_{nullptr}; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h new file mode 100644 index 0000000000000000000000000000000000000000..559e68508c76e3c02b97eb7650cc1143ab0de2d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/QScheme.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +/** + * QScheme is an enum that specifies the type of quantization. This has a one + * to one correspondence with Quantizer + * Please refer to ATen/quantized/Quantizer.h to see the Quantizers classes. 
+ * Keep this file in sync with torch/nn/_qscheme.py + */ +enum class QScheme : uint8_t { + PER_TENSOR_AFFINE = 0, + PER_CHANNEL_AFFINE = 1, + PER_TENSOR_SYMMETRIC = 2, + PER_CHANNEL_SYMMETRIC = 3, + PER_CHANNEL_AFFINE_FLOAT_QPARAMS = 4, + COMPILE_TIME_NUM_QSCHEMES = 5, +}; + +constexpr auto kPerTensorAffine = QScheme::PER_TENSOR_AFFINE; +constexpr auto kPerChannelAffine = QScheme::PER_CHANNEL_AFFINE; +constexpr auto kPerTensorSymmetric = QScheme::PER_TENSOR_SYMMETRIC; +constexpr auto kPerChannelSymmetric = QScheme::PER_CHANNEL_SYMMETRIC; +constexpr auto kPerChannelAffineFloatQParams = + QScheme::PER_CHANNEL_AFFINE_FLOAT_QPARAMS; +constexpr int COMPILE_TIME_NUM_QSCHEMES = + static_cast(QScheme::COMPILE_TIME_NUM_QSCHEMES); + +inline std::string toString(QScheme qscheme) { + switch (qscheme) { + case kPerTensorAffine: + return "per_tensor_affine"; + case kPerChannelAffine: + return "per_channel_affine"; + case kPerTensorSymmetric: + return "per_tensor_symmetric"; + case kPerChannelSymmetric: + return "per_channel_symmetric"; + case kPerChannelAffineFloatQParams: + return "per_channel_affine_float_qparams"; + default: + TORCH_CHECK(false, "Unrecognized qscheme: ", static_cast(qscheme)); + } +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h new file mode 100644 index 0000000000000000000000000000000000000000..ce988864720a19758fc4dc5875d7402d36076d79 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/RefcountedDeleter.h @@ -0,0 +1,52 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace c10 { + +// A RefcountedDeleterContext object is used as the `ctx` argument for DataPtr +// to implement a shared DataPtr. Normally, a DataPtr is unique, but we use +// this custom context and the `refcounted_deleter` function below to make the +// DataPtr act like a non-unique DataPtr. 
This context object holds onto an +// inner context and deleter function which handle the actual deletion of the +// data when the refcount reaches 0. +// +// This shared DataPtr feature is only used when storages are shared between +// multiple Python interpreters in MultiPy. Before storages had PyObject +// preservation, interpreters could just share the same StorageImpl instance. +// But now a StorageImpl can only be associated with one interpreter in order +// to properly manage a zombie PyObject. So we share storages across Python +// interpreters by creating a different StorageImpl instance for each one, but +// they all point to the same data. +struct C10_API RefcountedDeleterContext { + RefcountedDeleterContext(void* other_ctx, c10::DeleterFnPtr other_deleter) + : other_ctx(other_ctx, other_deleter), refcount(1) {} + + std::unique_ptr other_ctx; + std::atomic_int refcount; +}; + +// `refcounted_deleter` is used as the `ctx_deleter` for DataPtr to implement +// a shared DataPtr. +// +// Warning: This should only be called on a pointer to +// a RefcountedDeleterContext that was allocated on the heap with `new`, +// because when the refcount reaches 0, the context is deleted with `delete` +C10_API void refcounted_deleter(void* ctx_); + +// If the storage's DataPtr does not use `refcounted_deleter`, replace it with +// a DataPtr that does, so it can be shared between multiple StorageImpls +C10_API void maybeApplyRefcountedDeleter(const c10::Storage& storage); + +// Create a new StorageImpl that points to the same data. 
If the original +// StorageImpl's DataPtr does not use `refcounted_deleter`, it will be replaced +// with one that does +C10_API c10::Storage newStorageImplFromRefcountedDataPtr( + const c10::Storage& storage); + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h new file mode 100644 index 0000000000000000000000000000000000000000..19f8f62c716dd8c26d1b72f24da68c016b31e5eb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SafePyObject.h @@ -0,0 +1,99 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +// This is an safe owning holder for a PyObject, akin to pybind11's +// py::object, with two major differences: +// +// - It is in c10/core; i.e., you can use this type in contexts where +// you do not have a libpython dependency +// +// - It is multi-interpreter safe (ala torchdeploy); when you fetch +// the underlying PyObject* you are required to specify what the current +// interpreter context is and we will check that you match it. +// +// It is INVALID to store a reference to a Tensor object in this way; +// you should just use TensorImpl directly in that case! +struct C10_API SafePyObject { + // Steals a reference to data + SafePyObject(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : data_(data), pyinterpreter_(pyinterpreter) {} + SafePyObject(SafePyObject&& other) noexcept + : data_(std::exchange(other.data_, nullptr)), + pyinterpreter_(other.pyinterpreter_) {} + + // In principle this could be copyable if we add an incref to PyInterpreter + // but for now it's easier to just disallow it. 
+ SafePyObject(SafePyObject const&) = delete; + SafePyObject& operator=(SafePyObject const&) = delete; + + ~SafePyObject() { + if (data_ != nullptr) { + (*pyinterpreter_)->decref(data_, /*has_pyobj_slot*/ false); + } + } + + c10::impl::PyInterpreter& pyinterpreter() const { + return *pyinterpreter_; + } + PyObject* ptr(const c10::impl::PyInterpreter*) const; + + // stop tracking the current object, and return it + PyObject* release() { + auto rv = data_; + data_ = nullptr; + return rv; + } + + private: + PyObject* data_; + c10::impl::PyInterpreter* pyinterpreter_; +}; + +// A newtype wrapper around SafePyObject for type safety when a python object +// represents a specific type. Note that `T` is only used as a tag and isn't +// actually used for any true purpose. +template +struct SafePyObjectT : private SafePyObject { + SafePyObjectT(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : SafePyObject(data, pyinterpreter) {} + SafePyObjectT(SafePyObjectT&& other) noexcept : SafePyObject(other) {} + SafePyObjectT(SafePyObjectT const&) = delete; + SafePyObjectT& operator=(SafePyObjectT const&) = delete; + + using SafePyObject::ptr; + using SafePyObject::pyinterpreter; + using SafePyObject::release; +}; + +// Like SafePyObject, but non-owning. Good for references to global PyObjects +// that will be leaked on interpreter exit. You get a copy constructor/assign +// this way. 
+struct C10_API SafePyHandle { + SafePyHandle() : data_(nullptr), pyinterpreter_(nullptr) {} + SafePyHandle(PyObject* data, c10::impl::PyInterpreter* pyinterpreter) + : data_(data), pyinterpreter_(pyinterpreter) {} + + c10::impl::PyInterpreter& pyinterpreter() const { + return *pyinterpreter_; + } + PyObject* ptr(const c10::impl::PyInterpreter*) const; + void reset() { + data_ = nullptr; + pyinterpreter_ = nullptr; + } + operator bool() { + return data_; + } + + private: + PyObject* data_; + c10::impl::PyInterpreter* pyinterpreter_; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h new file mode 100644 index 0000000000000000000000000000000000000000..f7f059fd513d0cd9df510ab1f6406ff2ff2c1879 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarType.h @@ -0,0 +1,564 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +// dummy struct for uint1 to uint7, actual functionality +// of these dtypes will be implemented in python with Tensor subclass +template +struct dummy_uint1_7_t {}; + +// For the macros below: +// +// For users: If you want to macro some code for all non-QInt scalar types +// (i.e. types with complete information, you probably want one of the +// AT_FORALL_SCALAR_TYPES / AT_FORALL_SCALAR_TYPES_AND macros below, which are +// designed to behave similarly to the Dispatch macros with the same name. +// +// For adding a new dtype: In the beginning, we had an idea that there was a +// list of all scalar types, and you could use AT_FORALL_SCALAR_TYPES to +// iterate over them. 
But over the years we added weird types which couldn't +// be handled uniformly everywhere and so in the end we ended up with some +// mish-mosh of some helper macros, but mostly use sites making a call about +// what dtypes they can or can't support. So if you want to add a new dtype, +// the preferred resolution is to find a dtype similar to what you want, +// grep for it and edit all the sites you find this way. If you need to add +// a completely new kind of dtype, you're going to have to laboriously audit +// all of the sites everywhere to figure out how it should work. Consulting +// some old PRs where we added new dtypes (check history of this file) can +// help give you an idea where to start. + +// NB: Order matters for this macro; it is relied upon in +// _promoteTypesLookup and the serialization format. +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(_) \ + _(uint8_t, Byte) /* 0 */ \ + _(int8_t, Char) /* 1 */ \ + _(int16_t, Short) /* 2 */ \ + _(int, Int) /* 3 */ \ + _(int64_t, Long) /* 4 */ \ + _(at::Half, Half) /* 5 */ \ + _(float, Float) /* 6 */ \ + _(double, Double) /* 7 */ \ + _(c10::complex, ComplexHalf) /* 8 */ \ + _(c10::complex, ComplexFloat) /* 9 */ \ + _(c10::complex, ComplexDouble) /* 10 */ \ + _(bool, Bool) /* 11 */ \ + _(c10::qint8, QInt8) /* 12 */ \ + _(c10::quint8, QUInt8) /* 13 */ \ + _(c10::qint32, QInt32) /* 14 */ \ + _(at::BFloat16, BFloat16) /* 15 */ \ + _(c10::quint4x2, QUInt4x2) /* 16 */ \ + _(c10::quint2x4, QUInt2x4) /* 17 */ \ + _(c10::bits1x8, Bits1x8) /* 18 */ \ + _(c10::bits2x4, Bits2x4) /* 19 */ \ + _(c10::bits4x2, Bits4x2) /* 20 */ \ + _(c10::bits8, Bits8) /* 21 */ \ + _(c10::bits16, Bits16) /* 22 */ \ + _(c10::Float8_e5m2, Float8_e5m2) /* 23 */ \ + _(c10::Float8_e4m3fn, Float8_e4m3fn) /* 24 */ \ + _(c10::Float8_e5m2fnuz, Float8_e5m2fnuz) /* 25 */ \ + _(c10::Float8_e4m3fnuz, Float8_e4m3fnuz) /* 26 */ \ + _(uint16_t, UInt16) /* 27 */ \ + _(uint32_t, UInt32) /* 28 */ \ + _(uint64_t, UInt64) /* 29 */ \ + 
_(c10::dummy_uint1_7_t<1>, UInt1) /* 30 */ \ + _(c10::dummy_uint1_7_t<2>, UInt2) /* 31 */ \ + _(c10::dummy_uint1_7_t<3>, UInt3) /* 32 */ \ + _(c10::dummy_uint1_7_t<4>, UInt4) /* 33 */ \ + _(c10::dummy_uint1_7_t<5>, UInt5) /* 34 */ \ + _(c10::dummy_uint1_7_t<6>, UInt6) /* 35 */ \ + _(c10::dummy_uint1_7_t<7>, UInt7) /* 36 */ + +// If you want to support ComplexHalf for real, add ComplexHalf +// into this macro (and change the name). But beware: convert() +// doesn't work for all the conversions you need... +// +// TODO: To add unsigned int types here, we must define accumulate type. +// But uint8 currently accumulates into int64, so we would have to make +// an inconsistent choice for the larger types. Difficult. +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_EXCEPT_COMPLEX_HALF_F8NZ(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(at::Half, Half) \ + _(float, Float) \ + _(double, Double) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) \ + _(bool, Bool) \ + _(at::BFloat16, BFloat16) \ + _(at::Float8_e5m2, Float8_e5m2) \ + _(at::Float8_e4m3fn, Float8_e4m3fn) + +// This macro controls many of our C++ APIs, including constructors +// for Scalar as well as the data() and item() accessors on Tensor +#define AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(at::Half, Half) \ + _(float, Float) \ + _(double, Double) \ + _(c10::complex, ComplexHalf) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) \ + _(bool, Bool) \ + _(at::BFloat16, BFloat16) \ + _(at::Float8_e5m2, Float8_e5m2) \ + _(at::Float8_e4m3fn, Float8_e4m3fn) \ + _(at::Float8_e5m2fnuz, Float8_e5m2fnuz) \ + _(at::Float8_e4m3fnuz, Float8_e4m3fnuz) + +enum class ScalarType : int8_t { +#define DEFINE_ST_ENUM_VAL_(_1, n) n, + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_ST_ENUM_VAL_) +#undef DEFINE_ENUM_ST_ENUM_VAL_ + 
Undefined, + NumOptions +}; + +constexpr uint16_t NumScalarTypes = + static_cast(ScalarType::NumOptions); + +namespace impl { + +// These are used to map ScalarTypes to C++ types. + +template +struct ScalarTypeToCPPType; + +#define SPECIALIZE_ScalarTypeToCPPType(cpp_type, scalar_type) \ + template <> \ + struct ScalarTypeToCPPType { \ + using type = cpp_type; \ + \ + /* This is a workaround for the CUDA bug which prevents */ \ + /* ::detail::ScalarTypeToCType::type being used directly due to */ \ + /* ambiguous reference which can't to be resolved. For some reason it */ \ + /* can't pick between at::detail and at::cuda::detail. */ \ + /* For repro example, please see: */ \ + /* https://gist.github.com/izdeby/952ae7cf256ddb740a73776d39a7e7ba */ \ + /* TODO: remove once the bug is fixed. */ \ + static type t; \ + }; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_ScalarTypeToCPPType) + +#undef SPECIALIZE_ScalarTypeToCPPType + +template +using ScalarTypeToCPPTypeT = typename ScalarTypeToCPPType::type; + +} // namespace impl + +template +struct CppTypeToScalarType; + +#define SPECIALIZE_CppTypeToScalarType(cpp_type, scalar_type) \ + template <> \ + struct CppTypeToScalarType \ + : std:: \ + integral_constant { \ + }; + +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(SPECIALIZE_CppTypeToScalarType) + +#undef SPECIALIZE_CppTypeToScalarType + +// NB: despite its generic sounding name, the macros that don't take _AND +// are mostly only used by tensorexpr +#define AT_FORALL_INT_TYPES(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) + +#define AT_FORALL_SCALAR_TYPES(_) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) + +// These macros are often controlling how many template instantiations we +// create for kernels. 
It is typically inappropriate to add new dtypes here, +// instead, new types should be added to use sites on a case-by-case basis. +// We generally are not accepting new dtypes due to binary size concerns. + +#define AT_FORALL_SCALAR_TYPES_AND(SCALARTYPE, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE>::t), \ + SCALARTYPE) + +#define AT_FORALL_SCALAR_TYPES_AND2(SCALARTYPE1, SCALARTYPE2, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) + +#define AT_FORALL_SCALAR_TYPES_AND3(SCALARTYPE1, SCALARTYPE2, SCALARTYPE3, _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) + +#define AT_FORALL_SCALAR_TYPES_AND7( \ + SCALARTYPE1, \ + SCALARTYPE2, \ + SCALARTYPE3, \ + SCALARTYPE4, \ + SCALARTYPE5, \ + SCALARTYPE6, \ + SCALARTYPE7, \ + _) \ + _(uint8_t, Byte) \ + _(int8_t, Char) \ + _(int16_t, Short) \ + _(int, Int) \ + _(int64_t, Long) \ + _(float, Float) \ + _(double, Double) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE1>::t), \ + SCALARTYPE1) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE2>::t), \ + SCALARTYPE2) \ + 
_(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE3>::t), \ + SCALARTYPE3) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE4>::t), \ + SCALARTYPE4) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE5>::t), \ + SCALARTYPE5) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE6>::t), \ + SCALARTYPE6) \ + _(decltype(::c10::impl::ScalarTypeToCPPType< \ + ::c10::ScalarType::SCALARTYPE7>::t), \ + SCALARTYPE7) + +#define AT_FORALL_QINT_TYPES(_) \ + _(c10::qint8, QInt8) \ + _(c10::quint8, QUInt8) \ + _(c10::qint32, QInt32) \ + _(c10::quint4x2, QUInt4x2) \ + _(c10::quint2x4, QUInt2x4) + +#define AT_FORALL_COMPLEX_TYPES(_) \ + _(c10::complex, ComplexFloat) \ + _(c10::complex, ComplexDouble) + +#define DEFINE_CONSTANT(_, name) \ + constexpr ScalarType k##name = ScalarType::name; + +// NOLINTNEXTLINE(clang-diagnostic-unused-const-variable) +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CONSTANT) +#undef DEFINE_CONSTANT + +inline const char* toString(ScalarType t) { +#define DEFINE_CASE(_, name) \ + case ScalarType::name: \ + return #name; + + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(DEFINE_CASE) + default: + return "UNKNOWN_SCALAR"; + } +#undef DEFINE_CASE +} + +inline size_t elementSize(ScalarType t) { +#define CASE_ELEMENTSIZE_CASE(ctype, name) \ + case ScalarType::name: \ + return sizeof(ctype); + + switch (t) { + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS(CASE_ELEMENTSIZE_CASE) + default: + TORCH_CHECK(false, "Unknown ScalarType"); + } +#undef CASE_ELEMENTSIZE_CASE +} + +inline bool isIntegralType(ScalarType t, bool includeBool) { + bool isIntegral = + (t == ScalarType::Byte || t == ScalarType::Char || t == ScalarType::Int || + t == ScalarType::Long || t == ScalarType::Short || + t == ScalarType::UInt16 || t == ScalarType::UInt32 || + t == ScalarType::UInt64); + + return isIntegral || (includeBool && t == 
ScalarType::Bool); +} + +C10_DEPRECATED_MESSAGE( + "isIntegralType is deprecated. Please use the overload with 'includeBool' parameter instead.") +inline bool isIntegralType(ScalarType t) { + return isIntegralType(t, /*includeBool=*/false); +} + +inline bool isFloat8Type(ScalarType t) { + return t == ScalarType::Float8_e5m2 || t == ScalarType::Float8_e5m2fnuz || + t == ScalarType::Float8_e4m3fn || t == ScalarType::Float8_e4m3fnuz; +} + +inline bool isReducedFloatingType(ScalarType t) { + return t == ScalarType::Half || t == ScalarType::BFloat16 || isFloat8Type(t); +} + +inline bool isFloatingType(ScalarType t) { + return t == ScalarType::Double || t == ScalarType::Float || + isReducedFloatingType(t); +} + +inline bool isComplexType(ScalarType t) { + return ( + t == ScalarType::ComplexHalf || t == ScalarType::ComplexFloat || + t == ScalarType::ComplexDouble); +} + +inline bool isQIntType(ScalarType t) { + // Don't forget to extend this when adding new QInt types + return t == ScalarType::QInt8 || t == ScalarType::QUInt8 || + t == ScalarType::QInt32 || t == ScalarType::QUInt4x2 || + t == ScalarType::QUInt2x4; +} + +inline bool isBitsType(ScalarType t) { + return t == ScalarType::Bits1x8 || t == ScalarType::Bits2x4 || + t == ScalarType::Bits4x2 || t == ScalarType::Bits8 || + t == ScalarType::Bits16; +} + +inline bool isBarebonesUnsignedType(ScalarType t) { + return t == ScalarType::UInt1 || t == ScalarType::UInt2 || + t == ScalarType::UInt3 || t == ScalarType::UInt4 || + t == ScalarType::UInt5 || t == ScalarType::UInt6 || + t == ScalarType::UInt7 || t == ScalarType::UInt16 || + t == ScalarType::UInt32 || t == ScalarType::UInt64; +} + +inline ScalarType toQIntType(ScalarType t) { + switch (t) { + case ScalarType::Byte: + return ScalarType::QUInt8; + case ScalarType::Char: + return ScalarType::QInt8; + case ScalarType::Int: + return ScalarType::QInt32; + default: + return t; + } +} + +inline ScalarType toUnderlying(ScalarType t) { + switch (t) { + case 
ScalarType::QUInt8: + case ScalarType::QUInt4x2: + [[fallthrough]]; + case ScalarType::QUInt2x4: + return ScalarType::Byte; + case ScalarType::QInt8: + return ScalarType::Char; + case ScalarType::QInt32: + return ScalarType::Int; + default: + return t; + } +} + +inline bool isSignedType(ScalarType t) { +#define CASE_ISSIGNED(name) \ + case ScalarType::name: \ + return std::numeric_limits< \ + ::c10::impl::ScalarTypeToCPPTypeT>::is_signed; + + switch (t) { + case ScalarType::QInt8: + case ScalarType::QUInt8: + case ScalarType::QInt32: + case ScalarType::QUInt4x2: + case ScalarType::QUInt2x4: + TORCH_CHECK(false, "isSignedType not supported for quantized types"); + case ScalarType::Bits1x8: + case ScalarType::Bits2x4: + case ScalarType::Bits4x2: + case ScalarType::Bits8: + case ScalarType::Bits16: + TORCH_CHECK(false, "Bits types are undefined"); + CASE_ISSIGNED(UInt16); + CASE_ISSIGNED(UInt32); + CASE_ISSIGNED(UInt64); + CASE_ISSIGNED(BFloat16); + CASE_ISSIGNED(Float8_e5m2); + CASE_ISSIGNED(Float8_e5m2fnuz); + CASE_ISSIGNED(Float8_e4m3fn); + CASE_ISSIGNED(Float8_e4m3fnuz); + CASE_ISSIGNED(Byte); + CASE_ISSIGNED(Char); + CASE_ISSIGNED(Short); + CASE_ISSIGNED(Int); + CASE_ISSIGNED(Long); + CASE_ISSIGNED(Half); + CASE_ISSIGNED(Float); + CASE_ISSIGNED(Double); + CASE_ISSIGNED(ComplexHalf); + CASE_ISSIGNED(ComplexFloat); + CASE_ISSIGNED(ComplexDouble); + CASE_ISSIGNED(Bool); + case ScalarType::UInt1: + case ScalarType::UInt2: + case ScalarType::UInt3: + case ScalarType::UInt4: + case ScalarType::UInt5: + case ScalarType::UInt6: + case ScalarType::UInt7: + return true; + case ScalarType::Undefined: + case ScalarType::NumOptions: + break; + // Do not add default here, but rather define behavior of every new entry + // here. `-Wswitch-enum` would raise a warning in those cases. 
+ } + TORCH_CHECK(false, "Unknown ScalarType ", t); +#undef CASE_ISSIGNED +} + +inline bool isUnderlying(ScalarType type, ScalarType qtype) { + return type == toUnderlying(qtype); +} + +inline ScalarType toRealValueType(ScalarType t) { + switch (t) { + case ScalarType::ComplexHalf: + return ScalarType::Half; + case ScalarType::ComplexFloat: + return ScalarType::Float; + case ScalarType::ComplexDouble: + return ScalarType::Double; + default: + return t; + } +} + +inline ScalarType toComplexType(ScalarType t) { + switch (t) { + case ScalarType::BFloat16: + // BFloat16 has range equivalent to Float, + // so we map it to ComplexFloat. + return ScalarType::ComplexFloat; + case ScalarType::Half: + return ScalarType::ComplexHalf; + case ScalarType::Float: + return ScalarType::ComplexFloat; + case ScalarType::Double: + return ScalarType::ComplexDouble; + case ScalarType::ComplexHalf: + return ScalarType::ComplexHalf; + case ScalarType::ComplexFloat: + return ScalarType::ComplexFloat; + case ScalarType::ComplexDouble: + return ScalarType::ComplexDouble; + default: + TORCH_CHECK(false, "Unknown Complex ScalarType for ", t); + } +} + +// see tensor_attributes.rst for detailed explanation and examples +// of casting rules. +inline bool canCast(const ScalarType from, const ScalarType to) { + // We disallow complex -> non complex, e.g., float_tensor *= complex is + // disallowed. + if (isComplexType(from) && !isComplexType(to)) { + return false; + } + // We disallow float -> integral, e.g., int_tensor *= float is disallowed. + if (isFloatingType(from) && isIntegralType(to, false)) { + return false; + } + + // Treat bool as a distinct "category," to be consistent with type promotion + // rules (e.g. `bool_tensor + 5 -> int64_tensor`). If `5` was in the same + // category as `bool_tensor`, we would not promote. Differing categories + // implies `bool_tensor += 5` is disallowed. 
+ // + // NB: numpy distinguishes "unsigned" as a category to get the desired + // `bool_tensor + 5 -> int64_tensor` behavior. We don't, because: + // * We don't want the performance hit of checking the runtime sign of + // Scalars. + // * `uint8_tensor + 5 -> int64_tensor` would be undesirable. + if (from != ScalarType::Bool && to == ScalarType::Bool) { + return false; + } + return true; +} + +C10_API ScalarType promoteTypes(ScalarType a, ScalarType b); + +inline std::ostream& operator<<( + std::ostream& stream, + at::ScalarType scalar_type) { + return stream << toString(scalar_type); +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..d2694c96221eb44bc6d1b29f1c430be978cf779a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/ScalarTypeToTypeMeta.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include + +// these just expose TypeMeta/ScalarType bridge functions in c10 +// TODO move to typeid.h (or codemod away) when TypeMeta et al +// are moved from caffe2 to c10 (see note at top of typeid.h) + +namespace c10 { + +/** + * convert ScalarType enum values to TypeMeta handles + */ +inline caffe2::TypeMeta scalarTypeToTypeMeta(ScalarType scalar_type) { + return caffe2::TypeMeta::fromScalarType(scalar_type); +} + +/** + * convert TypeMeta handles to ScalarType enum values + */ +inline ScalarType typeMetaToScalarType(caffe2::TypeMeta dtype) { + return dtype.toScalarType(); +} + +/** + * typeMetaToScalarType(), lifted to optional + */ +inline optional optTypeMetaToScalarType( + optional type_meta) { + if (!type_meta.has_value()) { + return c10::nullopt; + } + return type_meta->toScalarType(); +} + +/** + * convenience: equality across TypeMeta/ScalarType conversion + */ +inline bool operator==(ScalarType t, 
caffe2::TypeMeta m) { + return m.isScalarType(t); +} + +inline bool operator==(caffe2::TypeMeta m, ScalarType t) { + return t == m; +} + +inline bool operator!=(ScalarType t, caffe2::TypeMeta m) { + return !(t == m); +} + +inline bool operator!=(caffe2::TypeMeta m, ScalarType t) { + return !(t == m); +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/Storage.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Storage.h new file mode 100644 index 0000000000000000000000000000000000000000..df86463dc449cac89ab4d185d238938a8195959a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Storage.h @@ -0,0 +1,272 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +struct Storage; + +C10_API bool isSharedStorageAlias( + const Storage& storage0, + const Storage& storage1); + +struct C10_API Storage { + public: + struct use_byte_size_t {}; + struct unsafe_borrow_t { + explicit unsafe_borrow_t() = default; + }; + + Storage() = default; + Storage(c10::intrusive_ptr ptr) + : storage_impl_(std::move(ptr)) {} + + // Allocates memory buffer using given allocator and creates a storage with it + Storage( + use_byte_size_t /*use_byte_size*/, + const SymInt& size_bytes, + Allocator* allocator = nullptr, + bool resizable = false) + : storage_impl_(c10::make_intrusive( + StorageImpl::use_byte_size_t(), + size_bytes, + allocator, + resizable)) {} + + // Creates storage with pre-allocated memory buffer. 
Allocator is given for + // potential future reallocations, however it can be nullptr if the storage + // is non-resizable + Storage( + use_byte_size_t /*use_byte_size*/, + size_t size_bytes, + at::DataPtr data_ptr, + at::Allocator* allocator = nullptr, + bool resizable = false) + : storage_impl_(c10::make_intrusive( + StorageImpl::use_byte_size_t(), + size_bytes, + std::move(data_ptr), + allocator, + resizable)) {} + + protected: + explicit Storage(unsafe_borrow_t, const Storage& rhs) + : storage_impl_(c10::intrusive_ptr::reclaim( + rhs.storage_impl_.get())) {} + + friend MaybeOwnedTraits; + + public: + // Legacy constructor for partially initialized (dtype or memory) storages + // that can be temporarily created with Caffe2 APIs. See the note on top of + // TensorImpl.h for details. + static Storage create_legacy(at::Device device) { + auto allocator = GetAllocator(device.type()); + return Storage(c10::make_intrusive( + StorageImpl::use_byte_size_t(), + 0, + allocator->allocate(0), // materialize a non-default Device. + allocator, + true)); + } + + // Mimic create_legacy, but without requiring a newly-created StorageImpl. 
+ void reset_legacy() { + TORCH_CHECK(resizable() && allocator()); + set_nbytes(0); + set_data_ptr_noswap(allocator()->allocate(0)); + } + + // TODO: remove later + void set_nbytes(size_t size_bytes) const { + storage_impl_->set_nbytes(size_bytes); + } + + void set_nbytes(c10::SymInt size_bytes) const { + storage_impl_->set_nbytes(std::move(size_bytes)); + } + + bool resizable() const { + return storage_impl_->resizable(); + } + + size_t nbytes() const { + return storage_impl_->nbytes(); + } + + SymInt sym_nbytes() const { + return storage_impl_->sym_nbytes(); + } + // get() use here is to get const-correctness + + const void* data() const { + return storage_impl_->data(); + } + + void* mutable_data() const { + return storage_impl_->mutable_data(); + } + + at::DataPtr& mutable_data_ptr() const { + return storage_impl_->mutable_data_ptr(); + } + + const at::DataPtr& data_ptr() const { + return storage_impl_->data_ptr(); + } + + // Returns the previous data_ptr + at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) const { + return storage_impl_->set_data_ptr(std::move(data_ptr)); + } + + void set_data_ptr_noswap(at::DataPtr&& data_ptr) const { + return storage_impl_->set_data_ptr_noswap(std::move(data_ptr)); + } + + DeviceType device_type() const { + return storage_impl_->device_type(); + } + + at::Allocator* allocator() const { + return storage_impl_->allocator(); + } + + at::Device device() const { + return storage_impl_->device(); + } + + StorageImpl* unsafeReleaseStorageImpl() { + return storage_impl_.release(); + } + + StorageImpl* unsafeGetStorageImpl() const noexcept { + return storage_impl_.get(); + } + + c10::weak_intrusive_ptr getWeakStorageImpl() const { + return c10::weak_intrusive_ptr(storage_impl_); + } + + operator bool() const { + return storage_impl_; + } + + size_t use_count() const { + return storage_impl_.use_count(); + } + + inline bool unique() const { + return storage_impl_.unique(); + } + + bool is_alias_of(const Storage& other) const { + return 
( + storage_impl_ == other.storage_impl_ || + isSharedStorageAlias(*this, other)); + } + + void UniqueStorageShareExternalPointer( + void* src, + size_t capacity, + DeleterFnPtr d = nullptr) { + if (!storage_impl_.unique()) { + TORCH_CHECK( + false, + "UniqueStorageShareExternalPointer can only be called when use_count == 1"); + } + storage_impl_->UniqueStorageShareExternalPointer(src, capacity, d); + } + + void UniqueStorageShareExternalPointer( + at::DataPtr&& data_ptr, + size_t capacity) { + if (!storage_impl_.unique()) { + TORCH_CHECK( + false, + "UniqueStorageShareExternalPointer can only be called when use_count == 1"); + } + storage_impl_->UniqueStorageShareExternalPointer( + std::move(data_ptr), capacity); + } + + protected: + c10::intrusive_ptr storage_impl_; +}; + +template <> +struct MaybeOwnedTraits { + using owned_type = c10::Storage; + using borrow_type = c10::Storage; + + static borrow_type createBorrow(const owned_type& from) { + return borrow_type(borrow_type::unsafe_borrow_t{}, from); + } + + static void assignBorrow(borrow_type& lhs, const borrow_type& rhs) { + lhs.unsafeReleaseStorageImpl(); + lhs = borrow_type(borrow_type::unsafe_borrow_t{}, rhs); + } + + static void destroyBorrow(borrow_type& toDestroy) { + toDestroy.unsafeReleaseStorageImpl(); // "leak" it, but it was already +0. + } + + static const owned_type& referenceFromBorrow(const borrow_type& borrow) { + return borrow; + } + + static const owned_type* pointerFromBorrow(const borrow_type& borrow) { + return &borrow; + } + + static bool debugBorrowIsValid(const borrow_type& /*borrow*/) { + return true; + } +}; + +template <> +struct ExclusivelyOwnedTraits { + using repr_type = c10::Storage; + using pointer_type = c10::Storage*; + using const_pointer_type = const c10::Storage*; + + static repr_type nullRepr() { + return c10::Storage(); + } + + template + static repr_type createInPlace(Args&&... 
args) { + return c10::Storage(std::forward(args)...); + } + + static repr_type moveToRepr(c10::Storage&& x) { + return std::move(x); + } + + static c10::Storage take(c10::Storage& x) { + return std::move(x); + } + + static pointer_type getImpl(repr_type& x) { + return &x; + } + + static const_pointer_type getImpl(const repr_type& x) { + return &x; + } +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..abe6218fbc9411e66787debaff7d9bf006851de9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h @@ -0,0 +1,330 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +C10_API void throwNullDataPtrError(); +C10_API void warnDeprecatedDataPtr(); + +// A storage represents the underlying backing data buffer for a +// tensor. This concept was inherited from the original Torch7 +// codebase; we'd kind of like to get rid of the concept +// (see https://github.com/pytorch/pytorch/issues/14797) but +// it's hard work and no one has gotten around to doing it. +// +// NB: storage is supposed to uniquely own a data pointer; e.g., +// two non-null data pointers alias if and only if they are from +// the same storage. Technically you can violate this invariant +// (e.g., you can create a non-owning StorageImpl with at::from_blob) +// but a lot of things won't work correctly, including: +// +// - An ordinary deleter on such a storage is wrong, because normal deleters +// assume unique ownership, but if you have two storages at the same data, +// that implies there is some sort of shared ownership. 
So your deleter would +// have to actually be internally doing some sort of refcount thing +// - Deepcopy in Python side relies on storage equality and not data pointer +// equality; so if there are two separate storages pointing to the same data, +// the data will actually get duplicated in that case (one data ptr before, +// two data ptrs after) +// - Version counts won't work correctly, because we do all VC tracking at the +// level of storages (unless you explicitly disconnect the VC with detach); +// mutation because data pointers are the same are totally untracked +struct C10_API StorageImpl : public c10::intrusive_ptr_target { + public: + struct use_byte_size_t {}; + + StorageImpl( + use_byte_size_t /*use_byte_size*/, + SymInt size_bytes, + at::DataPtr data_ptr, + at::Allocator* allocator, + bool resizable) + : data_ptr_(std::move(data_ptr)), + size_bytes_(std::move(size_bytes)), + size_bytes_is_heap_allocated_(size_bytes_.is_heap_allocated()), + resizable_(resizable), + received_cuda_(false), + allocator_(allocator) { + if (resizable) { + TORCH_INTERNAL_ASSERT( + allocator_, "For resizable storage, allocator must be provided"); + } + refresh_has_data_ptr_check(); + } + + StorageImpl( + use_byte_size_t /*use_byte_size*/, + const SymInt& size_bytes, + at::Allocator* allocator, + bool resizable) + : StorageImpl( + use_byte_size_t(), + size_bytes, + size_bytes.is_heap_allocated() + ? 
allocator->allocate(0) + : allocator->allocate(size_bytes.as_int_unchecked()), + allocator, + resizable) {} + + StorageImpl& operator=(StorageImpl&& other) = delete; + StorageImpl& operator=(const StorageImpl&) = delete; + StorageImpl() = delete; + StorageImpl(StorageImpl&& other) = delete; + StorageImpl(const StorageImpl&) = delete; + ~StorageImpl() override = default; + + void reset() { + data_ptr_.clear(); + size_bytes_ = 0; + size_bytes_is_heap_allocated_ = false; + } + + // Destructor doesn't call release_resources because it's + // unnecessary; don't forget to change that if needed! + void release_resources() override { + data_ptr_.clear(); + } + + size_t nbytes() const { + // OK to do this instead of maybe_as_int as nbytes is guaranteed positive + TORCH_CHECK(!size_bytes_is_heap_allocated_); + return size_bytes_.as_int_unchecked(); + } + + SymInt sym_nbytes() const { + return size_bytes_; + } + + // TODO: remove later + void set_nbytes(size_t size_bytes) { + size_bytes_ = static_cast(size_bytes); + size_bytes_is_heap_allocated_ = false; + } + + void set_nbytes(c10::SymInt size_bytes) { + size_bytes_ = std::move(size_bytes); + } + + bool resizable() const { + return resizable_; + } + + const at::DataPtr& data_ptr() const { + return data_ptr_; + } + + at::DataPtr& mutable_data_ptr() { + if (C10_UNLIKELY(has_data_ptr_check_)) { + if (throw_on_mutable_data_ptr_) { + throwNullDataPtrError(); + } + if (warn_deprecated_on_mutable_data_ptr_) { + warnDeprecatedDataPtr(); + } + maybe_materialize_cow(); + } + return data_ptr_; + } + + // Returns the data_ptr. Bypasses all checks. + at::DataPtr& _mutable_data_ptr_no_checks() { + return data_ptr_; + } + + // Returns the previous data_ptr + at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) { + // We need to materialize the old COW DataPtr because it is + // being returned as mutable. 
+ maybe_materialize_cow(); + return set_data_ptr_no_materialize_cow(std::move(data_ptr)); + } + + void set_data_ptr_noswap(at::DataPtr&& data_ptr) { + data_ptr_ = std::move(data_ptr); + refresh_has_data_ptr_check(); + } + + const void* data() const { + return data_ptr_.get(); + } + + void* mutable_data() { + if (C10_UNLIKELY(has_data_ptr_check_)) { + if (throw_on_mutable_data_ptr_) { + throwNullDataPtrError(); + } + if (warn_deprecated_on_mutable_data_ptr_) { + warnDeprecatedDataPtr(); + } + maybe_materialize_cow(); + } + return data_ptr_.mutable_get(); + } + + at::DeviceType device_type() const { + return data_ptr_.device().type(); + } + + at::Allocator* allocator() { + return allocator_; + } + + const at::Allocator* allocator() const { + return allocator_; + } + + // You generally shouldn't use this method, but it is occasionally + // useful if you want to override how a tensor will be reallocated, + // after it was already allocated (and its initial allocator was + // set) + void set_allocator(at::Allocator* allocator) { + allocator_ = allocator; + } + + Device device() const { + return data_ptr_.device(); + } + + void set_resizable(bool resizable) { + if (resizable) { + // We need an allocator to be resizable + AT_ASSERT(allocator_); + } + resizable_ = resizable; + } + + /** + * Can only be called when use_count is 1 + */ + void UniqueStorageShareExternalPointer( + void* src, + size_t size_bytes, + DeleterFnPtr d = nullptr) { + UniqueStorageShareExternalPointer( + at::DataPtr(src, src, d, data_ptr_.device()), size_bytes); + } + + /** + * Can only be called when use_count is 1 + */ + void UniqueStorageShareExternalPointer( + at::DataPtr&& data_ptr, + size_t size_bytes) { + data_ptr_ = std::move(data_ptr); + size_bytes_ = static_cast(size_bytes); + size_bytes_is_heap_allocated_ = false; + allocator_ = nullptr; + resizable_ = false; + } + + // This method can be used only after storage construction and cannot be used + // to modify storage status + void 
set_received_cuda(bool received_cuda) { + received_cuda_ = received_cuda; + } + + bool received_cuda() { + return received_cuda_; + } + + impl::PyObjectSlot* pyobj_slot() { + return &pyobj_slot_; + } + + const impl::PyObjectSlot* pyobj_slot() const { + return &pyobj_slot_; + } + + void set_throw_on_mutable_data_ptr() { + throw_on_mutable_data_ptr_ = true; + refresh_has_data_ptr_check(); + } + + void set_warn_deprecated_on_mutable_data_ptr() { + warn_deprecated_on_mutable_data_ptr_ = true; + refresh_has_data_ptr_check(); + } + + protected: + // materialize_cow_storage needs to call set_data_ptr_no_materlize_cow + friend void c10::impl::cow::materialize_cow_storage(StorageImpl& storage); + + // Returns the previous data_ptr. If the old data_ptr was COW, + // this avoids materializing it + at::DataPtr set_data_ptr_no_materialize_cow(at::DataPtr&& data_ptr) { + at::DataPtr old_data_ptr(std::move(data_ptr_)); + data_ptr_ = std::move(data_ptr); + refresh_has_data_ptr_check(); + return old_data_ptr; + } + + private: + void refresh_has_data_ptr_check() { + has_data_ptr_check_ = is_cow() || throw_on_mutable_data_ptr_ || + warn_deprecated_on_mutable_data_ptr_; + } + + inline bool is_cow() const { + return c10::impl::cow::is_cow_data_ptr(data_ptr_); + } + + // Triggers a copy if this is a copy-on-write tensor. + void maybe_materialize_cow() { + if (is_cow()) { + impl::cow::materialize_cow_storage(*this); + } + } + + DataPtr data_ptr_; + SymInt size_bytes_; + bool size_bytes_is_heap_allocated_; + bool resizable_; + // Identifies that Storage was received from another process and doesn't have + // local to process cuda memory allocation + bool received_cuda_; + // All special checks in data/data_ptr calls are guarded behind this single + // boolean. This is for performance: .data/.data_ptr calls are commonly in the + // hot-path. + bool has_data_ptr_check_ = false; + // If we should throw when mutable_data_ptr() or mutable_data() is called. 
+ bool throw_on_mutable_data_ptr_ = false; + // If we warn when mutable_data_ptr() or mutable_data() is called. + bool warn_deprecated_on_mutable_data_ptr_ = false; + Allocator* allocator_; + impl::PyObjectSlot pyobj_slot_; +}; + +// Declare StorageImpl create function pointer types. +using StorageImplCreateHelper = intrusive_ptr (*)( + StorageImpl::use_byte_size_t, + SymInt size_bytes, + DataPtr data_ptr, + Allocator* allocator, + bool resizable); + +C10_API void SetStorageImplCreate(DeviceType t, StorageImplCreateHelper fptr); + +C10_API StorageImplCreateHelper GetStorageImplCreate(DeviceType t); + +C10_API c10::intrusive_ptr make_storage_impl( + c10::StorageImpl::use_byte_size_t use_byte_size, + c10::SymInt size_bytes, + c10::DataPtr data_ptr, + c10::Allocator* allocator, + bool resizable, + std::optional device_opt); + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/Stream.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Stream.h new file mode 100644 index 0000000000000000000000000000000000000000..a35e608202c7be4a1bc7b569051745a3f3074124 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/Stream.h @@ -0,0 +1,176 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +/// An index representing a specific stream. A StreamId is not independently +/// meaningful without knowing the Device it is associated with; try to +/// use Stream rather than StreamId directly. +/// +/// StreamIds are opaque; they are assigned by some DeviceType-specific +/// numbering system which is not visible to the user. HOWEVER, we +/// guarantee that StreamId 0 is always a valid stream, and corresponds +/// to some sort of "default" stream. 
+using StreamId = int64_t; + +struct C10_API StreamData3 { + StreamId stream_id; + DeviceIndex device_index; + DeviceType device_type; +}; + +// NB: I decided not to call the above StreamIndex to avoid confusion with +// DeviceIndex. This way, you access device index with index(), and stream id +// with id() + +/** + * A stream is a software mechanism used to synchronize launched kernels + * without requiring explicit synchronizations between kernels. The basic + * model is that every kernel launch is associated with a stream: every + * kernel on the same stream is implicitly synchronized so that if I launch + * kernels A and B on the same stream, A is guaranteed to finish before B + * launches. If I want B to run concurrently with A, I must schedule + * it on a different stream. + * + * The Stream class is a backend agnostic value class representing a stream + * which I may schedule a kernel on. Every stream is associated with a device, + * which is recorded in stream, which is used to avoid confusion about which + * device a stream refers to. + * + * Streams are explicitly thread-safe, in the sense that it is OK to pass + * a Stream from one thread to another, and kernels queued from two different + * threads will still get serialized appropriately. (Of course, the + * time when the kernels get queued is undetermined unless you synchronize + * host side ;) + * + * Stream does NOT have a default constructor. Streams are for expert + * users; if you want to use Streams, we're going to assume you know + * how to deal with C++ template error messages if you try to + * resize() a vector of Streams. + * + * Known instances of streams in backends: + * + * - cudaStream_t (CUDA) + * - hipStream_t (HIP) + * - cl_command_queue (OpenCL) (NB: Caffe2's existing OpenCL integration + * does NOT support command queues.) + * + * Because this class is device agnostic, it cannot provide backend-specific + * functionality (e.g., get the cudaStream_t of a CUDA stream.) 
There are + * wrapper classes which provide this functionality, e.g., CUDAStream. + */ +class C10_API Stream final { + private: + Device device_; + StreamId id_; + + public: + enum Unsafe { UNSAFE }; + enum Default { DEFAULT }; + + /// Unsafely construct a stream from a Device and a StreamId. In + /// general, only specific implementations of streams for a + /// backend should manufacture Stream directly in this way; other users + /// should use the provided APIs to get a stream. In particular, + /// we don't require backends to give any guarantees about non-zero + /// StreamIds; they are welcome to allocate in whatever way they like. + explicit Stream(Unsafe, Device device, StreamId id) + : device_(device), id_(id) {} + + /// Construct the default stream of a Device. The default stream is + /// NOT the same as the current stream; default stream is a fixed stream + /// that never changes, whereas the current stream may be changed by + /// StreamGuard. + explicit Stream(Default, Device device) : device_(device), id_(0) {} + + bool operator==(const Stream& other) const noexcept { + return this->device_ == other.device_ && this->id_ == other.id_; + } + bool operator!=(const Stream& other) const noexcept { + return !(*this == other); + } + + Device device() const noexcept { + return device_; + } + DeviceType device_type() const noexcept { + return device_.type(); + } + DeviceIndex device_index() const noexcept { + return device_.index(); + } + StreamId id() const noexcept { + return id_; + } + + // Enqueues a wait instruction in the stream's work queue. + // This instruction is a no-op unless the event is marked + // for recording. In that case the stream stops processing + // until the event is recorded. + template + void wait(const T& event) const { + event.block(*this); + } + + // Return whether all asynchronous work previously enqueued on this stream + // has completed running on the device. 
+ bool query() const; + + // Wait (by blocking the calling thread) until all asynchronous work enqueued + // on this stream has completed running on the device. + void synchronize() const; + + // The purpose of this function is to more conveniently permit binding + // of Stream to and from Python. Without packing, I have to setup a whole + // class with two fields (device and stream id); with packing I can just + // store a single uint64_t. + // + // The particular way we pack streams into a uint64_t is considered an + // implementation detail and should not be relied upon. + uint64_t hash() const noexcept { + // Concat these together into a 64-bit integer + uint64_t bits = static_cast(device_type()) << 56 | + static_cast(device_index()) << 48 | + // Remove the sign extension part of the 64-bit address because + // the id might be used to hold a pointer. + (static_cast(id()) & ((1ull << 48) - 1)); + return bits; + } + + struct StreamData3 pack3() const { + return {id(), device_index(), device_type()}; + } + + static Stream unpack3( + StreamId stream_id, + DeviceIndex device_index, + DeviceType device_type) { + TORCH_CHECK(isValidDeviceType(device_type)); + return Stream(UNSAFE, Device(device_type, device_index), stream_id); + } + + // I decided NOT to provide setters on this class, because really, + // why would you change the device of a stream? Just construct + // it correctly from the beginning dude. 
+}; + +C10_API std::ostream& operator<<(std::ostream& stream, const Stream& s); + +} // namespace c10 + +namespace std { +template <> +struct hash { + size_t operator()(c10::Stream s) const noexcept { + return std::hash{}(s.hash()); + } +}; +} // namespace std diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..bd79025240fac0e9526240e4e79af3eda4e52294 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/StreamGuard.h @@ -0,0 +1,170 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * Use of StreamGuard is HIGHLY discouraged in operator definitions. In + * a single operator, you probably don't know enough about the global + * state of the world to profitably decide how to set streams. Let + * the caller handle this appropriately, and just use the current stream + * in your operator code. + * + * This StreamGuard does NOT have an uninitialized state; it is guaranteed + * to reset the stream and device on exit. If you are in a situation + * where you *might* want to setup a stream guard, see OptionalStreamGuard. + */ +struct StreamGuard { + /// No default constructor, see Note [Omitted default constructor from RAII] + explicit StreamGuard() = delete; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. 
+ explicit StreamGuard(Stream stream) : guard_(stream) {} + + /// Copy is disallowed + StreamGuard(const StreamGuard&) = delete; + StreamGuard& operator=(const StreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + StreamGuard(StreamGuard&& other) = delete; + StreamGuard& operator=(StreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// + /// NOTE: this implementation may skip some stream/device setting if + /// it can prove that it is unnecessary. + /// + /// WARNING: reset_stream does NOT preserve previously set streams on + /// different devices. If you need to set streams on multiple devices + /// on , use MultiStreamGuard instead. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the stream that was set at the time the guard was constructed. + Stream original_stream() const { + return guard_.original_stream(); + } + + /// Returns the most recent stream that was set using this device guard, + /// either from construction, or via set_stream. + Stream current_stream() const { + return guard_.current_stream(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return guard_.current_device(); + } + + /// Returns the device that was set at the most recent reset_stream(), + /// or otherwise the device at construction time. 
+ Device original_device() const { + return guard_.original_device(); + } + + private: + c10::impl::InlineStreamGuard guard_; +}; + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See OptionalDeviceGuard for more guidance on how to use this class. + */ +struct OptionalStreamGuard { + /// Create an uninitialized guard. + explicit OptionalStreamGuard() = default; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit OptionalStreamGuard(Stream stream) : guard_(stream) {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream, + /// if the passed stream is not nullopt. + explicit OptionalStreamGuard(optional stream_opt) + : guard_(stream_opt) {} + + /// Copy is disallowed + OptionalStreamGuard(const OptionalStreamGuard&) = delete; + OptionalStreamGuard& operator=(const OptionalStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + OptionalStreamGuard(OptionalStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + OptionalStreamGuard& operator=(OptionalStreamGuard&& other) = delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Initializes the guard if it was not previously initialized. + void reset_stream(Stream stream) { + guard_.reset_stream(stream); + } + + /// Returns the stream that was set at the time the guard was most recently + /// initialized, or nullopt if the guard is uninitialized. 
+ optional original_stream() const { + return guard_.original_stream(); + } + + /// Returns the most recent stream that was set using this stream guard, + /// either from construction, or via reset_stream, if the guard is + /// initialized, or nullopt if the guard is uninitialized. + optional current_stream() const { + return guard_.current_stream(); + } + + /// Restore the original device and stream, resetting this guard to + /// uninitialized state. + void reset() { + guard_.reset(); + } + + private: + c10::impl::InlineOptionalStreamGuard guard_{}; +}; + +/** + * A MultiStreamGuard is an RAII class that sets the current streams of a set of + * devices all at once, and resets them to their original values on destruction. + */ +struct MultiStreamGuard { + /// Set the current streams to the passed streams on each of their respective + /// devices. + explicit MultiStreamGuard(ArrayRef streams) : guard_(streams) {} + + /// Copy is disallowed + MultiStreamGuard(const MultiStreamGuard&) = delete; + MultiStreamGuard& operator=(const MultiStreamGuard&) = delete; + + // See Note [Move construction for RAII guards is tricky] + MultiStreamGuard(MultiStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + MultiStreamGuard& operator=(MultiStreamGuard&& other) = delete; + + private: + c10::impl::InlineMultiStreamGuard guard_; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h new file mode 100644 index 0000000000000000000000000000000000000000..6b0c7a2688c9cc616434038e5742351b9884b4cf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h @@ -0,0 +1,113 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { + +// NB: this is actually double precision; we're using the Python naming here +class 
C10_API SymFloat { + public: + /*implicit*/ SymFloat(double d) : data_(d){}; + SymFloat(SymNode ptr) + : data_(std::numeric_limits::quiet_NaN()), ptr_(std::move(ptr)) { + TORCH_CHECK(ptr_->is_float()); + }; + SymFloat() : data_(0.0) {} + + SymNodeImpl* toSymNodeImplUnowned() const { + return ptr_.get(); + } + + SymNodeImpl* release() && { + return std::move(ptr_).release(); + } + + // Only valid if is_symbolic() + SymNode toSymNodeImpl() const; + + // Guaranteed to return a SymNode, wrapping using base if necessary + SymNode wrap_node(const SymNode& base) const; + + double expect_float() const { + TORCH_CHECK(!is_symbolic()); + return data_; + } + + SymFloat operator+(const SymFloat&) const; + SymFloat operator-(const SymFloat&) const; + SymFloat operator*(const SymFloat&) const; + SymFloat operator/(const SymFloat&) const; + + SymBool sym_eq(const SymFloat&) const; + SymBool sym_ne(const SymFloat&) const; + SymBool sym_lt(const SymFloat&) const; + SymBool sym_le(const SymFloat&) const; + SymBool sym_gt(const SymFloat&) const; + SymBool sym_ge(const SymFloat&) const; + + bool operator==(const SymFloat& o) const { + return sym_eq(o).guard_bool(__FILE__, __LINE__); + } + bool operator!=(const SymFloat& o) const { + return sym_ne(o).guard_bool(__FILE__, __LINE__); + } + bool operator<(const SymFloat& o) const { + return sym_lt(o).guard_bool(__FILE__, __LINE__); + } + bool operator<=(const SymFloat& o) const { + return sym_le(o).guard_bool(__FILE__, __LINE__); + } + bool operator>(const SymFloat& o) const { + return sym_gt(o).guard_bool(__FILE__, __LINE__); + } + bool operator>=(const SymFloat& o) const { + return sym_ge(o).guard_bool(__FILE__, __LINE__); + } + + SymFloat min(const SymFloat& sci) const; + SymFloat max(const SymFloat& sci) const; + + // Need guidance on where to put this code + SymFloat sqrt() const; + + // Insert a guard for the float to be its concrete value, and then return + // that value. 
This operation always works, even if the float is symbolic, + // so long as we know what the underlying value is. Don't blindly put this + // everywhere; you can cause overspecialization of PyTorch programs with + // this method. + // + // It should be called as guard_float(__FILE__, __LINE__). The file and line + // number can be used to diagnose overspecialization. + double guard_float(const char* file, int64_t line) const; + + bool has_hint() const; + + // N.B. It's important to keep this definition in the header + // as we expect if checks to be folded for mobile builds + // where `is_symbolic` is always false + C10_ALWAYS_INLINE bool is_symbolic() const { + return ptr_; + } + + double as_float_unchecked() const { + return data_; + } + + private: + // TODO: optimize to union + double data_; + SymNode ptr_; +}; + +C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s); +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h new file mode 100644 index 0000000000000000000000000000000000000000..025c351334a016cb1366e00930217e3edebf7a99 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h @@ -0,0 +1,423 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace c10 { + +class SymFloat; + +// SymInt represents either a regular int64_t, or a symbolic integer +// (represented in a type erased way as SymNode). The intention is for SymInt +// to represent symbolic sizes that arise when doing shape computation in +// operator kernels. This allows for tracing through programs without baking in +// concrete sizes into kernel calls. +// +// SymInt has an API equivalent to int64_t. In particular, it is a value type. 
+// Internally, SymInt is represented in a clever packed way, so that it only +// occupies one word of space; but morally, it is a union between an int64_t +// and an intrusive pointer to SymNodeImpl. +// +// Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where +// is_int() returns true + +class C10_API SymInt { + public: + enum Unchecked { + UNCHECKED, + }; + + /*implicit*/ SymInt(int64_t d) : data_(d) { + if (is_heap_allocated()) { + // Large negative number, heap allocate it + promote_to_negative(); + } + }; + SymInt() : data_(0) {} + SymInt(SymNode n); + + // unchecked c-tor accepting raw `data_` + // One appropriate use for this is when you are constructing a symint + // in a situation where you know it is non-negative (or, if it is negative, + // the negative value is -1; i.e., not user controlled) + SymInt(Unchecked, int64_t d) : data_(d) {} + + // TODO: these implementations are not optimal because they allocate a + // temporary and then use the move constructor/assignment + SymInt(const SymInt& s) : data_(0) { + if (s.is_heap_allocated()) { + *this = SymInt(s.toSymNode()); + } else { + data_ = s.data_; + } + } + SymInt(SymInt&& s) noexcept : data_(s.data_) { + s.data_ = 0; + } + + SymInt& operator=(const SymInt& s) { + if (this != &s) { + if (s.is_heap_allocated()) { + *this = SymInt(s.toSymNode()); + } else { + data_ = s.data_; + } + } + return *this; + } + SymInt& operator=(SymInt&& s) noexcept { + if (this != &s) { + release_(); // release the current SymNode if any + data_ = s.data_; + if (s.is_heap_allocated()) + s.data_ = 0; + }; + return *this; + } + + SymNodeImpl* toSymNodeImplUnowned() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_heap_allocated()); + uint64_t unextended_bits = static_cast(data_) & ~MASK; + uint64_t sign_bit_mask = 1ULL << (62 - 1); + // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c + uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask; + return 
static_cast( + // NOLINTNEXTLINE(performance-no-int-to-ptr) + reinterpret_cast(static_cast(extended_bits))); + } + + void release_() { + if (is_heap_allocated()) { + SymNode::reclaim(toSymNodeImplUnowned()); // steal + } + } + + SymNodeImpl* release() && { +#ifndef C10_MOBILE + TORCH_INTERNAL_ASSERT(is_heap_allocated()); + auto* r = toSymNodeImplUnowned(); + data_ = 0; // transfer ownership + return r; +#else + TORCH_INTERNAL_ASSERT(false); +#endif + } + + // Only valid if is_heap_allocated() + SymNode toSymNode() const; + + // Guaranteed to return a SymNode, wrapping using base if necessary + SymNode wrap_node(const SymNode& base) const; + + ~SymInt() { + release_(); + } + + // Require the int to be non-symbolic, and if it is symbolic raise an + // error. This is safe to use for C++ code that doesn't work for symbolic + // shapes, and you don't have time to fix it immediately, as if we + // try to trigger the path in C++ you'll appropriately get an error + int64_t expect_int() const { + if (auto r = maybe_as_int()) { + return *r; + } + TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE( + false, "when unpacking SymInt, expected int but got ", *this); + } + + // Test if we have a hint for this int (e.g., guard_int would work). + // Most of the time this is true; it is only false when you have + // an unbacked SymInt. + bool has_hint() const; + + // Insert a guard for the int to be its concrete value, and then return + // that value. This operation always works, even if the int is symbolic, + // so long as we know what the underlying value is (e.g., this won't work + // if you call it on the size of nonzero output). Don't blindly put this + // everywhere; you can cause overspecialization of PyTorch programs with + // this method. + // + // It should be called as guard_int(__FILE__, __LINE__). The file and line + // number can be used to diagnose overspecialization. 
+ int64_t guard_int(const char* file, int64_t line) const; + + // Insert a guard that this SymInt must be size-like, returning true if + // the integer actually is >= 0. Unlike manually performing a >= 0 test, + // if the SymInt in question is an unbacked SymInt (or, potentially in the + // future, if it contains unbacked SymInts), we will also treat the + // unbacked SymInt as statically testing >= 2 (which will prevent us from + // choking on, e.g., contiguity checks.) + bool expect_size(const char* file, int64_t line) const; + + // Distinguish actual symbolic values from constants stored on the heap + bool is_symbolic() const { + return is_heap_allocated() && + !toSymNodeImplUnowned()->constant_int().has_value(); + } + + // N.B. It's important to keep this definition in the header + // as we expect if checks to be folded for mobile builds + // where `is_heap_allocated` is always false and optimize dead code paths + C10_ALWAYS_INLINE bool is_heap_allocated() const { +#ifdef C10_MOBILE + return false; +#else + return !check_range(data_); +#endif + } + + SymInt operator+(const SymInt& sci) const; + SymInt operator-(const SymInt& sci) const; + SymInt operator*(const SymInt& sci) const; + SymInt operator/(const SymInt& sci) const; + SymInt operator%(const SymInt& sci) const; + void operator*=(const SymInt& sci); + void operator+=(const SymInt& sci); + void operator/=(const SymInt& sci); + + SymInt clone() const; + + SymBool sym_eq(const SymInt&) const; + SymBool sym_ne(const SymInt&) const; + SymBool sym_lt(const SymInt&) const; + SymBool sym_le(const SymInt&) const; + SymBool sym_gt(const SymInt&) const; + SymBool sym_ge(const SymInt&) const; + + bool operator==(const SymInt& o) const { + return sym_eq(o).guard_bool(__FILE__, __LINE__); + } + bool operator!=(const SymInt& o) const { + return sym_ne(o).guard_bool(__FILE__, __LINE__); + } + bool operator<(const SymInt& o) const { + return sym_lt(o).guard_bool(__FILE__, __LINE__); + } + bool operator<=(const SymInt& o) 
const { + return sym_le(o).guard_bool(__FILE__, __LINE__); + } + bool operator>(const SymInt& o) const { + return sym_gt(o).guard_bool(__FILE__, __LINE__); + } + bool operator>=(const SymInt& o) const { + return sym_ge(o).guard_bool(__FILE__, __LINE__); + } + + SymInt min(const SymInt& sci) const; + SymInt max(const SymInt& sci) const; + + // If both are symbolic, this checks if + // they share the same node. + // If both are not symbolic this just checks normal equality. + bool is_same(const SymInt& other) const; + + operator SymFloat() const; + + // Don't use this. Prefer maybe_as_int instead + int64_t as_int_unchecked() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_heap_allocated()); + return data_; + } + + std::optional maybe_as_int() const { + if (!is_heap_allocated()) { + return c10::make_optional(data_); + } + auto* node = toSymNodeImplUnowned(); + if (auto c = node->constant_int()) { + return c; + } + return node->maybe_as_int(); + } + + // Return whether the integer is directly coercible to a SymInt + // without requiring heap allocation. You don't need to use this + // to check if you can pass an integer to SymInt; this is guaranteed + // to work (it just might heap allocate!) + static bool check_range(int64_t i) { + return i > MAX_UNREPRESENTABLE_INT; + } + + // Return the min representable integer as a SymInt without + // heap allocation. For quantities that count bytes (or larger), + // this is still much larger than you need, so you may consider + // using this as a more efficient version of MIN_INT + static constexpr int64_t min_representable_int() { + return MAX_UNREPRESENTABLE_INT + 1; + } + + private: + void promote_to_negative(); + + // Constraints on the internal representation: + // + // - Should represent positive and small negative ints + // - No conversion necessary for operations on ints + // - Must represent valid 64-bit pointers + // - Is symbolic test should be FAST (two arithmetic instructions is too + // much). 
+ // This code being a hotpath is based on Strobelight profiles of + // is_heap_allocated(). FB only: https://fburl.com/strobelight/5l50ncxd + // (you will need to change the time window). + // + // So, the scheme is to reserve large negative numbers (assuming + // two's complement): + // + // - 0b0.... means we are a positive int + // - 0b11... means we are a small negative int + // - 0b10... means we are are a pointer. This means that + // [-2^63, -2^62-1] are not representable as ints. + // We don't actually need all of this space as on x86_64 + // as the top 16bits aren't used for anything + static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61; + static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61; + // We must manually translate the bit pattern test into a greater + // than test because compiler doesn't figure it out: + // https://godbolt.org/z/356aferaW + static constexpr int64_t MAX_UNREPRESENTABLE_INT = + -1LL & static_cast(~(1ULL << 62)); + int64_t data_; +}; + +/// Sum of a list of SymInt; accumulates into the c10::SymInt expression +template < + typename C, + typename std::enable_if_t< + std::is_same_v, + int> = 0> +inline c10::SymInt multiply_integers(const C& container) { + return std::accumulate( + container.begin(), + container.end(), + c10::SymInt(1), + [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; }); +} + +template < + typename Iter, + typename = std::enable_if_t::value_type, + c10::SymInt>>> +inline c10::SymInt multiply_integers(Iter begin, Iter end) { + return std::accumulate( + begin, + end, + c10::SymInt(1), + [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; }); +} + +#define DECLARE_SYMINT_OP_INTONLY(scalar_t, RetTy) \ + C10_API RetTy operator%(const SymInt& a, scalar_t b); \ + C10_API RetTy operator%(scalar_t a, const SymInt& b); + +#define DECLARE_SYMINT_OP(scalar_t, RetTy) \ + C10_API RetTy operator+(const SymInt& a, scalar_t b); \ + C10_API RetTy operator-(const SymInt& a, scalar_t 
b); \ + C10_API RetTy operator*(const SymInt& a, scalar_t b); \ + C10_API RetTy operator/(const SymInt& a, scalar_t b); \ + C10_API RetTy operator+(scalar_t a, const SymInt& b); \ + C10_API RetTy operator-(scalar_t a, const SymInt& b); \ + C10_API RetTy operator*(scalar_t a, const SymInt& b); \ + C10_API RetTy operator/(scalar_t a, const SymInt& b); \ + C10_API bool operator==(const SymInt& a, scalar_t b); \ + C10_API bool operator!=(const SymInt& a, scalar_t b); \ + C10_API bool operator<(const SymInt& a, scalar_t b); \ + C10_API bool operator<=(const SymInt& a, scalar_t b); \ + C10_API bool operator>(const SymInt& a, scalar_t b); \ + C10_API bool operator>=(const SymInt& a, scalar_t b); \ + C10_API bool operator==(scalar_t a, const SymInt& b); \ + C10_API bool operator!=(scalar_t a, const SymInt& b); \ + C10_API bool operator<(scalar_t a, const SymInt& b); \ + C10_API bool operator<=(scalar_t a, const SymInt& b); \ + C10_API bool operator>(scalar_t a, const SymInt& b); \ + C10_API bool operator>=(scalar_t a, const SymInt& b); + +DECLARE_SYMINT_OP_INTONLY(int64_t, SymInt) +DECLARE_SYMINT_OP_INTONLY(int32_t, SymInt) +DECLARE_SYMINT_OP_INTONLY(uint64_t, SymInt) +DECLARE_SYMINT_OP_INTONLY(uint32_t, SymInt) +DECLARE_SYMINT_OP(int64_t, SymInt) +DECLARE_SYMINT_OP(int32_t, SymInt) // make sure constants work +DECLARE_SYMINT_OP(uint64_t, SymInt) +DECLARE_SYMINT_OP(uint32_t, SymInt) +DECLARE_SYMINT_OP(double, SymFloat) +DECLARE_SYMINT_OP(float, SymFloat) // just for completeness + +// On OSX size_t is different than uint64_t so we have to +// define it separately +#if defined(__APPLE__) +DECLARE_SYMINT_OP_INTONLY(size_t, SymInt) +DECLARE_SYMINT_OP(size_t, SymInt) +#endif + +#undef DECLARE_SYMINT_OP + +C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s); +C10_API SymInt operator-(const SymInt& s); + +inline bool sym_eq(int64_t a, int64_t b) { + return a == b; +} + +inline SymBool sym_eq(const SymInt& a, const SymInt& b) { + return a.sym_eq(b); +} + +inline 
bool sym_ne(int64_t a, int64_t b) { + return a != b; +} + +inline SymBool sym_ne(const SymInt& a, const SymInt& b) { + return a.sym_ne(b); +} + +inline bool sym_lt(int64_t a, int64_t b) { + return a < b; +} + +inline SymBool sym_lt(const SymInt& a, const SymInt& b) { + return a.sym_lt(b); +} + +inline bool sym_le(int64_t a, int64_t b) { + return a <= b; +} + +inline SymBool sym_le(const SymInt& a, const SymInt& b) { + return a.sym_le(b); +} + +inline bool sym_gt(int64_t a, int64_t b) { + return a > b; +} + +inline SymBool sym_gt(const SymInt& a, const SymInt& b) { + return a.sym_gt(b); +} + +inline bool sym_ge(int64_t a, int64_t b) { + return a >= b; +} + +inline SymBool sym_ge(const SymInt& a, const SymInt& b) { + return a.sym_ge(b); +} + +inline bool definitely_true( + const c10::SymBool& b, + const char* file, + int64_t line) { + return b.has_hint() && b.guard_bool(file, line); +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..bb92b09775b7b42c6af20526495560908b9e7cbc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h @@ -0,0 +1,236 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +class SymNodeImpl; +using SymNode = c10::intrusive_ptr; + +// When you add a method, you also need to edit +// torch/csrc/jit/python/init.cpp +// torch/csrc/utils/python_symnode.h +// c10/core/ConstantSymNodeImpl.h +class C10_API SymNodeImpl : public c10::intrusive_ptr_target { + public: + ~SymNodeImpl() override = default; + + template + c10::intrusive_ptr dyn_cast() const { + return c10::intrusive_ptr::reclaim_copy(dynamic_cast(this)); + } + + // these could be pure virtual when we implement LTC versions + virtual bool is_int() { + TORCH_CHECK(false, "NYI"); + } + virtual 
bool is_bool() { + TORCH_CHECK(false, "NYI"); + } + virtual bool is_float() { + TORCH_CHECK(false, "NYI"); + } + virtual bool is_nested_int() const { + return false; + } + virtual SymNode add(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode sub(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode mul(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + // NB: legacy, prefer float_truediv or int_truediv + virtual SymNode truediv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode float_truediv(const SymNode& other) { + return truediv(other); + } + virtual SymNode int_truediv(const SymNode& other) { + return truediv(other); + } + // NB: legacy, prefer float_pow or pow_by_natural + virtual SymNode pow(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode float_pow(const SymNode& other) { + return pow(other); + } + virtual SymNode pow_by_natural(const SymNode& other) { + return pow(other); + } + // NB: legacy, prefer int_floordiv + virtual SymNode floordiv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode int_floordiv(const SymNode& other) { + return floordiv(other); + } + virtual SymNode mod(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode eq(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode ne(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode gt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode lt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode le(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode ge(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode ceil() { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode floor() { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode neg() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_min(const SymNode& other) { + 
TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_max(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_or(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_and(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_not() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) { + TORCH_CHECK(false, "NYI"); + }; + // NB: self is ignored here, only the arguments are used + virtual SymNode is_contiguous( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_contiguous_2d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_contiguous_3d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_strides_2d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_channels_last_strides_3d( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode is_non_overlapping_and_dense( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode clone() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_float() { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode wrap_int(int64_t num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_float(double num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_bool(bool num) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t guard_int(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool guard_bool(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual double guard_float(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool guard_size_oblivious(const char* file, int64_t line) { + // No improvement for unbacked SymBools by 
default, replace this + // with a better implementation! + return guard_bool(file, line); + } + virtual bool expect_true(const char* file, int64_t line) { + // No improvement for unbacked SymBools by default, replace this + // with a better implementation! + return guard_bool(file, line); + }; + virtual bool expect_size(const char* file, int64_t line) { + // No improvement for unbacked SymInts by default, replace this + // with a better implementation! + return ge(wrap_int(0))->guard_bool(file, line); + }; + virtual int64_t int_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool bool_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool has_hint() { + TORCH_CHECK(false, "NYI"); + }; + virtual std::string str() { + TORCH_CHECK(false, "NYI"); + }; + virtual std::optional nested_int() { + return c10::nullopt; + } + virtual std::optional nested_int_coeff() { + return c10::nullopt; + } + virtual std::optional constant_int() { + return c10::nullopt; + } + virtual std::optional constant_bool() { + return c10::nullopt; + } + virtual std::optional maybe_as_int() { + return c10::nullopt; + } + virtual bool is_constant() { + return false; + } + virtual bool is_symbolic() { + return true; + } + std::ostream& operator<<(std::ostream& os) { + os << str(); + return os; + } +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h new file mode 100644 index 0000000000000000000000000000000000000000..6ccfb849373ce0f91152d9c8de1b5c6fb12decf8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/SymbolicShapeMeta.h @@ -0,0 +1,214 @@ +#pragma once +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace c10 { + +class C10_API SymbolicShapeMeta { + public: + // Basic metadata from which other quantities are derived + SymDimVector sizes_ = {0}; + SymDimVector strides_ = {1}; 
+ SymInt storage_offset_ = 0; + + bool strides_valid_ = true; // e.g. for sparse where there are no strides + + SymbolicShapeMeta() = default; + SymbolicShapeMeta(const SymbolicShapeMeta& other); + + void refresh_numel() { + // Non-const, don't need to hold mutables_ lock + available_.fetch_and(~numel_avail); + numel_ = 1; + } + + void refresh_contiguous() { + // Non-const, don't need to hold mutables_ lock + available_.fetch_and(numel_avail); + is_contiguous_ = false; + is_channels_last_contiguous_ = false; + is_channels_last_3d_contiguous_ = false; + is_channels_last_ = false; + is_channels_last_3d_ = false; + is_non_overlapping_and_dense_ = false; + } + + int64_t dim() const { + return static_cast(sizes_.size()); + } + + // Accessors for derived quantities, computed lazily on first access + + bool has_numel() const { + return available_.load() & numel_avail; + } + bool has_is_contiguous() const { + return available_.load() & is_contiguous_avail; + } + bool has_is_channels_last_contiguous() const { + return available_.load() & is_channels_last_contiguous_avail; + } + bool has_is_channels_last_3d_contiguous() const { + return available_.load() & is_channels_last_3d_contiguous_avail; + } + bool has_is_channels_last() const { + return available_.load() & is_channels_last_avail; + } + bool has_is_channels_last_3d() const { + return available_.load() & is_channels_last_3d_avail; + } + bool has_is_non_overlapping_and_dense() const { + return available_.load() & is_non_overlapping_and_dense_avail; + } + + // Accessors to cached derived properties + // DO NOT call with mutables_ lock held + const SymInt& numel() const { + if (C10_UNLIKELY(!has_numel())) { + init_numel(); + } + return numel_; + } + + const SymBool& is_contiguous() const { + if (C10_UNLIKELY(!has_is_contiguous())) { + init_is_contiguous(); + } + return is_contiguous_; + } + + const SymBool& is_channels_last_contiguous() const { + if (C10_UNLIKELY(!has_is_channels_last_contiguous())) { + 
init_is_channels_last_contiguous(); + } + return is_channels_last_contiguous_; + } + + const SymBool& is_channels_last_3d_contiguous() const { + if (C10_UNLIKELY(!has_is_channels_last_3d_contiguous())) { + init_is_channels_last_3d_contiguous(); + } + return is_channels_last_3d_contiguous_; + } + + const SymBool& is_channels_last() const { + if (C10_UNLIKELY(!has_is_channels_last())) { + init_is_channels_last(); + } + return is_channels_last_; + } + + const SymBool& is_channels_last_3d() const { + if (C10_UNLIKELY(!has_is_channels_last_3d())) { + init_is_channels_last_3d(); + } + return is_channels_last_3d_; + } + + const SymBool& is_non_overlapping_and_dense() const { + if (C10_UNLIKELY(!has_is_non_overlapping_and_dense())) { + init_is_non_overlapping_and_dense(); + } + return is_non_overlapping_and_dense_; + } + + // Assumptions so we can short-circuit computation + // NOTE: Don't need to lock mutables_ since these aren't const + void assume_contiguous(SymBool val = true) { + is_contiguous_ = std::move(val); + available_.fetch_or(is_contiguous_avail); + } + void assume_channels_last_contiguous(SymBool val = true) { + is_contiguous_ = std::move(val); + available_.fetch_or(is_channels_last_contiguous_avail); + } + void assume_channels_last_3d_contiguous(SymBool val = true) { + is_channels_last_3d_contiguous_ = std::move(val); + available_.fetch_or(is_channels_last_3d_contiguous_avail); + } + void assume_channels_last(SymBool val = true) { + is_channels_last_ = std::move(val); + available_.fetch_or(is_channels_last_avail); + } + void assume_channels_last_3d(SymBool val = true) { + is_channels_last_3d_ = std::move(val); + available_.fetch_or(is_channels_last_3d_avail); + } + void assume_non_overlapping_and_dense(SymBool val = true) { + is_non_overlapping_and_dense_ = std::move(val); + available_.fetch_or(is_non_overlapping_and_dense_avail); + } + + private: + SymBool compute_contiguous() const; + SymBool compute_channels_last_contiguous_2d() const; + SymBool 
compute_channels_last_contiguous_3d() const; + SymBool compute_strides_like_channels_last_2d() const; + SymBool compute_strides_like_channels_last_3d() const; + SymBool compute_non_overlapping_and_dense() const; + + // These are little wrappers over the real compute_ functions that + // can make use of other contiguity fields to short circuit. + // They need to be implemented separately for SymBool, as SymBool does + // not short circuit. + // TODO: should the SymBool cases avoid the short circuit? Need to reason + // if its correct, and reason if the simpler expressions are better for + // analysis (maybe not!) + + SymBool compute_channels_last_contiguous_3d_dim5() const; + SymBool compute_channels_last_2d_dim5() const; + SymBool compute_channels_last_3d_dim5() const; + SymBool compute_is_non_overlapping_and_dense_dim4() const; + SymBool compute_is_non_overlapping_and_dense_dim5() const; + SymBool compute_is_non_overlapping_and_dense_anydim() const; + + void init_numel() const; + void init_is_contiguous() const; + void init_is_channels_last_contiguous() const; + void init_is_channels_last_3d_contiguous() const; + void init_is_channels_last() const; + void init_is_channels_last_3d() const; + void init_is_non_overlapping_and_dense() const; + + // NOTE: These only set if !has_foo() + void set_numel(SymInt val) const; + void set_is_contiguous(SymBool val) const; + void set_is_channels_last_contiguous(SymBool val) const; + void set_is_channels_last_3d_contiguous(SymBool val) const; + void set_is_channels_last(SymBool val) const; + void set_is_channels_last_3d(SymBool val) const; + void set_is_non_overlapping_and_dense(SymBool val) const; + + // Lazily initialized variables, with the corresponding available_ flag + // indicating whether the value has been initialized + mutable std::atomic available_{0}; + enum avail { + numel_avail = 1 << 0, + is_contiguous_avail = 1 << 1, + is_channels_last_contiguous_avail = 1 << 2, + is_channels_last_3d_contiguous_avail = 1 << 3, + 
is_channels_last_avail = 1 << 4, + is_channels_last_3d_avail = 1 << 5, + is_non_overlapping_and_dense_avail = 1 << 6, + }; + + // Mutex to prevent races when initializing the variable from const accessors + mutable std::mutex mutables_; + mutable SymInt numel_ = 1; + mutable SymBool is_contiguous_{true}; + mutable SymBool is_channels_last_contiguous_{false}; + mutable SymBool is_channels_last_3d_contiguous_{false}; + mutable SymBool is_channels_last_{false}; + mutable SymBool is_channels_last_3d_{false}; + mutable SymBool is_non_overlapping_and_dense_{true}; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..8d7dbf4251a12d3455b4b3415350fd9f5c910419 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorImpl.h @@ -0,0 +1,3255 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// A global boolean variable to control whether we free memory when a Tensor +// is shrunk to a smaller size. As a result, a Tensor is always going to +// keep the memory allocated for its maximum capacity reshaped to so far. +// +// This parameter is respected "upper-case" methods which call Resize() +// (e.g., CopyFrom, ResizeLike); it is NOT respected by Tensor::resize_ +// or ShrinkTo, both of which guarantee to never to free memory. 
+C10_DECLARE_bool(caffe2_keep_on_shrink); + +// Since we can have high variance in blob memory allocated across different +// inputs in the same run, we will shrink the blob only if the memory gain +// is larger than this flag in bytes. This only applies to functions which +// respect caffe2_keep_on_shrink. +C10_DECLARE_int64(caffe2_max_keep_on_shrink_memory); + +namespace at { +class Tensor; +class TensorBase; +} // namespace at + +namespace c10 { + +/** + * A utility function to convert vector to vector. + */ +inline std::vector ToVectorint64_t(const ArrayRef& src) { + return std::vector(src.begin(), src.end()); +} + +/** + * Return product of all dimensions starting from k + */ +inline int64_t size_from_dim_(int k, IntArrayRef dims) { + int64_t r = 1; + for (const auto i : c10::irange(k, dims.size())) { + r *= dims[i]; + } + return r; +} + +// Product of all dims up to k (not including dims[k]) +inline int64_t size_to_dim_(int k, IntArrayRef dims) { + TORCH_CHECK(k >= 0 && static_cast(k) <= dims.size()); + int64_t r = 1; + for (const auto i : c10::irange(k)) { + r *= dims[i]; + } + return r; +} + +// Product of all dims between k and l (not including dims[k] and dims[l]) +inline int64_t size_between_dim_(int k, int l, IntArrayRef dims) { + TORCH_CHECK((unsigned)l < dims.size() && (unsigned)k < dims.size()); + int64_t r = 1; + if (k < l) { + for (int i = k + 1; i < l; ++i) { + r *= dims[i]; + } + } else { + for (int i = l + 1; i < k; ++i) { + r *= dims[i]; + } + } + return r; +} + +// Wrap around axis_index if it is negative, s.t., -1 is the last dim +inline int canonical_axis_index_(int axis_index, int ndims) { + TORCH_CHECK(axis_index >= -ndims); + TORCH_CHECK(axis_index < ndims); + if (axis_index < 0) { + return axis_index + ndims; + } + return axis_index; +} + +using PlacementDtor = void (*)(void*, size_t); + +/* + * A Context that will call extra placement deleter during + * deconstruction. 
+ * + * Accept a already constructed DataPtr and store it as member + * during destruction, we'll call extra deleter on the underlying + * data pointer before the DataPtr is destructed. + * `data_ptr_` owns the memory. + */ +struct C10_API PlacementDeleteContext { + DataPtr data_ptr_; + PlacementDtor placement_dtor_; + size_t size_; + PlacementDeleteContext( + DataPtr&& data_ptr, + PlacementDtor placement_dtor, + size_t size) + : data_ptr_(std::move(data_ptr)), + placement_dtor_(placement_dtor), + size_(size) {} + static DataPtr makeDataPtr( + DataPtr&& data_ptr, + PlacementDtor placement_dtor, + size_t size, + Device device); + ~PlacementDeleteContext() { + placement_dtor_(data_ptr_.get(), size_); + // original memory will be freed when data_ptr_ is destructed + } +}; + +struct C10_API AutogradMetaInterface { + virtual void set_requires_grad( + bool requires_grad, + at::TensorImpl* self_impl) = 0; + virtual bool requires_grad() const = 0; + virtual at::Tensor& mutable_grad() = 0; + virtual const at::Tensor& grad() const = 0; + virtual const at::Tensor& fw_grad(uint64_t level, const at::TensorBase& self) + const = 0; + virtual void set_fw_grad( + const at::TensorBase& new_grad, + const at::TensorBase& self, + uint64_t level, + bool is_inplace_op) = 0; + virtual ~AutogradMetaInterface(); +}; + +namespace impl { + +// Unfortunately, the definition of AutogradMeta lives in a separate +// compilation unit than TensorImpl (libtorch.so versus libc10.so) +// which means that we cannot construct an AutogradMeta from TensorImpl, +// not even from the cpp file. So we have to indirect it through a factory +// function which will be initialized when we load libtorch.so. + +struct C10_API AutogradMetaFactory { + virtual ~AutogradMetaFactory() = default; + virtual std::unique_ptr make() const = 0; + // This method is the dumbest method. But I don't have access + // to Tensor (not TensorImpl) which is undefined in this header. 
+ virtual const at::Tensor& undefined_tensor() const = 0; +}; + +C10_API void SetAutogradMetaFactory(AutogradMetaFactory* factory); +C10_API AutogradMetaFactory* GetAutogradMetaFactory(); + +struct C10_API AutogradMetaFactoryRegisterer { + explicit AutogradMetaFactoryRegisterer(AutogradMetaFactory* factory) { + SetAutogradMetaFactory(factory); + } +}; + +} // namespace impl + +struct C10_API NamedTensorMetaInterface { + virtual ~NamedTensorMetaInterface() = default; + virtual std::unique_ptr clone() const { + TORCH_INTERNAL_ASSERT( + false, "Not implemented: NamedTensorMetaInterface::clone"); + }; + virtual int64_t slow_dim() const { + TORCH_INTERNAL_ASSERT( + false, "Not implemented: NamedTensorMetaInterface::slow_dim"); + }; +}; + +// For ease of copy pasting +#if 0 +is_contiguous +is_channels_last_contiguous +is_channels_last_3d_contiguous +is_channels_last +is_channels_last_3d +is_non_overlapping_and_dense +#endif + +/** + * This structure is intended to hold additional metadata of the specific device + * backend. 
+ **/ +struct C10_API BackendMeta : intrusive_ptr_target { + ~BackendMeta() override = default; + virtual intrusive_ptr clone( + const intrusive_ptr& ptr) const { + return ptr; + } +}; + +struct C10_API ExtraMeta { + std::unique_ptr symbolic_shape_meta_ = nullptr; + std::unique_ptr named_tensor_meta_ = nullptr; + intrusive_ptr backend_meta_ = nullptr; + std::optional custom_data_ptr_error_msg_ = c10::nullopt; + std::optional custom_storage_error_msg_ = c10::nullopt; + + ExtraMeta() = default; + ExtraMeta(const ExtraMeta& other) { + if (other.symbolic_shape_meta_) { + symbolic_shape_meta_ = + std::make_unique(*other.symbolic_shape_meta_); + } + if (other.named_tensor_meta_) { + named_tensor_meta_ = other.named_tensor_meta_->clone(); + } + if (other.backend_meta_) { + backend_meta_ = other.backend_meta_->clone(other.backend_meta_); + } + if (other.custom_data_ptr_error_msg_) { + custom_data_ptr_error_msg_ = other.custom_data_ptr_error_msg_; + } + if (other.custom_storage_error_msg_) { + custom_storage_error_msg_ = other.custom_storage_error_msg_; + } + } + + ExtraMeta( + std::unique_ptr symbolic_shape_meta, + std::unique_ptr named_tensor_meta, + intrusive_ptr backend_meta, + std::optional custom_data_ptr_error_msg = c10::nullopt, + std::optional custom_storage_access_error_msg = c10::nullopt) + : symbolic_shape_meta_(std::move(symbolic_shape_meta)), + named_tensor_meta_(std::move(named_tensor_meta)), + backend_meta_(std::move(backend_meta)), + custom_data_ptr_error_msg_(std::move(custom_data_ptr_error_msg)), + custom_storage_error_msg_(std::move(custom_storage_access_error_msg)) {} + + std::unique_ptr clone() const { + return std::make_unique(*this); + } +}; + +// NOTE [ Version Counter Sharing ] +// +// Every Tensor has a version counter. Version counters are incremented whenever +// the data or size of a tensor changes through in-place Variable operations. 
+// Version counters are used to detect modifications to saved variables which +// would result in incorrect gradient calculations. Version counters may be +// shared between Variables: +// +// 1. A view shares the version counter of the base Variable, +// 2. `x.detach()` shares the version counter of `x`, +// 3. Unpacked saved variables share the version counter of the source. +// +// Version counters are not shared in these scenarios: +// +// 1. When we replace a `Variable`'s underlying `Tensor` by calling +// `set_data(...)`, +// 2. `x.data` does not share the version counter of `x`. (See discussion at +// https://github.com/pytorch/pytorch/issues/5396) +// +// Question: Why do we put the version counter in TensorImpl instead of +// AutogradMeta? +// +// Answer: After the Variable/Tensor merge, a tensor will not have AutogradMeta +// when its `requires_grad_` is false, but when we use this tensor in the +// forward pass of a function that requires saving this tensor for backward, we +// need to keep track of this tensor's version to make sure it's always valid in +// the autograd graph. +// +// To achieve this goal, we put the version counter in TensorImpl instead of +// AutogradMeta, and have it always be available. This allows us to have the +// optimization of not carrying AutogradMeta when a tensor doesn't require +// gradient. +// +// A hypothetical alternative way to achieve this goal is to initialize +// AutogradMeta and create the version counter for the non-requires-grad tensor +// only when it's saved for backward. However, since saving a tensor for +// backward happens in the forward pass, and our invariant is that forward pass +// needs to be thread-safe, lazy-initializing AutogradMeta when saving a tensor +// can introduce race conditions when we are running the forward pass in +// multi-thread scenarios, thus making the forward pass not thread-safe anymore, +// which breaks the invariant. 
+struct C10_API VariableVersion { + private: + struct VersionCounter : intrusive_ptr_target { + VersionCounter(uint32_t version) : version_(version) {} + std::atomic version_; + }; + c10::intrusive_ptr version_counter_; + + public: + // Note [Disabled VariableVersion] + // VariableVersion struct has an intrusive_ptr pointing VersionCounter struct + // with an atomic variable. Thus `VariableVersion(/*version=*/0)` is not as + // cheap as we expected. In some cases constructing a VariableVersion with + // version 0 is not necessary so we add a cheap constructor which + // doesn't allocate the intrusive_ptr. + // Example use cases are: + // - Inference tensors don't track version counter, so they'll just always + // have disabled VariableVersion. + // - In SavedVariable class we override version_counter_ inside its + // constructor + // so that we can use the cheap constructor there. + enum Disabled { DISABLED }; + // It's okay to return true even for inference tensor which + // doesn't have version counter enabled. + // We want to be permissive here since in many cases (e.g. make_variable) + // we can std::move a TensorImpl if there's no other uses which saves us + // an additional TensorImpl allocation. + bool unique() const { + return version_counter_ ? 1 == version_counter_.use_count() : true; + } + // NOTE: As of C++11 and 14, default-constructing a std::atomic variable + // leaves it in a persistently undefined state. See + // https://cplusplus.github.io/LWG/issue2334. + VariableVersion(uint32_t version) + : version_counter_(c10::make_intrusive(version)) {} + VariableVersion(Disabled = DISABLED) {} + + bool enabled() const { + return version_counter_; + } + + // Note [Inplace update inference tensor] + // 1. Inplace update to inference tensor is forbidden in normal mode. + // For example: + // inference_tensor.copy_(normal_tensor_requires_grad) + // This inplace makes inference_tensor have requires_grad=True and + // have a grad_fn. 
This is bad because views of `inference_tensor` + // created in InferenceMode won't be able to know the grad_fn since + // their ViewMeta were not recorded. To match NoGradMode behavior + // that "inplace update to a view created in NoGradMode raise an error", + // we just ban inplace update to inference tensor since we can't tell + // if an inference tensor is a view created in InferenceMode. + // + // Note that views of normal tensor created in InferenceMode has proper + // ViewMeta so that they're aware of the grad_fn correctly. + // + // 2. Inplace update to inference tensor in inference tensor doesn't bump + // version counter. + // * It either doesn't call bump() by skipping ADInplaceOrView kernel, + // - e.g. inference_tensor.add_(1) + // * or bump() is a no-op for inference tensor. + // - e.g. inference_tensor.add_(normal_tensor) + void bump() { + // TODO: Replace the link to the documentation once it's available. + TORCH_CHECK( + version_counter_ || InferenceMode::is_enabled(), + "Inplace update to inference tensor outside InferenceMode is not allowed." + "You can make a clone to get a normal tensor before doing inplace update." + "See https://github.com/pytorch/rfcs/pull/17 for more details."); + if (version_counter_) { + ++version_counter_->version_; + } + } + + void set_version(int64_t i) { + TORCH_CHECK( + version_counter_, + "Tried to call torch.autograd._unsafe_set_version() on a tensor " + "that does not have a version counter. Was it created in inference mode?"); + TORCH_CHECK(i >= 0, "Cannot set a version_counter to a value below 0: ", i); + version_counter_->version_ = i; + } + + // Inference tensor doesn't have version counter so it shouldn't be + // accessed. 
+ uint32_t current_version() const { + TORCH_CHECK( + version_counter_, "Inference tensors do not track version counter."); + return version_counter_->version_; + } +}; + +// Forward declaration of TensorImpl needed for forward declaration of +// C10_TensorImpl_Size_Check_Dummy_Class +struct C10_API TensorImpl; + +/** + * NOTE: Some TensorImpl methods are small and not overridden in the + * PyTorch codebase itself, but may theoretically need to be + * overridden by third-party TensorImpl subclasses. This macro allows + * users that need maximum performance and don't need these extension + * points to disable them with a build-time flag. (In particular, + * XLA's XLATensorImpl currently overrides these methods, so we can't + * enable this flag by default.) + */ +#ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY +#define TENSORIMPL_MAYBE_VIRTUAL +#else +#define TENSORIMPL_MAYBE_VIRTUAL virtual +#endif + +/** + * The low-level representation of a tensor, which contains a pointer + * to a storage (which contains the actual data) and metadata (e.g., sizes and + * strides) describing this particular view of the data as a tensor. + * + * Some basic characteristics about our in-memory representation of + * tensors: + * + * - It contains a pointer to a storage struct (Storage/StorageImpl) + * which contains the pointer to the actual data and records the + * data type and device of the view. This allows multiple tensors + * to alias the same underlying data, which allows to efficiently + * implement differing *views* on a tensor. + * + * - The tensor struct itself records view-specific metadata about + * the tensor, e.g., sizes, strides and offset into storage. + * Each view of a storage can have a different size or offset. + * + * - This class is intrusively refcounted. 
It is refcounted so that
 *    we can support prompt deallocation of large tensors; it is
 *    intrusively refcounted so that we can still perform reference
 *    counted operations on raw pointers, which is often more convenient
 *    when passing tensors across language boundaries.
 *
 *  - For backwards-compatibility reasons, a tensor may be in an
 *    uninitialized state. A tensor may be uninitialized in the following
 *    two ways:
 *
 *      - A tensor may be DTYPE UNINITIALIZED. A tensor of this
 *        form has an uninitialized dtype. This situation most
 *        frequently arises when a user writes Tensor x(CPU). The dtype
 *        is subsequently initialized when mutable_data() is
 *        invoked for the first time.
 *
 *      - A tensor may be STORAGE UNINITIALIZED. A tensor of this form
 *        has non-zero size, but has a storage with a null data pointer.
 *        This situation most frequently arises when a user calls
 *        Resize() or FreeMemory(). This is because Caffe2 historically
 *        does lazy allocation: allocation of data doesn't occur until
 *        mutable_data() is invoked. A tensor with zero size is
 *        always storage initialized, because no allocation is necessary
 *        in this case.
 *
 *    All combinations of these two uninitialized states are possible.
 *    Consider the following transcript in idiomatic Caffe2 API:
 *
 *      Tensor x(CPU);    // x is storage-initialized, dtype-UNINITIALIZED
 *      x.Resize(4);      // x is storage-UNINITIALIZED, dtype-UNINITIALIZED
 *      x.mutable_data(); // x is storage-initialized, dtype-initialized
 *      x.FreeMemory();   // x is storage-UNINITIALIZED, dtype-initialized.
 *
 *    All other fields on tensor are always initialized. In particular,
 *    size is always valid. (Historically, a tensor declared as Tensor x(CPU)
 *    also had uninitialized size, encoded as numel == -1, but we have now
 *    decided to default to zero size, resulting in numel == 0).
 *
 *    Uninitialized storages MUST be uniquely owned, to keep our model
 *    simple.
Thus, we will reject operations which could cause an + * uninitialized storage to become shared (or a shared storage to + * become uninitialized, e.g., from FreeMemory). + * + * In practice, tensors which are storage-UNINITIALIZED and + * dtype-UNINITIALIZED are *extremely* ephemeral: essentially, + * after you do a Resize(), you basically always call mutable_data() + * immediately afterwards. Most functions are not designed to + * work if given a storage-UNINITIALIZED, dtype-UNINITIALIZED tensor. + * + * We intend to eliminate all uninitialized states, so that every + * tensor is fully initialized in all fields. Please do not write new code + * that depends on these uninitialized states. + */ +struct C10_API TensorImpl : public c10::intrusive_ptr_target { + TensorImpl() = delete; + ~TensorImpl() override; + // Note [Enum ImplType] + // This enum is temporary. In the followup refactor we should + // think about how to specialize TensorImpl creation for view + // tensors. Currently we only special case its key_set_ but + // there's also potential to share version_counter_ directly + // without creating first and then override in as_view. + enum ImplType { VIEW }; + + /** + * Construct a 1-dim 0-size tensor backed by the given storage. + */ + TensorImpl( + Storage&& storage, + DispatchKeySet, + const caffe2::TypeMeta data_type); + + // See Note [Enum ImplType] + TensorImpl( + ImplType, + Storage&& storage, + DispatchKeySet, + const caffe2::TypeMeta data_type); + + /** + * Construct a 1-dim 0 size tensor that doesn't have a storage. + */ + TensorImpl( + DispatchKeySet, + const caffe2::TypeMeta data_type, + std::optional device_opt); + + // Legacy constructors so I don't have to go update call sites. 
+ // TODO: When Variable is added, delete these constructors + TensorImpl( + Storage&& storage, + DispatchKey dispatch_key, + const caffe2::TypeMeta data_type) + : TensorImpl( + std::move(storage), + DispatchKeySet(dispatch_key), + data_type) {} + TensorImpl( + DispatchKey dispatch_key, + const caffe2::TypeMeta data_type, + std::optional device_opt) + : TensorImpl(DispatchKeySet(dispatch_key), data_type, device_opt) {} + + private: + // This constructor is private, because the data_type is redundant with + // storage. Still, we pass it in separately because it's easier to write + // the initializer list if we're not worried about storage being moved out + // from under us. + TensorImpl( + Storage&& storage, + DispatchKeySet, + const caffe2::TypeMeta data_type, + std::optional); + + public: + TensorImpl(const TensorImpl&) = delete; + TensorImpl& operator=(const TensorImpl&) = delete; + TensorImpl(TensorImpl&&) = delete; + TensorImpl& operator=(TensorImpl&&) = delete; + + /** + * Release (decref) storage, and any other external allocations. This + * override is for `intrusive_ptr_target` and is used to implement weak + * tensors. + */ + void release_resources() override; + + public: + /** + * Return the DispatchKeySet corresponding to this Tensor, specifying + * all of the DispatchKeys that this Tensor identifies as. This is the + * information used to dispatch operations on this tensor. + */ + DispatchKeySet key_set() const { + return key_set_; + } + + private: + [[noreturn]] void throw_cannot_call_with_symbolic(const char* meth) const; + + // NOTE: The general recipe for customizable methods is that the fastpath + // function (e.g., sizes()) does an unlikely policy test, and if doesn't + // trigger, it does the fast path implementation with no checks and going + // directly to on-TensorImpl fields. In particular, you never need to + // check ExtraMeta if the policy doesn't trigger, as non-trivial ExtraMeta + // implies the policy will always match. 
+ // + // The default implementations of methods are "safe": they do extra tests + // to make sure the internal state is consistent no matter if you are + // doing symbolic shapes or not. If you don't want the tests, directly + // override the custom method (e.g., custom_sizes()) to do your preferred + // behavior. + + public: + /** + * Return a reference to the sizes of this tensor. This reference remains + * valid as long as the tensor is live and not resized. + */ + IntArrayRef sizes() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sizes_custom(); + } + return sizes_and_strides_.sizes_arrayref(); + } + + SymIntArrayRef sym_sizes() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_sizes_custom(); + } + // Sizes guaranteed to be non-negative, so unchecked cast is OK + return c10::fromIntArrayRefKnownNonNegative( + sizes_and_strides_.sizes_arrayref()); + } + + IntArrayRef sizes_default() const { + if (C10_UNLIKELY(has_symbolic_sizes_strides_)) { + throw_cannot_call_with_symbolic("sizes"); + } + return sizes_and_strides_.sizes_arrayref(); + } + + SymIntArrayRef sym_sizes_default() const { + if (has_symbolic_sizes_strides_) { + return symbolic_shape_meta().sizes_; + } else { + // Sizes guaranteed to be non-negative, so unchecked cast is OK + return c10::fromIntArrayRefKnownNonNegative(sizes_default()); + } + } + + // From https://stackoverflow.com/a/3057522/23845 + // TODO: does C++14 have a stdlib template for this? 
+ template + struct identity { + typedef T type; + }; + + template + ArrayRef generic_sizes() { + return _generic_sizes(identity()); + } + + ArrayRef _generic_sizes(identity) { + return sizes(); + } + ArrayRef _generic_sizes(identity) { + return sym_sizes(); + } + + template + ArrayRef generic_strides() { + return _generic_strides(identity()); + } + + ArrayRef _generic_strides(identity) { + return strides(); + } + ArrayRef _generic_strides(identity) { + return sym_strides(); + } + + template + T generic_storage_offset() { + return _generic_storage_offset(identity()); + } + + int64_t _generic_storage_offset(identity) { + return storage_offset(); + } + c10::SymInt _generic_storage_offset(identity) { + return sym_storage_offset(); + } + + /** + * The number of elements in a tensor. + * + * WARNING: Previously, if you were using the Caffe2 API, you could + * test numel() == -1 to see if a tensor was uninitialized. This + * is no longer true; numel always accurately reports the product + * of sizes of a tensor. + */ + int64_t numel() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return numel_custom(); + } + return numel_; + } + + c10::SymInt sym_numel() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_numel_custom(); + } + return c10::SymInt(SymInt::UNCHECKED, numel_); + } + + int64_t numel_default() const { + if (C10_UNLIKELY(has_symbolic_sizes_strides_)) { + throw_cannot_call_with_symbolic("numel"); + } + return numel_; + } + + c10::SymInt sym_numel_default() const { + if (has_symbolic_sizes_strides_) { + return symbolic_shape_meta().numel(); + } else { + return c10::SymInt(SymInt::UNCHECKED, numel_); + } + } + + /** + * Return the number of dimensions of this tensor. Note that 0-dimension + * represents a Tensor that is a Scalar, e.g., one that has a single element. 
+ */ + int64_t dim() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return dim_custom(); + } + return static_cast(sizes_and_strides_.size()); + } + + int64_t dim_default() const { + if (has_symbolic_sizes_strides_) { + return static_cast(symbolic_shape_meta().sizes_.size()); + } else { + return static_cast(sizes_and_strides_.size()); + } + } + + /** + * Return the offset in number of elements into the storage that this + * tensor points to. Most tensors have storage_offset() == 0, but, + * for example, an index into a tensor will have a non-zero storage_offset(). + * + * WARNING: This is NOT computed in bytes. + */ + int64_t storage_offset() const { + // TODO: maybe this should be toggled by strides + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return storage_offset_custom(); + } + return storage_offset_; + } + + c10::SymInt sym_storage_offset() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_storage_offset_custom(); + } + return c10::SymInt(SymInt::UNCHECKED, storage_offset_); + } + + int64_t storage_offset_default() const { + if (C10_UNLIKELY(has_symbolic_sizes_strides_)) { + throw_cannot_call_with_symbolic("storage_offset"); + } + return storage_offset_; + } + + c10::SymInt sym_storage_offset_default() const { + if (has_symbolic_sizes_strides_) { + return symbolic_shape_meta().storage_offset_; + } else { + return c10::SymInt(SymInt::UNCHECKED, storage_offset_); + } + } + + /** + * Return a reference to the strides of this tensor. This reference remains + * valid as long as the tensor is live and not restrided. 
+ */ + IntArrayRef strides() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return strides_custom(); + } + return sizes_and_strides_.strides_arrayref(); + } + + c10::SymIntArrayRef sym_strides() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return sym_strides_custom(); + } + return c10::fromIntArrayRefKnownNonNegative(strides_default()); + } + + IntArrayRef strides_default() const { + if (C10_UNLIKELY(has_symbolic_sizes_strides_)) { + throw_cannot_call_with_symbolic("strides"); + } + return sizes_and_strides_.strides_arrayref(); + } + + c10::SymIntArrayRef sym_strides_default() const { + if (has_symbolic_sizes_strides_) { + return symbolic_shape_meta().strides_; + } else { + return c10::fromIntArrayRefKnownNonNegative(strides_default()); + } + } + + /** + * Whether or not a tensor is laid out in contiguous memory. + * + * Tensors with non-trivial strides are not contiguous. See + * compute_contiguous() for the exact definition of whether or not + * a tensor is contiguous or not. 
+ */ + bool is_contiguous( + at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return is_contiguous_custom(memory_format); + } + return is_contiguous_default(memory_format); + } + + // These are factored into separate functions in case subclasses + // want to use them + bool is_contiguous_default(at::MemoryFormat memory_format) const { + if (has_symbolic_sizes_strides_) { + if (memory_format == at::MemoryFormat::ChannelsLast) { + return symbolic_shape_meta().is_channels_last_contiguous().guard_bool( + __FILE__, __LINE__); + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return symbolic_shape_meta() + .is_channels_last_3d_contiguous() + .guard_bool(__FILE__, __LINE__); + } + return symbolic_shape_meta().is_contiguous().guard_bool( + __FILE__, __LINE__); + } + + if (memory_format == at::MemoryFormat::ChannelsLast) { + return is_channels_last_contiguous_; + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return is_channels_last_3d_contiguous_; + } + return is_contiguous_; + } + + bool is_strides_like_default(at::MemoryFormat memory_format) const { + if (has_symbolic_sizes_strides_) { + if (memory_format == at::MemoryFormat::ChannelsLast) { + return symbolic_shape_meta().is_channels_last().guard_bool( + __FILE__, __LINE__); + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return symbolic_shape_meta().is_channels_last_3d().guard_bool( + __FILE__, __LINE__); + } else { + return false; + } + } + + if (memory_format == at::MemoryFormat::ChannelsLast) { + return is_channels_last_; + } else if (memory_format == at::MemoryFormat::ChannelsLast3d) { + return is_channels_last_3d_; + } else { + return false; + } + } + + bool is_non_overlapping_and_dense_default() const { + if (has_symbolic_sizes_strides_) { + return symbolic_shape_meta().is_non_overlapping_and_dense().guard_bool( + __FILE__, __LINE__); + } else { + return 
is_non_overlapping_and_dense_; + } + } + + // NB: these dim accessor functions don't have _default(), as you can use + // sizes_default/strides_default + /** + * Return the size of a tensor at some dimension, wrapping the dimension if + * necessary. + * + * NOTE: if you know wrapping is unnecessary, do sizes()[d] instead; it will + * be faster + */ + int64_t size(int64_t d) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return size_custom(d); + } + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + return sizes_and_strides_.size_at_unchecked(d); + } + + c10::SymInt sym_size(int64_t d) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomSizes))) { + return sym_size_custom(d); + } + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + const auto sizes = this->sym_sizes(); + return sizes[d]; + } + + /** + * Return the stride of a tensor at some dimension, wrapping the dimension + * if necessary. + * + * NOTE: if you know wrapping is unnecessary, do sizes()[d] instead; it will + * be faster + */ + int64_t stride(int64_t d) const { + d = maybe_wrap_dim(d, dim(), false); + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + // TODO: provide stride_custom, symmetrically with size_custom. + // There is presently no user for it; only NestedTensor is using + // size_custom overrideability + return strides_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds) + } + // Intentionally don't call default, which also handles symbolic + return sizes_and_strides_.stride_at_unchecked(d); + } + + enum class SizesStridesPolicy : uint8_t { + // Default behavior, e.g., dense tensor. + // + // Can override: nothing + Default = 0, + // Customizable strides behavior, e.g., sparse tensor, + // mkldnn tensor. 
+ // + // Can override: strides(), is_contiguous() + CustomStrides = 1, + // Customizable sizes behavior, e.g., nested tensor + // + // Can override: strides(), is_contiguous(), sizes(), dim(), numel() + CustomSizes = 2 + }; + + protected: + inline bool matches_policy(SizesStridesPolicy policy) const { + return sizes_strides_policy_ >= static_cast(policy); + } + + inline bool matches_custom(SizesStridesPolicy policy) const { + return custom_sizes_strides_ >= static_cast(policy); + } + + inline bool matches_python_custom(SizesStridesPolicy policy) const { + auto r = python_custom_sizes_strides_ >= static_cast(policy); + if (r) { + TORCH_INTERNAL_ASSERT(is_python_dispatch()) + } + return r; + } + + /** + * Customization points for the functions above. sizes_strides_policy_ + * must be set to enable these. + * + * NB: dim is overrideable separately from sizes because it is possible + * for a tensor to have rank, but not well defined sizes. + */ + // sizes_strides_policy_ >= CustomStrides + virtual bool is_contiguous_custom(at::MemoryFormat memory_format) const; + virtual bool is_strides_like_custom(at::MemoryFormat memory_format) const; + virtual bool is_non_overlapping_and_dense_custom() const; + // sizes_strides_policy_ >= CustomSizes + // Currently this method only exists to be overwritten by subclasses such as + // NestedTensorImpl. + virtual int64_t size_custom(int64_t d) const { + // TODO: We could add support to Python dispatch here. + // TODO: We could call into aten::size.int instead of + // sizes_custom()[d] and enable use of the dispatcher. + d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + return sizes_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds) + } + + virtual c10::SymInt sym_size_custom(int64_t d) const { + // TODO: We could add support to Python dispatch here. + // TODO: We could call into aten::size.int instead of + // sym_sizes_custom()[d] and enable use of the dispatcher. 
+ d = maybe_wrap_dim(d, dim(), /*wrap_scalar=*/false); + return sym_sizes_custom()[d]; // unchecked (maybe_wrap_dim enforces bounds) + } + + virtual IntArrayRef sizes_custom() const; + virtual IntArrayRef strides_custom() const; + virtual int64_t numel_custom() const; + virtual int64_t storage_offset_custom() const; + virtual int64_t dim_custom() const; + virtual Device device_custom() const; + virtual Layout layout_custom() const; + + virtual c10::SymIntArrayRef sym_sizes_custom() const; + virtual c10::SymIntArrayRef sym_strides_custom() const; + virtual c10::SymInt sym_numel_custom() const; + virtual c10::SymInt sym_storage_offset_custom() const; + + public: + /** + * True if this tensor has storage. See storage() for details. + */ +#ifdef DEBUG + // Allow subclasses to check that their storage_ is never getting set in debug + // builds. + virtual +#else + TENSORIMPL_MAYBE_VIRTUAL +#endif + bool + has_storage() const + // NOTE: we devirtualize this because it arguably shouldn't be an + // error just to ask subclasses if they have storage. + // This used to throw for most subclasses, but OpaqueTensorImpl + // wanted it to successfully return false, so we went ahead and made + // it a non-error. +#ifdef C10_DISABLE_TENSORIMPL_EXTENSIBILITY + { + return storage_; + } +#else + ; +#endif + + /** + * Return the underlying storage of a Tensor. Multiple tensors may share + * a single storage. A Storage is an impoverished, Tensor-like class + * which supports far less operations than Tensor. + * + * Avoid using this method if possible; try to use only Tensor APIs to perform + * operations. + */ + TENSORIMPL_MAYBE_VIRTUAL const Storage& storage() const { + if (C10_UNLIKELY(storage_access_should_throw_)) { + throw_storage_access_error(); + } + return storage_; + } + + /** + * Return the underlying storage, unsafely assuming this is a basic strided + * tensor. In cases where `storage` access would throw, this returns a + * default-constructed Storage. 
+ */ + inline const Storage& unsafe_storage() const { + return storage_; + } + + bool unique_version() const { + return version_counter_.unique(); + } + + protected: + virtual Layout layout_impl() const { + TORCH_CHECK( + false, "layout_impl is only implemented for TensorImpl subclasses."); + } + + public: + // Whether a tensor is sparse COO or not. + bool is_sparse() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + return key_set_.has_all(c10::sparse_ks); + } + + // Whether a tensor is sparse CSR or not. + bool is_sparse_csr() const { + return layout() == kSparseCsr; + } + + // Whether a tensor is sparse CSR/CSC/BSR/BSC or not. + bool is_sparse_compressed() const { + return key_set_.has_all(c10::sparse_csr_ks); + } + + bool is_quantized() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + constexpr auto quantized_ks = DispatchKeySet(DispatchKey::Quantized); + return key_set_.has_all(quantized_ks); + } + + bool is_meta() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_meta(); + } + return device_opt_.has_value() && device_opt_->type() == kMeta; + } + + bool is_cpu() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_cpu(); + } + // Note: we cannot rely on dispatch keys to determine the device type + // of a tensor, because "wrapper" tensors (like FunctionalTensorWrapper) + // don't include backend dispatch keys. + return device_opt_.has_value() && device_opt_->type() == kCPU; + } + + bool is_cuda() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. 
+ if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_cuda(); + } + return device_opt_.has_value() && device_opt_->type() == kCUDA; + } + + bool is_xpu() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_xpu(); + } + return device_opt_.has_value() && device_opt_->type() == kXPU; + } + + bool is_ipu() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_ipu(); + } + return device_opt_.has_value() && device_opt_->type() == kIPU; + } + + bool is_xla() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_xla(); + } + return device_opt_.has_value() && device_opt_->type() == kXLA; + } + + bool is_mtia() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_mtia(); + } + return device_opt_.has_value() && device_opt_->type() == kMTIA; + } + + bool is_hpu() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_hpu(); + } + return device_opt_.has_value() && device_opt_->type() == kHPU; + } + + bool is_lazy() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_lazy(); + } + return device_opt_.has_value() && device_opt_->type() == kLazy; + } + + bool is_hip() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_hip(); + } + return device_opt_.has_value() && device_opt_->type() == kHIP; + } + + bool is_ve() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_ve(); + } + return device_opt_.has_value() && device_opt_->type() == kVE; + } + + bool is_privateuseone() const { + // NB: This method is not virtual and avoid dispatches for performance + // reasons. 
+ if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_privateuseone(); + } + return device_opt_.has_value() && device_opt_->type() == kPrivateUse1; + } + + bool is_mkldnn() const { + return key_set_.has_all(c10::mkldnn_ks); + } + + bool is_vulkan() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_vulkan(); + } + return device_opt_.has_value() && device_opt_->type() == kVulkan; + } + + bool is_metal() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_metal(); + } + return device_opt_.has_value() && device_opt_->type() == kMetal; + } + + bool is_mps() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_mps(); + } + return device_opt_.has_value() && device_opt_->type() == kMPS; + } + + bool is_maia() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().is_maia(); + } + return device_opt_.has_value() && device_opt_->type() == kMAIA; + } + + bool is_nested() const { + return key_set_.has(DispatchKey::NestedTensor); + } + + // TODO: remove this once we don't automatically enabled Autograd dispatch + // keys + // in TensorImpl constructor. + // DON'T USE THIS API!! It's only created for testing purpose in + // file aten/src/ATen/core/boxing/impl/test_helpers.h + void remove_autograd_key() { + key_set_ = key_set_ - autograd_dispatch_keyset; + } + + // Inference tensor doesn't have autograd or ADInplaceOrView key. 
+ // Invariant: + // Inference tensor has version_counter_.enabled() == false + bool is_inference() { + bool no_ADInplaceOrView = !key_set_.has_any(c10::inplace_or_view_ks); + bool no_Autograd = !key_set_.has_any(c10::autograd_dispatch_keyset); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY( + no_ADInplaceOrView == no_Autograd, + "ADInplaceOrView and Autograd keys must be on/off at the same time."); + return no_ADInplaceOrView && no_Autograd; + } + + DeviceIndex get_device() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom().index(); + } + return device_default().index(); + } + + Device device() const { + if (C10_UNLIKELY(device_policy_)) { + return device_custom(); + } + return device_default(); + } + + protected: + c10::Device device_default() const { + TORCH_CHECK(device_opt_.has_value(), "tensor does not have a device"); + // See NOTE [std::optional operator usage in CUDA] + return *device_opt_; + } + + public: + Layout layout() const { + if (C10_UNLIKELY(layout_policy_)) { + return layout_custom(); + } + + // NB: This method is not virtual and avoid dispatches for perf. + // strided is also the most common layout type, so we check for + // strided case first. + // This keyset must also be kept in sync with the logic in + // is_sparse() / is_sparse_csr() / is_mkldnn() + constexpr auto sparse_and_sparsecsr_and_mkldnn_ks = + c10::sparse_ks | c10::sparse_csr_ks | c10::mkldnn_ks; + if (!key_set_.has_any(sparse_and_sparsecsr_and_mkldnn_ks)) { + return kStrided; + } else if (is_sparse()) { + return kSparse; + } else if (is_sparse_compressed()) { + // Typically, the tensor dispatch keys define the tensor layout + // uniquely. This allows using non-virtual layout method for + // better performance. However, when tensor's layout depends, + // say, on tensor attributes, one must use this execution path + // where the corresponding tensor impl class overwrites virtual + // layout_impl() method. 
+ // + // TODO: implement layout() as native function/method so that + // __torch_dispatch__ users will be able to redefine the + // layout() method. + return layout_impl(); + } else { + TORCH_INTERNAL_ASSERT( + is_mkldnn(), "There is an error in the layout calculation logic."); + return kMkldnn; + } + } + + /** + * True if a tensor was auto-wrapped from a C++ or Python number. + * For example, when you write 't + 2', 2 is auto-wrapped into a Tensor + * with `is_wrapped_number_` set to true. + * + * Wrapped numbers do not participate in the result type computation for + * mixed-type operations if there are any Tensors that are not wrapped + * numbers. This is useful, because we want 't + 2' to work with + * any type of tensor, not just LongTensor (which is what integers + * in Python represent). + * + * Otherwise, they behave like their non-wrapped equivalents. + * See [Result type computation] in TensorIterator.h. + * + * Why did we opt for wrapped numbers, as opposed to just having + * an extra function add(Tensor, Scalar)? This helps greatly reduce + * the amount of code we have to write for add, when actually + * a Tensor-Scalar addition is really just a Tensor-Tensor + * addition when the RHS is 0-dim (except for promotion behavior.) + */ + bool is_wrapped_number() const { + return is_wrapped_number_; + } + + /** + * Set whether or not a tensor was auto-wrapped from a C++ or Python + * number. You probably don't want to call this, unless you are + * writing binding code. + */ + void set_wrapped_number(bool value) { + TORCH_INTERNAL_ASSERT(dim() == 0); + is_wrapped_number_ = value; + } + + /** + * Returns true if Tensor supports as_strided and as_strided_backward. + * This is used in autograd to perform inplace update on view Tensors. + * See Note [View + Inplace update for base tensor] and + * [View + Inplace update for view tensor] for details. 
+ * Note this method only returns true for XLA backend, where it + * simulates strided Tensor to support most view ops, but it cannot + * fully support general `as_strided` case. + * It can be expanded as needed in the future, e.g sparse Tensor. + */ + inline bool support_as_strided() const { + if (is_nested()) { + return false; + } + if (key_set_.has(DispatchKey::Functionalize)) { + return false; + } + return device().supports_as_strided(); + } + + // ~~~~~ Autograd API ~~~~~ + // Some methods below are defined in TensorImpl.cpp because Tensor is an + // incomplete type. + + /** + * Set whether or not a tensor requires gradient. + */ + void set_requires_grad(bool requires_grad); + + /** + * True if a tensor requires gradient. Tensors which require gradient + * have history tracked for any operations performed on them, so that + * we can automatically differentiate back to them. A tensor that + * requires gradient and has no history is a "leaf" tensor, which we + * accumulate gradients into. + */ + bool requires_grad() const; + + /** + * Return a mutable reference to the gradient. This is conventionally + * used as `t.grad() = x` to set a gradient to a completely new tensor. + */ + at::Tensor& mutable_grad(); + + /** + * Return the accumulated gradient of a tensor. This gradient is written + * into when performing backwards, when this tensor is a leaf tensor. + */ + const at::Tensor& grad() const; + + /** + * Whether or not the imaginary part of the tensor should be negated + */ + inline bool is_conj() const { + constexpr auto conjugate_ks = DispatchKeySet(DispatchKey::Conjugate); + return key_set_.has_all(conjugate_ks); + } + + /** + * Set whether or not to take the conjugate of the tensor (flip the imaginary + * bit). 
   */
  void _set_conj(bool value) {
    if (value) {
      key_set_ = key_set_.add(DispatchKey::Conjugate);
      // Conjugation is only meaningful for complex dtypes.
      TORCH_INTERNAL_ASSERT(isComplexType(typeMetaToScalarType(dtype())));
    } else {
      key_set_ = key_set_.remove(DispatchKey::Conjugate);
    }
  }

  /**
   * XXX: do not use, private api!
   * Update the backend component related keys to the backend component
   * corresponding to this device.
   */
  void _change_backend_component_keys(c10::Device device);

  /**
   * Whether or not the tensor is a zerotensor
   */
  inline bool _is_zerotensor() const {
    constexpr auto zerotensor_ks = DispatchKeySet(DispatchKey::ZeroTensor);
    return key_set_.has_all(zerotensor_ks);
  }

  /**
   * Set whether or not the tensor is a zero tensor.
   * Only clearing the flag is supported here; zero tensors must be created
   * through `torch._efficientzerotensor` instead.
   */
  void _set_zero(bool value) {
    if (value) {
      TORCH_INTERNAL_ASSERT(
          false,
          "Please call `torch._efficientzerotensor` if you want to create a tensor with no storage.");
    } else {
      key_set_ = key_set_.remove(DispatchKey::ZeroTensor);
    }
  }

  /**
   * Whether or not the tensor should be negated
   */
  inline bool is_neg() const {
    constexpr auto negative_ks = DispatchKeySet(DispatchKey::Negative);
    return key_set_.has_all(negative_ks);
  }

  /**
   * Set whether or not the tensor should be negated (flip the negative
   * bit). Unlike _set_conj, this applies to any dtype.
   */
  void _set_neg(bool value) {
    if (value) {
      key_set_ = key_set_.add(DispatchKey::Negative);
    } else {
      key_set_ = key_set_.remove(DispatchKey::Negative);
    }
  }

  /**
   * Return the accumulated gradient of a tensor. This gradient is computed
   * using forward mode AD.
   *
   * This is an internal API that should never be used by end users.
   *
   * The API is as follows:
   * - "level" allows to specify the level of forward AD nesting for which the
   *   gradient should be returned. Note that since levels are not fully
   *   supported yet, this argument should be 0. See documentation for
   *   torch::autograd::enter_dual_level for more details about forward AD
   *   nesting.
   * - "self" should represent the Tensor whose forward grad is accessed. It
   *   is required when dealing with view.
   */
  const at::Tensor& _fw_grad(uint64_t level, const at::TensorBase& self) const;

  /**
   * Sets the forward gradient for this Tensor.
   * The given Tensor might not be used directly and its content will be copied.
   *
   * This is an internal API that should never be used by end users.
   *
   * The API is as follows:
   * - "new_grad" is a Tensor containing the new value of the gradient that
   *   should be set
   * - "self" should represent the Tensor whose forward grad is accessed. It
   *   is required when dealing with view.
   * - "level" allows to specify the level of forward AD nesting for which the
   *   gradient should be set. Note that since levels are not fully supported
   *   yet, this argument should be 0. See documentation for
   *   torch::autograd::enter_dual_level for more details about forward AD
   *   nesting.
   * - "is_inplace_op" is a boolean flag that tells if this gradient was
   *   generated by an inplace operation or an out of place one. This allows
   *   better error checking.
   */
  void _set_fw_grad(
      const at::TensorBase& new_grad,
      const at::TensorBase& self,
      uint64_t level,
      bool is_inplace_op);

  /**
   * Return a typed data pointer to the actual data which this tensor refers to.
   * This checks that the requested type (from the template parameter) matches
   * the internal type of the tensor.
   *
   * It is invalid to call data() on a dtype-uninitialized tensor, even if
   * the size is 0.
   *
   * WARNING: If a tensor is not contiguous, you MUST use strides when
   * performing index calculations to determine the location of elements in
   * the tensor. We recommend using 'TensorAccessor' to handle this computation
   * for you; this class is available from 'Tensor'.
+ */ + template + const T* data_dtype_initialized() const { + return data_dtype_initialized_impl( + [this] { return static_cast(storage_.data()); }); + } + + /** + * Return a mutable typed data pointer to the actual data which this + * tensor refers to. This checks that the requested type (from the + * template parameter) matches the internal type of the tensor. + * + * It is invalid to call data() on a dtype-uninitialized tensor, even if + * the size is 0. + * + * WARNING: If a tensor is not contiguous, you MUST use strides when + * performing index calculations to determine the location of elements in + * the tensor. We recommend using 'TensorAccessor' to handle this computation + * for you; this class is available from 'Tensor'. + */ + template + T* mutable_data_dtype_initialized() { + return data_dtype_initialized_impl( + [this] { return static_cast(storage_.mutable_data()); }); + } + + private: + // Shared implementation of data_dtype_initialized() and + // mutable_data_dtype_initialized(). + template + T* data_dtype_initialized_impl(const Func& get_data) const { + TORCH_CHECK( + data_type_.Match>(), + "Tensor type mismatch, caller expects elements to be ", + caffe2::TypeMeta::TypeName>(), + ", while tensor contains ", + data_type_.name(), + ". "); + return data_ptr_impl_impl(get_data); + } + + public: + /** + * More efficient helper for Tensor::data_ptr(). Like data(), but + * does not do a type check. Unlike the untemplated data(), does + * check has_storage() and storage_initialized(). + */ + template + inline const T* data_ptr_impl() const { + return data_ptr_impl_impl( + [this] { return static_cast(storage_.data()); }); + } + + /** + * More efficient helper for Tensor::data_ptr(). Like data(), but + * does not do a type check. Unlike the untemplated data(), does + * check has_storage() and storage_initialized(). 
+ */ + template + inline T* mutable_data_ptr_impl() { + return data_ptr_impl_impl( + [this] { return static_cast(storage_.mutable_data()); }); + } + + private: + // Shared implementation of mutable_data_ptr_impl() and the future + // mutable_data_ptr_impl(). + template + __ubsan_ignore_pointer_overflow__ T* data_ptr_impl_impl( + const Func& get_data) const { + if (C10_UNLIKELY(!has_storage())) { + throw_data_ptr_access_error(); + } + TORCH_CHECK( + storage_initialized(), + "The tensor has a non-zero number of elements, but its data is not allocated yet.\n" + "If you're using torch.compile/export/fx, it is likely that we are erroneously " + "tracing into a custom kernel. To fix this, please wrap the custom kernel into " + "an opaque custom op. Please see the following for details: " + "https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html\n" + "If you're using Caffe2, Caffe2 uses a lazy allocation, so you will need to call " + "mutable_data() or raw_mutable_data() to actually allocate memory."); + // Caller does the type check. + // Note: storage_offset_ can be non-null even for zero-elements tensors + // (for example if created as `torch.empty(5)[10:]`) that triggers + // applying non-zero offset to null pointer in UBSan + return get_data() + storage_offset_; + } + + public: + /** + * Return a const void* data pointer to the actual data which this + * tensor refers to. + * + * It is invalid to call data() on a dtype-uninitialized tensor, even if the + * size is 0. + * + * WARNING: The data pointed to by this tensor may not contiguous; do NOT + * assume that itemsize() * numel() is sufficient to compute the bytes that + * can be validly read from this tensor. + */ + inline const void* data() const { + return data_impl( + [this] { return static_cast(storage_.data()); }); + } + + /** + * Return a void* data pointer to the actual data which this tensor refers to. 
+ * + * It is invalid to call mutable_data() on a dtype-uninitialized + * tensor, even if the size is 0. + * + * WARNING: The data pointed to by this tensor may not contiguous; do NOT + * assume that itemsize() * numel() is sufficient to compute the bytes that + * can be validly read from this tensor. + */ + inline void* mutable_data() { + return data_impl( + [this] { return static_cast(storage_.mutable_data()); }); + } + + private: + /// Shared implementation of data() and mutable_data(). + /// + /// get_data must return a byte-addressed pointer, e.g. char*, + /// std::byte const*, etc. + template + Void* data_impl(const Func& get_data) const { + if (C10_UNLIKELY(!has_storage())) { + throw_data_ptr_access_error(); + } + TORCH_CHECK( + dtype_initialized(), + "Cannot access data pointer of Tensor that doesn't have initialized dtype " + "(e.g., caffe2::Tensor x(CPU), prior to calling mutable_data() on x)"); + auto* data = get_data(); + static_assert( + sizeof(*data) == 1, "get_data must return a byte-addressed pointer."); + // Computing an offset into an empty tensor would be UB, since an empty + // tensor's storage will be nullptr, and adding a nonzero offset to nullptr + // is UB. So we skip the offset computation in this case. + if (is_empty()) { + return nullptr; + } + return data + data_type_.itemsize() * storage_offset_; + } + + public: + /** + * Returns the TypeMeta of a tensor, which describes what data type + * it is (e.g., int, float, ...) + */ + const caffe2::TypeMeta dtype() const { + return data_type_; + } + + /** + * Return the size of a single element of this tensor in bytes. 
+ */ + size_t itemsize() const { + TORCH_CHECK( + dtype_initialized(), + "Cannot report itemsize of Tensor that doesn't have initialized dtype " + "(e.g., caffe2::Tensor x(CPU), prior to calling mutable_data() on x)"); + return data_type_.itemsize(); + } + + void set_backend_meta(intrusive_ptr backend_meta) { + get_extra_meta().backend_meta_ = std::move(backend_meta); + } + + c10::BackendMeta* get_backend_meta() { + if (!extra_meta_) { + return nullptr; + } + return extra_meta_->backend_meta_.get(); + } + + intrusive_ptr get_backend_meta_intrusive_ptr() const { + if (!extra_meta_) { + return nullptr; + } + return extra_meta_->backend_meta_; + } + + void release_storage_and_set_meta_custom_data_ptr_error_msg_( + std::optional s) { + storage_ = {}; + set_storage_access_should_throw(); + get_extra_meta().custom_data_ptr_error_msg_ = s; + get_extra_meta().custom_storage_error_msg_ = std::move(s); + } + + protected: + /** + * Returns the human-readable name of the actual type of this object (e.g., + * TensorImpl, BatchedTensorImpl, etc.). Used for error messages. + */ + virtual const char* tensorimpl_type_name() const { + return "TensorImpl"; + } + + private: + [[noreturn]] void throw_storage_access_error() const; + [[noreturn]] void throw_data_ptr_access_error() const; + + ExtraMeta& get_extra_meta() { + if (!extra_meta_) { + extra_meta_ = std::make_unique(); + } + return *extra_meta_; + } + + c10::SymbolicShapeMeta& symbolic_shape_meta() { + TORCH_INTERNAL_ASSERT(extra_meta_ && extra_meta_->symbolic_shape_meta_); + return *extra_meta_->symbolic_shape_meta_; + } + + const c10::SymbolicShapeMeta& symbolic_shape_meta() const { + TORCH_INTERNAL_ASSERT(extra_meta_ && extra_meta_->symbolic_shape_meta_); + return *extra_meta_->symbolic_shape_meta_; + } + + public: + /** + * True if a tensor has no elements (e.g., numel() == 0). 
+ */ + inline bool is_empty() const { + return numel() == 0; + } + + // if we are going to use sym sizes, we should be setting sym strides at the + // same time, otherwise it's very easy to misuse this API + void set_sizes_and_strides( + c10::SymIntArrayRef sizes, + c10::SymIntArrayRef strides, + std::optional storage_offset = c10::nullopt); + // This is renamed to avoid breaking overload BC + void generic_set_sizes_contiguous(c10::SymIntArrayRef sizes); + void generic_set_sizes_contiguous(c10::IntArrayRef sizes) { + set_sizes_contiguous(sizes); + } + + /** + * Change the size at some dimension. This DOES NOT update strides; + * thus, most changes to size will not preserve contiguity. You probably + * also want to call set_stride() when you call this. + * + * TODO: This should be jettisoned in favor of `set_sizes_and_strides`, + * which is harder to misuse. + */ + virtual void set_size(int64_t dim, int64_t new_size) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_size ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !matches_policy(SizesStridesPolicy::CustomSizes), + "set_size() called on tensor with dynamic shapes or customized size behavior") + sizes_and_strides_.size_at(dim) = new_size; + refresh_numel(); + refresh_contiguous(); + } + + /** + * Change the stride at some dimension. + * + * TODO: This should be jettisoned in favor of `set_sizes_and_strides`, + * which is harder to misuse. + */ + virtual void set_stride(int64_t dim, int64_t new_stride) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_stride ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "set_stride() called on tensor with symbolic shape") + sizes_and_strides_.stride_at_unchecked(dim) = new_stride; + refresh_contiguous(); + } + + /** + * Set the offset into the storage of this tensor. 
   *
   * WARNING: This does NOT check if the tensor is in bounds for the new
   * location at the storage; the caller is responsible for checking this
   * (and resizing if necessary.)
   */
  virtual void set_storage_offset(int64_t storage_offset) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_storage_offset ",
        err_msg_tensor_metadata_change_not_allowed);
    // TODO: this should probably consult policy
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "set_storage_offset() called on tensor with symbolic shape")
    storage_offset_ = storage_offset;
  }

  /**
   * Like set_sizes_and_strides but assumes contiguous strides.
   *
   * WARNING: This function does not check if the requested
   * sizes/strides are in bounds for the storage that is allocated;
   * this is the responsibility of the caller
   */
  void set_sizes_contiguous(IntArrayRef new_size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_sizes_contiguous ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !matches_policy(SizesStridesPolicy::CustomStrides),
        "tried to directly modify sizes for customized tensor");
    sizes_and_strides_.set_sizes(new_size);

    refresh_numel();
    // Recomputes contiguous strides for the new sizes.
    empty_tensor_restride(
        MemoryFormat::Contiguous); // calls refresh_contiguous()
  }

  /**
   * Set the sizes and strides of a tensor.
+ * + * WARNING: This function does not check if the requested + * sizes/strides are in bounds for the storage that is allocated; + * this is the responsibility of the caller + */ + void set_sizes_and_strides( + IntArrayRef new_size, + IntArrayRef new_stride, + std::optional storage_offset = c10::nullopt) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_sizes_and_strides ", + err_msg_tensor_metadata_change_not_allowed); + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "set_sizes_and_strides() called on tensor with symbolic shape") + TORCH_CHECK( + new_size.size() == new_stride.size(), + "dimensionality of sizes (", + new_size.size(), + ") must match dimensionality of strides (", + new_stride.size(), + ")"); + const auto new_dim = new_size.size(); + bool overflowed = false; + sizes_and_strides_.set_sizes(new_size); + + if (new_dim > 0) { + for (size_t dim = new_dim - 1;; dim--) { + if (new_stride[dim] >= 0) { + sizes_and_strides_.stride_at_unchecked(dim) = new_stride[dim]; + } else { + // XXX: This behavior is surprising and may need to be removed to + // support negative strides. Some pytorch functions rely on it: + // for example, torch.cat (run TestTorch.test_cat_empty). + if (dim == new_dim - 1) { + sizes_and_strides_.stride_at_unchecked(dim) = 1; + } else { + // Keep stride monotonically increasing to match NumPy. + overflowed |= c10::mul_overflows( + sizes_and_strides_.stride_at_unchecked(dim + 1), + std::max( + sizes_and_strides_.size_at_unchecked(dim + 1), 1), + std::addressof(sizes_and_strides_.stride_at_unchecked(dim))); + } + } + if (dim == 0) + break; + } + TORCH_CHECK(!overflowed, "Stride calculation overflowed"); + } + + refresh_numel(); + refresh_contiguous(); + + if (storage_offset.has_value()) { + storage_offset_ = *storage_offset; + } + } + + /** + * Set whether a tensor allows changes to its metadata (e.g. sizes / strides / + * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor + * ] for details. 
+ */ + void set_allow_tensor_metadata_change(bool value) { + // TODO: at some point, we should kill this field completely. + allow_tensor_metadata_change_ = true; + } + + /** + * True if a tensor allows changes to its metadata (e.g. sizes / strides / + * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor + * ] for details. + */ + bool allow_tensor_metadata_change() const { + return allow_tensor_metadata_change_; + } + + /** + * Set the pointer to autograd metadata. + */ + void set_autograd_meta( + std::unique_ptr autograd_meta); + + /** + * Return the pointer to autograd metadata. May return nullptr if the + * tensor does not track gradients. + */ + c10::AutogradMetaInterface* autograd_meta() const; + + /** + * Set the pointer to named tensor metadata. + */ + void set_named_tensor_meta( + std::unique_ptr named_tensor_meta) { + TORCH_WARN_ONCE( + "Named tensors and all their associated APIs are an experimental feature ", + "and subject to change. Please do not use them for anything important ", + "until they are released as stable."); +#ifdef DEBUG + if (named_tensor_meta) { + TORCH_INTERNAL_ASSERT(named_tensor_meta->slow_dim() == dim()); + } +#endif + if (named_tensor_meta) { + get_extra_meta().named_tensor_meta_ = std::move(named_tensor_meta); + key_set_ = key_set_.add(DispatchKey::Named); + } else { + if (extra_meta_) { + extra_meta_->named_tensor_meta_ = nullptr; + } + key_set_ = key_set_.remove(DispatchKey::Named); + } + } + + void set_python_dispatch(bool k) { + if (k) { + key_set_ = key_set_.add(c10::python_ks); + } else { + key_set_ = key_set_ - c10::python_ks; + } + } + + bool is_python_dispatch() const { + return key_set_.has_all(c10::python_ks); + } + + /** + * Return the pointer to named tensor metadata. 
   */
  const c10::NamedTensorMetaInterface* named_tensor_meta() const {
    if (!extra_meta_) {
      return nullptr;
    }
    return extra_meta_->named_tensor_meta_.get();
  }

  c10::NamedTensorMetaInterface* named_tensor_meta() {
    if (!extra_meta_) {
      return nullptr;
    }
    return extra_meta_->named_tensor_meta_.get();
  }

  bool has_named_tensor_meta() const {
    if (!extra_meta_) {
      return false;
    }
    return extra_meta_->named_tensor_meta_ != nullptr;
  }

  // NOTE [ TensorImpl Shallow-Copying ]
  //
  // TensorImpl shallow-copying is used when we want to have two Variables share
  // the same tensor metadata (e.g. sizes / strides / storage pointer /
  // storage_offset), but each with a different autograd history. Example call
  // sites:
  //
  // 1. `var_detached = var.detach()` uses `shallow_copy_and_detach()` to create
  // `var_detached` that shares the same tensor metadata with `var`, but with a
  // completely new autograd history.
  // 2. `var.set_data(tensor)` uses `shallow_copy_from()` to copy tensor
  // metadata from `tensor` into `var`, while keeping `var`'s original
  // AutogradMeta.
  //
  // Functions that shallow-copy a TensorImpl (such as
  // `shallow_copy_and_detach()` / `shallow_copy_from()` /
  // `copy_tensor_metadata()`) copy the tensor metadata fields (e.g. sizes /
  // strides / storage pointer / storage_offset) by value. However, the
  // following fields are not copied:
  //
  // 1. the AutogradMeta pointer, because it is unique for each Variable.
  // 2. the version counter, because the destination TensorImpl's version
  // counter is either set to the passed-in `version_counter` (in
  // `shallow_copy_and_detach()` and `copy_tensor_metadata()`), or it is kept
  // intact (in `shallow_copy_from()`). See NOTE [ Version Counter Sharing ] for
  // details.
+ // + // In `shallow_copy_and_detach()` and `copy_tensor_metadata()`, the passed-in + // `allow_tensor_metadata_change` determines whether the TensorImpl + // shallow-copy allows changes to its metadata (e.g. sizes / strides / storage + // / storage_offset). See NOTE [ Metadata Change for a Detached Tensor ] for + // details. + // + // In `shallow_copy_from()`, we don't check the destination TensorImpl's + // `allow_tensor_metadata_change_`, because `shallow_copy_from()` is used for + // implementing functions such as `var.set_data(tensor)`, which changes + // `var`'s tensor metadata and expects its `allow_tensor_metadata_change_` to + // be ignored. + + /** + * One TensorImpl can be copied to another TensorImpl if they have the same + * DispatchKeySet. The only two special cases (for legacy reason) are: + * CPU is compatible with CUDA and SparseCPU is + * compatible with SparseCUDA. + */ + inline bool has_compatible_shallow_copy_type(DispatchKeySet from) { + auto is_dense = [](DispatchKeySet ts) { + constexpr auto dense_backends = DispatchKeySet( + {BackendComponent::CPUBit, + BackendComponent::CUDABit, + BackendComponent::MPSBit, + BackendComponent::HIPBit, + BackendComponent::XPUBit, + BackendComponent::HPUBit}); + constexpr auto dense_k = DispatchKeySet(DispatchKey::Dense); + return ts.has_any(dense_k) && ts.has_any(dense_backends); + }; + auto is_sparse = [](DispatchKeySet ts) { + constexpr auto sparse_backends = DispatchKeySet( + {BackendComponent::CPUBit, + BackendComponent::CUDABit, + BackendComponent::HIPBit, + BackendComponent::XPUBit}); + constexpr auto sparse_k = DispatchKeySet(DispatchKey::Sparse); + return ts.has_any(sparse_k) && ts.has_any(sparse_backends); + }; + auto is_sparse_compressed = [](DispatchKeySet ts) { + constexpr auto sparse_compressed_k = + DispatchKeySet(DispatchKey::SparseCsr); + return ts.has_any(sparse_compressed_k); + }; + return (key_set_ == from) || (is_dense(key_set_) && is_dense(from)) || + (is_sparse(key_set_) && 
is_sparse(from)) || + (is_sparse_compressed(key_set_) && is_sparse_compressed(from)); + ; + } + + private: + template + c10::intrusive_ptr shallow_copy_and_detach_core( + VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + public: + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + virtual c10::intrusive_ptr shallow_copy_and_detach( + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change) const; + + /** + * Return a TensorImpl that is a shallow-copy of this TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, + * see NOTE [ TensorImpl Shallow-Copying ]. + */ + virtual c10::intrusive_ptr shallow_copy_and_detach( + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change) const; + + /** + * Shallow-copies data from another TensorImpl into this TensorImpl. + * + * For why this function doesn't check this TensorImpl's + * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ]. + */ + virtual void shallow_copy_from(const c10::intrusive_ptr& impl) { + copy_tensor_metadata( + /*src_impl=*/impl.get(), + /*dest_impl=*/this, + /*version_counter=*/version_counter(), + /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); + } + + // Inference tensor doesn't have version counter, + // set_version_counter is no-op for them. 
+ void set_version_counter(const c10::VariableVersion& version_counter) { + TORCH_CHECK( + !(is_inference() && version_counter.enabled()), + "Cannot set version_counter for inference tensor"); + version_counter_ = version_counter; + } + + void set_version_counter(c10::VariableVersion&& version_counter) { + TORCH_CHECK( + !(is_inference() && version_counter.enabled()), + "Cannot set version_counter for inference tensor"); + version_counter_ = std::move(version_counter); + } + + const c10::VariableVersion& version_counter() const noexcept { + return version_counter_; + } + + void bump_version() { + version_counter_.bump(); + } + + impl::PyObjectSlot* pyobj_slot() { + return &pyobj_slot_; + } + + const impl::PyObjectSlot* pyobj_slot() const { + return &pyobj_slot_; + } + + private: + // See NOTE [std::optional operator usage in CUDA] + // We probably don't want to expose this publicly until + // the note is addressed. + std::optional device_opt() const { + return device_opt_; + } + + public: + /** + * The device type of a Tensor, e.g., DeviceType::CPU or DeviceType::CUDA. + */ + DeviceType device_type() const { + // TODO: A useful internal assert would be to show that device_opt_ is null + // only if you are an undefined tensor + TORCH_CHECK( + device_opt_.has_value(), + "device_type cannot be run on undefined Tensor"); + // See NOTE [std::optional operator usage in CUDA] + return (*device_opt_).type(); + } + + /** + * @brief Extends the outer-most dimension of this tensor by num elements, + * preserving the existing data. + * + * The underlying data may be reallocated in order to accommodate the new + * elements, in which case this tensors' capacity is grown at a factor of + * growthPct. This ensures that Extend runs on an amortized O(1) time + * complexity. + * + * This op is auto-asynchronous if the underlying device (CUDA) supports it. + */ + void Extend(int64_t num, float growthPct); + + /** + * @brief Reserve space for the underlying tensor. 
+ * + * This must be called after Resize(), since we only specify the first + * dimension This does not copy over the old data to the newly allocated space + */ + void ReserveSpace(int64_t outer_dim); + + /** + * @brief Resizes a tensor. + * + * Resize takes in a vector of ints specifying the dimensions of the tensor. + * You can pass in an empty vector to specify that it is a scalar (i.e. + * containing one single item). + * + * The underlying storage may be deleted after calling Resize: if the new + * shape leads to a different number of items in the tensor, the old memory + * is deleted and new memory will be allocated next time you call + * mutable_data(). However, if the shape is different but the total number of + * items is the same, the underlying storage is kept. + * + * This method respects caffe2_keep_on_shrink. Consult the internal logic + * of this method to see exactly under what circumstances this flag matters. + */ + template + void Resize(Ts... dim_source) { + bool size_changed = SetDims(dim_source...); + if (size_changed) { + HandleResize(); + } + } + + template + void Resize(const std::vector& dim_source) { + Resize(ArrayRef(dim_source)); + } + + /** + * Resizes the tensor without touching underlying storage. + * This requires the total size of the tensor to remains constant. + */ + void Reshape(const std::vector& dims); + + /** + * Release whatever memory the tensor was holding but keep size and type + * information. Subsequent call to mutable_data will trigger new memory + * allocation. + */ + void FreeMemory(); + + /** + * @brief Shares the data with another tensor. + * + * To share data between two tensors, the sizes of the two tensors must be + * equal already. The reason we do not implicitly do a Resize to make the two + * tensors have the same shape is that we want to allow tensors of different + * shapes but the same number of items to still be able to share data. This + * allows one to e.g. 
have a n-dimensional Tensor and a flattened version + * sharing the same underlying storage. + * + * The source tensor should already have its data allocated. + */ + // To be deprecated + void ShareData(const TensorImpl& src); + + void ShareExternalPointer( + DataPtr&& data_ptr, + const caffe2::TypeMeta data_type, + size_t size_bytes); + + /** + * Returns a mutable raw pointer of the underlying storage. Since we will need + * to know the type of the data for allocation, a TypeMeta object is passed in + * to specify the necessary information. This is conceptually equivalent of + * calling mutable_data() where the TypeMeta parameter meta is derived from + * the type T. This function differs from mutable_data() in the sense that + * the type T can be specified during runtime via the TypeMeta object. + * + * If the existing data does not match the desired type, it will be deleted + * and a new storage will be created. + */ + inline void* raw_mutable_data(const caffe2::TypeMeta& meta) { + // For 0-size tensors it's fine to return any pointer (including nullptr) + if (data_type_ == meta && storage_initialized()) { + return static_cast( + static_cast(storage_.mutable_data()) + + storage_offset_ * meta.itemsize()); + } else { + bool had_special_dtor = data_type_.placementDelete() != nullptr; + storage_offset_ = 0; + data_type_ = meta; + // NB: device is not changed + + // We can reuse the existing buffer if the current data does not have + // a special destructor and the new data doesn't have a special + // constructor. 
+ if (numel_ == 0 || + (meta.placementNew() == nullptr && !had_special_dtor && + (storage_.nbytes() >= (numel_ * data_type_.itemsize())))) { + TORCH_INTERNAL_ASSERT( + storage_offset_ == 0); // because we just reallocated + return storage_.mutable_data(); + } + Allocator* allocator = storage_.allocator(); + // Storage might have nullptr allocator in rare cases, for example, if + // an external memory segment has been wrapped with Tensor and we don't + // know how to reallocate it. However, in order to preserve legacy C2 + // behavior, we allow reallocating the memory using default allocator. + if (allocator == nullptr) { + allocator = GetAllocator(storage_.device_type()); + } + if (meta.placementNew()) { + // For types that need placement new, we will call it, as well as + // making sure that when the data is freed, it calls the right + // destruction procedure. + auto size = numel_; + auto dtor = data_type_.placementDelete(); + auto data_ptr = allocator->allocate(numel_ * data_type_.itemsize()); + storage_.set_data_ptr_noswap(PlacementDeleteContext::makeDataPtr( + std::move(data_ptr), dtor, size, storage_.device())); + data_type_.placementNew()(storage_.mutable_data(), numel_); + } else { + // For fundamental type, new and delete is easier. + storage_.set_data_ptr_noswap( + allocator->allocate(numel_ * data_type_.itemsize())); + } + storage_.set_nbytes(numel_ * data_type_.itemsize()); + TORCH_INTERNAL_ASSERT( + storage_offset_ == 0); // because we just reallocated + device_opt_ = storage_.device(); + return storage_.mutable_data(); + } + } + + /** + * Returns a typed pointer of the underlying storage. + * + * For fundamental types, we reuse possible existing storage if there + * is sufficient capacity. 
+ */ + template + inline T* mutable_data() { + if (storage_initialized() && data_type_.Match()) { + return static_cast(storage_.mutable_data()) + storage_offset_; + } + // Check it here statically - otherwise TypeMeta would throw the runtime + // error in attempt to invoke TypeMeta::ctor() + static_assert( + std::is_default_constructible::value, + "Tensor can't hold non-default-constructable types"); + return static_cast(raw_mutable_data(caffe2::TypeMeta::Make())); + } + + /** + * True if a tensor is storage initialized. A tensor may become + * storage UNINITIALIZED after a Resize() or FreeMemory() + */ + bool storage_initialized() const { + TORCH_CHECK( + has_storage(), + "cannot call storage_initialized on tensor that does not have storage"); + return storage_.data() || numel_ == 0; + } + + /** + * True if a tensor is dtype initialized. A tensor allocated with + * Caffe2-style constructors is dtype uninitialized until the + * first time mutable_data() is called. + */ + bool dtype_initialized() const noexcept { + return data_type_ != caffe2::TypeMeta(); + } + + void set_storage_keep_dtype(at::Storage storage) { + TORCH_CHECK( + allow_tensor_metadata_change(), + "set_storage ", + err_msg_tensor_metadata_change_not_allowed); + storage_ = std::move(storage); + device_opt_ = storage_.device(); + } + + void set_storage_and_dtype( + at::Storage storage, + const caffe2::TypeMeta data_type) { + set_storage_keep_dtype(std::move(storage)); + data_type_ = data_type; + } + + void empty_tensor_restride_symint(MemoryFormat memory_format); + + /** + * Set the strides of the tensor to match memory_format + * + * WARNING: This function doesn't rearrange data and assumes tensor is a + * memory contiguous + */ + void empty_tensor_restride(MemoryFormat memory_format) { + if (has_symbolic_sizes_strides_) { + empty_tensor_restride_symint(memory_format); + return; + } +#ifdef DEBUG + TORCH_INTERNAL_ASSERT( + compute_numel() == numel_, + "If you are seeing this error, that means 
empty_tensor_restride was " + "called before setting correct numel"); +#endif + switch (memory_format) { + case MemoryFormat::Contiguous: { + // dim_ is a virtual call, don't repeat it + const auto dim_ = dim(); + sizes_and_strides_.resize(dim_); + if (dim_ > 0) { + bool overflowed = false; + const auto last_idx = dim_ - 1; + sizes_and_strides_.stride_at_unchecked(last_idx) = 1; + for (auto i = last_idx - 1; i >= 0; --i) { + overflowed |= c10::mul_overflows( + sizes_and_strides_.stride_at_unchecked(i + 1), + std::max( + sizes_and_strides_.size_at_unchecked(i + 1), 1), + std::addressof(sizes_and_strides_.stride_at_unchecked(i))); + } + TORCH_CHECK(!overflowed, "Stride calculation overflowed"); + } + break; + } + case MemoryFormat::ChannelsLast: { + TORCH_CHECK( + dim() == 4, "required rank 4 tensor to use channels_last format"); + set_sizes_and_strides(sizes(), get_channels_last_strides_2d(sizes())); + break; + } + case MemoryFormat::ChannelsLast3d: { + TORCH_CHECK( + dim() == 5, + "required rank 5 tensor to use channels_last_3d format"); + set_sizes_and_strides(sizes(), get_channels_last_strides_3d(sizes())); + break; + } + case MemoryFormat::Preserve: + TORCH_CHECK(false, "unsupported memory format ", memory_format); + // Cleaning warning messages, no need to break as TORCH_CHECK(false) + // terminates flow. 
+ // break; + case MemoryFormat::NumOptions: + TORCH_INTERNAL_ASSERT(false, "invalid memory format ", memory_format); + } + // recompute contiguous flag, as currently NHWC/NCHW flags are not mutually + // exclusive see #24090 + refresh_contiguous(); + } + + bool is_strides_like(at::MemoryFormat memory_format) const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return is_strides_like_custom(memory_format); + } + return is_strides_like_default(memory_format); + } + + bool is_strides_like_channels_last() const { + return is_strides_like(at::MemoryFormat::ChannelsLast); + } + + bool is_strides_like_channels_last_3d() const { + return is_strides_like(at::MemoryFormat::ChannelsLast3d); + } + + bool is_non_overlapping_and_dense() const { + if (C10_UNLIKELY(matches_policy(SizesStridesPolicy::CustomStrides))) { + return is_non_overlapping_and_dense_custom(); + } + return is_non_overlapping_and_dense_default(); + } + + // if this returns true, then it is guaranteed that this tensor has symbolic + // sizes/strides + bool has_symbolic_sizes_strides() const { + return has_symbolic_sizes_strides_; + } + + private: + void HandleResize(); + + // The Caffe2 Resize() method supports being called both as Resize({2,2}) as + // well as variadic with Resize(2, 2). These overloads provide all of the + // supported calling configurations, while being overloads (and not templates) + // so that implicit conversions still work. + // + // SetDims on ArrayRef is internally implemented as a template, so we can + // handle both ArrayRefs of different types (there are some uses of + // Resize in Caffe2 which pass in int, not int64_t.) 
+ + template < + typename T, + typename = typename std::enable_if_t>> + bool SetDimsTemplate(ArrayRef src) { + TORCH_CHECK( + !has_symbolic_sizes_strides_, + "SetDims() called on tensor with symbolic shape") + + auto old_numel = numel_; + sizes_and_strides_.resize(src.size()); + int64_t new_numel = 1; + for (const auto i : c10::irange(src.size())) { + new_numel *= src[i]; + sizes_and_strides_.size_at_unchecked(i) = src[i]; + } + numel_ = new_numel; + empty_tensor_restride(MemoryFormat::Contiguous); + return numel_ != old_numel; + } + + bool SetDims(ArrayRef s) { + return SetDimsTemplate(s); + } + + bool SetDims(ArrayRef s) { + return SetDimsTemplate(s); + } + + bool SetDims(ArrayRef s) { + return SetDimsTemplate(s); + } + + bool SetDims() { + return SetDims(IntArrayRef{}); + } + + bool SetDims(const int64_t d0) { + return SetDims(IntArrayRef{d0}); + } + + bool SetDims(const int64_t d0, const int64_t d1) { + return SetDims(IntArrayRef{d0, d1}); + } + + bool SetDims(const int64_t d0, const int64_t d1, const int64_t d2) { + return SetDims(IntArrayRef{d0, d1, d2}); + } + + bool SetDims( + const int64_t d0, + const int64_t d1, + const int64_t d2, + const int64_t d3) { + return SetDims(IntArrayRef{d0, d1, d2, d3}); + } + + /** + * Compute the number of elements based on the sizes of a tensor. + */ + // NB: This is ONLY called when sizes_and_strides_ is used directly; if + // we are virtualizing, then numel calls are virtualized as well, and this + // should never get called + int64_t compute_numel() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!has_symbolic_sizes_strides_); +#if C10_HAS_BUILTIN_OVERFLOW() && !defined(C10_MOBILE) + // Use overflow checks if supported by the compiler + return safe_compute_numel(); +#else + return c10::multiply_integers(sizes_and_strides_.sizes_arrayref()); +#endif + } + + /** + * Compute the number of elements based on the sizes of a + * tensor. 
Catches integer overflow that may occur when a tensor + * using a sparse layout has multiple dimensions with large sizes. + */ + int64_t safe_compute_numel() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!has_symbolic_sizes_strides_); + uint64_t n = 1; + bool overflows = + c10::safe_multiplies_u64(sizes_and_strides_.sizes_arrayref(), &n); + constexpr auto numel_max = std::min( + static_cast(std::numeric_limits::max()), + static_cast(std::numeric_limits::max())); + + overflows |= (n > numel_max); + TORCH_CHECK(!overflows, "numel: integer multiplication overflow"); + return static_cast(n); + } + + /** + * Compute whether or not a tensor is contiguous based on the sizes and + * strides of a tensor. + */ + bool compute_contiguous(identity) const; + + bool compute_channels_last_contiguous_2d(identity) const; + + bool compute_channels_last_contiguous_3d(identity) const; + + bool compute_strides_like_channels_last_2d(identity) const; + + bool compute_strides_like_channels_last_3d(identity) const; + + bool compute_non_overlapping_and_dense(identity) const; + + protected: + /** + * Recompute the cached numel of a tensor. Call this if you modify + * sizes. + * + * For tensors with sparse layouts, use safe_refresh_numel() instead + * because it will catch integer overflow that may occur for tensors + * with sparse layouts and large dimensions. + * + * NB: We may uselessly recompute cached numel even in situations where + * it is completely never used (e.g., if CustomSizes for Python). However, + * we still must keep it up to date in case the Python overload + * returns None (in which case we will consult the field here). This also + * implies that sizes/strides will never be complete garbage; in the + * very worst case scenario, it will reflect a 1-dim zero size tensor. + */ + void refresh_numel() { + if (has_symbolic_sizes_strides_) { + symbolic_shape_meta().refresh_numel(); + } else { + numel_ = compute_numel(); + } + } + + /** + * Recompute the cached numel of a tensor. 
Call this if you modify + * sizes. Use only for tensors with sparse layouts because only + * sparse tensor are likely to have sizes that may lead to integer + * overflow when computing numel. + */ + void safe_refresh_numel() { + if (has_symbolic_sizes_strides_) { + // NB: sym numel is done with symbolic integers, which handle overflow + // checking + symbolic_shape_meta().refresh_numel(); + } else { + numel_ = safe_compute_numel(); + } + } + + private: + // NB: the TypeId argument prevents confusion where you pass a true/false + // literal and pick the wrong overload + + void _set_is_contiguous(identity, bool b) { + is_contiguous_ = b; + } + + void _set_is_channels_last_contiguous(identity, bool b) { + is_channels_last_contiguous_ = b; + } + + void _set_is_channels_last_3d_contiguous(identity, bool b) { + is_channels_last_3d_contiguous_ = b; + } + + void _set_is_channels_last(identity, bool b) { + is_channels_last_ = b; + } + + void _set_is_channels_last_3d(identity, bool b) { + is_channels_last_3d_ = b; + } + + void _set_is_non_overlapping_and_dense(identity, bool b) { + is_non_overlapping_and_dense_ = b; + } + + // These are little wrappers over the real compute_ functions that + // can make use of other contiguity fields to short circuit. 
+ + bool compute_is_non_overlapping_and_dense_dim4(identity type_id) { + return is_contiguous_ || is_channels_last_contiguous_ || + compute_non_overlapping_and_dense(type_id); + } + + bool compute_channels_last_contiguous_3d_dim5(identity type_id) { + return !is_channels_last_contiguous_ && + compute_channels_last_contiguous_3d(type_id); + } + + bool compute_channels_last_2d_dim5(identity type_id) { + return !is_channels_last_3d_contiguous_ && + compute_strides_like_channels_last_2d(type_id); + } + + bool compute_channels_last_3d_dim5(identity type_id) { + return !is_channels_last_ && compute_strides_like_channels_last_3d(type_id); + } + + bool compute_is_non_overlapping_and_dense_dim5(identity type_id) { + return is_contiguous_ || is_channels_last_contiguous_ || + is_channels_last_3d_contiguous_ || + compute_non_overlapping_and_dense(type_id); + } + + bool compute_is_non_overlapping_and_dense_anydim(identity type_id) { + return is_contiguous_ || compute_non_overlapping_and_dense(type_id); + } + + template + void _refresh_contiguous() { + auto type_id = identity(); + // Note: + // Dim 0, 1, 2 will never be a channels last 2d/3d format + // Dim 3+ is possibly be a channels last 2d format (Dim 4 only at this + // point) Dim 4+ is possibly be a channels last 3d format (Dim 5 only at + // this point) + switch (dim()) { + case 4: { + _set_is_contiguous(type_id, compute_contiguous(type_id)); + _set_is_channels_last_contiguous( + type_id, compute_channels_last_contiguous_2d(type_id)); + _set_is_channels_last_3d_contiguous(type_id, false); + _set_is_channels_last( + type_id, compute_strides_like_channels_last_2d(type_id)); + _set_is_channels_last_3d(type_id, false); + _set_is_non_overlapping_and_dense( + type_id, compute_is_non_overlapping_and_dense_dim4(type_id)); + break; + } + case 5: { + _set_is_contiguous(type_id, compute_contiguous(type_id)); + _set_is_channels_last_contiguous( + type_id, compute_channels_last_contiguous_2d(type_id)); + 
_set_is_channels_last_3d_contiguous( + type_id, compute_channels_last_contiguous_3d_dim5(type_id)); + _set_is_channels_last(type_id, compute_channels_last_2d_dim5(type_id)); + _set_is_channels_last_3d( + type_id, compute_channels_last_3d_dim5(type_id)); + _set_is_non_overlapping_and_dense( + type_id, compute_is_non_overlapping_and_dense_dim5(type_id)); + break; + } + default: + // is_channels_last_ and is_channels_last_3d_ are suggested + // memory_format. Being channels_last_contiguous doesn't necessarily + // mean the tensor is strided like channels_last: for strides on channel + // dimension could suggest desired memory_layout, but it doesn't affect + // memory storage + _set_is_contiguous(type_id, compute_contiguous(type_id)); + _set_is_channels_last_contiguous(type_id, false); + _set_is_channels_last_3d_contiguous(type_id, false); + _set_is_channels_last(type_id, false); + _set_is_channels_last_3d(type_id, false); + _set_is_non_overlapping_and_dense( + type_id, compute_is_non_overlapping_and_dense_anydim(type_id)); + break; + } + } + + protected: + /** + * Recompute the cached contiguity of a tensor. Call this if you modify sizes + * or strides. + */ + void refresh_contiguous() { + if (has_symbolic_sizes_strides_) { + symbolic_shape_meta().refresh_contiguous(); + } else { + _refresh_contiguous(); + } + } + + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. + * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const TensorImpl* src_impl, + TensorImpl* dest_impl, + const c10::VariableVersion& version_counter, + bool allow_tensor_metadata_change); + + /** + * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / + * storage_offset) from one TensorImpl to another TensorImpl. 
+ * + * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE + * [ TensorImpl Shallow-Copying ]. + */ + static void copy_tensor_metadata( + const TensorImpl* src_impl, + TensorImpl* dest_impl, + c10::VariableVersion&& version_counter, + bool allow_tensor_metadata_change); + + private: + static void copy_tensor_metadata_except_version_counter( + const TensorImpl* src_impl, + TensorImpl* dest_impl, + bool allow_tensor_metadata_change); + + protected: + // Error message to show when the user tries to change tensor metadata on + // Tensor created from .data or .detach(). + // + // See NOTE [ Metadata Change for a Detached Tensor ] for details. + static const char* const err_msg_tensor_metadata_change_not_allowed; + + static void copy_generic_tensor_metadata( + const TensorImpl* src_impl, + TensorImpl* dest_impl); + + public: + void set_storage_access_should_throw() { + storage_access_should_throw_ = true; + } + + public: + void set_custom_sizes_strides(SizesStridesPolicy policy) { + custom_sizes_strides_ = static_cast(policy); + refresh_sizes_strides_policy(); + } + + void set_python_custom_sizes_strides(SizesStridesPolicy policy) { + python_custom_sizes_strides_ = static_cast(policy); + refresh_sizes_strides_policy(); + } + + void set_custom_device(bool custom_device) { + custom_device_ = custom_device; + refresh_device_policy(); + } + + void set_custom_layout(bool custom_layout) { + custom_layout_ = custom_layout; + refresh_layout_policy(); + } + + void set_python_custom_device(bool custom_device) { + python_custom_device_ = custom_device; + refresh_device_policy(); + } + + void set_python_custom_layout(bool custom_layout) { + python_custom_layout_ = custom_layout; + refresh_layout_policy(); + } + + protected: + void refresh_sizes_strides_policy() { + if (has_symbolic_sizes_strides_) { + sizes_strides_policy_ = + static_cast(SizesStridesPolicy::CustomSizes); + } else { + sizes_strides_policy_ = + std::max(custom_sizes_strides_, 
python_custom_sizes_strides_); + } + } + + void refresh_device_policy() { + device_policy_ = custom_device_ || python_custom_device_; + } + + void refresh_layout_policy() { + layout_policy_ = custom_layout_ || python_custom_layout_; + } + + protected: + Storage storage_; + + private: + // This pointer points to an AutogradMeta struct that stores autograd-specific + // fields (such as grad_ / grad_fn_ / grad_accumulator_). This pointer always + // has unique ownership (meaning only one TensorImpl can own it at a time). + // + // autograd_meta_ can be nullptr, as an optimization. When this occurs, it is + // equivalent to having an autograd_meta_ pointing to a default constructed + // AutogradMeta; intuitively, tensors which don't require grad will have this + // field set to null. + // + // This means accessors on autograd_meta_ have to be careful to test if they + // got a nullptr, and handle default behavior appropriately in that case. + // + // Note that we don't enforce the invariant that if the AutogradMeta is + // default constructed, it is nullptr (to do this, we'd have to continuously + // check if an AutogradMeta became, by mutation, equal to the default + // constructed form. (This might be useful, but it seems rare enough that + // a requires_grad=True variable will turn back into the requires_grad=False + // version.) So there are three representable states: + // + // 1. autograd_meta_ == nullptr + // 2. autograd_meta_ is default constructed (semantically, same as (1)) + // 3. autograd_meta_ has nontrivial information content + // + std::unique_ptr autograd_meta_ = nullptr; + + protected: + std::unique_ptr extra_meta_ = nullptr; + + c10::VariableVersion version_counter_; + + impl::PyObjectSlot pyobj_slot_; + + c10::impl::SizesAndStrides sizes_and_strides_; + + int64_t storage_offset_ = 0; + // If sizes and strides are empty, the numel is 1!! However, most of the + // time, we will immediately set sizes to {0} and reset numel to 0. 
+ // (Can't do that in the default initializers, because there's no way to + // spell "allocate a one-element array" for strides_). + int64_t numel_ = 1; + + // INVARIANT: When storage is non-null, this type meta must + // agree with the type meta in storage + caffe2::TypeMeta data_type_; + + // NOTE [std::optional operator usage in CUDA] + // Our optional definition doesn't compile in .cu file if `value()` or + // `operator->` are used. Instead, we always use `operator*`. + // See https://github.com/pytorch/pytorch/issues/18496 for more info. + // If this is too burdensome to maintain, we can just + // manually implement this with an additional bool. + + // INVARIANT: When storage is non-null, this Device must + // agree with the type meta in storage. + // + // INVARIANT: device_opt_ is only nullopt for undefined tensors + // (which do not have a device.) + std::optional device_opt_; + + // default member initializers for bit-fields only available with -std=c++2a + // or -std=gnu++2a + inline void init_bitfields() { + is_contiguous_ = true; + is_channels_last_ = false; + is_channels_last_contiguous_ = false; + is_channels_last_3d_ = false; + is_channels_last_3d_contiguous_ = false; + is_non_overlapping_and_dense_ = true; + is_wrapped_number_ = false; + allow_tensor_metadata_change_ = true; + reserved_ = false; + sizes_strides_policy_ = static_cast(SizesStridesPolicy::Default); + custom_sizes_strides_ = static_cast(SizesStridesPolicy::Default); + python_custom_sizes_strides_ = + static_cast(SizesStridesPolicy::Default); + python_custom_device_ = false; + python_custom_layout_ = false; + custom_device_ = false; + custom_layout_ = false; + device_policy_ = false; + layout_policy_ = false; + storage_access_should_throw_ = false; + has_symbolic_sizes_strides_ = false; + } + + // Tensor is contiguous + bool is_contiguous_ : 1; + + // Tensor is a subclass that does not permit storage access. 
+ bool storage_access_should_throw_ : 1; + + // Tensor is stored in the channels last 2d memory format, when dimensions + // order is (N)CHW and C-strides < W-strides < H-strides (< N-strides) + // (If size of any dimension is equal to 1, this dimension strides value + // is not taken into account). + bool is_channels_last_ : 1; + + // Channels last contiguous tensor is channel last tensor which occupies + // contiguous memory block. + bool is_channels_last_contiguous_ : 1; + + // Tensor is stored in the channels last 3d memory format, when dimensions + // order is (N)CDHW and C-strides < W-strides < H-strides < D - strides (< + // N-strides) (If size of any dimension is equal to 1, this dimension strides + // value is not taken into account). + bool is_channels_last_3d_ : 1; + + // Channels last 3d contiguous tensor is channel last 3d tensor which occupies + // contiguous memory block. + bool is_channels_last_3d_contiguous_ : 1; + + // Dense tensor is the tensor that store values in a contiguous block of + // memory. Non-overlapping tensor is the tensor in which elements occupy + // individual non-repetitive memory. + bool is_non_overlapping_and_dense_ : 1; + + bool is_wrapped_number_ : 1; + + // NOTE [ Metadata Change for a Detached Tensor ] + // + // Normally, a user is allowed to change the tensor metadata + // (e.g. sizes / strides / storage / storage_offset) of a tensor. + // However, if the tensor is created by `t1_detached = t1.data` in Python + // or `t1_detached = t1.detach()` in Python/C++, those changes to the + // tensor metadata of `t1_detached` will not be propagated back to the + // original tensor `t1`. In order to make such changes explicitly illegal, + // we created the `allow_tensor_metadata_change_` flag, to prevent users + // from changing metadata of the detached tensor and expecting the original + // tensor to also be updated. 
+ // + // NOTE: For a full list of tensor metadata fields, please see + // `copy_tensor_metadata()` in TensorImpl and its subclasses to find + // which fields are copied by value. + bool allow_tensor_metadata_change_ : 1; + + // we decide to keep reserved_ and it will + // live in Tensor after the split + // The logic is that if Extend() or ReserveSpace() were ever called, + // then subsequent Resize()s will not free up Storage. + bool reserved_ : 1; + + // Call _custom() virtual methods for + // strides()/is_contiguous()/sizes()/dim()/numel() + // This is a combination of sizes_strides_custom_dispatch_ + // and has_symbolic_sizes_strides_ + uint8_t sizes_strides_policy_ : 2; + + // Whether or not sizes_and_strides_ contains a symbolic value. + bool has_symbolic_sizes_strides_ : 1; + + // Call _custom() virtual method for + // strides()/is_contiguous()/sizes()/dim()/numel() + uint8_t custom_sizes_strides_ : 2; + + // Combo of custom_ and python_custom_ + bool device_policy_ : 1; + bool layout_policy_ : 1; + + // Call _custom() virtual method for device() + bool custom_device_ : 1; + + // Call _custom() virtual method for layout() + bool custom_layout_ : 1; + + // Call into Python for + // strides()/is_contiguous()/sizes()/dim()/numel() + uint8_t python_custom_sizes_strides_ : 2; + + // Call into Python for device() + bool python_custom_device_ : 1; + + // Call into Python for layout() + bool python_custom_layout_ : 1; + + // The set of DispatchKeys which describe this tensor. NB: this + // does NOT include Autograd (historically, it did, but + // not anymore!) 
+ // + // INVARIANT: extra_meta_->named_tensor_meta_ != nullptr <==> + // key_set_.has(DispatchKey::Named) + DispatchKeySet key_set_; + + private: + // C10_TensorImpl_Size_Check_Dummy_Class needs to be friends with + // TensorImpl so it can inspect the size of private fields + template < + size_t cplusplus, + size_t clang_ver_major, + size_t gcc_ver, + size_t gcc_ver_minor, + size_t nvcc, + size_t cuda_version, + size_t cuda_version_major, + size_t ptr_size> + friend class C10_TensorImpl_Size_Check_Dummy_Class; +}; + +// Note [TensorImpl size constraints] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Changed the size of TensorImpl? If the size went down, good for +// you! Adjust the documentation below and the expected size. +// Did it go up? Read on... +// +// Struct size matters. In some production systems at Facebook, we have +// 400M live tensors during a training run. Do the math: every 64-bit +// word you add to Tensor is an extra 3.2 gigabytes in RAM. +// +// If you are a Facebook employee, you can check if the run in question +// has tipped you over the point using the command here: +// https://fburl.com/q5enpv98 +// +// For reference, we OOMed at 160 bytes (20 words) per TensorImpl. +// This is not counting overhead from strides out-of-line allocation and +// StorageImpl space and this is from before we inlined sizes and strides +// directly into TensorImpl as SmallVectors. +// +// Our memory usage on 32-bit systems is suboptimal, but we're not checking +// for it at the moment (to help avoid rage inducing cycles when the +// 32-bit number is wrong). 
+// +// Current breakdown: +// +// vtable pointer +// strong refcount TODO: pack these into one word +// weak refcount +// storage pointer +// autograd metadata pointer +// named tensor metadata pointer +// version counter pointer +// PyObjectSlot +// SizesAndStrides size/pointer +// SizesAndStrides sizes (pre-allocated 0) +// SizesAndStrides sizes (pre-allocated 1) +// SizesAndStrides sizes (pre-allocated 2) +// SizesAndStrides sizes (pre-allocated 3) +// SizesAndStrides sizes (pre-allocated 4) +// SizesAndStrides strides (pre-allocated 0) +// SizesAndStrides strides (pre-allocated 1) +// SizesAndStrides strides (pre-allocated 2) +// SizesAndStrides strides (pre-allocated 3) +// SizesAndStrides strides (pre-allocated 4) +// storage offset +// numel +// data type, device, is_contiguous, storage_access_should_throw_, bitfields +// DispatchKeySet +// + +// Various preprocessor macros we use to check that the +// TensorImpl size hasn't changed unexpectedly. We undef +// these later. +#ifndef __NVCC__ +#define C10_NVCC 0 +#else +#define C10_NVCC __NVCC__ +#endif + +#ifndef __CUDA_VER_MAJOR__ +#define C10_CUDA_VERSION_MAJOR 0 +#else +#define C10_CUDA_VERSION_MAJOR __CUDA_VER_MAJOR__ +#endif + +#ifndef CUDA_VERSION +#define C10_CUDA_VERSION 0 +#else +#define C10_CUDA_VERSION CUDA_VERSION +#endif + +#ifndef __clang_major__ +#define C10_CLANG_MAJOR_VERSION 0 +#else +#define C10_CLANG_MAJOR_VERSION __clang_major__ +#endif + +#ifndef __GNUC__ +#define C10_GCC_VERSION 0 +#else +#define C10_GCC_VERSION __GNUC__ +#endif + +#ifndef __GNUC_MINOR__ +#define C10_GCC_VERSION_MINOR 0 +#else +#define C10_GCC_VERSION_MINOR __GNUC_MINOR__ +#endif + +// We use a templatized class to both contain the logic of checking the sizes +// as well as to provide compile-time information that might be useful in +// figuring out why sizes may have changed. +// All the compile time information is given by the template fields that are +// always printed by the compiler when the static_assert fails. 
+template < + size_t cplusplus = __cplusplus, + size_t clang_ver_major = C10_CLANG_MAJOR_VERSION, + size_t gcc_ver = C10_GCC_VERSION, + size_t gcc_ver_minor = C10_GCC_VERSION_MINOR, + size_t nvcc = C10_NVCC, + size_t cuda_version = C10_CUDA_VERSION, + size_t cuda_version_major = C10_CUDA_VERSION_MAJOR, + size_t ptr_size = sizeof(void*)> +class C10_TensorImpl_Size_Check_Dummy_Class : private TensorImpl { + // Names of (non-bitfield) fields in TensorImpl; used to provide + // compile-time info about fields whose size changes unexpectedly. + enum class FieldNameEnum { + storage_, + autograd_meta_, + extra_meta_, + version_counter_, + pyobj_slot_, + sizes_and_strides_, + storage_offset_, + numel_, + data_type_, + device_opt_, + key_set_, + TOTAL_SIZE + }; + + // Provides compile-time equality check that reveals what numbers + // were used and on which quantity + template + constexpr static bool are_equal() { + static_assert( + Actual == Expected, + "Actual and Expected sizes of a field did not match!"); + return true; + } + + // Provides compile-time <= check that reveals what numbers + // were used and on which quantity + template + constexpr static bool is_le() { + static_assert( + Actual <= Expected, + "Actual and Expected sizes of a field did not match!"); + return true; + } + + public: + // Compile-time check that TensorImpl field sizes are as expected + // + // Observed total sizes and associated versions + // If you find a flag that predicts when unique_ptr has 16 bytes + // on 64-bit systems or when sizes_and_strides_ is 84 vs 88 bytes + // on 32-bit systems you get a cookie! + // Length | LLVM | GCC | C++ | CUDA + // 192 | ? | 11.2 | 201703 | 11040 + // 208 | ? | 11.2 | 201703 | 11040 + // 208 | ? | 11.2 | 201402 | 11040 + // 192 | ? | 11.2 | 201402 | 11040 + // 160 | 12 | 4.2 | 201703 | 0 + // + // To keep things clean, we split on systems here. 
+ +#if UINTPTR_MAX == 0xFFFFFFFF + // This is a 32-bit system + static constexpr bool check_sizes() { + constexpr size_t tsize = 20 * sizeof(int64_t); + + // clang-format off + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + is_le(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + is_le(); + // clang-format on + + return true; + } +#else + // This is a 64-bit system + static constexpr bool check_sizes() { + constexpr size_t tsize = 26 * sizeof(int64_t); + + // clang-format off + are_equal(); + // On some systems involving NVCC the size of unique_ptr is 16 bytes. We haven't + // figured out how to detect those via macro preprocessors yet, so we use <= + // comparisons for the relevant fields. + is_le(); + is_le(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + are_equal(); + is_le(); + // clang-format on + + return true; + } +#endif +}; + +// We use a class to encapsulate size-checking logic with +// templates to capture sizes and flags. We call this within +// a static assert to prove there is no run-time behaviour. +// Since the methods we call return either true or fail their +// own static_asserts, we should never see the error messages +// below. We have to provide it though for c++ <17. 
+static_assert( + C10_TensorImpl_Size_Check_Dummy_Class<>::check_sizes(), + "You should not see this message."); + +// Clean up after ourselves +#undef C10_NVCC +#undef C10_CUDA_VERSION_MAJOR +#undef C10_CUDA_VERSION +#undef C10_CLANG_MAJOR_VERSION +#undef C10_GCC_VERSION +#undef C10_GCC_VERSION_MINOR + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..d99005d3d28f859d50db1c8e428f38b612954a32 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h @@ -0,0 +1,787 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace c10 { + +DispatchKey computeDispatchKey( + std::optional dtype, + std::optional layout, + std::optional device); + +inline ScalarType dtype_or_default(std::optional dtype) { + return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); }); +} + +inline caffe2::TypeMeta dtype_or_default( + std::optional dtype) { + return value_or_else(dtype, [] { return get_default_dtype(); }); +} + +inline Layout layout_or_default(std::optional layout) { + return layout.value_or(kStrided); +} + +inline Device device_or_default(std::optional device) { + return value_or_else(device, [] { return Device(kCPU); }); +} + +inline bool pinned_memory_or_default(std::optional pinned_memory) { + return pinned_memory.value_or(false); +} + +/// A class to encapsulate construction axes of an Tensor. 
TensorOptions was +/// designed to support the Python style API for specifying construction options +/// on factory functions, e.g., +/// +/// torch.zeros(2, 3, dtype=torch.int32) +/// +/// Because C++ doesn't natively support keyword arguments, there must be +/// another way of specifying keyword-like arguments. TensorOptions is a +/// builder class which can be used to construct this "dictionary" of keyword +/// arguments: functions which support TensorOptions conventionally take this +/// argument optionally as their last argument. +/// +/// WARNING: In PyTorch, there are `torch::` variants of factory functions, +/// e.g., torch::zeros for at::zeros. These return Variables (while the +/// stock ATen functions return plain Tensors). If you mix these functions +/// up, you WILL BE SAD. +/// +/// Rather than use the constructor of this class directly, you should prefer to +/// use the constructor functions, and then chain setter methods on top of them. +/// +/// at::device(at::kCUDA).dtype(kInt) +/// at::dtype(at::kInt) +/// +/// Additionally, anywhere a TensorOptions is expected, you can directly +/// pass at::kCUDA / at::kInt, and it will implicitly convert to a +/// TensorOptions. +/// +/// Here are some recommended ways to create a 2x2 tensor of zeros +/// with certain properties. These all *implicitly* make use of +/// TensorOptions, even if they don't mention the class explicitly: +/// +/// at::zeros({2,2}, at::kCUDA); +/// at::zeros({2,2}, at::kLong); +/// at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong())); +/// at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1 +/// at::zeros({2,2}, at::requires_grad()); +/// + +/// NOTE [ TensorOptions Constructors ] +/// +/// TensorOptions is like a dictionary with entries from the set: +/// {requires_grad, device, dtype, layout}, where each entry may be +/// unspecified (i.e., is optional). 
It is used to specify the properties of +/// tensors in many places both in C++ internal and API, e.g., tensor factory +/// methods like `at::empty({10}, options)`, tensor conversions like +/// `tensor.to(...)`, etc. +/// +/// To provide a simple API that is consistent with Python, where one can do +/// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or a +/// `torch.layout`, we want TensorOptions to be implicitly convertible from +/// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we have +/// three implicit constructors from each of these three types. +/// +/// This is sufficient for `ScalarType` and `Layout` as they are simple Enum +/// classes. However, `Device` is an ordinary class with implicit constructors +/// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be +/// consistent with Python API, where strings are treated as equivalent with a +/// `torch.device` object (e.g., "cuda:1" can be passed to everywhere a +/// `torch.device("cuda:1")` is accepted). To support the syntax +/// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure +/// that `TensorOptions` is implicitly constructible with any arguments that a +/// `Device` can constructed from. So we have, +/// +/// /* implicit */ TensorOptions(T&& device) : TensorOptions() { +/// this->set_device(device); +/// } +/// +/// template ::value>> +/// /* implicit */ TensorOptions(Args&&... args) +/// : TensorOptions(Device(std::forward(args)...)) {} +/// +/// +/// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`. +/// Compiler will complain about ambiguity between the copy constructor and the +/// `Device` constructor because `{kCUDA, 1}` can be converted to both a +/// `TensorOption` and a `Device`. +/// +/// To get around this, we templatize the `Device` constructor. Since overload +/// resolution is done before template resolution, our problem is solved. 
+ +DispatchKey computeDispatchKey( + optional dtype, + optional layout, + optional device); + +struct C10_API TensorOptions { + TensorOptions() + : requires_grad_(false), + pinned_memory_(false), + has_device_(false), + has_dtype_(false), + has_layout_(false), + has_requires_grad_(false), + has_pinned_memory_(false), + has_memory_format_(false) {} + + /// Constructs a `TensorOptions` object with the given layout. + /* implicit */ TensorOptions(Layout layout) : TensorOptions() { + this->set_layout(layout); + } + + /// Constructs a `TensorOptions` object with the given device. + /// See NOTE [ TensorOptions Constructors ] on why this is templatized. + template < + typename T, + typename = std::enable_if_t, Device>>> + /* implicit */ TensorOptions(T&& device) : TensorOptions() { + this->set_device(std::forward(device)); + } + + /// Constructs a `TensorOptions` object from arguments allowed in `Device` + /// constructors. + /// + /// See NOTE [ TensorOptions Constructors ]. + /// + /// NB: Ideally we only allow implicit constructors here. But there is no easy + /// way to detect them. So we have this one that allows explicit + /// constructors too. + template < + typename... Args, + typename = std::enable_if_t>> + /* implicit */ TensorOptions(Args&&... args) + : TensorOptions(Device(std::forward(args)...)) {} + + /// Constructs a `TensorOptions` object with the given dtype. + /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() { + this->set_dtype(dtype); + } + + /// legacy constructor to support ScalarType + /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() { + this->set_dtype(dtype); + } + + /// Constructs a `TensorOptions` object with the given memory format. + /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() { + set_memory_format(memory_format); + } + + /// Return a copy of `TensorOptions` with `device` set to the given one, or + /// cleared if `device` is `nullopt`. 
+ C10_NODISCARD TensorOptions + device(std::optional device) const noexcept { + TensorOptions r = *this; + r.set_device(device); + return r; + } + + /// Return a copy of `TensorOptions` with `device` set to the given one. + /// (This overload ensures that variadic template std::optional constructor + /// for Device work correctly.) + template + C10_NODISCARD TensorOptions device(Args&&... args) const noexcept { + return device( + std::optional(std::in_place, std::forward(args)...)); + } + + /// Return a copy of `TensorOptions`, but with device set to CUDA, and the + /// device index set to the given one. + /// + /// TODO: This function encourages bad behavior (assuming CUDA is + /// the only device that matters). Get rid of it / rename it. + C10_NODISCARD TensorOptions + device_index(c10::DeviceIndex device_index) const noexcept { + return device(Device::Type::CUDA, device_index); + } + + /// Return a copy of `TensorOptions` with `dtype` set to the given one. + C10_NODISCARD TensorOptions + dtype(std::optional dtype) const noexcept { + TensorOptions r = *this; + r.set_dtype(dtype); + return r; + } + + // legacy function to support ScalarType + C10_NODISCARD TensorOptions + dtype(std::optional dtype) const noexcept { + TensorOptions r = *this; + r.set_dtype(dtype); + return r; + } + + // Since dtype is taken... + template + TensorOptions& dtype() { + dtype_ = caffe2::TypeMeta::Make(); + has_dtype_ = true; + return *this; + } + + /// Sets the layout of the `TensorOptions`. + C10_NODISCARD TensorOptions + layout(std::optional layout) const noexcept { + TensorOptions r = *this; + r.set_layout(layout); + return r; + } + + /// Sets the `requires_grad` property of the `TensorOptions`. + C10_NODISCARD TensorOptions + requires_grad(std::optional requires_grad) const noexcept { + TensorOptions r = *this; + r.set_requires_grad(requires_grad); + return r; + } + + /// Sets the `pinned_memory` property on the `TensorOptions`. 
+ C10_NODISCARD TensorOptions + pinned_memory(std::optional pinned_memory) const noexcept { + TensorOptions r = *this; + r.set_pinned_memory(pinned_memory); + return r; + } + + /// Sets the `memory_format` property on `TensorOptions`. + C10_NODISCARD TensorOptions + memory_format(std::optional memory_format) const noexcept { + TensorOptions r = *this; + r.set_memory_format(memory_format); + return r; + } + + /// Returns the device of the `TensorOptions`. + Device device() const noexcept { + return device_or_default(device_opt()); + } + + /// Returns whether the device is specified. + bool has_device() const noexcept { + return has_device_; + } + + /// Returns the device of the `TensorOptions`, or `c10::nullopt` if + /// device is not specified. + std::optional device_opt() const noexcept { + return has_device_ ? c10::make_optional(device_) : c10::nullopt; + } + + /// Returns the device index of the `TensorOptions`. + c10::DeviceIndex device_index() const noexcept { + return device().index(); + } + + /// Returns the dtype of the `TensorOptions`. + caffe2::TypeMeta dtype() const noexcept { + return dtype_or_default(dtype_opt()); + } + + /// Returns whether the dtype is specified. + bool has_dtype() const noexcept { + return has_dtype_; + } + + /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if + /// device is not specified. + std::optional dtype_opt() const noexcept { + return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt; + } + + /// Returns the layout of the `TensorOptions`. + Layout layout() const noexcept { + return layout_or_default(layout_opt()); + } + + /// Returns whether the layout is specified. + bool has_layout() const noexcept { + return has_layout_; + } + + /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if + /// layout is not specified. + std::optional layout_opt() const noexcept { + return has_layout_ ? 
c10::make_optional(layout_) : c10::nullopt; + } + + /// Returns the `requires_grad` property of the `TensorOptions`. + bool requires_grad() const noexcept { + return has_requires_grad_ ? requires_grad_ : false; + } + + /// Returns whether the `requires_grad` is specified. + bool has_requires_grad() const noexcept { + return has_requires_grad_; + } + + /// Returns the `requires_grad` property of the `TensorOptions`, or + /// `c10::nullopt` if `requires_grad` is not specified. + std::optional requires_grad_opt() const noexcept { + return has_requires_grad_ ? c10::make_optional(requires_grad_) + : c10::nullopt; + } + + /// Returns the `pinned_memory` property of the `TensorOptions`. + bool pinned_memory() const noexcept { + return pinned_memory_or_default(pinned_memory_opt()); + } + + /// Returns whether the `pinned_memory` is specified. + bool has_pinned_memory() const noexcept { + return has_pinned_memory_; + } + + /// Returns if the layout is sparse + bool is_sparse() const { + return layout_ == c10::Layout::Sparse; + } + + /// Returns if the layout is sparse CSR, deprecated, use + /// is_sparse_compressed() instead + bool is_sparse_csr() const { + return layout_ == c10::Layout::SparseCsr; + } + + bool is_sparse_compressed() const { + return layout_ == c10::Layout::SparseCsr || + layout_ == c10::Layout::SparseCsc || + layout_ == c10::Layout::SparseBsr || layout_ == c10::Layout::SparseBsc; + } + + // For compatibility with legacy tensor.type() comparisons + bool type_equal(const TensorOptions& other) const { + return computeDispatchKey() == other.computeDispatchKey() && + typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype()); + } + + /// Returns the `pinned_memory` property of the `TensorOptions`, or + /// `c10::nullopt` if `pinned_memory` is not specified. + std::optional pinned_memory_opt() const noexcept { + return has_pinned_memory_ ? 
c10::make_optional(pinned_memory_) + : c10::nullopt; + } + + /// Returns whether the `memory_layout` is specified + bool has_memory_format() const noexcept { + return has_memory_format_; + } + + // NB: memory_format() getter is PURPOSELY not defined, as the default + // behavior of memory_format varies from function to function. + + /// Returns the `memory_layout` property of `TensorOptions, or + /// `c10::nullopt` if `memory_format` is not specified. + std::optional memory_format_opt() const noexcept { + return has_memory_format_ ? c10::make_optional(memory_format_) + : c10::nullopt; + } + + // Resolves the ATen backend specified by the current construction axes. + // TODO: Deprecate this + Backend backend() const { + return at::dispatchKeyToBackend(computeDispatchKey()); + } + + /// Return the right-biased merge of two TensorOptions. This has the + /// effect of overwriting settings from self with specified options + /// of options. + /// + /// NB: This merging operation does NOT respect device merges. + /// For example, if you device({kCUDA, 1}).merge_in(kCUDA) + /// you will get kCUDA in the end! Functions like Tensor.new_empty + /// ensure the right device is selected anyway by way of a + /// device guard. + /// + TensorOptions merge_in(TensorOptions options) const noexcept { + TensorOptions merged = *this; + if (options.has_device()) + merged.set_device(options.device_opt()); + if (options.has_dtype()) + merged.set_dtype(options.dtype_opt()); + if (options.has_layout()) + merged.set_layout(options.layout_opt()); + // NB: requires grad is right biased; not a logical AND/OR! 
+ if (options.has_requires_grad()) + merged.set_requires_grad(options.requires_grad_opt()); + if (options.has_pinned_memory()) + merged.set_pinned_memory(options.pinned_memory_opt()); + if (options.has_memory_format()) + merged.set_memory_format(options.memory_format_opt()); + return merged; + } + + // TODO remove after TensorOptions rationalization + TensorOptions merge_memory_format( + std::optional optional_memory_format) const noexcept { + TensorOptions merged = *this; + if (optional_memory_format.has_value()) { + merged.set_memory_format(*optional_memory_format); + } + return merged; + } + + // INVARIANT: computeDispatchKey returns only the subset of dispatch keys for + // which dispatchKeyToBackend is injective, if it is defined at all (for + // the most part, this just means that this function never returns an + // Autograd key) + DispatchKey computeDispatchKey() const { + return c10::computeDispatchKey( + optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt()); + } + + private: + // These methods are currently private because I'm not sure if it's wise + // to actually publish them. They are methods because I need them in + // the constructor and the functional API implementation. + // + // If you really, really need it, you can make these public, but check if you + // couldn't just do what you need with the functional API. Similarly, these + // methods are not chainable, because if you wanted chaining, you probably + // want to use the functional API instead. (It's probably OK to make + // these chainable, because these functions are all explicitly annotated + // with a ref-qualifier, the trailing &, that makes them illegal to call + // on temporaries.) + + /// Mutably set the device of `TensorOptions`. + void set_device(std::optional device) & noexcept { + if (device) { + device_ = *device; + has_device_ = true; + } else { + has_device_ = false; + } + } + + /// Mutably set the dtype of `TensorOptions`. 
+ void set_dtype(std::optional dtype) & noexcept { + if (dtype) { + dtype_ = *dtype; + has_dtype_ = true; + } else { + has_dtype_ = false; + } + } + + // legacy function to support ScalarType + void set_dtype(std::optional dtype) & noexcept { + if (dtype) { + dtype_ = scalarTypeToTypeMeta(*dtype); + has_dtype_ = true; + } else { + has_dtype_ = false; + } + } + + /// Mutably set the layout of `TensorOptions`. + void set_layout(std::optional layout) & noexcept { + if (layout) { + layout_ = *layout; + has_layout_ = true; + } else { + has_layout_ = false; + } + } + + /// Mutably set the `requires_grad` property of `TensorOptions`. + void set_requires_grad(std::optional requires_grad) & noexcept { + if (requires_grad) { + requires_grad_ = *requires_grad; + has_requires_grad_ = true; + } else { + has_requires_grad_ = false; + } + } + + /// Mutably set the `pinned_memory` property of `TensorOptions`. + void set_pinned_memory(std::optional pinned_memory) & noexcept { + if (pinned_memory) { + pinned_memory_ = *pinned_memory; + has_pinned_memory_ = true; + } else { + has_pinned_memory_ = false; + } + } + + /// Mutably set the `memory_Format` property of `TensorOptions`. + void set_memory_format(std::optional memory_format) & noexcept { + if (memory_format) { + memory_format_ = *memory_format; + has_memory_format_ = true; + } else { + has_memory_format_ = false; + } + } + + // WARNING: If you edit TensorOptions to add more options, you + // may need to adjust the implementation of Tensor::options. + // The criteria for whether or not Tensor::options must be adjusted + // is whether or not the new option you added should preserved + // by functions such as empty_like(); if it should be preserved, + // you must adjust options(). + // + // TODO: MemoryFormat is not implemented in this way + + // NB: We didn't use std::optional here, because then we can't pack + // the has_***_ boolean fields. 
+ + Device device_ = at::kCPU; // 16-bit + caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make(); // 16-bit + Layout layout_ = at::kStrided; // 8-bit + MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit + + // Bitmask required here to get this to fit inside 32 bits (or even 64 bits, + // for that matter) + + bool requires_grad_ : 1; + bool pinned_memory_ : 1; + + bool has_device_ : 1; + bool has_dtype_ : 1; + bool has_layout_ : 1; + bool has_requires_grad_ : 1; + bool has_pinned_memory_ : 1; + bool has_memory_format_ : 1; +}; + +// We should aspire to fit in one machine-size word; but a size greater than two +// words is too much. (We are doing terribly on 32-bit archs, where we require +// three machine size words to store tensor options. Eek!) +static_assert( + sizeof(TensorOptions) <= sizeof(int64_t) * 2, + "TensorOptions must fit in 128-bits"); + +/// Convenience function that returns a `TensorOptions` object with the `dtype` +/// set to the given one. +inline TensorOptions dtype(caffe2::TypeMeta dtype) { + return TensorOptions().dtype(dtype); +} + +// legacy function to support ScalarType +inline TensorOptions dtype(ScalarType dtype) { + return TensorOptions().dtype(scalarTypeToTypeMeta(dtype)); +} + +/// Convenience function that returns a `TensorOptions` object with the `layout` +/// set to the given one. +inline TensorOptions layout(Layout layout) { + return TensorOptions().layout(layout); +} + +/// Convenience function that returns a `TensorOptions` object with the `device` +/// set to the given one. +inline TensorOptions device(Device device) { + return TensorOptions().device(device); +} + +/// Convenience function that returns a `TensorOptions` object with the +/// `device` set to CUDA and the `device_index` set to the given one. 
+inline TensorOptions device_index(c10::DeviceIndex device_index) { + return TensorOptions().device_index(device_index); +} + +/// Convenience function that returns a `TensorOptions` object with the +/// `requires_grad` set to the given one. +inline TensorOptions requires_grad(bool requires_grad = true) { + return TensorOptions().requires_grad(requires_grad); +} + +/// Convenience function that returns a `TensorOptions` object with the +/// `memory_format` set to the given one. +inline TensorOptions memory_format(MemoryFormat memory_format) { + return TensorOptions().memory_format(memory_format); +} + +C10_API std::ostream& operator<<( + std::ostream& stream, + const TensorOptions& options); + +template +inline TensorOptions dtype() { + return dtype(caffe2::TypeMeta::Make()); +} + +inline std::string toString(const TensorOptions& options) { + std::ostringstream stream; + stream << options; + return stream.str(); +} + +// This is intended to be a centralized location by which we can determine +// what an appropriate DispatchKey for a tensor is. 
+inline DispatchKey computeDispatchKey( + std::optional dtype, + std::optional layout, + std::optional device) { + const auto layout_ = layout_or_default(layout); + const auto device_ = device_or_default(device); + switch (layout_) { + case Layout::Jagged: + case Layout::Strided: { + const auto dtype_ = dtype_or_default(dtype); + switch (device_.type()) { +#define DO_CASE(device, _) \ + case c10::DeviceType::device: { \ + if (isQIntType(dtype_)) { \ + return DispatchKey::Quantized##device; \ + } \ + return DispatchKey::device; \ + } + C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused) +#undef DO_CASE + case c10::DeviceType::FPGA: + return DispatchKey::FPGA; + case c10::DeviceType::MAIA: + return DispatchKey::MAIA; + case c10::DeviceType::Vulkan: + return DispatchKey::Vulkan; + case c10::DeviceType::Metal: + return DispatchKey::Metal; + case c10::DeviceType::MKLDNN: + case c10::DeviceType::OPENGL: + case c10::DeviceType::OPENCL: + case c10::DeviceType::IDEEP: + TORCH_INTERNAL_ASSERT( + 0, + "This is a grandfathered Caffe2 device type ", + device_.type(), + ", it shouldn't ever convert to a DispatchKey. 
File a bug describing what you were doing if you think this is in error."); + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for dense layout: ", + device_.type()); + } + } + case Layout::Sparse: + switch (device_.type()) { +#define DO_CASE(device, _) \ + case c10::DeviceType::device: { \ + return DispatchKey::Sparse##device; \ + } + C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused) +#undef DO_CASE + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for sparse layout: ", + device_.type()); + } + case Layout::Mkldnn: + switch (device_.type()) { + case c10::DeviceType::CPU: + return DispatchKey::MkldnnCPU; + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for mkldnn layout: ", + device_.type()); + } + case Layout::SparseCsr: + case Layout::SparseCsc: + case Layout::SparseBsr: + case Layout::SparseBsc: + switch (device_.type()) { +#define DO_CASE(device, _) \ + case c10::DeviceType::device: { \ + return DispatchKey::SparseCsr##device; \ + } + C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused) +#undef DO_CASE + default: + TORCH_CHECK_NOT_IMPLEMENTED( + false, + "Unsupported device type for ", + layout_, + " layout: ", + device_.type()); + } + default: + TORCH_CHECK(false, "Unsupported layout: ", layout_); + } +} + +inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) { + switch (dispatch_key) { +#define DO_CASE(bc, _) case DispatchKey::Sparse##bc: + C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused) +#undef DO_CASE + return Layout::Sparse; +#define DO_CASE(bc, _) case DispatchKey::SparseCsr##bc: + C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused) +#undef DO_CASE + TORCH_CHECK( + false, "Cannot map DispatchKey ", dispatch_key, " to a unique layout."); + case DispatchKey::MkldnnCPU: + return Layout::Mkldnn; + default: + return Layout::Strided; + } +} + +inline c10::DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) { + switch (dispatch_key) { + // stuff that's real +#define 
DO_CASE(suffix, prefix) \ + case DispatchKey::prefix##suffix: \ + return c10::DeviceType::suffix; +#define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix) + C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES) +#undef DO_CASES +#undef DO_CASE + + case DispatchKey::MkldnnCPU: + return c10::DeviceType::CPU; + case DispatchKey::Vulkan: + return c10::DeviceType::Vulkan; + + case DispatchKey::MAIA: + return c10::DeviceType::MAIA; + default: + TORCH_CHECK( + false, + "DispatchKey ", + dispatch_key, + " doesn't correspond to a device"); + } +} + +inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) { + return TensorOptions() + .layout(dispatchKeyToLayout(dispatch_key)) + .device(dispatchKeyToDeviceType(dispatch_key)); +} + +namespace detail { +inline bool backend_supports_empty_operator(const TensorOptions& options) { + // Quantized backends don't support at::empty(). + // They have separate operators like at::empty_quantized() that take in + // extra information about how to quantize the tensor. + return !isQIntType(typeMetaToScalarType(options.dtype())); +} + +} // namespace detail + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..73bcd698d44d4a5cb224ce9513a6e5434497e6d3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h @@ -0,0 +1,42 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +struct C10_API UndefinedTensorImpl final : public TensorImpl { + public: + // Without this, we get: + // error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in + // device code + // (ostensibly because the constexpr tricks MSVC into trying to compile this + // function for device as well). 
+#ifdef _WIN32 + static inline TensorImpl* singleton() { +#else + static constexpr inline TensorImpl* singleton() { +#endif + return &_singleton; + } +#ifdef DEBUG + bool has_storage() const override; +#endif + void set_storage_offset(int64_t offset) override; + + protected: + bool is_contiguous_custom(MemoryFormat format) const override; + IntArrayRef strides_custom() const override; + SymIntArrayRef sym_strides_custom() const override; + + private: + UndefinedTensorImpl(); + static UndefinedTensorImpl _singleton; + const char* tensorimpl_type_name() const override; +}; + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h new file mode 100644 index 0000000000000000000000000000000000000000..623da03503ae1780f02d68193b276c32383764af --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h @@ -0,0 +1,48 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +namespace detail { +// This template can only be specialized at int64_t and c10::SymInt; +// you'll get linker errors otherwise +template +C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar); +} // namespace detail + +template +T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) { + // Inline the fast paths + if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) { + // For SymInts, we want an explicit control flow to trigger a guard, so we + // may as well branch too. 
+ if (dim < 0) { + return dim + dim_post_expr; + } + return dim; + } + // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors) + return c10::detail::maybe_wrap_dim_slow( + std::move(dim), std::move(dim_post_expr), wrap_scalar); +} + +inline int64_t maybe_wrap_dim( + int64_t dim, + int64_t dim_post_expr, + bool wrap_scalar = true) { + return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar); +} + +inline c10::SymInt maybe_wrap_dim( + c10::SymInt dim, + c10::SymInt dim_post_expr, + bool wrap_scalar = true) { + return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar); +} + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/alignment.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..fcb960134a68aa788392e12066a205560c4f44fb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/alignment.h @@ -0,0 +1,21 @@ +#pragma once + +#include + +namespace c10 { + +#ifdef C10_MOBILE +// Use 16-byte alignment on mobile +// - ARM NEON AArch32 and AArch64 +// - x86[-64] < AVX +constexpr size_t gAlignment = 16; +#else +// Use 64-byte alignment should be enough for computation up to AVX512. 
+constexpr size_t gAlignment = 64; +#endif + +constexpr size_t gPagesize = 4096; +// since the default thp pagesize is 2MB, enable thp only +// for buffers of size 2MB or larger to avoid memory bloating +constexpr size_t gAlloc_threshold_thp = static_cast(2) * 1024 * 1024; +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..a9b9b1219dfedf8094e8fdcf021bf01966098235 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h @@ -0,0 +1,365 @@ +#pragma once + +#include +#include +#include +#include + +// Just for C10_ANONYMOUS_VARIABLE +#include + +#include + +namespace c10 { + +// Forward declaration +class DataPtr; + +/** + * Note [Flags defining the behavior of events] + * + * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The + * BACKEND_DEFAULT is what a particular backend would select if no + * flags were given. PYTORCH_DEFAULT is the PyTorch's framework default + * choice for events on that backend, which may not be the same. + * + * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each + * backend implementation. + */ +enum class EventFlag { + // Disable timing + PYTORCH_DEFAULT, + // Enable timing + BACKEND_DEFAULT, + // FOR TESTING ONLY + INVALID +}; + +namespace impl { + +/** + * DeviceGuardImplInterface represents the virtual interface which provides + * functionality to provide an RAII class for device and stream switching, + * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is + * expected to implement and register an implementation of this interface. + * All classes which inherit from DeviceGuardImplInterface should be declared + * 'final'. 
+ * + * This class exists because we provide a unified interface for performing + * device guards via DeviceGuard, but we cannot assume that we have actually + * compiled against the, e.g., CUDA library, which actually implements + * this guard functionality. In this case, a dynamic dispatch is required + * to cross the library boundary. + * + * If possible, you should directly use implementations of this interface; + * those uses will be devirtualized. + */ +struct C10_API DeviceGuardImplInterface { + DeviceGuardImplInterface() = default; + DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default; + DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) = + default; + DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default; + DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept = + default; + + /** + * Return the type of device managed by this guard implementation. + */ + virtual DeviceType type() const = 0; + + /** + * Set the current device to Device, and return the previous Device. + */ + virtual Device exchangeDevice(Device) const = 0; + // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might + // consider replacing exchangeDevice with a non-virtual function with a baked + // in implementation; however, note that this will triple the number of + // virtual calls (when you implement exchangeDevice in a final subclass, + // the compiler gets to devirtualize everything; it won't do that if you don't + // define it in the subclass!) A common way to solve this problem is to use + // some sort of CRTP; however, we can template DeviceGuardImplInterface since + // we really *do* need it to be virtual. A little boilerplate seems easiest + // to explain. (Another way around this problem is to provide inline + // functions that provide the default implementations, but this seems a little + // hard to explain. 
In any case, we're only going to have on order of ten + // implementations of this anyway.) + + /** + * Get the current device. + */ + virtual Device getDevice() const = 0; + + /** + * Set the current device to Device. + */ + virtual void setDevice(Device) const = 0; + + /** + * Set the current device to Device, without checking for errors + * (so, e.g., this can be called from a destructor). + */ + virtual void uncheckedSetDevice(Device) const noexcept = 0; + + /** + * Get the current stream for a given device. + */ + virtual Stream getStream(Device) const noexcept = 0; + + /** + * Get the default stream for a given device. + */ + virtual Stream getDefaultStream(Device) const { + TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.") + } + + /** + * Get a stream from the global pool for a given device. + */ + virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false) + const { + (void)isHighPriority; // Suppress unused variable warning + TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.") + } + + /** + * Return a new stream for a given device and priority. The stream will be + * copied and shared around, device backend should be able to correctly handle + * the lifetime of the stream. + */ + virtual Stream getNewStream(Device, int priority = 0) const { + (void)priority; + TORCH_CHECK(false, "Backend doesn't support create a new Stream.") + } + + /** + * Set a stream to be the thread local current stream for its device. + * Return the previous stream for that device. You are NOT required + * to set the current device to match the device of this stream. + */ + virtual Stream exchangeStream(Stream) const noexcept = 0; + + /** + * Destroys the given event. + */ + virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/) + const noexcept {} + + /** + * Increments the event's version and enqueues a job with this version + * in the stream's work queue. 
When the stream process that job + * it notifies all streams waiting on / blocked by that version of the + * event to continue and marks that version as recorded. + * */ + virtual void record( + void** /*event*/, + const Stream& /*stream*/, + const DeviceIndex /*device_index*/, + const c10::EventFlag /*flag*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Does nothing if the event has not been scheduled to be recorded. + * If the event was previously enqueued to be recorded, a command + * to wait for the version of the event that exists at the time of this call + * is inserted in the stream's work queue. + * When the stream reaches this command it will stop processing + * additional commands until that version of the event is marked as recorded. + */ + virtual void block(void* /*event*/, const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Returns true if (and only if) + * (1) the event has never been scheduled to be recorded + * (2) the current version is marked as recorded. + * Returns false otherwise. + */ + virtual bool queryEvent(void* /*event*/) const { + TORCH_CHECK(false, "Backend doesn't support events."); + } + + /** + * Get the number of devices. WARNING: This is REQUIRED to not raise + * an exception. If there is some sort of problem, e.g., driver error, + * you should report that there are zero available devices. + */ + virtual DeviceIndex deviceCount() const noexcept = 0; + + /** + * Return true if all the work previously enqueued on the stream for + * asynchronous execution has completed running on the device. + */ + virtual bool queryStream(const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support querying streams."); + } + + /** + * Wait (by blocking the calling thread) until all the work previously + * enqueued on the stream has completed running on the device. 
+ */ + virtual void synchronizeStream(const Stream& /*stream*/) const { + TORCH_CHECK(false, "Backend doesn't support synchronizing streams."); + } + + /** + * Wait (by blocking the calling thread) until all the work previously + * recorded on the event has completed running on the device. + */ + virtual void synchronizeEvent(void* /*event*/) const { + TORCH_CHECK(false, "Backend doesn't support synchronizing events."); + } + + /** + * Ensure the caching allocator (if any) is aware that the given DataPtr is + * being used on the given stream, and that it should thus avoid recycling the + * DataPtr until all work on that stream is done. + */ + virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const { + } + + /** + * Fetch the elapsed time between two recorded events. + */ + virtual double elapsedTime( + void* /*event1*/, + void* /*event2*/, + const DeviceIndex /*device_index*/) const { + TORCH_CHECK(false, "Backend doesn't support elapsedTime."); + } + + /** + * Intended use of this class is to leak the DeviceGuardImpl at program end. + * So you better not call the destructor, buster! + */ + virtual ~DeviceGuardImplInterface() = default; +}; + +// A no-op device guard impl that doesn't do anything interesting. Useful +// for devices that don't actually have a concept of device index. Prominent +// examples are CPU and Meta. 
+template +struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface { + NoOpDeviceGuardImpl() = default; + DeviceType type() const override { + return D; + } + Device exchangeDevice(Device) const override { + return Device(D, -1); // no-op + } + Device getDevice() const override { + return Device(D, -1); + } + void setDevice(Device) const override { + // no-op + } + void uncheckedSetDevice(Device) const noexcept override { + // no-op + } + Stream getStream(Device) const noexcept override { + // no-op + return Stream(Stream::DEFAULT, Device(D, -1)); + } + + Stream getNewStream(Device, int priority = 0) const override { + // no-op + (void)priority; + return Stream(Stream::DEFAULT, Device(D, -1)); + } + + // NB: These do NOT set the current device + Stream exchangeStream(Stream) const noexcept override { + // no-op + return Stream(Stream::DEFAULT, Device(D, -1)); + } + DeviceIndex deviceCount() const noexcept override { + return 1; + } + + // Event-related functions + void record( + void** /*event*/, + const Stream& /*stream*/, + const DeviceIndex /*device_index*/, + const EventFlag /*flag*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events."); + } + void block(void* /*event*/, const Stream& /*stream*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events.") + } + bool queryEvent(void* /*event*/) const override { + TORCH_CHECK(false, D, " backend doesn't support events.") + } + void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/) + const noexcept override {} + + // Stream-related functions + bool queryStream(const Stream& /*stream*/) const override { + return true; + } + void synchronizeStream(const Stream& /*stream*/) const override { + // Don't wait for anything. + } +}; + +// The registry is NON-owning. Each stored pointer is std::atomic so +// that under all interleavings of registry calls the structure is +// race-free. This doesn't cost us anything on reads in X86. 
(An +// unsynchronized implementation probably is OK too, but I didn't want +// to prove that we never read from device_guard_impl_registry at the +// same time some registration is occurring. Shiver.) +// +// I'd like this registry to be valid even at program destruction time +// (in case someone uses a DeviceGuard in a destructor to do some cleanup +// in the CUDA API.) Since there are no direct accesses of the underlying +// owning objects which I can use to enforce initialization order (unlike +// in a Meyer singleton), it implies that you must *leak* objects when +// putting them in the registry. This is done by deleting the destructor +// on DeviceGuardImplInterface. +// NOLINTNEXTLINE(*c-arrays*) +extern C10_API std::atomic + device_guard_impl_registry[static_cast( + DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)]; + +// I can't conveniently use c10/util/Registry.h for the following reason: +// c10/util/Registry.h gives me a slow way of Create'ing a object of some +// interface from the registry, but no way of quickly accessing an already +// created object. I'll be banging on getDeviceGuardImpl every time we do a +// DeviceGuard, so I really don't want to be doing an unordered_map lookup. +// Better if the registration mechanism directly drops its implementation +// into device_guard_impl_registry. + +class C10_API DeviceGuardImplRegistrar { + public: + DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*); +}; + +#define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl) \ + static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \ + g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl()); + +inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) { + // Two adjacent int16_t fields DeviceType and DeviceIndex has field access + // miscompiled on NVCC. To workaround this issue, we apply a mask to the + // DeviceType. First check if the DeviceType is 16-bit. 
+ // FB employees can see + // https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/ + // for more details + static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit"); + auto p = device_guard_impl_registry[static_cast(type) & 0xFF].load(); + + // This seems to be the first place where you make use of a device + // when you pass devices to factory functions. Give a nicer error + // message in this case. + TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices"); + return p; +} + +inline bool hasDeviceGuardImpl(DeviceType type) { + return device_guard_impl_registry[static_cast(type)].load(); +} + +} // namespace impl +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..741132b9f967c19a29e2d130890e0f5ef99289a2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h @@ -0,0 +1,59 @@ +#pragma once + +#include +#include + +namespace c10::impl { + +// This TLS controls whether or not we permanently associate PyObject +// with Tensor the first time it is allocated. When hermetic PyObject +// TLS is enabled (state is true), we DO NOT save PyObjects to Tensor, +// meaning you get a distinct PyObject whenever you execute the code in +// question. +struct C10_API HermeticPyObjectTLS { + static void set_state(bool state); + static bool get_state() { + // Hypothetical fastpath if torchdeploy/multipy isn't used. Per + // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf + // this qualifies relaxed access because it is a single-location data + // structure (only the boolean here). + // + // Forgetting about data races for a moment, is there a logical race? + // + // - Boolean only ever transitions from false to true. 
So the + // critical situation is when one interpreter is already running + // when a second interpreter switches haveState from false to true. + // + // - The first interpreter is indifferent whether or not it sees + // hasState true/false; obviously false works (this is what the + // interpreter was previously using; more directly, the interpreter + // calls into itself as the handler, so being hermetic is not + // required), and true simply means serviced python operator calls will + // be hermetic; in these cases it is expected to be functionally + // equivalent. + // + // - The second interpreter MUST see hasState true (as its requests will + // be forwarded to the first interpreter), but it is assumed that there + // is a synchronization between the interpreter initialization, and + // when we actually perform operations, so it is guaranteed to see + // hasState true. + // + // QED. + // + // This fastpath is currently disabled so that we can more easily test that + // hermetic mode works correctly even on stock build of PyTorch. + if (false && !haveState_.load(std::memory_order_relaxed)) + return false; + return get_tls_state(); + } + // Call this from the multipy/torchdeploy top level + static void init_state(); + + private: + // This only flipped once from false to true during torchdeploy/multipy + // initialization, and never again. + static std::atomic haveState_; + static bool get_tls_state(); +}; + +} // namespace c10::impl diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..3e9f91eff6170025b54763f985e25c44adacf474 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h @@ -0,0 +1,428 @@ +#pragma once + +// This file provides implementations of InlineDeviceGuard and +// InlineOptionalDeviceGuard. 
+ +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10::impl { + +/** + * A DeviceGuard is an RAII class that sets a device to some value + * on construction, and resets the device to its original value on + * destruction. + * + * InlineDeviceGuard is a helper class for implementing DeviceGuards. + * It is templated over a DeviceGuardImpl (anything that implements + * DeviceGuardImplInterface). There are two primary ways to instantiate + * InlineDeviceGuard: + * + * - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl. + * This is the best way to use InlineDeviceGuard, as all calls are + * devirtualized, giving you code as efficient as straight line + * calls to cudaGetDevice/cudaSetDevice. + * + * - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl + * retrieved from a DeviceType registry. We have explicitly instantiated + * InlineDeviceGuard this way as c10::DeviceGuard. + * + * If you are in a hurry, you can use InlineDeviceGuard directly: + * + * using CUDAGuard = impl::InlineDeviceGuard; + * + * However, you can provide a better user experience if you explicitly write a + * wrapper class that itself contains the template instantiation: + * + * class CUDAGuard { + * public: + * // ... the API ... + * private: + * impl::InlineDeviceGuard guard_; + * } + * + * The wrapper class provides a good place to write documentation, and helps + * avoid weird template instantiation errors when a user incorrectly uses the + * class. + * + * If you need to test this class, consider instantiating it with FakeGuardImpl. + */ +template +class InlineDeviceGuard { + public: + // Note [Omitted default constructor from RAII] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // In principle, we could add a default constructor to + // DeviceGuard which reads the current device and promises to + // restore to that device on exit. 
However, most cases where you + // would have written this, you probably meant to actually just + // use OptionalDeviceGuard (since you don't actually need the + // restore to happen if you don't ever actually set the device). + // We remove the constructor here to encourage you to think about + // what you actually want to happen. + explicit InlineDeviceGuard() = delete; + + /// Set the current device to the passed Device. + explicit InlineDeviceGuard(Device device) + : impl_(device.type()), + original_device_( + device.index() == -1 ? impl_.getDevice() + : impl_.exchangeDevice(device)), + current_device_(device.index() == -1 ? original_device_ : device) {} + + /// Set the current device index to the passed DeviceIndex. (The + /// device type is inferred from the template parameter T). + template < + typename U = T, + typename = + typename std::enable_if_t>> + explicit InlineDeviceGuard(DeviceIndex device_index) + : InlineDeviceGuard(Device(U::static_type, device_index)) {} + + /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit + /// DeviceGuardImplInterface pointer. + template < + typename U = T, + typename = typename std::enable_if_t>> + explicit InlineDeviceGuard( + Device device, + const DeviceGuardImplInterface* impl) + : impl_( + VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))), + original_device_( + device.index() == -1 ? impl_.getDevice() + : impl_.exchangeDevice(device)), + current_device_(device.index() == -1 ? original_device_ : device) {} + + /// Copy is disallowed + InlineDeviceGuard(const InlineDeviceGuard&) = delete; + InlineDeviceGuard& operator=(const InlineDeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. 
+ InlineDeviceGuard(InlineDeviceGuard&& other) = delete; + InlineDeviceGuard& operator=(InlineDeviceGuard&& other) = delete; + + ~InlineDeviceGuard() { + impl_.uncheckedSetDevice(original_device_); + } + + /// Sets the device to the given one. + template < + typename U = T, + typename std::enable_if_t, int> = 0> + void set_device(at::Device device) { + AT_ASSERT( + (U::static_type == DeviceType::HIP && device.is_cuda()) || + device.type() == U::static_type); + auto index = device.index(); + if (index == -1) + return; + impl_.setDevice(device); + current_device_ = device; + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device. This is effectively equivalent to + /// set_device when a guard supports only a single device type. + template + typename std::enable_if_t> reset_device( + at::Device device) { + set_device(device); + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device (for a possibly different device + /// type). + /// + /// This method is named reset_device to highlight the fact that previous + /// device settings from this guard are NOT preserved, even if the device + /// has a different device type. For example: + /// + /// // CUDA device is 0 + /// DeviceGuard g(Device(kCUDA, 1)); + /// g.reset_device(Device(kHIP, 2)); + /// // CUDA device is 0 (!!) + /// + /// NOTE: this implementation may skip some device setting if it can prove + /// that it is unnecessary. + /// + /// Optional argument is for testing only. 
+ template + typename std::enable_if_t> reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl = nullptr) { + auto index = device.index(); + if (index == -1) + return; + if (device.type() == original_device_.type()) { + AT_ASSERT(impl == nullptr || impl->type() == device.type()); + impl_.setDevice(device); + current_device_ = device; + } else { + // Destruct and reconstruct the DeviceGuard in place + impl_.setDevice(original_device_); + impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl); + original_device_ = impl_.exchangeDevice(device); + current_device_ = device; + } + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type. + void set_index(DeviceIndex index) { + reset_device(Device(original_device_.type(), index)); + } + + /// Returns the device that was set at the time the most recent + /// reset_device(), or otherwise the device at construction time. + Device original_device() const { + return original_device_; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return current_device_; + } + + protected: + T impl_; + + private: + Device original_device_; + Device current_device_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * + * InlineOptionalDeviceGuard is a helper class for implementing + * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to + * use this. See OptionalDeviceGuard for user-oriented usage notes. 
+ */ +template +class InlineOptionalDeviceGuard { + public: + // Note [Explicit initialization of optional fields] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Explicit initialization of optional fields + // required to workaround an nvcc bug; see + // https://github.com/pytorch/pytorch/issues/12117 + + /// Creates an uninitialized OptionalDeviceGuard. + explicit InlineOptionalDeviceGuard() + : guard_() // See Note [Explicit initialization of optional fields] + {} + + /// Set the current device to the passed Device, if it is not nullopt. + explicit InlineOptionalDeviceGuard(optional device_opt) + : guard_() { // See Note [Explicit initialization of optional fields] + if (device_opt.has_value()) { + guard_.emplace(device_opt.value()); + } + } + + /// Set the current device to the passed DeviceIndex, if it is not nullopt. + template < + typename U = T, + typename = + typename std::enable_if_t>> + explicit InlineOptionalDeviceGuard(optional device_index_opt) + : guard_() { // See Note [Explicit initialization of optional fields] + if (device_index_opt.has_value()) { + guard_.emplace(device_index_opt.value()); + } + } + + /// All constructors of DeviceGuard are valid for OptionalDeviceGuard + /// and result in initialized OptionalDeviceGuard. + template + explicit InlineOptionalDeviceGuard(Args&&... args) + : guard_(std::in_place, std::forward(args)...) {} + + // TODO: Consider reading Tensor and TensorList constructors here, when + // Tensor moves to c10. (These are only valid on OptionalDeviceGuard, + // because a Tensor may be undefined, in which case we need an uninitialized + // tensor guard.) 
+ + // Note [Move construction for RAII guards is tricky] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // In principle, move construction is useful for terminating + // the lifetime of a `OptionalDeviceGuard` early; for example: + // + // // current device is d0 + // OptionalDeviceGuard g1(d1); + // // current device is d1 + // { + // OptionalDeviceGuard g2(std::move(g1)); + // } + // // current device is d0!! + // + // However, it's difficult to implement the move constructor + // in a way that works in all situations. For example, consider + // the following example: + // + // OptionalDeviceGuard g1(d1); + // { + // OptionalDeviceGuard g2(d2); + // { + // OptionalDeviceGuard g3(std::move(g1)); // !!! + // } + // } + // + // What should the current device be while g3 in scope... and what + // should it be after it goes out of scope? What about g2? + // There don't seem to be satisfactory answers for these questions. + // + // It's in principle possible to raise an error when this occurs + // by doing some extra thread-local bookkeeping. But why bother? + // Just don't provide the constructor. + InlineOptionalDeviceGuard(InlineOptionalDeviceGuard&& other) = delete; + + // Note [Move assignment for RAII guards is tricky] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Move assignment is deleted, because you need to know which guard was + // defined "first", as that guard's original_device_ wins--with the current + // representation, we have no way of telling which is the case. (Move + // construction does not have this problem, as one guard is always + // uninitialized.) 
+ // + // We can make this clear by way of a pair of examples: + // + // Example 1: + // + // // initial device is n0 + // { + // CUDAGuard g1(n1); + // { + // CUDAGuard g2(n2); + // // current device should be n2 + // g1 = std::move(g2); + // // current device should still be n2 + // } + // // current device should still be n2 + // } + // // current device should be n0 + // + // Example 2 (flip the order of the two guards): + // + // // initial device is n0 + // { + // CUDAGuard g2(n2); + // { + // CUDAGuard g1(n1); + // // current device should be n1 + // g1 = std::move(g2); + // // current device should be n2 + // } + // // current device should be n0 (since g2 has been vacated) + // } + // + // In both examples, we need g1 to restore to n0 after move assignment. + // However, in example 1, this is determined by the restore value of g1 + // (prior to the move). In example 2, however, it is determined by the the + // restore value of g2(!!). We don't know which one should win, without having + // a way of telling which guard was allocated first. + // + // We could solve this with an extra thread-local variable. But no one is + // actually using move-assignment. So just get rid of it. + InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) = + delete; + + /// Sets the device to the given one. Initializes OptionalDeviceGuard if it + /// is not already initialized. + template < + typename U = T, + typename = + typename std::enable_if_t>> + void set_device(at::Device device) { + if (!guard_.has_value()) { + guard_.emplace(device); + } else { + guard_->set_device(device); + } + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device (for a possibly different device + /// type). Initializes OptionalDeviceGuard if it is not already initialized. + /// + /// See notes on why this is called reset_device on InlineDeviceGuard. + /// + /// Optional argument is for testing only. 
+ template < + typename U = T, + typename = typename std::enable_if_t>> + void reset_device( + at::Device device, + const DeviceGuardImplInterface* impl = nullptr) { + if (!guard_.has_value()) { + guard_.emplace(device, impl); + } else { + guard_->reset_device(device, impl); + } + } + + /// Resets the currently set device to its original device, and then sets the + /// current device to the passed device. Initializes the guard if it is + /// not already initialized. This is effectively equivalent to set_device + /// when a guard supports only a single device type. + template < + typename U = T, + typename = + typename std::enable_if_t>> + void reset_device(at::Device device) { + if (!guard_.has_value()) { + guard_.emplace(device); + } else { + guard_->reset_device(device); + } + } + + /// Sets the device index to the given one. The device type is statically + /// known. + template < + typename U = T, + typename = + typename std::enable_if_t>> + void set_index(DeviceIndex index) { + if (!guard_.has_value()) { + guard_.emplace(index); + } else { + guard_->set_index(index); + } + } + + /// Returns the device that was set immediately prior to initialization of + /// the, guard, or nullopt if the guard is uninitialized. + optional original_device() const { + return guard_.has_value() ? make_optional(guard_->original_device()) + : nullopt; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device, if the guard is initialized, + /// or nullopt if the guard is uninitialized. + optional current_device() const { + return guard_.has_value() ? make_optional(guard_->current_device()) + : nullopt; + } + + /// Restore the original device, resetting this guard to uninitialized state. 
+ void reset() { + guard_.reset(); + } + + private: + optional> guard_; +}; + +} // namespace c10::impl diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..b99e7db72addc66d0fae310728dc89f660562f1c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineStreamGuard.h @@ -0,0 +1,255 @@ +#pragma once + +#include +#include +#include + +namespace c10::impl { + +/** + * A StreamGuard is an RAII class that changes the current device + * to the device corresponding to some stream, and changes the + * default stream on that device to be this stream. + * + * InlineStreamGuard is a helper class for implementing StreamGuards. + * See InlineDeviceGuard for guidance on how to use this class. + */ +template +class InlineStreamGuard : private InlineDeviceGuard { + public: + /// No default constructor, see Note [Omitted default constructor from RAII] + explicit InlineStreamGuard() = delete; + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + explicit InlineStreamGuard(Stream stream) + : InlineDeviceGuard(stream.device()), + original_stream_of_original_device_( + this->impl_.getStream(original_device())), + original_stream_of_current_device_(this->impl_.exchangeStream(stream)), + current_stream_(stream) {} + + /// This constructor exists purely for testing + template < + typename U = T, + typename = typename std::enable_if_t>> + explicit InlineStreamGuard( + Stream stream, + const DeviceGuardImplInterface* impl) + : InlineDeviceGuard( + stream.device(), + impl ? 
impl : getDeviceGuardImpl(stream.device_type())), + original_stream_of_original_device_( + this->impl_.getStream(original_device())), + original_stream_of_current_device_(this->impl_.exchangeStream(stream)), + current_stream_(stream) {} + + /// Copy is disallowed + InlineStreamGuard(const InlineStreamGuard&) = delete; + InlineStreamGuard& operator=(const InlineStreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + InlineStreamGuard(InlineStreamGuard&& other) = delete; + InlineStreamGuard& operator=(InlineStreamGuard&& other) = delete; + + ~InlineStreamGuard() { + this->impl_.exchangeStream(original_stream_of_current_device_); + } + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// + /// NOTE: this implementation may skip some stream/device setting if + /// it can prove that it is unnecessary. + /// + /// WARNING: reset_stream does NOT preserve previously set streams on + /// different devices. If you need to set streams on multiple devices + /// use MultiStreamGuard instead. + void reset_stream(Stream stream) { + // TODO: make a version that takes an impl argument. Unfortunately, + // that will require SFINAE because impl is only valid for the + // VirtualGuardImpl specialization. 
+ if (stream.device() == this->current_device()) { + this->impl_.exchangeStream(stream); + current_stream_ = stream; + } else { + // Destruct and reconstruct the StreamGuard in-place + this->impl_.exchangeStream(original_stream_of_current_device_); + this->reset_device(stream.device()); + original_stream_of_current_device_ = this->impl_.exchangeStream(stream); + current_stream_ = stream; + } + } + + // It's not clear if set_device should also reset the current stream + // if the device is unchanged; therefore, we don't provide it. + // The situation is somewhat clearer with reset_device, but it's still + // a pretty weird thing to do, so haven't added this either. + + /// Returns the stream of the original device prior to this guard. Subtly, + /// the stream returned here is the original stream of the *original* + /// device; i.e., it's the stream that your computation *would* have + /// been put on, if it hadn't been for this meddling stream guard. + /// This is usually what you want. + Stream original_stream() const { + return original_stream_of_original_device_; + } + + /// Returns the most recent stream that was set using this device guard, + /// either from construction, or via set_stream. + Stream current_stream() const { + return current_stream_; + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device/reset_device/set_index. + Device current_device() const { + return InlineDeviceGuard::current_device(); + } + + /// Returns the device that was set at the most recent reset_stream(), + /// or otherwise the device at construction time. 
+ Device original_device() const { + return InlineDeviceGuard::original_device(); + } + + private: + Stream + original_stream_of_original_device_; // what the user probably cares about + Stream original_stream_of_current_device_; // what we need to restore + Stream current_stream_; +}; + +/** + * An OptionalStreamGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * See InlineOptionalDeviceGuard for more guidance on how to use this class. + */ +template +class InlineOptionalStreamGuard { + public: + /// Creates an uninitialized stream guard. + explicit InlineOptionalStreamGuard() + : guard_() // See Note [Explicit initialization of optional fields] + {} + + /// Set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream, + /// if the passed stream is not nullopt. + explicit InlineOptionalStreamGuard(optional stream_opt) : guard_() { + if (stream_opt.has_value()) { + guard_.emplace(stream_opt.value()); + } + } + + /// All constructors of StreamGuard are valid for OptionalStreamGuard + template + explicit InlineOptionalStreamGuard(Args&&... args) + : guard_(std::in_place, std::forward(args)...) {} + + // See Note [Move construction for RAII guards is tricky] + InlineOptionalStreamGuard(InlineOptionalStreamGuard&& other) = delete; + + // See Note [Move assignment for RAII guards is tricky] + InlineOptionalStreamGuard& operator=(InlineOptionalStreamGuard&& other) = + delete; + + /// Resets the currently set stream to the original stream and + /// the currently set device to the original device. Then, + /// set the current device to the device associated with the passed stream, + /// and set the current stream on that device to the passed stream. + /// Initializes the OptionalStreamGuard if it was not previously initialized. 
+ void reset_stream(Stream stream) { + if (guard_.has_value()) { + guard_->reset_stream(stream); + } else { + guard_.emplace(stream); + } + } + + /// Returns the stream that was set at the time the guard was most recently + /// initialized, or nullopt if the guard is uninitialized. + optional original_stream() const { + return guard_.has_value() ? make_optional(guard_->original_stream()) + : nullopt; + } + + /// Returns the most recent stream that was set using this stream guard, + /// either from construction, or via reset_stream, if the guard is + /// initialized, or nullopt if the guard is uninitialized. + optional current_stream() const { + return guard_.has_value() ? make_optional(guard_->current_stream()) + : nullopt; + } + + /// Restore the original device and stream, resetting this guard to + /// uninitialized state. + void reset() { + guard_.reset(); + } + + private: + optional> guard_; +}; + +template +class InlineMultiStreamGuard { + public: + /// Calls `set_stream` on each of the streams in the list. + /// This may be useful if you need to set different streams + /// for different devices. + explicit InlineMultiStreamGuard(ArrayRef streams) { + if (!streams.empty()) { + impl_.emplace(getDeviceTypeOfStreams(streams)); + original_streams_.reserve(streams.size()); + for (const Stream& s : streams) { + original_streams_.emplace_back(this->impl_->exchangeStream(s)); + } + } + } + + /// Copy is disallowed + InlineMultiStreamGuard(const InlineMultiStreamGuard&) = delete; + InlineMultiStreamGuard& operator=(const InlineMultiStreamGuard&) = delete; + + /// Move is disallowed, as StreamGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. 
+ InlineMultiStreamGuard(InlineMultiStreamGuard&& other) = delete; + InlineMultiStreamGuard& operator=(InlineMultiStreamGuard&& other) = delete; + + ~InlineMultiStreamGuard() noexcept { + if (this->impl_.has_value()) { + for (const Stream& s : original_streams_) { + this->impl_->exchangeStream(s); + } + } + } + + protected: + optional impl_; + + private: + /// The original streams that were active on all devices. + std::vector original_streams_; + + static DeviceType getDeviceTypeOfStreams(ArrayRef streams) { + TORCH_INTERNAL_ASSERT(!streams.empty()); + DeviceType type = streams[0].device_type(); + for (const auto idx : c10::irange(1, streams.size())) { + TORCH_CHECK_VALUE( + streams[idx].device_type() == type, + "Streams have a mix of device types: stream 0 is on ", + streams[0].device(), + " while stream ", + idx, + " is on device ", + streams[idx].device()); + } + return type; + } +}; + +} // namespace c10::impl diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h new file mode 100644 index 0000000000000000000000000000000000000000..176d0a6b6421922948cb8ebb5932c437c4552021 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h @@ -0,0 +1,164 @@ +#pragma once + +#include +#include + +// TLS management for DispatchKeySet (the "local" DispatchKeySet(s)) +// +// This manages two thread-local DispatchKeySets: +// +// - The included type set, which adds a tensor type for consideration +// in dispatch. (For example, you might add Profiling to +// the included type set to turn on profiling on all tensor operations.) +// +// - The excluded type set, which disqualifies a tensor type from dispatch. +// (For example, after redispatching on variable, we disqualify +// Autograd so we don't attempt to handle variable again.) +// (Exclusion wins over inclusion.) 
+// +// NB: Originally, I implemented the excluded type set as storing the inverted +// set, but TLS is defined to be zero-initialized, so this doesn't actually work +// (if it's inverted, you want the set to be -1 initialized). + +namespace c10::impl { + +// POD version of LocalDispatchKeySet. Declared here just so that +// we can put it in the guards. +// This struct encapsulates special handling for TLS initialization +// in set_included()/included() API so that they reflect the truth. +// If you want to create PODLocalDispatchKeySet with non-zero state, +// use set_included() instead of default constructor. +struct C10_API PODLocalDispatchKeySet { + uint64_t included_; + uint64_t excluded_; + + // See Note [TLS Initialization] + DispatchKeySet included() const { + return DispatchKeySet(DispatchKeySet::RAW, included_) ^ + c10::default_included_set; + } + DispatchKeySet excluded() const { + return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^ + c10::default_excluded_set; + } + + void set_included(DispatchKeySet x) { + included_ = (x ^ c10::default_included_set).raw_repr(); + } + void set_excluded(DispatchKeySet x) { + excluded_ = (x ^ c10::default_excluded_set).raw_repr(); + } +}; +static_assert( + std::is_trivial_v, + "PODLocalDispatchKeySet must be a POD type."); + +struct C10_API LocalDispatchKeySet { + /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x) + : included_(x.included()), excluded_(x.excluded()) {} + DispatchKeySet included_; + DispatchKeySet excluded_; +}; + +// thread_local variables cannot be C10_API on Windows. +// Inlining this seems to break AutoDispatchBelowAutograd on Android. 
+#if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +C10_API LocalDispatchKeySet tls_local_dispatch_key_set(); +#else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) +extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set; + +inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() { + // Don't let people fiddle with the thread_local directly just + // because they include this header. + return raw_local_dispatch_key_set; +} +#endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE) + +// Internal, use ThreadLocalStateGuard +C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set); + +// RAII API for manipulating the thread-local dispatch state. + +class C10_API IncludeDispatchKeyGuard { + public: + IncludeDispatchKeyGuard(DispatchKeySet); + IncludeDispatchKeyGuard(DispatchKey k) + : IncludeDispatchKeyGuard(DispatchKeySet(k)) {} + IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete; + IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete; + IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete; + IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete; + ~IncludeDispatchKeyGuard(); + + private: + // A little micro-optimization to save us from tls_get_addr call + // on destruction + PODLocalDispatchKeySet* tls_; + DispatchKeySet include_; +}; + +class C10_API ExcludeDispatchKeyGuard { + public: + ExcludeDispatchKeyGuard(DispatchKeySet); + ExcludeDispatchKeyGuard(DispatchKey k) + : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {} + ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete; + ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete; + ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete; + ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete; + ~ExcludeDispatchKeyGuard(); + + private: + // A little micro-optimization to save us from tls_get_addr call 
+ // on destruction + PODLocalDispatchKeySet* tls_; + DispatchKeySet exclude_; +}; + +struct C10_API ForceDispatchKeyGuard { + public: + ForceDispatchKeyGuard() + : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {} + ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set) + : ForceDispatchKeyGuard() { + c10::impl::_force_tls_local_dispatch_key_set(key_set); + } + ForceDispatchKeyGuard( + c10::DispatchKeySet include, + c10::DispatchKeySet exclude) + : ForceDispatchKeyGuard() { + auto updated_set = saved_keyset_; + updated_set.included_ = include; + updated_set.excluded_ = exclude; + c10::impl::_force_tls_local_dispatch_key_set(updated_set); + } + ~ForceDispatchKeyGuard() { + c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_); + } + + private: + c10::impl::LocalDispatchKeySet saved_keyset_; +}; + +// Non-RAII API for manipulating the thread-local dispatch state. +// Please prefer the RAII API. The non-RAII API may be useful when +// the included/excluded state of a given DispatchKey must span +// many calls from the Python to the C++, so you cannot conveniently +// use an RAII guard. +// +// Example use case: a Python context manager that includes a certain +// DispatchKey, to ensure ops running under the context manager dispatch +// through that DispatchKey's registered overrides. +// +// The non-RAII API is less efficient than the RAII guards because both the +// getter and setter will do a tls_getaddr lookup (the RAII struct only needs +// one!) 
+ +C10_API bool tls_is_dispatch_key_excluded(DispatchKey x); +C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state); +C10_API bool tls_is_dispatch_key_included(DispatchKey x); +C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state); +C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks); +C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks); + +} // namespace c10::impl diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h new file mode 100644 index 0000000000000000000000000000000000000000..518b0e63e492178d3bd861f4eba113cdf3108c77 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PyObjectSlot.h @@ -0,0 +1,190 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10::impl { + +struct C10_API PyObjectSlot { + public: + PyObjectSlot(); + + ~PyObjectSlot(); + + void maybe_destroy_pyobj(); + + // Associate the TensorImpl with the specified PyObject, and, if necessary, + // also tag the interpreter. + // + // NB: This lives in a header so that we can inline away the switch on status + // + // NB: THIS FUNCTION CAN RAISE AN EXCEPTION. Make sure to clean up after + // PyObject if necessary! 
+ void init_pyobj( + PyInterpreter* self_interpreter, + PyObject* pyobj, + PyInterpreterStatus status) { + impl::PyInterpreter* expected = nullptr; + switch (status) { + case impl::PyInterpreterStatus::DEFINITELY_UNINITIALIZED: + // caller guarantees there is no multithreaded access; if there is + // no data race OK to do a relaxed store + pyobj_interpreter_.store(self_interpreter, std::memory_order_relaxed); + break; + case impl::PyInterpreterStatus::TAGGED_BY_US: + // no tagging is necessary, the tag is already correct + break; + case impl::PyInterpreterStatus::MAYBE_UNINITIALIZED: + // attempt to claim this TensorImpl with the specified interpreter + // tag + if (pyobj_interpreter_.compare_exchange_strong( + expected, self_interpreter, std::memory_order_acq_rel)) { + break; + } + // test if, actually, it was already tagged by us! this situation can't + // be caused by a race, but it could be caused by a situation + // where someone conservatively tagged the tensor as MAYBE_UNINITIALIZED + // (because they didn't pre-check the tag) when actually it was + // owned by the interpreter + if (expected == self_interpreter) { + break; + } + // fallthrough, we lost the race. We are guaranteed not to lose the + // race with ourself, as calls to init_pyobj with the same interpreter + // ID must be sequentialized by the GIL + [[fallthrough]]; + case impl::PyInterpreterStatus::TAGGED_BY_OTHER: + TORCH_CHECK( + false, + "cannot allocate PyObject for Tensor on interpreter ", + self_interpreter, + " that has already been used by another torch deploy interpreter ", + pyobj_interpreter_.load()); + } + + // we are the ONLY thread that can have gotten to this point. It is not + // possible to conflict with another zero interpreter as access is protected + // by GIL + // NB: owns_pyobj tag is initially false + pyobj_ = pyobj; + } + + // Query the PyObject interpreter. This may return null if there is no + // interpreter. This is racy! 
+ PyInterpreter* pyobj_interpreter(); + + PyObject* _unchecked_untagged_pyobj() const; + + // Test the interpreter tag. If tagged for the current interpreter, return + // a non-nullopt (but possibly null) PyObject. If (possibly) untagged, + // returns a nullopt. If it is definitely invalid, raises an error. + // + // If `ignore_hermetic_tls` is false and this function is called from a + // hermetic context (ie, `HermeticPyObjectTLS::get_state()` is true), then + // nullopt is returned. If `ignore_hermetic_tls` is true, then the hermetic + // context is ignored, allowing you to check the interpreter tag of a + // nonhermetic PyObject from within a hermetic context. This is necessary + // because there are some cases where the deallocator function of a + // nonhermetic PyObject is called from within a hermetic context, so it must + // be properly treated as a nonhermetic PyObject. + // + // NB: this lives in header so that we can avoid actually creating the + // std::optional + std::optional check_pyobj( + PyInterpreter* self_interpreter, + bool ignore_hermetic_tls = false) const { + // Note [Memory ordering on Python interpreter tag] + impl::PyInterpreter* interpreter = + pyobj_interpreter_.load(std::memory_order_acquire); + if (interpreter == nullptr) { + // NB: This never returns DEFINITELY_UNINITIALIZED because there is + // always the possibility that another thread races to initialize + // after we query here. The only time when we can conclude a tensor + // is definitely uninitialized is when we have just allocated it and + // it cannot have escaped to other threads yet + return c10::nullopt; + } else if (interpreter == self_interpreter) { + // NB: pyobj_ could still be null! 
+ if (!ignore_hermetic_tls && c10::impl::HermeticPyObjectTLS::get_state()) { + return c10::nullopt; + } else { + return c10::make_optional(_unchecked_untagged_pyobj()); + } + } else { + TORCH_CHECK( + false, + "cannot access PyObject for Tensor on interpreter ", + (*self_interpreter)->name(), + " that has already been used by another torch deploy interpreter ", + (*pyobj_interpreter_.load())->name()); + } + } + + // Clear the PyObject field for an interpreter, in situations where we + // statically know the tensor is tagged with our interpreter. + void unchecked_clear_pyobj(PyInterpreter* interpreter); + + PyInterpreter& load_pyobj_interpreter() const; + + // Check if the PyObjectSlot's interpreter is the same as the specified + // interpreter + bool check_interpreter(PyInterpreter* interpreter); + + // Check if the PyObjectSlot is holding a PyObject, owned or non-owned + bool has_pyobj_nonhermetic(); + + bool owns_pyobj(); + + void set_owns_pyobj(bool b); + + private: + // This field contains the interpreter tag for this object. See + // Note [Python interpreter tag] for general context + // + // Note [Memory ordering on Python interpreter tag] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // What memory_order do we need when accessing this atomic? We don't + // need a single total modification order (as provided by + // memory_order_seq_cst) as pyobj_interpreter_ is monotonic: it can only + // transition from -1 to some positive integer and never changes afterwards. 
+ // Because there is only one modification, it trivially already has a total + // modification order (e.g., we don't need fences or locked instructions on + // x86) + // + // In fact, one could make a reasonable argument that relaxed reads are OK, + // due to the presence of external locking (GIL) to ensure that interactions + // with other data structures are still correctly synchronized, so that + // we fall in the "Single-Location Data Structures" case as described in + // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf + // However, on x86, it doesn't matter if I use acquire or relaxed on the load + // as I get the same assembly in both cases. So I just use the more + // conservative acquire (which will impede compiler optimizations but I don't + // care) + std::atomic pyobj_interpreter_; + + // This field contains a reference to a PyObject representing this Tensor. + // If pyobj is nullptr, when we transfer Tensor to Python, we allocate a new + // PyObject for it and set this field. This field does not have to be + // protected by an atomic as it is only allowed to be accessed when you hold + // the GIL, or during destruction of the tensor. + // + // When a PyObject dies, you are obligated to clear this field + // (otherwise, you will try to use-after-free the pyobj); this currently + // occurs in THPVariable_clear in torch/csrc/autograd/python_variable.cpp + // + // NB: Ordinarily, this should not be a strong reference, as if the + // PyObject owns the Tensor, this would create a reference cycle. + // However, sometimes this ownership flips. To track who owns + // who, this has a single pointer tag indicating whether or not the + // C++ object owns the PyObject (the common case, zero, means PyObject + // owns the C++ object); see _unchecked_untagged_pyobj for raw access + // or check_pyobj for checked access. 
See references to PyObject + // resurrection in torch/csrc/autograd/python_variable.cpp + PyObject* pyobj_; +}; + +} // namespace c10::impl diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h new file mode 100644 index 0000000000000000000000000000000000000000..9016c3e11e1579feb48058a928c3dc033d7d0d24 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h @@ -0,0 +1,24 @@ +#pragma once + +#include +#include + +namespace c10::impl { + +struct C10_API PythonDispatcherTLS { + static void set_state(PyInterpreter* state); + static PyInterpreter* get_state(); + static void reset_state(); +}; + +struct C10_API DisablePythonDispatcher { + DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) { + PythonDispatcherTLS::set_state({}); + } + ~DisablePythonDispatcher() { + PythonDispatcherTLS::set_state(old_); + } + PyInterpreter* old_; +}; + +} // namespace c10::impl diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..ee32a0f463068dc91bffb7ee2c8f736893b87b81 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h @@ -0,0 +1,12 @@ +#pragma once + +#include + +#include + +namespace c10 { + +C10_API void* alloc_cpu(size_t nbytes); +C10_API void free_cpu(void* data); + +} // namespace c10 diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/macros/Export.h b/parrot/lib/python3.10/site-packages/torch/include/c10/macros/Export.h new file mode 100644 index 0000000000000000000000000000000000000000..cb68060ed8129d408f1d4fdddd4bb1cdd9cd5053 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/macros/Export.h @@ -0,0 +1,160 @@ +#ifndef 
C10_MACROS_EXPORT_H_ +#define C10_MACROS_EXPORT_H_ + +/* Header file to define the common scaffolding for exported symbols. + * + * Export is by itself a quite tricky situation to deal with, and if you are + * hitting this file, make sure you start with the background here: + * - Linux: https://gcc.gnu.org/wiki/Visibility + * - Windows: + * https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017 + * + * Do NOT include this file directly. Instead, use c10/macros/Macros.h + */ + +// You do not need to edit this part of file unless you are changing the core +// pytorch export abstractions. +// +// This part defines the C10 core export and import macros. This is controlled +// by whether we are building shared libraries or not, which is determined +// during build time and codified in c10/core/cmake_macros.h. +// When the library is built as a shared lib, EXPORT and IMPORT will contain +// visibility attributes. If it is being built as a static lib, then EXPORT +// and IMPORT basically have no effect. + +// As a rule of thumb, you should almost NEVER mix static and shared builds for +// libraries that depend on c10. AKA, if c10 is built as a static library, we +// recommend everything dependent on c10 to be built statically. If c10 is built +// as a shared library, everything dependent on it should be built as shared. In +// the PyTorch project, all native libraries shall use the macro +// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static +// libraries. + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. 
+// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#ifdef _WIN32 +#define C10_HIDDEN +#if defined(C10_BUILD_SHARED_LIBS) +#define C10_EXPORT __declspec(dllexport) +#define C10_IMPORT __declspec(dllimport) +#else +#define C10_EXPORT +#define C10_IMPORT +#endif +#else // _WIN32 +#if defined(__GNUC__) +#define C10_EXPORT __attribute__((__visibility__("default"))) +#define C10_HIDDEN __attribute__((__visibility__("hidden"))) +#else // defined(__GNUC__) +#define C10_EXPORT +#define C10_HIDDEN +#endif // defined(__GNUC__) +#define C10_IMPORT C10_EXPORT +#endif // _WIN32 + +#ifdef NO_EXPORT +#undef C10_EXPORT +#define C10_EXPORT +#endif + +// Definition of an adaptive XX_API macro, that depends on whether you are +// building the library itself or not, routes to XX_EXPORT and XX_IMPORT. +// Basically, you will need to do this for each shared library that you are +// building, and the instruction is as follows: assuming that you are building +// a library called libawesome.so. You should: +// (1) for your cmake target (usually done by "add_library(awesome, ...)"), +// define a macro called AWESOME_BUILD_MAIN_LIB using +// target_compile_options. +// (2) define the AWESOME_API macro similar to the one below. +// And in the source file of your awesome library, use AWESOME_API to +// annotate public symbols. + +// Here, for the C10 library, we will define the macro C10_API for both import +// and export. 
+ +// This one is being used by libc10.so +#ifdef C10_BUILD_MAIN_LIB +#define C10_API C10_EXPORT +#else +#define C10_API C10_IMPORT +#endif + +// This one is being used by libtorch.so +#ifdef CAFFE2_BUILD_MAIN_LIB +#define TORCH_API C10_EXPORT +#else +#define TORCH_API C10_IMPORT +#endif + +// You may be wondering: Whose brilliant idea was it to split torch_cuda into +// two pieces with confusing names? +// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we +// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker +// issues when linking big binaries. +// (https://github.com/pytorch/pytorch/issues/39968) We had two choices: +// (1) Stop supporting so many GPU architectures +// (2) Do something else +// We chose #2 and decided to split the behemoth that was torch_cuda into two +// smaller libraries, one with most of the core kernel functions (torch_cuda_cu) +// and the other that had..well..everything else (torch_cuda_cpp). The idea was +// this: instead of linking our static libraries (like the hefty +// libcudnn_static.a) with another huge library, torch_cuda, and run into pesky +// relocation marker issues, we could link our static libraries to a smaller +// part of torch_cuda (torch_cuda_cpp) and avoid the issues. 
+ +// libtorch_cuda_cu.so +#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB +#define TORCH_CUDA_CU_API C10_EXPORT +#elif defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CU_API C10_IMPORT +#endif + +// libtorch_cuda_cpp.so +#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB +#define TORCH_CUDA_CPP_API C10_EXPORT +#elif defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CPP_API C10_IMPORT +#endif + +// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the +// same api) +#ifdef TORCH_CUDA_BUILD_MAIN_LIB +#define TORCH_CUDA_CPP_API C10_EXPORT +#define TORCH_CUDA_CU_API C10_EXPORT +#elif !defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CPP_API C10_IMPORT +#define TORCH_CUDA_CU_API C10_IMPORT +#endif + +#if defined(TORCH_HIP_BUILD_MAIN_LIB) +#define TORCH_HIP_API C10_EXPORT +#else +#define TORCH_HIP_API C10_IMPORT +#endif + +#if defined(TORCH_XPU_BUILD_MAIN_LIB) +#define TORCH_XPU_API C10_EXPORT +#else +#define TORCH_XPU_API C10_IMPORT +#endif + +// Enums only need to be exported on windows for non-CUDA files +#if defined(_WIN32) && defined(__CUDACC__) +#define C10_API_ENUM C10_API +#else +#define C10_API_ENUM +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h b/parrot/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h new file mode 100644 index 0000000000000000000000000000000000000000..a66933823d80f75c3b9872f4030aedc19f963e20 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h @@ -0,0 +1,545 @@ +#ifndef C10_MACROS_MACROS_H_ +#define C10_MACROS_MACROS_H_ +#include + +/* Main entry for c10/macros. + * + * In your code, include c10/macros/Macros.h directly, instead of individual + * files in this folder. + */ + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. 
In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. +// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#include + +#if defined(__clang__) +#define __ubsan_ignore_float_divide_by_zero__ \ + __attribute__((no_sanitize("float-divide-by-zero"))) +#define __ubsan_ignore_undefined__ __attribute__((no_sanitize("undefined"))) +#define __ubsan_ignore_signed_int_overflow__ \ + __attribute__((no_sanitize("signed-integer-overflow"))) +#define __ubsan_ignore_pointer_overflow__ \ + __attribute__((no_sanitize("pointer-overflow"))) +#define __ubsan_ignore_function__ __attribute__((no_sanitize("function"))) +#else +#define __ubsan_ignore_float_divide_by_zero__ +#define __ubsan_ignore_undefined__ +#define __ubsan_ignore_signed_int_overflow__ +#define __ubsan_ignore_pointer_overflow__ +#define __ubsan_ignore_function__ +#endif + +// Detect address sanitizer as some stuff doesn't work with it +#undef C10_ASAN_ENABLED + +// for clang +#if defined(__has_feature) +#if ((__has_feature(address_sanitizer))) +#define C10_ASAN_ENABLED 1 +#endif +#endif + +// for gcc +#if defined(__SANITIZE_ADDRESS__) +#if __SANITIZE_ADDRESS__ +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 1 +#endif +#endif +#endif + +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 0 +#endif + +// Disable the copy and assignment operator for a class. Note that this will +// disable the usage of the class in std containers. 
+#define C10_DISABLE_COPY_AND_ASSIGN(classname) \ + classname(const classname&) = delete; \ + classname& operator=(const classname&) = delete + +#define C10_CONCATENATE_IMPL(s1, s2) s1##s2 +#define C10_CONCATENATE(s1, s2) C10_CONCATENATE_IMPL(s1, s2) + +#define C10_MACRO_EXPAND(args) args + +#define C10_STRINGIZE_IMPL(x) #x +#define C10_STRINGIZE(x) C10_STRINGIZE_IMPL(x) + +/** + * C10_ANONYMOUS_VARIABLE(str) introduces a new identifier which starts with + * str and ends with a unique number. + */ +#ifdef __COUNTER__ +#define C10_UID __COUNTER__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __COUNTER__) +#else +#define C10_UID __LINE__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __LINE__) +#endif + +#ifdef __has_cpp_attribute +#define C10_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define C10_HAS_CPP_ATTRIBUTE(x) (0) +#endif + +/// C10_NODISCARD - Warn if a type or return value is discarded. + +// Technically, we should check if __cplusplus > 201402L here, because +// [[nodiscard]] is only defined in C++17. However, some compilers +// we care about don't advertise being C++17 (e.g., clang), but +// support the attribute anyway. In fact, this is not just a good idea, +// it's the law: clang::warn_unused_result doesn't work on nvcc + clang +// and the best workaround for this case is to use [[nodiscard]] +// instead; see https://github.com/pytorch/pytorch/issues/13118 +// +// Note to future editors: if you have noticed that a compiler is +// misbehaving (e.g., it advertises support, but the support doesn't +// actually work, or it is emitting warnings). 
Some compilers which +// are strict about the matter include MSVC, which will complain: +// +// error C2429: attribute 'nodiscard' requires compiler flag '/std:c++latest' +// +// Exhibits: +// - MSVC 19.14: https://godbolt.org/z/Dzd7gn (requires /std:c++latest) +// - Clang 8.0.0: https://godbolt.org/z/3PYL4Z (always advertises support) +// - gcc 8.3: https://godbolt.org/z/4tLMQS (always advertises support) +#if C10_HAS_CPP_ATTRIBUTE(nodiscard) +#define C10_NODISCARD [[nodiscard]] +// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious +// error when __has_cpp_attribute is given a scoped attribute in C mode. +#elif __cplusplus && C10_HAS_CPP_ATTRIBUTE(clang::warn_unused_result) +// TODO: It's possible this is still triggering +// https://github.com/pytorch/pytorch/issues/13118 on Windows; if it is, better +// fix it. +#define C10_NODISCARD [[clang::warn_unused_result]] +#else +#define C10_NODISCARD +#endif + +// suppress an unused variable. +#if defined(_MSC_VER) && !defined(__clang__) +#define C10_UNUSED __pragma(warning(suppress : 4100 4101)) +#else +#define C10_UNUSED __attribute__((__unused__)) +#endif //_MSC_VER + +#if !defined(__has_attribute) +#define __has_attribute(x) 0 +#endif + +// Direct port of LLVM_ATTRIBUTE_USED. +#if __has_attribute(used) +#define C10_USED __attribute__((__used__)) +#else +#define C10_USED +#endif + +#define C10_RESTRICT __restrict + +// Simply define the namespace, in case a dependent library want to refer to +// the c10 namespace but not any nontrivial files. +namespace c10 {} +namespace c10::cuda {} +namespace c10::hip {} +namespace c10::xpu {} + +// Since C10 is the core library for caffe2 (and aten), we will simply reroute +// all abstractions defined in c10 to be available in caffe2 as well. +// This is only for backwards compatibility. Please use the symbols from the +// c10 namespace where possible. 
+namespace caffe2 { +using namespace c10; +} +namespace at { +using namespace c10; +} +namespace at::cuda { +using namespace c10::cuda; +} // namespace at::cuda + +// WARNING!!! THIS IS A GIANT HACK!!! +// This line means you cannot simultaneously include c10/hip +// and c10/cuda and then use them from the at::cuda namespace. +// This is true in practice, because HIPIFY works inplace on +// files in ATen/cuda, so it assumes that c10::hip is available +// from at::cuda. This namespace makes that happen. When +// HIPIFY is no longer out-of-place, we can switch the cuda +// here to hip and everyone is happy. +namespace at::cuda { +using namespace c10::hip; +} // namespace at::cuda + +namespace at::xpu { +using namespace c10::xpu; +} // namespace at::xpu + +// C10_LIKELY/C10_UNLIKELY +// +// These macros provide parentheses, so you can use these macros as: +// +// if C10_LIKELY(some_expr) { +// ... +// } +// +// NB: static_cast to boolean is mandatory in C++, because __builtin_expect +// takes a long argument, which means you may trigger the wrong conversion +// without it. +// +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define C10_LIKELY(expr) (__builtin_expect(static_cast(expr), 1)) +#define C10_UNLIKELY(expr) (__builtin_expect(static_cast(expr), 0)) +#else +#define C10_LIKELY(expr) (expr) +#define C10_UNLIKELY(expr) (expr) +#endif + +/// C10_NOINLINE - Functions whose declaration is annotated with this will not +/// be inlined. 
+#ifdef __GNUC__ +#define C10_NOINLINE __attribute__((noinline)) +#elif _MSC_VER +#define C10_NOINLINE __declspec(noinline) +#else +#define C10_NOINLINE +#endif + +#if defined(_MSC_VER) +#define C10_ALWAYS_INLINE __forceinline +#elif __has_attribute(always_inline) || defined(__GNUC__) +#define C10_ALWAYS_INLINE __attribute__((__always_inline__)) inline +#else +#define C10_ALWAYS_INLINE inline +#endif + +#if defined(_MSC_VER) +#define C10_ATTR_VISIBILITY_HIDDEN +#elif defined(__GNUC__) +#define C10_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden"))) +#else +#define C10_ATTR_VISIBILITY_HIDDEN +#endif + +#define C10_ERASE C10_ALWAYS_INLINE C10_ATTR_VISIBILITY_HIDDEN + +#include + +#ifdef __HIPCC__ +// Unlike CUDA, HIP requires a HIP header to be included for __host__ to work. +// We do this #include here so that C10_HOST_DEVICE and friends will Just Work. +// See https://github.com/ROCm-Developer-Tools/HIP/issues/441 +#include +#endif + +#if defined(__CUDACC__) || defined(__HIPCC__) +// Designates functions callable from the host (CPU) and the device (GPU) +#define C10_HOST_DEVICE __host__ __device__ +#define C10_DEVICE __device__ +#define C10_HOST __host__ +// constants from +// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications) +// The maximum number of threads per multiprocessor is 1024 for Turing +// architecture (7.5), 1536 for Geforce Ampere (8.6)/Jetson Orin (8.7), and +// 2048 for all other architectures. You'll get warnings if you exceed these +// constants. Hence, the following macros adjust the input values from the user +// to resolve potential warnings. 
+#if __CUDA_ARCH__ == 750 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024; +#elif __CUDA_ARCH__ == 860 || __CUDA_ARCH__ == 870 || __CUDA_ARCH__ == 890 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1536; +#else +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048; +#endif +// CUDA_MAX_THREADS_PER_BLOCK is same for all architectures currently +constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024; +// CUDA_THREADS_PER_BLOCK_FALLBACK is the "canonical fallback" choice of block +// size. 256 is a good number for this fallback and should give good occupancy +// and versatility across all architectures. +constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256; +// NOTE: if you are thinking of constexpr-ify the inputs to launch bounds, it +// turns out that although __launch_bounds__ can take constexpr, it +// can't take a constexpr that has anything to do with templates. +// Currently we use launch_bounds that depend on template arguments in +// Loops.cuh, Reduce.cuh and LossCTC.cuh. Hence, C10_MAX_THREADS_PER_BLOCK +// and C10_MIN_BLOCKS_PER_SM are kept as macros. +// Suppose you were planning to write __launch_bounds__(a, b), based on your +// performance tuning on a modern GPU. Instead, you should write +// __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(a), C10_MIN_BLOCKS_PER_SM(a, b)), +// which will also properly respect limits on old architectures. +#define C10_MAX_THREADS_PER_BLOCK(val) \ + (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \ + : CUDA_THREADS_PER_BLOCK_FALLBACK) +#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \ + ((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \ + ? (blocks_per_sm) \ + : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \ + (threads_per_block)))) +// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__ +#define C10_LAUNCH_BOUNDS_0 \ + __launch_bounds__( \ + 256, 4) // default launch bounds that should give good occupancy and + // versatility across all architectures. 
+#define C10_LAUNCH_BOUNDS_1(max_threads_per_block) \ + __launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block)))) +#define C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ + __launch_bounds__( \ + (C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \ + (C10_MIN_BLOCKS_PER_SM((max_threads_per_block), (min_blocks_per_sm)))) +#else +#define C10_HOST_DEVICE +#define C10_HOST +#define C10_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_HIP_HOST_DEVICE __host__ __device__ +#else +#define C10_HIP_HOST_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_WARP_SIZE warpSize // = 64 or 32 (Defined in hip_runtime.h) +#else +#define C10_WARP_SIZE 32 +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +// CUDA_KERNEL_ASSERT checks the assertion +// even when NDEBUG is defined. This is useful for important assertions in CUDA +// code that would otherwise be suppressed when building Release. +#if defined(__ANDROID__) || defined(__APPLE__) || defined(__FreeBSD__) +// Those platforms do not support assert() +#define CUDA_KERNEL_ASSERT(cond) +#define SYCL_KERNEL_ASSERT(cond) +#elif defined(_MSC_VER) +#if defined(NDEBUG) +extern "C" { +C10_IMPORT +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void _wassert( + const wchar_t* wexpr, + const wchar_t* wfile, + unsigned line); +#else +#if defined(__CUDA_ARCH__) +__host__ __device__ +#endif // __CUDA_ARCH__ + void + _wassert(wchar_t const* _Message, wchar_t const* _File, unsigned _Line); +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert( \ + _CRT_WIDE(#cond), \ + _CRT_WIDE(__FILE__), \ + static_cast(__LINE__)), \ + 0); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert( \ + _CRT_WIDE(#cond), \ + _CRT_WIDE(__FILE__), \ + static_cast(__LINE__)), \ + 0); \ + } +#else // __APPLE__, _MSC_VER +#if defined(NDEBUG) +extern "C" { 
+#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void __assert_fail( + const char* expr, + const char* file, + unsigned int line, + const char* func); +#else // __SYCL_DEVICE_ONLY__ +#if (defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__))) +// CUDA supports __assert_fail function which are common for both device +// and host side code. +__host__ __device__ +#endif + + // This forward declaration matching the declaration of __assert_fail + // exactly how it is in glibc in case parts of the program are compiled with + // different NDEBUG settings. Otherwise we might get 'ambiguous declaration' + // error. Note: On ROCm - this declaration serves for host side compilation. + void + __assert_fail( + const char* assertion, + const char* file, + unsigned int line, + const char* function) noexcept __attribute__((__noreturn__)); + +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +// ROCm disable kernel assert by default +#if !defined(C10_USE_ROCM_KERNEL_ASSERT) and defined(USE_ROCM) +#define CUDA_KERNEL_ASSERT(cond) +#define SYCL_KERNEL_ASSERT(cond) +#else +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#endif // C10_USE_ROCM_KERNEL_ASSERT and USE_ROCM +#endif // __APPLE__ + +#ifdef __APPLE__ +#include +#endif + +#if defined(__ANDROID__) +#define C10_ANDROID 1 +#define C10_MOBILE 1 +#elif ( \ + defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE)) +#define C10_IOS 1 +#define C10_MOBILE 1 +#endif // ANDROID / IOS + +#if defined(C10_MOBILE) && C10_MOBILE +#define C10_ALWAYS_INLINE_UNLESS_MOBILE inline +#else +#define C10_ALWAYS_INLINE_UNLESS_MOBILE C10_ALWAYS_INLINE +#endif + +#if defined(__CUDA_ARCH__) +#if defined(_MSC_VER) && defined(__CUDACC__) +#define 
CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +// Note [static constexpr char* members for windows NVCC] +// The Windows NVCC compiler doesn't handle static constexpr class members, +// although it's fixed in a later version. +// (see +// https://developercommunity.visualstudio.com/t/intellisense-error-c11-static-constexpr-member-ini/245425) +// +// If we want to ensure that our field is static under all builds, then we need +// to work around it specifically for windows NVCC by making it (a) const, (b) +// defined outside of the class definition We need to define it outside of the +// class definition because of the C++ standard; char* is not an integral type +// (see +// https://stackoverflow.com/questions/24278473/intellisense-a-member-of-type-const-char-const-cannot-have-an-in-class-in) +// +// So instead of this: +// struct Foo { +// static constexpr const char* name = "foo"; +// } +// In Windows NVCC, we end up with this: +// struct Foo { +// static const char* name; +// } +// const char* Foo::name = "foo"; +// +// This gives us a small perf hit for any code that wants to access these field +// members, but right now it isn't used in any perf-critical code paths. 
+#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#else +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA constexpr + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#endif + +#ifndef HAS_DEMANGLE +#if defined(__ANDROID__) || defined(_WIN32) || defined(__EMSCRIPTEN__) +#define HAS_DEMANGLE 0 +#elif defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE) +#define HAS_DEMANGLE 0 +#else +#define HAS_DEMANGLE 1 +#endif +#endif // HAS_DEMANGLE + +#define _C10_PRAGMA__(string) _Pragma(#string) +#define _C10_PRAGMA_(string) _C10_PRAGMA__(string) + +#ifdef __clang__ +#define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push") +#define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop") +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \ + _C10_PRAGMA_(clang diagnostic ignored flag) +#define C10_CLANG_HAS_WARNING(flag) __has_warning(flag) +#else +#define C10_CLANG_DIAGNOSTIC_PUSH() +#define C10_CLANG_DIAGNOSTIC_POP() +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) +#define C10_CLANG_HAS_WARNING(flag) 0 
+#endif + +#ifdef __clang__ + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \ + _C10_PRAGMA_(clang diagnostic push) \ + _C10_PRAGMA_(clang diagnostic ignored "-Wunknown-warning-option") \ + _C10_PRAGMA_(clang diagnostic ignored warning) + +#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(clang diagnostic pop) + +#elif __GNUC__ + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \ + _C10_PRAGMA_(GCC diagnostic push) \ + _C10_PRAGMA_(GCC diagnostic ignored "-Wpragmas") \ + _C10_PRAGMA_(GCC diagnostic ignored warning) + +#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(GCC diagnostic pop) + +#else + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) +#define C10_DIAGNOSTIC_POP() + +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/parrot/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h b/parrot/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..b7bab536564cb8475d2dc9edb645a37547f0914a --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h @@ -0,0 +1,14 @@ +#ifndef C10_MACROS_CMAKE_MACROS_H_ +#define C10_MACROS_CMAKE_MACROS_H_ + +// Automatically generated header file for the C10 library. +// Do not include this file directly. Instead, include c10/macros/Macros.h. + +#define C10_BUILD_SHARED_LIBS +/* #undef C10_USE_GLOG */ +/* #undef C10_USE_GFLAGS */ +/* #undef C10_USE_NUMA */ +/* #undef C10_USE_MSVC_STATIC_RUNTIME */ +/* #undef C10_USE_ROCM_KERNEL_ASSERT */ + +#endif // C10_MACROS_CMAKE_MACROS_H_ diff --git a/parrot/lib/python3.10/site-packages/torch/include/clog.h b/parrot/lib/python3.10/site-packages/torch/include/clog.h new file mode 100644 index 0000000000000000000000000000000000000000..bf09cd0cb6de4ff632807ad2e58df9e402906878 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/clog.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) Facebook, Inc. and its affiliates. 
+ * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include +#include + +#define CLOG_NONE 0 +#define CLOG_FATAL 1 +#define CLOG_ERROR 2 +#define CLOG_WARNING 3 +#define CLOG_INFO 4 +#define CLOG_DEBUG 5 + +#ifndef CLOG_VISIBILITY +#if defined(__ELF__) +#define CLOG_VISIBILITY __attribute__((__visibility__("internal"))) +#elif defined(__MACH__) +#define CLOG_VISIBILITY __attribute__((__visibility__("hidden"))) +#else +#define CLOG_VISIBILITY +#endif +#endif + +#ifndef CLOG_ARGUMENTS_FORMAT +#if defined(__GNUC__) +#define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2))) +#else +#define CLOG_ARGUMENTS_FORMAT +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +CLOG_VISIBILITY void clog_vlog_debug( + const char* module, + const char* format, + va_list args); +CLOG_VISIBILITY void clog_vlog_info( + const char* module, + const char* format, + va_list args); +CLOG_VISIBILITY void clog_vlog_warning( + const char* module, + const char* format, + va_list args); +CLOG_VISIBILITY void clog_vlog_error( + const char* module, + const char* format, + va_list args); +CLOG_VISIBILITY void clog_vlog_fatal( + const char* module, + const char* format, + va_list args); + +#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_debug_function_name(const char* format, ...) { \ + if (level >= CLOG_DEBUG) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_debug(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_info_function_name(const char* format, ...) 
{ \ + if (level >= CLOG_INFO) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_info(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_warning_function_name(const char* format, ...) { \ + if (level >= CLOG_WARNING) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_warning(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_error_function_name(const char* format, ...) { \ + if (level >= CLOG_ERROR) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_error(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_fatal_function_name(const char* format, ...) { \ + if (level >= CLOG_FATAL) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_fatal(module, format, args); \ + va_end(args); \ + } \ + abort(); \ + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/parrot/lib/python3.10/site-packages/torch/include/dnnl.h b/parrot/lib/python3.10/site-packages/torch/include/dnnl.h new file mode 100644 index 0000000000000000000000000000000000000000..bc74bf644f4b628018d7a9103ba63320abc466d5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/dnnl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. 
+* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_H +#define DNNL_H + +#include "oneapi/dnnl/dnnl.h" + +#endif /* DNNL_H */ diff --git a/parrot/lib/python3.10/site-packages/torch/include/fxdiv.h b/parrot/lib/python3.10/site-packages/torch/include/fxdiv.h new file mode 100644 index 0000000000000000000000000000000000000000..2c35038d97c55c524bb97caba2e3560cab9da504 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/fxdiv.h @@ -0,0 +1,425 @@ +#pragma once +#ifndef FXDIV_H +#define FXDIV_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include + #include +#endif + +#if defined(_MSC_VER) + #include + #if defined(_M_IX86) || defined(_M_X64) + #include + #endif +#endif + +#ifndef FXDIV_USE_INLINE_ASSEMBLY + #define FXDIV_USE_INLINE_ASSEMBLY 0 +#endif + +static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) { +#if defined(_MSC_VER) && defined(_M_IX86) + return (uint64_t) __emulu((unsigned int) a, (unsigned int) b); +#else + return (uint64_t) a * (uint64_t) b; +#endif +} + +static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b); +#elif defined(_MSC_VER) && defined(_M_IX86) + return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32); +#elif defined(_MSC_VER) && defined(_M_ARM) + return (uint32_t) _MulUnsignedHigh((unsigned 
long) a, (unsigned long) b); +#else + return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32); +#endif +} + +static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b); +#elif defined(_MSC_VER) && defined(_M_X64) + return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b); +#elif defined(__GNUC__) && defined(__SIZEOF_INT128__) + return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64); +#else + const uint32_t a_lo = (uint32_t) a; + const uint32_t a_hi = (uint32_t) (a >> 32); + const uint32_t b_lo = (uint32_t) b; + const uint32_t b_hi = (uint32_t) (b >> 32); + + const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) + + (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo); + return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) + + ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32); +#endif +} + +static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) { +#if SIZE_MAX == UINT32_MAX + return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b); +#elif SIZE_MAX == UINT64_MAX + return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b); +#else + #error Unsupported platform +#endif +} + +struct fxdiv_divisor_uint32_t { + uint32_t value; + uint32_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint32_t { + uint32_t quotient; + uint32_t remainder; +}; + +struct fxdiv_divisor_uint64_t { + uint64_t value; + uint64_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint64_t { + uint64_t quotient; + uint64_t remainder; +}; + +struct fxdiv_divisor_size_t { + size_t value; + size_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_size_t { + size_t quotient; + size_t remainder; +}; + +static inline struct fxdiv_divisor_uint32_t fxdiv_init_uint32_t(uint32_t d) { + struct fxdiv_divisor_uint32_t result = { d }; + if (d == 1) 
{ + result.m = UINT32_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t l_minus_1 = 31 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t l_minus_1 = 31 - __clz((int) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse(&l_minus_1, (unsigned long) (d - 1)); + #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t l_minus_1; + __asm__("BSRL %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1); + #else + /* Based on Algorithm 2 from Hacker's delight */ + + uint32_t l_minus_1 = 0; + uint32_t x = d - 1; + uint32_t y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + #endif + uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */ + #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t q; + __asm__("DIVL %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (0) + : "cc"); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64)) + unsigned int remainder; + const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder); + #else + const uint32_t q = ((uint64_t) u_hi << 32) / d; + #endif + + result.m = q + UINT32_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct 
fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) { + struct fxdiv_divisor_uint64_t result = { d }; + if (d == 1) { + result.m = UINT64_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t nlz_d = clz(d); + const uint32_t l_minus_1 = 63 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t nlz_d = __clzll((long long) d); + const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1)); + unsigned long bsr_d; + _BitScanReverse64(&bsr_d, (unsigned __int64) d); + const uint32_t nlz_d = bsr_d ^ 0x3F; + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM)) + const uint64_t d_minus_1 = d - 1; + const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0; + unsigned long l_minus_1; + if ((uint32_t) (d_minus_1 >> 32) == 0) { + _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1); + } else { + _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32)); + l_minus_1 += 32; + } + const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2; + #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t l_minus_1; + __asm__("BSRQ %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 63 - __builtin_clzll(d - 1); + const uint32_t nlz_d = __builtin_clzll(d); + #else + /* Based on Algorithm 2 from Hacker's delight */ + const uint64_t d_minus_1 = d - 1; + const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0; + uint32_t l_minus_1 = 0; + uint32_t x = (uint32_t) d_minus_1; + uint32_t y = d_minus_1 >> 32; + if (y != 0) { + l_minus_1 += 32; + x = y; + } + y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + 
l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2; + #endif + uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */ + #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t q; + __asm__("DIVQ %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (UINT64_C(0)) + : "cc"); + #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__) + /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */ + const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d)); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64) + unsigned __int64 remainder; + const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder); + #else + /* Implementation based on code from Hacker's delight */ + + /* Normalize divisor and shift divident left */ + d <<= nlz_d; + u_hi <<= nlz_d; + /* Break divisor up into two 32-bit digits */ + const uint64_t d_hi = (uint32_t) (d >> 32); + const uint32_t d_lo = (uint32_t) d; + + /* Compute the first quotient digit, q1 */ + uint64_t q1 = u_hi / d_hi; + uint64_t r1 = u_hi - q1 * d_hi; + + while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) { + q1 -= 1; + r1 += d_hi; + if ((r1 >> 32) != 0) { + break; + } + } + + /* Multiply and subtract. 
*/ + u_hi = (u_hi << 32) - q1 * d; + + /* Compute the second quotient digit, q0 */ + uint64_t q0 = u_hi / d_hi; + uint64_t r0 = u_hi - q0 * d_hi; + + while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) { + q0 -= 1; + r0 += d_hi; + if ((r0 >> 32) != 0) { + break; + } + } + const uint64_t q = (q1 << 32) | (uint32_t) q0; + #endif + result.m = q + UINT64_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d); +#else + #error Unsupported platform +#endif + struct fxdiv_divisor_size_t size_result = { + (size_t) uint_result.value, + (size_t) uint_result.m, + uint_result.s1, + uint_result.s2 + }; + return size_result; +} + +static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint32_divisor = { + (uint32_t) divisor.value, + (uint32_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint64_divisor = { + (uint64_t) divisor.value, + (uint64_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor); 
+#else + #error Unsupported platform +#endif +} + +static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity); + return quotient * granularity.value; +} + +static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity); + return quotient * granularity.value; +} + +static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) { + const size_t quotient = fxdiv_quotient_size_t(n, granularity); + return quotient * granularity.value; +} + +static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + const uint32_t remainder = n - quotient * divisor.value; + struct fxdiv_result_uint32_t result = { quotient, remainder }; + return result; +} + +static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + const uint64_t remainder = n - quotient * divisor.value; + struct fxdiv_result_uint64_t result = { quotient, 
remainder }; + return result; +} + +static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + const size_t remainder = n - quotient * divisor.value; + struct fxdiv_result_size_t result = { quotient, remainder }; + return result; +} + +#endif /* FXDIV_H */ diff --git a/parrot/lib/python3.10/site-packages/torch/include/sleef.h b/parrot/lib/python3.10/site-packages/torch/include/sleef.h new file mode 100644 index 0000000000000000000000000000000000000000..292ac5b8be30c5766679ce2dd562014fdf50d4f2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/sleef.h @@ -0,0 +1,4170 @@ +// Copyright Naoki Shibata and contributors 2010 - 2023. +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#ifndef __SLEEF_H__ +#define __SLEEF_H__ + +#define SLEEF_VERSION_MAJOR 3 +#define SLEEF_VERSION_MINOR 6 +#define SLEEF_VERSION_PATCHLEVEL 0 + +#include +#include + +#if defined (__GNUC__) || defined (__clang__) || defined(__INTEL_COMPILER) +#define SLEEF_CONST __attribute__((const)) +#define SLEEF_INLINE __attribute__((always_inline)) +#elif defined(_MSC_VER) +#define SLEEF_CONST +#define SLEEF_INLINE __forceinline +#endif + +#if defined(__AVX2__) || defined(__aarch64__) || defined(__arm__) || defined(__powerpc64__) || defined(__zarch__) +#ifndef FP_FAST_FMA +#define FP_FAST_FMA +#endif +#ifndef FP_FAST_FMAF +#define FP_FAST_FMAF +#endif +#endif + +#if defined(_MSC_VER) && !defined(__STDC__) +#define __STDC__ 1 +#endif + +#if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) +#ifdef SLEEF_IMPORT_IS_EXPORT +#define SLEEF_IMPORT __declspec(dllexport) +#else // #ifdef SLEEF_IMPORT_IS_EXPORT +#define SLEEF_IMPORT __declspec(dllimport) +#if (defined(_MSC_VER)) +#pragma 
comment(lib,"sleef.lib") +#endif // #if (defined(_MSC_VER)) +#endif // #ifdef SLEEF_IMPORT_IS_EXPORT +#else // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) +#define SLEEF_IMPORT +#endif // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS) + +#if (defined(__GNUC__) || defined(__CLANG__)) && (defined(__i386__) || defined(__x86_64__)) +#include +#endif + +#if (defined(_MSC_VER)) +#include +#endif + +#if defined(__ARM_NEON__) || defined(__ARM_NEON) +#include +#endif + +#if defined(__ARM_FEATURE_SVE) +#include +#endif + +#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__) +#include +typedef __vector double SLEEF_VECTOR_DOUBLE; +typedef __vector float SLEEF_VECTOR_FLOAT; +typedef __vector int SLEEF_VECTOR_INT; +typedef __vector unsigned int SLEEF_VECTOR_UINT; +typedef __vector long long SLEEF_VECTOR_LONGLONG; +typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG; +#endif + +#if defined(__VX__) && defined(__VEC__) +#ifndef SLEEF_VECINTRIN_H_INCLUDED +#include +#define SLEEF_VECINTRIN_H_INCLUDED +#endif +typedef __vector double SLEEF_VECTOR_DOUBLE; +typedef __vector float SLEEF_VECTOR_FLOAT; +typedef __vector int SLEEF_VECTOR_INT; +typedef __vector unsigned int SLEEF_VECTOR_UINT; +typedef __vector long long SLEEF_VECTOR_LONGLONG; +typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG; +#endif + +// + +#if defined(SLEEF_ENABLE_OMP_SIMD) && (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER) +#if defined(__aarch64__) +//#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(2) notinbranch") +//#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(4) notinbranch") +//#elif defined(__x86_64__) && defined(__AVX512F__) +//#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(8) notinbranch") +//#define SLEEF_PRAGMA_OMP_SIMD_SP 
_Pragma ("omp declare simd simdlen(16) notinbranch") +#elif defined(__x86_64__) && defined(__AVX__) +#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(4) notinbranch") +#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(8) notinbranch") +#elif defined(__x86_64__) && defined(__SSE2__) +#define SLEEF_PRAGMA_OMP_SIMD_DP _Pragma ("omp declare simd simdlen(2) notinbranch") +#define SLEEF_PRAGMA_OMP_SIMD_SP _Pragma ("omp declare simd simdlen(4) notinbranch") +#endif +#endif + +#ifndef SLEEF_PRAGMA_OMP_SIMD_DP +#define SLEEF_PRAGMA_OMP_SIMD_DP +#define SLEEF_PRAGMA_OMP_SIMD_SP +#endif + +// + +#ifndef SLEEF_FP_ILOGB0 +#define SLEEF_FP_ILOGB0 ((int)0x80000000) +#endif + +#ifndef SLEEF_FP_ILOGBNAN +#define SLEEF_FP_ILOGBNAN ((int)2147483647) +#endif + +// + +SLEEF_IMPORT void *Sleef_malloc(size_t z); +SLEEF_IMPORT void Sleef_free(void *ptr); +SLEEF_IMPORT uint64_t Sleef_currentTimeMicros(); + +#if defined(__i386__) || defined(__x86_64__) || defined(_MSC_VER) +SLEEF_IMPORT void Sleef_x86CpuID(int32_t out[4], uint32_t eax, uint32_t ecx); +#endif + +// + +#if defined(__riscv_v) +#include +typedef vfloat64m2_t Sleef_vfloat64m1_t_2; +typedef vfloat32m2_t Sleef_vfloat32m1_t_2; +typedef vfloat64m4_t Sleef_vfloat64m2_t_2; +typedef vfloat32m4_t Sleef_vfloat32m2_t_2; +#define Sleef_vfloat64m1_t_2_DEFINED +#define Sleef_vfloat32m1_t_2_DEFINED +#define Sleef_vfloat64m2_t_2_DEFINED +#define Sleef_vfloat32m2_t_2_DEFINED +#endif + +#ifndef Sleef_double2_DEFINED +#define Sleef_double2_DEFINED +typedef struct { + double x, y; +} Sleef_double2; +#endif + +#ifndef Sleef_float2_DEFINED +#define Sleef_float2_DEFINED +typedef struct { + float x, y; +} Sleef_float2; +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sin_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cos_u35(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincos_u35(double); 
+SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tan_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asin_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acos_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2_u35(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrt_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sin_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cos_u10(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincos_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tan_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asin_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acos_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2_u10(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrt_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_pow_u10(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinh_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosh_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanh_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinh_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosh_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP 
SLEEF_IMPORT SLEEF_CONST double Sleef_tanh_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asinh_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acosh_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atanh_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_expm1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log10_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log1p_u10(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincospi_u05(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_sincospi_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinpi_u05(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cospi_u05(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_ldexp(double, int); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST int Sleef_ilogb(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fma(double, double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrt(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrt_u05(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrt_u35(double); + +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypot_u05(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypot_u35(double, 
double); + +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fabs(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_copysign(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmax(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmin(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fdim(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_trunc(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_floor(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_ceil(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_round(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_rint(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_nextafter(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexp(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST int Sleef_expfrexp(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmod(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_remainder(double, double); +SLEEF_IMPORT SLEEF_CONST Sleef_double2 Sleef_modf(double); + +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_lgamma_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tgamma_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erf_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erfc_u15(double); + +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf_u35(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincosf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT 
SLEEF_CONST float Sleef_asinf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f_u35(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf_u10(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincosf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf_u3500(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf_u3500(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f_u10(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_powf_u10(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf_u3500(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float 
Sleef_sinhf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log10f_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf_u10(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincospif_u05(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_sincospif_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif_u05(float d); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cospif_u05(float d); +SLEEF_IMPORT SLEEF_CONST float Sleef_ldexpf(float, int); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST int Sleef_ilogbf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf(float, float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf_u05(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf_u35(float); + +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float 
Sleef_hypotf_u05(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf_u35(float, float); + +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fminf(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_truncf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_floorf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_roundf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_rintf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST int Sleef_expfrexpf(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf(float, float); +SLEEF_IMPORT SLEEF_CONST Sleef_float2 Sleef_modff(float); + +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erff_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf_u15(float); +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_cosd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10(__m128d); +SLEEF_IMPORT 
SLEEF_CONST __m128d Sleef_atanhd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_rintd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15(__m128d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10(__m128, __m128); 
+SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_sqrtf4_u35(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15(__m128); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse2(__m128d); 
+SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u35sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10sse2(__m128d, __m128d); 
+SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u10sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_powd2_u10sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastsind2_u3500sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastcosd2_u3500sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastpowd2_u3500sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_cinz_asinhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acoshd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atanhd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expm1d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log10d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log1pd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinpid2_u05sse2(__m128d); 
+SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cospid2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2_sse2(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ldexpd2_sse2(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_ilogbd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2_sse2(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmad2_sse2(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u05sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u35sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u05sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u35sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fabsd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_copysignd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmaxd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmind2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_cinz_fdimd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_truncd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_floord2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ceild2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_roundd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_rintd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_nextafterd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_frfrexpd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_expfrexpd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmodd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_remainderd2_sse2(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_lgammad2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tgammad2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfd2_u10sse2(__m128d); +SLEEF_IMPORT 
SLEEF_CONST __m128d Sleef_erfcd2_u15sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfcd2_u15sse2(__m128d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2_sse2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2_sse2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u35sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST 
Sleef___m128_2 Sleef_sincosf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u10sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_powf4_u10sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u35sse2(__m128); +SLEEF_IMPORT 
SLEEF_CONST __m128 Sleef_tanhf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastsinf4_u3500sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastcosf4_u3500sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastpowf4_u3500sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acoshf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanhf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expm1f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log10f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_cinz_log2f4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log1pf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinpif4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cospif4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4_sse2(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaf4_sse2(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u05sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u35sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u05sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u35sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fabsf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_copysignf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaxf4_sse2(__m128, __m128); 
+SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fminf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fdimf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_truncf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_floorf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_ceilf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_roundf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_rintf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_nextafterf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_frfrexpf4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmodf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_remainderf4_sse2(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_modff4_sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_lgammaf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tgammaf4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_cinz_erff4_u10sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15sse2(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erfcf4_u15sse2(__m128); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4_sse2(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf4_sse2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4_sse2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf4_sse2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u35sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_sind2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sind2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cosd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tand2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asind2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acosd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atand2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atan2d2_u10sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_logd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cbrtd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_powd2_u10sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST 
__m128d Sleef_cinz_tanhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinhd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_coshd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tanhd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastsind2_u3500sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastcosd2_u3500sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fastpowd2_u3500sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_asinhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_acoshd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_atanhd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp2d2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_exp10d2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_expm1d2_u10sse4(__m128d); 
+SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log10d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log2d2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_log1pd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sinpid2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_cospid2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2_sse4(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ldexpd2_sse4(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_ilogbd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2_sse4(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmad2_sse4(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u05sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_sqrtd2_u35sse4(__m128d); +SLEEF_IMPORT 
SLEEF_CONST __m128d Sleef_hypotd2_u05sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u05sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_hypotd2_u35sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fabsd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_copysignd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmaxd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmind2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fdimd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_truncd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_floord2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_ceild2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_roundd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_rintd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_nextafterd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_frfrexpd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i 
Sleef_expfrexpd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_expfrexpd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_fmodd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_remainderd2_sse4(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_lgammad2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_tgammad2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfd2_u10sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cinz_erfcd2_u15sse4(__m128d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2_sse4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2_sse4(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_cinz_asinf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u35sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cosf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acosf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atan2f4_u10sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_logf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10sse4(__m128); +SLEEF_IMPORT 
SLEEF_CONST __m128 Sleef_cinz_cbrtf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_powf4_u10sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinhf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_coshf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tanhf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastsinf4_u3500sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastcosf4_u3500sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fastpowf4_u3500sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_asinhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_acoshf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_atanhf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_exp2f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp2f4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_exp10f4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_expm1f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log10f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log2f4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_log1pf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sinpif4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_cospif4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4_sse4(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaf4_sse4(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_cinz_sqrtf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u05sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_sqrtf4_u35sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u05sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_hypotf4_u35sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fabsf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_copysignf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmaxf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fminf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fdimf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_truncf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_floorf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_ceilf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_roundf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_rintf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4_sse4(__m128, __m128); 
+SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_nextafterf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_frfrexpf4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_fmodf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_remainderf4_sse4(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_cinz_modff4_sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_lgammaf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_tgammaf4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erff4_u10sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15sse4(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cinz_erfcf4_u15sse4(__m128); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4_sse4(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf4_sse4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4_sse4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf4_sse4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35(__m256d); 
+SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_expm1d4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4(__m256d, __m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4(__m256d, 
__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15(__m256d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 
Sleef_coshf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8(__m256, 
__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15(__m256); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sind4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cosd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tand4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_asind4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35avx(__m256d); +SLEEF_IMPORT 
SLEEF_CONST __m256d Sleef_cinz_acosd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atand4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atan2d4_u35avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_logd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cbrtd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sind4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cosd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tand4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_asind4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_acosd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atand4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atan2d4_u10avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_logd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cbrtd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST 
__m256d Sleef_expd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_expd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_powd4_u10avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sinhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_coshd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tanhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sinhd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_coshd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tanhd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fastsind4_u3500avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fastcosd4_u3500avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fastpowd4_u3500avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_asinhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_acoshd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_atanhd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST 
__m256d Sleef_cinz_exp2d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp2d4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp10d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_exp10d4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_expm1d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log10d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log2d4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log2d4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_log1pd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sinpid4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_cospid4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4_avx(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_ldexpd4_avx(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_ilogbd4_avx(__m256d); +SLEEF_IMPORT 
SLEEF_CONST __m256d Sleef_fmad4_avx(__m256d, __m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmad4_avx(__m256d, __m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sqrtd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sqrtd4_u05avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_sqrtd4_u35avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_hypotd4_u05avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_hypotd4_u35avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fabsd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_copysignd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmaxd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmind4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fdimd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_truncd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_floord4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_ceild4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_roundd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_roundd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_rintd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_nextafterd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_frfrexpd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_cinz_expfrexpd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_fmodd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_remainderd4_avx(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_cinz_modfd4_avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_lgammad4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_tgammad4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_erfd4_u10avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15avx(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cinz_erfcd4_u15avx(__m256d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4_avx(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4_avx(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinf8_u35avx(__m256); 
+SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cosf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_asinf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_acosf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atanf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atan2f8_u35avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_logf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cbrtf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cosf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_asinf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_acosf8_u10avx(__m256); +SLEEF_IMPORT 
SLEEF_CONST __m256 Sleef_atanf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atanf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atan2f8_u10avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_logf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cbrtf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_expf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_powf8_u10avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_coshf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sinhf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_coshf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tanhf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fastsinf8_u3500avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fastcosf8_u3500avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fastpowf8_u3500avx(__m256, __m256); 
+SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_asinhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_acoshf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_atanhf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp2f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp2f8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp10f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_exp10f8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_expm1f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log10f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log2f8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log2f8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_log1pf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 
Sleef_cinz_sinpif8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_cospif8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8_avx(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fmaf8_avx(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sqrtf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sqrtf8_u05avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_sqrtf8_u35avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_hypotf8_u05avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_hypotf8_u35avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fabsf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_copysignf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fmaxf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fminf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fdimf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_truncf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_floorf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8_avx(__m256); 
+SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_ceilf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_roundf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_rintf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_nextafterf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_frfrexpf8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_fmodf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_remainderf8_avx(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_cinz_modff8_avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_lgammaf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_tgammaf8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_erff8_u10avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15avx(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cinz_erfcf8_u15avx(__m256); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8_avx(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf8_avx(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8_avx(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf8_avx(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35fma4(__m256d); 
+SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u35fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_finz_asind4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u10fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_powd4_u10fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastsind4_u3500fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST 
__m256d Sleef_fastcosd4_u3500fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastcosd4_u3500fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastpowd4_u3500fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asinhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acoshd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atanhd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expm1d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log10d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log1pd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05fma4(__m256d); 
+SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinpid4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cospid4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4_fma4(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ldexpd4_fma4(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_ilogbd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4_fma4(__m256d, __m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmad4_fma4(__m256d, __m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u05fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u35fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u05fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u35fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fabsd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_copysignd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_fmaxd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmaxd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmind4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fdimd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_truncd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_floord4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ceild4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_roundd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_rintd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_rintd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_nextafterd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_frfrexpd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_expfrexpd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmodd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_remainderd4_fma4(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_modfd4_fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_lgammad4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_lgammad4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tgammad4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfd4_u10fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfcd4_u15fma4(__m256d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4_fma4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4_fma4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u35fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 
Sleef_cbrtf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u10fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_powf8_u10fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST 
__m256 Sleef_tanhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastsinf8_u3500fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastcosf8_u3500fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastpowf8_u3500fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acoshf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanhf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expm1f8_u10fma4(__m256); 
+SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log10f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log1pf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinpif8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cospif8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8_fma4(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaf8_fma4(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u05fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u35fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u05fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u35fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 
Sleef_fabsf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fabsf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_copysignf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaxf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fminf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fdimf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_truncf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_floorf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_ceilf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_roundf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_rintf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_nextafterf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_frfrexpf8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmodf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_remainderf8_fma4(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 
Sleef_finz_modff8_fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_lgammaf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tgammaf8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erff8_u10fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15fma4(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erfcf8_u15fma4(__m256); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8_fma4(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf8_fma4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8_fma4(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf8_fma4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u35avx2(__m256d, __m256d); 
+SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u35avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sind4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sind4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cosd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cosd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincosd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tand4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tand4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asind4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asind4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acosd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acosd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atand4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atand4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atan2d4_u10avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atan2d4_u10avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_logd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_logd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cbrtd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cbrtd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_powd4_u10avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_finz_powd4_u10avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinhd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinhd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_coshd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_coshd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tanhd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tanhd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastsind4_u3500avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastsind4_u3500avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastcosd4_u3500avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastcosd4_u3500avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fastpowd4_u3500avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fastpowd4_u3500avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_asinhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_asinhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_acoshd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_acoshd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_atanhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_atanhd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp2d4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp2d4_u35avx2(__m256d); 
+SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_exp10d4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_exp10d4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_expm1d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_expm1d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log10d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log10d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log2d4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log2d4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_log1pd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_log1pd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_sincospid4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sinpid4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sinpid4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_cospid4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_cospid4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ldexpd4_avx2(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ldexpd4_avx2(__m256d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_ilogbd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmad4_avx2(__m256d, __m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmad4_avx2(__m256d, __m256d, __m256d); 
+SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u05avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_sqrtd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_sqrtd4_u35avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u05avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u05avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_hypotd4_u35avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_hypotd4_u35avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fabsd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fabsd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_copysignd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_copysignd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmaxd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmaxd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmind4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmind4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fdimd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fdimd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_truncd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_truncd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_floord4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_floord4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_ceild4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_ceild4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_roundd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_roundd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d 
Sleef_rintd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_rintd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_nextafterd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_nextafterd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_frfrexpd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_frfrexpd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_expfrexpd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_fmodd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_fmodd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_remainderd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_remainderd4_avx2(__m256d, __m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_modfd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST Sleef___m256d_2 Sleef_finz_modfd4_avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_lgammad4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_lgammad4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_tgammad4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_tgammad4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfd4_u10avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_erfcd4_u15avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST __m256d Sleef_finz_erfcd4_u15avx2(__m256d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd4_avx2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd4_avx2(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 
Sleef_finz_cosf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u35avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u35avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cosf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cosf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincosf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincosf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_asinf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acosf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acosf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanf8_u10avx2(__m256); +SLEEF_IMPORT 
SLEEF_CONST __m256 Sleef_finz_atanf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atan2f8_u10avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atan2f8_u10avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_logf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_logf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cbrtf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cbrtf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_powf8_u10avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_powf8_u10avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinhf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinhf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_coshf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_coshf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tanhf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tanhf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastsinf8_u3500avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastsinf8_u3500avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastcosf8_u3500avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastcosf8_u3500avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fastpowf8_u3500avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fastpowf8_u3500avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 
Sleef_asinhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_asinhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_acoshf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_acoshf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_atanhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_atanhf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp2f8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp2f8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_exp10f8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_exp10f8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_expm1f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_expm1f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log10f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log10f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log2f8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log2f8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_log1pf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_log1pf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_sincospif8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_sincospif8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sinpif8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sinpif8_u05avx2(__m256); 
+SLEEF_IMPORT SLEEF_CONST __m256 Sleef_cospif8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_cospif8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaf8_avx2(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaf8_avx2(__m256, __m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u05avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_sqrtf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_sqrtf8_u35avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u05avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u05avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_hypotf8_u35avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_hypotf8_u35avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fabsf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fabsf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_copysignf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_copysignf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmaxf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmaxf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fminf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fminf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fdimf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fdimf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_truncf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_truncf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_floorf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_floorf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_ceilf8_avx2(__m256); +SLEEF_IMPORT 
SLEEF_CONST __m256 Sleef_finz_ceilf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_roundf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_roundf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_rintf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_rintf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_nextafterf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_nextafterf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_frfrexpf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_frfrexpf8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_fmodf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_fmodf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_remainderf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_remainderf8_avx2(__m256, __m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_modff8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST Sleef___m256_2 Sleef_finz_modff8_avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_lgammaf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_lgammaf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_tgammaf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_tgammaf8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erff8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erff8_u10avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_erfcf8_u15avx2(__m256); +SLEEF_IMPORT SLEEF_CONST __m256 Sleef_finz_erfcf8_u15avx2(__m256); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf8_avx2(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf8_avx2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf8_avx2(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf8_avx2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_sind2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sind2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cosd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincosd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tand2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asind2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_asind2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_acosd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atand2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u35avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atan2d2_u35avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_logd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cbrtd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sind2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sind2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cosd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cosd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincosd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincosd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tand2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tand2_u10avx2128(__m128d); +SLEEF_IMPORT 
SLEEF_CONST __m128d Sleef_asind2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_asind2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acosd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_acosd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atand2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atand2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atan2d2_u10avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atan2d2_u10avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_logd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_logd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cbrtd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cbrtd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_expd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_powd2_u10avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_powd2_u10avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sinhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_coshd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tanhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinhd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sinhd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_coshd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_coshd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tanhd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tanhd2_u35avx2128(__m128d); 
+SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastsind2_u3500avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fastsind2_u3500avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastcosd2_u3500avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fastcosd2_u3500avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fastpowd2_u3500avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fastpowd2_u3500avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_asinhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_asinhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_acoshd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_acoshd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_atanhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_atanhd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp2d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp2d2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp2d2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp10d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_exp10d2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_exp10d2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_expm1d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_expm1d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log10d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log10d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log2d2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log2d2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_finz_log2d2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_log1pd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_log1pd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincospid2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_sincospid2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_sincospid2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sinpid2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sinpid2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_cospid2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_cospid2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ldexpd2_avx2128(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_ldexpd2_avx2128(__m128d, __m128i); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_ilogbd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_ilogbd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmad2_avx2128(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmad2_avx2128(__m128d, __m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sqrtd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sqrtd2_u05avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_sqrtd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_sqrtd2_u35avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u05avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_hypotd2_u05avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_hypotd2_u35avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_hypotd2_u35avx2128(__m128d, __m128d); 
+SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fabsd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fabsd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_copysignd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_copysignd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmaxd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmaxd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmind2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fmind2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fdimd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_fdimd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_truncd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_truncd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_floord2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_floord2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_ceild2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_ceild2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_roundd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_roundd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_rintd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_rintd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_nextafterd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_nextafterd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_frfrexpd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_frfrexpd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_expfrexpd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128i Sleef_finz_expfrexpd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_fmodd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d 
Sleef_finz_fmodd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_remainderd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_remainderd2_avx2128(__m128d, __m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_modfd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST Sleef___m128d_2 Sleef_finz_modfd2_avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_lgammad2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_lgammad2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_tgammad2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_tgammad2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_erfd2_u10avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_erfcd2_u15avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST __m128d Sleef_finz_erfcd2_u15avx2128(__m128d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd2_avx2128(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd2_avx2128(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cosf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincosf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_asinf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_finz_acosf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atanf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u35avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atan2f4_u35avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_logf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cbrtf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cosf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cosf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincosf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincosf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_asinf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acosf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_acosf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atanf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atan2f4_u10avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_atan2f4_u10avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_logf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_logf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cbrtf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_finz_cbrtf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_expf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_powf4_u10avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_powf4_u10avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_coshf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinhf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinhf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_coshf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_coshf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tanhf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tanhf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastsinf4_u3500avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fastsinf4_u3500avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastcosf4_u3500avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fastcosf4_u3500avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fastpowf4_u3500avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fastpowf4_u3500avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_asinhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_asinhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_acoshf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_acoshf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_atanhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_finz_atanhf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp2f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp2f4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp2f4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp10f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_exp10f4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_exp10f4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_expm1f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_expm1f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log10f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log10f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log2f4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log2f4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log2f4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_log1pf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_log1pf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincospif4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_sincospif4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_sincospif4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sinpif4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sinpif4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_cospif4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_cospif4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaf4_avx2128(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_finz_fmaf4_avx2128(__m128, __m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sqrtf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sqrtf4_u05avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_sqrtf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_sqrtf4_u35avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u05avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_hypotf4_u05avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_hypotf4_u35avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_hypotf4_u35avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fabsf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fabsf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_copysignf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_copysignf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmaxf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fmaxf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fminf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fminf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fdimf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fdimf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_truncf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_truncf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_floorf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_floorf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_ceilf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_ceilf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_roundf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 
Sleef_finz_roundf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_rintf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_rintf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_nextafterf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_nextafterf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_frfrexpf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_frfrexpf4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_fmodf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_fmodf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_remainderf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_remainderf4_avx2128(__m128, __m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_modff4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST Sleef___m128_2 Sleef_finz_modff4_avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_lgammaf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_lgammaf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_tgammaf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_tgammaf4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erff4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_erff4_u10avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_erfcf4_u15avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST __m128 Sleef_finz_erfcf4_u15avx2128(__m128); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf4_avx2128(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf4_avx2128(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf4_avx2128(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf4_avx2128(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u35(__m512d); 
+SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u35(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u10(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_powd8_u10(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastsind8_u3500(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastcosd8_u3500(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastpowd8_u3500(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asinhd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acoshd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d 
Sleef_atanhd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expm1d8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log10d8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log1pd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u05(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinpid8_u05(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cospid8_u05(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ldexpd8(__m512d, __m256i); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_ilogbd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmad8(__m512d, __m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u05(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u35(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u05(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u35(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fabsd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_copysignd8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmaxd8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmind8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fdimd8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_truncd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_floord8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ceild8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_roundd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_rintd8(__m512d); +SLEEF_IMPORT 
SLEEF_CONST __m512d Sleef_nextafterd8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_frfrexpd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_expfrexpd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmodd8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_remainderd8(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_modfd8(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_lgammad8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tgammad8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfd8_u10(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfcd8_u15(__m512d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd8(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd8(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u35(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u10(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST 
__m512 Sleef_logf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_powf16_u10(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastsinf16_u3500(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastcosf16_u3500(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastpowf16_u3500(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinhf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acoshf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanhf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expm1f16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log10f16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log1pf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u05(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinpif16_u05(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cospif16_u05(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaf16(__m512, __m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u05(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_sqrtf16_u35(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u05(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u35(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fabsf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_copysignf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaxf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fminf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fdimf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_truncf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_floorf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_ceilf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_roundf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_rintf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_nextafterf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_frfrexpf16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmodf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_remainderf16(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_modff16(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_lgammaf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tgammaf16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erff16_u10(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erfcf16_u15(__m512); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf16(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf16(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sind8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cosd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 
Sleef_finz_sincosd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tand8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_asind8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_acosd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atand8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u35avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atan2d8_u35avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_logd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cbrtd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sind8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cosd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tand8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_asind8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_acosd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST 
__m512d Sleef_finz_atand8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u10avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atan2d8_u10avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_logd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cbrtd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_expd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_powd8_u10avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_powd8_u10avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sinhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_coshd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tanhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sinhd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_coshd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tanhd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastsind8_u3500avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fastsind8_u3500avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastcosd8_u3500avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fastcosd8_u3500avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d 
Sleef_fastpowd8_u3500avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fastpowd8_u3500avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asinhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_asinhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acoshd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_acoshd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atanhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_atanhd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp2d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp2d8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp10d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_exp10d8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expm1d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_expm1d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log10d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log10d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log2d8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log2d8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log1pd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_log1pd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 
Sleef_finz_sincospid8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_finz_sincospid8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinpid8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sinpid8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cospid8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_cospid8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ldexpd8_avx512f(__m512d, __m256i); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_ldexpd8_avx512f(__m512d, __m256i); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_ilogbd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_finz_ilogbd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmad8_avx512f(__m512d, __m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmad8_avx512f(__m512d, __m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sqrtd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sqrtd8_u05avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_sqrtd8_u35avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u05avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_hypotd8_u05avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u35avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_hypotd8_u35avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fabsd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fabsd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_copysignd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_copysignd8_avx512f(__m512d, __m512d); 
+SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmaxd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmaxd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmind8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmind8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fdimd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fdimd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_truncd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_truncd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_floord8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_floord8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ceild8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_ceild8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_roundd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_roundd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_rintd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_rintd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_nextafterd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_nextafterd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_frfrexpd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_frfrexpd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_expfrexpd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_finz_expfrexpd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmodd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_fmodd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_remainderd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_remainderd8_avx512f(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_modfd8_avx512f(__m512d); +SLEEF_IMPORT 
SLEEF_CONST Sleef___m512d_2 Sleef_finz_modfd8_avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_lgammad8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_lgammad8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tgammad8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_tgammad8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_erfd8_u10avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfcd8_u15avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_finz_erfcd8_u15avx512f(__m512d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd8_avx512f(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd8_avx512f(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cosf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincosf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_asinf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_acosf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atanf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u35avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_finz_atan2f16_u35avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_logf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cbrtf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cosf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincosf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_asinf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_acosf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atanf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u10avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atan2f16_u10avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_logf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cbrtf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_expf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_powf16_u10avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_finz_powf16_u10avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_coshf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinhf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_coshf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tanhf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastsinf16_u3500avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fastsinf16_u3500avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastcosf16_u3500avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fastcosf16_u3500avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastpowf16_u3500avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fastpowf16_u3500avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_asinhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acoshf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_acoshf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_atanhf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp2f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST 
__m512 Sleef_finz_exp2f16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp10f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_exp10f16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expm1f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_expm1f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log10f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log10f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log2f16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log2f16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log1pf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_log1pf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincospif16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_sincospif16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinpif16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sinpif16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cospif16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_cospif16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaf16_avx512f(__m512, __m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fmaf16_avx512f(__m512, __m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sqrtf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u05avx512f(__m512); 
+SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sqrtf16_u05avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_sqrtf16_u35avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u05avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_hypotf16_u05avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u35avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_hypotf16_u35avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fabsf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fabsf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_copysignf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_copysignf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaxf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fmaxf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fminf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fminf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fdimf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fdimf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_truncf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_truncf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_floorf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_floorf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_ceilf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_ceilf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_roundf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_roundf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_rintf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_rintf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_nextafterf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_nextafterf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_frfrexpf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_frfrexpf16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmodf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_fmodf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_remainderf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_remainderf16_avx512f(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_modff16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_finz_modff16_avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_lgammaf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_lgammaf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tgammaf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_tgammaf16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erff16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_erff16_u10avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erfcf16_u15avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_finz_erfcf16_u15avx512f(__m512); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf16_avx512f(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf16_avx512f(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf16_avx512f(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf16_avx512f(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sind8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cosd8_u35avx512fnofma(__m512d); 
+SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tand8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_asind8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_acosd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atand8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u35avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atan2d8_u35avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_logd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cbrtd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sind8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sind8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cosd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cosd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tand8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tand8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asind8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d 
Sleef_cinz_asind8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acosd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_acosd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atand8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atand8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atan2d8_u10avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atan2d8_u10avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_logd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_logd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cbrtd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cbrtd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_expd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_powd8_u10avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_powd8_u10avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sinhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_coshd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tanhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinhd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sinhd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_coshd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_coshd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tanhd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d 
Sleef_cinz_tanhd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastsind8_u3500avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fastsind8_u3500avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastcosd8_u3500avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fastcosd8_u3500avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_asinhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_asinhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_acoshd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_acoshd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_atanhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_atanhd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp2d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp2d8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp2d8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp10d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_exp10d8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_exp10d8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_expm1d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_expm1d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log10d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log10d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u10avx512fnofma(__m512d); 
+SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log2d8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log2d8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log2d8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_log1pd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_log1pd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sinpid8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sinpid8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cospid8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_cospid8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ldexpd8_avx512fnofma(__m512d, __m256i); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_ldexpd8_avx512fnofma(__m512d, __m256i); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_ilogbd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_cinz_ilogbd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sqrtd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sqrtd8_u05avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_sqrtd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_sqrtd8_u35avx512fnofma(__m512d); +SLEEF_IMPORT 
SLEEF_CONST __m512d Sleef_hypotd8_u05avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_hypotd8_u05avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_hypotd8_u35avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_hypotd8_u35avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fabsd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fabsd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_copysignd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_copysignd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmaxd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmaxd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmind8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmind8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fdimd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fdimd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_truncd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_truncd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_floord8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_floord8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_ceild8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_ceild8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_roundd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_roundd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_rintd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_rintd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_nextafterd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d 
Sleef_cinz_nextafterd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_frfrexpd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_frfrexpd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_expfrexpd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m256i Sleef_cinz_expfrexpd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_fmodd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_fmodd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_remainderd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_remainderd8_avx512fnofma(__m512d, __m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_modfd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST Sleef___m512d_2 Sleef_cinz_modfd8_avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_lgammad8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_lgammad8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_tgammad8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_tgammad8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_erfd8_u10avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_erfcd8_u15avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST __m512d Sleef_cinz_erfcd8_u15avx512fnofma(__m512d); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd8_avx512fnofma(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd8_avx512fnofma(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_cinz_cosf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincosf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_asinf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_acosf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atanf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u35avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atan2f16_u35avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_logf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cbrtf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cosf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cosf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincosf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincosf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_cinz_asinf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acosf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_acosf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atanf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atan2f16_u10avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atan2f16_u10avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_logf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_logf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cbrtf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cbrtf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_expf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_powf16_u10avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_powf16_u10avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_coshf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tanhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinhf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinhf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_coshf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_coshf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tanhf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_cinz_tanhf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastsinf16_u3500avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fastsinf16_u3500avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastcosf16_u3500avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fastcosf16_u3500avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fastpowf16_u3500avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fastpowf16_u3500avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_asinhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_asinhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_acoshf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_acoshf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_atanhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_atanhf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp2f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp2f16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp2f16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp10f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_exp10f16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_exp10f16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_expm1f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_expm1f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log10f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log10f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_cinz_log2f16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log2f16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log2f16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_log1pf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_log1pf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincospif16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_sincospif16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_sincospif16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sinpif16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sinpif16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cospif16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_cospif16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaf16_avx512fnofma(__m512, __m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fmaf16_avx512fnofma(__m512, __m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sqrtf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sqrtf16_u05avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_sqrtf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_sqrtf16_u35avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u05avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_hypotf16_u05avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_hypotf16_u35avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_hypotf16_u35avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_fabsf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fabsf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_copysignf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_copysignf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmaxf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fmaxf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fminf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fminf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fdimf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fdimf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_truncf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_truncf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_floorf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_floorf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_ceilf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_ceilf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_roundf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_roundf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_rintf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_rintf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_nextafterf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_nextafterf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_frfrexpf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_frfrexpf16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_fmodf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_fmodf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 
Sleef_remainderf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_remainderf16_avx512fnofma(__m512, __m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_modff16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST Sleef___m512_2 Sleef_cinz_modff16_avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_lgammaf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_lgammaf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_tgammaf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_tgammaf16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erff16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_erff16_u10avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_erfcf16_u15avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST __m512 Sleef_cinz_erfcf16_u15avx512fnofma(__m512); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf16_avx512fnofma(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf16_avx512fnofma(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf16_avx512fnofma(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf16_avx512fnofma(int); +#endif +#ifdef __STDC__ + +#ifndef Sleef_double_2_DEFINED +typedef Sleef_double2 Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sind1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cosd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincosd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tand1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_asind1_u35purec(double); 
+SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_acosd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atand1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u35purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atan2d1_u35purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_logd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cbrtd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sind1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cosd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincosd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tand1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_asind1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_acosd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atand1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u10purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atan2d1_u10purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_logd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST 
double Sleef_cinz_cbrtd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_expd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_expd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_powd1_u10purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_powd1_u10purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sinhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_coshd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tanhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sinhd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_coshd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tanhd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fastsind1_u3500purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fastsind1_u3500purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fastcosd1_u3500purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fastcosd1_u3500purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fastpowd1_u3500purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fastpowd1_u3500purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_asinhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_asinhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_acoshd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_acoshd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atanhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_atanhd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST 
double Sleef_exp2d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp2d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp2d1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp10d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_exp10d1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_expm1d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_expm1d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log10d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log10d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log2d1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log2d1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log1pd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_log1pd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincospid1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_sincospid1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sinpid1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sinpid1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cospid1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_cospid1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_ldexpd1_purec(double, int32_t); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_ldexpd1_purec(double, int32_t); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_ilogbd1_purec(double); +SLEEF_IMPORT 
SLEEF_CONST int32_t Sleef_cinz_ilogbd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmad1_purec(double, double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmad1_purec(double, double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sqrtd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sqrtd1_u05purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_sqrtd1_u35purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u05purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_hypotd1_u05purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u35purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_hypotd1_u35purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fabsd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fabsd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_copysignd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_copysignd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmaxd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmaxd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmind1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmind1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fdimd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fdimd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_truncd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_truncd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_floord1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_floord1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_ceild1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_ceild1_purec(double); 
+SLEEF_IMPORT SLEEF_CONST double Sleef_roundd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_roundd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_rintd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_rintd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_nextafterd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_nextafterd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexpd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_frfrexpd1_purec(double); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_expfrexpd1_purec(double); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_cinz_expfrexpd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmodd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_fmodd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_remainderd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_remainderd1_purec(double, double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_modfd1_purec(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_cinz_modfd1_purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_lgammad1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_lgammad1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tgammad1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_tgammad1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_erfd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_erfd1_u10purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_erfcd1_u15purec(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cinz_erfcd1_u15purec(double); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd1_purec(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd1_purec(int); + +#ifndef Sleef_float_2_DEFINED +typedef Sleef_float2 Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float 
Sleef_cinz_sinf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cosf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincosf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_asinf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_acosf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atanf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u35purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atan2f1_u35purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_logf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cbrtf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cosf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincosf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_asinf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float 
Sleef_cinz_acosf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atanf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u10purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atan2f1_u10purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_logf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cbrtf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_expf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_expf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_powf1_u10purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_powf1_u10purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_coshf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sinhf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_coshf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tanhf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf1_u3500purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fastsinf1_u3500purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf1_u3500purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fastcosf1_u3500purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf1_u3500purec(float, float); +SLEEF_IMPORT SLEEF_CONST float 
Sleef_cinz_fastpowf1_u3500purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_asinhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_acoshf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_atanhf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp2f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp2f1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp10f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_exp10f1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_expm1f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log10f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log10f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log2f1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log2f1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_log1pf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincospif1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_sincospif1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif1_u05purec(float); +SLEEF_IMPORT 
SLEEF_CONST float Sleef_cinz_sinpif1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cospif1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_cospif1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf1_purec(float, float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fmaf1_purec(float, float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sqrtf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sqrtf1_u05purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_sqrtf1_u35purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u05purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_hypotf1_u05purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u35purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_hypotf1_u35purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fabsf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_copysignf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fmaxf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fminf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fminf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fdimf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_truncf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_truncf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_floorf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_floorf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf1_purec(float); 
+SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_ceilf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_roundf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_roundf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_rintf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_rintf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_nextafterf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_frfrexpf1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_fmodf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_remainderf1_purec(float, float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_modff1_purec(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_cinz_modff1_purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_lgammaf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_tgammaf1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_erff1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_erff1_u10purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf1_u15purec(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cinz_erfcf1_u15purec(float); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf1_purec(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_cinz_getIntf1_purec(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf1_purec(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_cinz_getPtrf1_purec(int); +#endif +#ifdef __STDC__ + +#ifndef Sleef_double_2_DEFINED +typedef Sleef_double2 Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u35purecfma(double); +SLEEF_IMPORT 
SLEEF_CONST double Sleef_finz_sind1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cosd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincosd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tand1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_asind1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_acosd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atand1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u35purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atan2d1_u35purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_logd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cbrtd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sind1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cosd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincosd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tand1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u10purecfma(double); +SLEEF_IMPORT 
SLEEF_CONST double Sleef_finz_asind1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_acosd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atand1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u10purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atan2d1_u10purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_logd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cbrtd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_expd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_expd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_powd1_u10purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_powd1_u10purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sinhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_coshd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tanhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sinhd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_coshd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tanhd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fastsind1_u3500purecfma(double); +SLEEF_IMPORT SLEEF_CONST double 
Sleef_finz_fastsind1_u3500purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fastcosd1_u3500purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fastcosd1_u3500purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fastpowd1_u3500purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fastpowd1_u3500purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_asinhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_asinhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_acoshd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_acoshd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_atanhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_atanhd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp2d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp2d1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp10d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_exp10d1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_expm1d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_expm1d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log10d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log10d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log2d1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_log2d1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_log1pd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST 
double Sleef_finz_log1pd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincospid1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_sincospid1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sinpid1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sinpid1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_cospid1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_cospid1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_ldexpd1_purecfma(double, int32_t); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_ldexpd1_purecfma(double, int32_t); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_ilogbd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_finz_ilogbd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmad1_purecfma(double, double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmad1_purecfma(double, double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sqrtd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sqrtd1_u05purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_sqrtd1_u35purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u05purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_hypotd1_u05purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u35purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_hypotd1_u35purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fabsd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fabsd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double 
Sleef_copysignd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_copysignd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmaxd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmaxd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmind1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmind1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fdimd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fdimd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_truncd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_truncd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_floord1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_floord1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_ceild1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_ceild1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_roundd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_roundd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_rintd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_rintd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_nextafterd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_nextafterd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexpd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_frfrexpd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_expfrexpd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_finz_expfrexpd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmodd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_fmodd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_remainderd1_purecfma(double, double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_remainderd1_purecfma(double, double); 
+SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_modfd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_finz_modfd1_purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_lgammad1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_lgammad1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_tgammad1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_tgammad1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_erfd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_erfd1_u10purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_erfcd1_u15purecfma(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_finz_erfcd1_u15purecfma(double); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd1_purecfma(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd1_purecfma(int); + +#ifndef Sleef_float_2_DEFINED +typedef Sleef_float2 Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cosf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincosf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_asinf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_acosf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atanf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u35purecfma(float, float); +SLEEF_IMPORT 
SLEEF_CONST float Sleef_finz_atan2f1_u35purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_logf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cbrtf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cosf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincosf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_asinf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_acosf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atanf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u10purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atan2f1_u10purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_logf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cbrtf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_expf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_expf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_powf1_u10purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_powf1_u10purecfma(float, float); 
+SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_coshf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinhf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_coshf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tanhf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf1_u3500purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fastsinf1_u3500purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf1_u3500purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fastcosf1_u3500purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf1_u3500purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fastpowf1_u3500purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_asinhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_acoshf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_atanhf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp2f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp2f1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float 
Sleef_exp10f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp10f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_exp10f1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_expm1f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log10f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log10f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log2f1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log2f1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_log1pf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincospif1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_sincospif1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sinpif1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_cospif1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_cospif1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf1_purecfma(float, float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fmaf1_purecfma(float, float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sqrtf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sqrtf1_u05purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u35purecfma(float); 
+SLEEF_IMPORT SLEEF_CONST float Sleef_finz_sqrtf1_u35purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u05purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_hypotf1_u05purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u35purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_hypotf1_u35purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fabsf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_copysignf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fmaxf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fminf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fminf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fdimf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_truncf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_truncf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_floorf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_floorf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_ceilf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_roundf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_roundf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_rintf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_rintf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_nextafterf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float 
Sleef_finz_frfrexpf1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_fmodf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_remainderf1_purecfma(float, float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_modff1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_finz_modff1_purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_lgammaf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_tgammaf1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_erff1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_erff1_u10purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf1_u15purecfma(float); +SLEEF_IMPORT SLEEF_CONST float Sleef_finz_erfcf1_u15purecfma(float); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf1_purecfma(int); +SLEEF_IMPORT SLEEF_CONST int Sleef_finz_getIntf1_purecfma(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf1_purecfma(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_finz_getPtrf1_purecfma(int); +#endif +#ifdef __STDC__ + +#ifndef Sleef_double_2_DEFINED +typedef Sleef_double2 Sleef_double_2; +#define Sleef_double_2_DEFINED +#endif + +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u35(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u35(double); 
+SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u35(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sind1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cosd1_u10(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincosd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tand1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asind1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acosd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atand1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atan2d1_u10(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_logd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cbrtd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_expd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_powd1_u10(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinhd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_coshd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tanhd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fastsind1_u3500(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fastcosd1_u3500(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST 
double Sleef_fastpowd1_u3500(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_asinhd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_acoshd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_atanhd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp2d1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_exp10d1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_expm1d1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log10d1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log2d1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_log1pd1_u10(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u05(double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_sincospid1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sinpid1_u05(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_cospid1_u05(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_ldexpd1(double, int32_t); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_ilogbd1(double); +SLEEF_IMPORT SLEEF_CONST double Sleef_fmad1(double, double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u05(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_sqrtd1_u35(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u05(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_hypotd1_u35(double, double); 
+SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fabsd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_copysignd1(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmaxd1(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmind1(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fdimd1(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_truncd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_floord1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_ceild1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_roundd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_rintd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_nextafterd1(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_frfrexpd1(double); +SLEEF_IMPORT SLEEF_CONST int32_t Sleef_expfrexpd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_fmodd1(double, double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_remainderd1(double, double); +SLEEF_IMPORT SLEEF_CONST Sleef_double_2 Sleef_modfd1(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_lgammad1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_tgammad1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erfd1_u10(double); +SLEEF_PRAGMA_OMP_SIMD_DP SLEEF_IMPORT SLEEF_CONST double Sleef_erfcd1_u15(double); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntd1(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrd1(int); + +#ifndef Sleef_float_2_DEFINED +typedef Sleef_float2 Sleef_float_2; +#define Sleef_float_2_DEFINED +#endif + +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT 
SLEEF_CONST float Sleef_cosf1_u35(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u35(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cosf1_u10(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincosf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acosf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atan2f1_u10(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_logf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cbrtf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_powf1_u10(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinhf1_u35(float); 
+SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_coshf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tanhf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastsinf1_u3500(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastcosf1_u3500(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fastpowf1_u3500(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_asinhf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_acoshf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_atanhf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp2f1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_exp10f1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_expm1f1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log10f1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log2f1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_log1pf1_u10(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u05(float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_sincospif1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sinpif1_u05(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_cospif1_u05(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaf1(float, float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u05(float); 
+SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_sqrtf1_u35(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u05(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_hypotf1_u35(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fabsf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_copysignf1(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmaxf1(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fminf1(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fdimf1(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_truncf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_floorf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_ceilf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_roundf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_rintf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_nextafterf1(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_frfrexpf1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_fmodf1(float, float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_remainderf1(float, float); +SLEEF_IMPORT SLEEF_CONST Sleef_float_2 Sleef_modff1(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_lgammaf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_tgammaf1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erff1_u10(float); +SLEEF_PRAGMA_OMP_SIMD_SP SLEEF_IMPORT SLEEF_CONST float Sleef_erfcf1_u15(float); +SLEEF_IMPORT SLEEF_CONST int Sleef_getIntf1(int); +SLEEF_IMPORT SLEEF_CONST void *Sleef_getPtrf1(int); +#endif + +// + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // 
#ifndef __SLEEF_H__ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/all_renames_v2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/all_renames_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd3e92be568764a97320392b8ca0c2bc744dbde1 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/all_renames_v2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/ast_edits.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/ast_edits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c114a8f33a699038e281ee04d6a9436634382546 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/ast_edits.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/module_deprecations_v2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/module_deprecations_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5941c3799eaecf77da806c5a3c666268c825a8d0 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/module_deprecations_v2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/reorders_v2.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/reorders_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2b1d1662b047b6b1a555141e0504c6489732b68 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/reorders_v2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/tf_upgrade_v2_safety.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/tf_upgrade_v2_safety.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7273b3aa4e232d14b8d1c71cf44a334dafa20cc Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/__pycache__/tf_upgrade_v2_safety.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/all_renames_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/all_renames_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..6fb040c4c9c61223983c42a282609c0a5fd54ba1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/all_renames_v2.py @@ -0,0 +1,517 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Provides a list of renames between TensorFlow 1.* and 2.0.""" +from tensorflow.tools.compatibility import renames_v2 + +# pylint: disable=line-too-long + +# Add additional renames not in renames_v2.py here. +# IMPORTANT: For the renames in here, if you also need to add to +# function_reorders or function_keyword_renames in tf_upgrade_v2.py, +# use the OLD function name. +# These renames happen after the arguments have been processed. +# After modifying this dict, run the following to update reorders_v2.py: +# bazel run tensorflow/tools/compatibility/update:generate_v2_reorders_map +manual_symbol_renames = { + "tf.batch_to_space_nd": "tf.batch_to_space", + "tf.batch_gather": "tf.compat.v1.batch_gather", + "tf.space_to_batch_nd": "tf.space_to_batch", + "tf.nn.space_to_batch": "tf.space_to_batch", + "tf.extract_image_patches": "tf.image.extract_patches", + "tf.image.extract_image_patches": "tf.image.extract_patches", + "tf.gfile.Copy": "tf.io.gfile.copy", + "tf.gfile.DeleteRecursively": "tf.io.gfile.rmtree", + "tf.gfile.Exists": "tf.io.gfile.exists", + "tf.gfile.Glob": "tf.io.gfile.glob", + "tf.gfile.GFile": "tf.io.gfile.GFile", + "tf.gfile.IsDirectory": "tf.io.gfile.isdir", + "tf.gfile.ListDirectory": "tf.io.gfile.listdir", + "tf.gfile.MakeDirs": "tf.io.gfile.makedirs", + "tf.gfile.MkDir": "tf.io.gfile.mkdir", + "tf.gfile.Open": "tf.io.gfile.GFile", + "tf.gfile.Remove": "tf.io.gfile.remove", + "tf.gfile.Rename": "tf.io.gfile.rename", + "tf.gfile.Stat": "tf.io.gfile.stat", + "tf.gfile.Walk": "tf.io.gfile.walk", + "tf.contrib.cluster_resolver.ClusterResolver": ( + "tf.distribute.cluster_resolver.ClusterResolver" + ), + "tf.contrib.cluster_resolver.GceClusterResolver": ( + "tf.distribute.cluster_resolver.GCEClusterResolver" + ), + "tf.contrib.cluster_resolver.KubernetesClusterResolver": ( + "tf.distribute.cluster_resolver.KubernetesClusterResolver" + ), + 
"tf.contrib.cluster_resolver.SimpleClusterResolver": ( + "tf.distribute.cluster_resolver.SimpleClusterResolver" + ), + "tf.contrib.cluster_resolver.SlurmClusterResolver": ( + "tf.distribute.cluster_resolver.SlurmClusterResolver" + ), + "tf.contrib.cluster_resolver.TFConfigClusterResolver": ( + "tf.distribute.cluster_resolver.TFConfigClusterResolver" + ), + "tf.contrib.cluster_resolver.TPUClusterResolver": ( + "tf.distribute.cluster_resolver.TPUClusterResolver" + ), + "tf.contrib.cluster_resolver.UnionClusterResolver": ( + "tf.distribute.cluster_resolver.UnionClusterResolver" + ), + "tf.contrib.data.AUTOTUNE": "tf.data.experimental.AUTOTUNE", + "tf.contrib.data.Counter": "tf.data.experimental.Counter", + "tf.contrib.data.CheckpointInputPipelineHook": ( + "tf.data.experimental.CheckpointInputPipelineHook" + ), + "tf.contrib.data.CsvDataset": "tf.data.experimental.CsvDataset", + "tf.contrib.data.Optional": "tf.data.experimental.Optional", + "tf.contrib.data.RandomDataset": "tf.data.experimental.RandomDataset", + "tf.contrib.data.Reducer": "tf.data.experimental.Reducer", + "tf.contrib.data.SqlDataset": "tf.data.experimental.SqlDataset", + "tf.contrib.data.StatsAggregator": "tf.data.experimental.StatsAggregator", + "tf.contrib.data.TFRecordWriter": "tf.data.experimental.TFRecordWriter", + "tf.contrib.data.assert_element_shape": ( + "tf.data.experimental.assert_element_shape" + ), + "tf.contrib.data.bucket_by_sequence_length": ( + "tf.data.experimental.bucket_by_sequence_length" + ), + "tf.contrib.data.choose_from_datasets": ( + "tf.data.experimental.choose_from_datasets" + ), + "tf.contrib.data.copy_to_device": "tf.data.experimental.copy_to_device", + "tf.contrib.data.dense_to_sparse_batch": ( + "tf.data.experimental.dense_to_sparse_batch" + ), + "tf.contrib.data.enumerate_dataset": ( + "tf.data.experimental.enumerate_dataset" + ), + "tf.contrib.data.get_next_as_optional": ( + "tf.data.experimental.get_next_as_optional" + ), + "tf.contrib.data.get_single_element": ( + 
"tf.data.experimental.get_single_element" + ), + "tf.contrib.data.group_by_reducer": "tf.data.experimental.group_by_reducer", + "tf.contrib.data.group_by_window": "tf.data.experimental.group_by_window", + "tf.contrib.data.ignore_errors": "tf.data.experimental.ignore_errors", + "tf.contrib.data.latency_stats": "tf.data.experimental.latency_stats", + "tf.contrib.data.make_batched_features_dataset": ( + "tf.data.experimental.make_batched_features_dataset" + ), + "tf.contrib.data.make_csv_dataset": "tf.data.experimental.make_csv_dataset", + "tf.contrib.data.make_saveable_from_iterator": ( + "tf.data.experimental.make_saveable_from_iterator" + ), + "tf.contrib.data.map_and_batch": "tf.data.experimental.map_and_batch", + "tf.contrib.data.parallel_interleave": ( + "tf.data.experimental.parallel_interleave" + ), + "tf.contrib.data.parse_example_dataset": ( + "tf.data.experimental.parse_example_dataset" + ), + "tf.contrib.data.prefetch_to_device": ( + "tf.data.experimental.prefetch_to_device" + ), + "tf.contrib.data.rejection_resample": ( + "tf.data.experimental.rejection_resample" + ), + "tf.contrib.data.sample_from_datasets": ( + "tf.data.experimental.sample_from_datasets" + ), + "tf.contrib.data.scan": "tf.data.experimental.scan", + "tf.contrib.data.set_stats_aggregator": ( + "tf.data.experimental.set_stats_aggregator" + ), + "tf.contrib.data.shuffle_and_repeat": ( + "tf.data.experimental.shuffle_and_repeat" + ), + "tf.contrib.data.unbatch": "tf.data.experimental.unbatch", + "tf.contrib.data.unique": "tf.data.experimental.unique", + "tf.contrib.distribute.CrossDeviceOps": "tf.distribute.CrossDeviceOps", + "tf.contrib.distribute.ReductionToOneDeviceCrossDeviceOps": ( + "tf.distribute.ReductionToOneDevice" + ), + "tf.contrib.framework.CriticalSection": "tf.CriticalSection", + "tf.contrib.framework.is_tensor": "tf.is_tensor", + "tf.contrib.framework.load_variable": "tf.train.load_variable", + "tf.contrib.framework.nest.assert_same_structure": ( + 
"tf.nest.assert_same_structure" + ), + "tf.contrib.framework.nest.flatten": "tf.nest.flatten", + "tf.contrib.framework.nest.is_nested": "tf.nest.is_nested", + "tf.contrib.framework.nest.map_structure": "tf.nest.map_structure", + "tf.contrib.framework.nest.pack_sequence_as": "tf.nest.pack_sequence_as", + "tf.contrib.batching.batch_function": "tf.nondifferentiable_batch_function", + "tf.contrib.util.constant_value": "tf.get_static_value", + "tf.contrib.saved_model.load_keras_model": ( + "tf.compat.v1.keras.experimental.load_from_saved_model" + ), + "tf.contrib.saved_model.save_keras_model": ( + "tf.compat.v1.keras.experimental.export_saved_model" + ), + "tf.contrib.rnn.RNNCell": "tf.compat.v1.nn.rnn_cell.RNNCell", + "tf.contrib.rnn.LSTMStateTuple": "tf.nn.rnn_cell.LSTMStateTuple", + "tf.contrib.rnn.BasicLSTMCell": "tf.compat.v1.nn.rnn_cell.BasicLSTMCell", + "tf.contrib.rnn.BasicRNNCell": "tf.compat.v1.nn.rnn_cell.BasicRNNCell", + "tf.contrib.rnn.GRUCell": "tf.compat.v1.nn.rnn_cell.GRUCell", + "tf.contrib.rnn.LSTMCell": "tf.compat.v1.nn.rnn_cell.LSTMCell", + "tf.contrib.rnn.MultiRNNCell": "tf.compat.v1.nn.rnn_cell.MultiRNNCell", + "tf.contrib.rnn.static_rnn": "tf.compat.v1.nn.static_rnn", + "tf.contrib.rnn.static_state_saving_rnn": ( + "tf.compat.v1.nn.static_state_saving_rnn" + ), + "tf.contrib.rnn.static_bidirectional_rnn": ( + "tf.compat.v1.nn.static_bidirectional_rnn" + ), + "tf.contrib.framework.sort": "tf.sort", + "tf.contrib.framework.argsort": "tf.argsort", + "tf.contrib.summary.all_summary_ops": ( + "tf.compat.v1.summary.all_v2_summary_ops" + ), + "tf.contrib.summary.always_record_summaries": ( + "tf.compat.v2.summary.record_if" + ), + "tf.contrib.summary.audio": "tf.compat.v2.summary.audio", + "tf.contrib.summary.create_file_writer": ( + "tf.compat.v2.summary.create_file_writer" + ), + "tf.contrib.summary.flush": "tf.compat.v2.summary.flush", + "tf.contrib.summary.generic": "tf.compat.v2.summary.write", + "tf.contrib.summary.histogram": 
"tf.compat.v2.summary.histogram", + "tf.contrib.summary.image": "tf.compat.v2.summary.image", + "tf.contrib.summary.initialize": "tf.compat.v1.summary.initialize", + "tf.contrib.summary.never_record_summaries": ( + "tf.compat.v2.summary.record_if" + ), + "tf.contrib.summary.scalar": "tf.compat.v2.summary.scalar", + "tf.contrib.tpu.CrossShardOptimizer": ( + "tf.compat.v1.tpu.CrossShardOptimizer" + ), + "tf.contrib.tpu.batch_parallel": "tf.compat.v1.tpu.batch_parallel", + "tf.contrib.tpu.bfloat16_scope": "tf.compat.v1.tpu.bfloat16_scope", + "tf.contrib.tpu.core": "tf.compat.v1.tpu.core", + "tf.contrib.tpu.cross_replica_sum": "tf.compat.v1.tpu.cross_replica_sum", + "tf.contrib.tpu.initialize_system": "tf.compat.v1.tpu.initialize_system", + "tf.contrib.tpu.outside_compilation": ( + "tf.compat.v1.tpu.outside_compilation" + ), + "tf.contrib.tpu.replicate": "tf.compat.v1.tpu.replicate", + "tf.contrib.tpu.rewrite": "tf.compat.v1.tpu.rewrite", + "tf.contrib.tpu.shard": "tf.compat.v1.tpu.shard", + "tf.contrib.tpu.shutdown_system": "tf.compat.v1.tpu.shutdown_system", + "tf.contrib.training.checkpoints_iterator": "tf.train.checkpoints_iterator", + "tf.contrib.layers.recompute_grad": "tf.recompute_grad", + "tf.count_nonzero": "tf.math.count_nonzero", + "tf.decode_raw": "tf.io.decode_raw", + "tf.manip.batch_to_space_nd": "tf.batch_to_space", + "tf.quantize_v2": "tf.quantization.quantize", + "tf.sparse_matmul": "tf.linalg.matmul", + "tf.random.stateless_multinomial": "tf.random.stateless_categorical", + "tf.substr": "tf.strings.substr", + # TODO(b/129398290) + "tf.string_split": "tf.compat.v1.string_split", + "tf.string_to_hash_bucket": "tf.strings.to_hash_bucket", + "tf.string_to_number": "tf.strings.to_number", + "tf.multinomial": "tf.random.categorical", + "tf.random.multinomial": "tf.random.categorical", + "tf.reduce_join": "tf.strings.reduce_join", + "tf.load_file_system_library": "tf.load_library", + "tf.bincount": "tf.math.bincount", + "tf.confusion_matrix": 
"tf.math.confusion_matrix", + "tf.train.confusion_matrix": "tf.math.confusion_matrix", + "tf.train.sdca_fprint": "tf.raw_ops.SdcaFprint", + "tf.train.sdca_optimizer": "tf.raw_ops.SdcaOptimizer", + "tf.train.sdca_shrink_l1": "tf.raw_ops.SdcaShrinkL1", + "tf.decode_csv": "tf.io.decode_csv", + "tf.data.Iterator": "tf.compat.v1.data.Iterator", + "tf.data.experimental.DatasetStructure": "tf.data.DatasetSpec", + "tf.data.experimental.OptionalStructure": "tf.OptionalSpec", + "tf.data.experimental.RaggedTensorStructure": "tf.RaggedTensorSpec", + "tf.data.experimental.SparseTensorStructure": "tf.SparseTensorSpec", + "tf.data.experimental.Structure": "tf.TypeSpec", + "tf.data.experimental.TensorArrayStructure": "tf.TensorArraySpec", + "tf.data.experimental.TensorStructure": "tf.TensorSpec", + "tf.parse_example": "tf.io.parse_example", + "tf.parse_single_example": "tf.io.parse_single_example", + "tf.nn.fused_batch_norm": "tf.compat.v1.nn.fused_batch_norm", + "tf.nn.softmax_cross_entropy_with_logits_v2": ( + "tf.nn.softmax_cross_entropy_with_logits" + ), + "tf.losses.Reduction.MEAN": "tf.compat.v1.losses.Reduction.MEAN", + "tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS": ( + "tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS" + ), + "tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS": ( + "tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS" + ), + "tf.lite.constants.FLOAT": "tf.float32", + "tf.lite.constants.FLOAT16": "tf.float16", + "tf.lite.constants.INT16": "tf.int16", + "tf.lite.constants.INT32": "tf.int32", + "tf.lite.constants.INT64": "tf.int64", + "tf.lite.constants.INT8": "tf.int8", + "tf.lite.constants.STRING": "tf.string", + "tf.lite.constants.QUANTIZED_UINT8": "tf.uint8", + "tf.arg_max": "tf.argmax", + "tf.arg_min": "tf.argmin", + # tf.nn.ctc_loss is still available in 2.0 but behavior + # changed significantly. + "tf.nn.ctc_loss": "tf.compat.v1.nn.ctc_loss", + # tf.saved_model.load in 1.x has no equivalent in 2.x, but there is a + # symbol with the same name. 
+ "tf.saved_model.load": "tf.compat.v1.saved_model.load", + "tf.saved_model.loader.load": "tf.compat.v1.saved_model.load", + "tf.saved_model.load_v2": "tf.compat.v2.saved_model.load", + "tf.image.resize_images": "tf.image.resize", + "tf.assert_equal": "tf.compat.v1.assert_equal", + "tf.assert_greater": "tf.compat.v1.assert_greater", + "tf.assert_greater_equal": "tf.compat.v1.assert_greater_equal", + "tf.assert_integer": "tf.compat.v1.assert_integer", + "tf.assert_less": "tf.compat.v1.assert_less", + "tf.assert_less_equal": "tf.compat.v1.assert_less_equal", + "tf.assert_near": "tf.compat.v1.assert_near", + "tf.assert_negative": "tf.compat.v1.assert_negative", + "tf.assert_non_negative": "tf.compat.v1.assert_non_negative", + "tf.assert_non_positive": "tf.compat.v1.assert_non_positive", + "tf.assert_none_equal": "tf.compat.v1.assert_none_equal", + "tf.assert_positive": "tf.compat.v1.assert_positive", + "tf.assert_rank": "tf.compat.v1.assert_rank", + "tf.assert_rank_at_least": "tf.compat.v1.assert_rank_at_least", + "tf.assert_rank_in": "tf.compat.v1.assert_rank_in", + "tf.assert_scalar": "tf.compat.v1.assert_scalar", + "tf.assert_type": "tf.compat.v1.assert_type", + "tf.assert_variables_initialized": ( + "tf.compat.v1.assert_variables_initialized" + ), + "tf.debugging.assert_equal": "tf.compat.v1.debugging.assert_equal", + "tf.debugging.assert_greater": "tf.compat.v1.debugging.assert_greater", + "tf.debugging.assert_greater_equal": ( + "tf.compat.v1.debugging.assert_greater_equal" + ), + "tf.debugging.assert_integer": "tf.compat.v1.debugging.assert_integer", + "tf.debugging.assert_less": "tf.compat.v1.debugging.assert_less", + "tf.debugging.assert_less_equal": ( + "tf.compat.v1.debugging.assert_less_equal" + ), + "tf.debugging.assert_near": "tf.compat.v1.debugging.assert_near", + "tf.debugging.assert_negative": "tf.compat.v1.debugging.assert_negative", + "tf.debugging.assert_non_negative": ( + "tf.compat.v1.debugging.assert_non_negative" + ), + 
"tf.debugging.assert_non_positive": ( + "tf.compat.v1.debugging.assert_non_positive" + ), + "tf.debugging.assert_none_equal": ( + "tf.compat.v1.debugging.assert_none_equal" + ), + "tf.debugging.assert_positive": "tf.compat.v1.debugging.assert_positive", + "tf.debugging.assert_rank": "tf.compat.v1.debugging.assert_rank", + "tf.debugging.assert_rank_at_least": ( + "tf.compat.v1.debugging.assert_rank_at_least" + ), + "tf.debugging.assert_rank_in": "tf.compat.v1.debugging.assert_rank_in", + "tf.debugging.assert_scalar": "tf.compat.v1.debugging.assert_scalar", + "tf.debugging.assert_type": "tf.compat.v1.debugging.assert_type", + "tf.errors.exception_type_from_error_code": ( + "tf.compat.v1.errors.exception_type_from_error_code" + ), + "tf.errors.error_code_from_exception_type": ( + "tf.compat.v1.errors.error_code_from_exception_type" + ), + "tf.errors.raise_exception_on_not_ok_status": ( + "tf.compat.v1.errors.raise_exception_on_not_ok_status" + ), + "tf.nn.max_pool": "tf.nn.max_pool2d", + "tf.nn.avg_pool": "tf.nn.avg_pool2d", + "tf.keras.initializers.zeros": "tf.compat.v1.keras.initializers.zeros", + "tf.keras.initializers.Zeros": "tf.compat.v1.keras.initializers.Zeros", + "tf.keras.initializers.ones": "tf.compat.v1.keras.initializers.ones", + "tf.keras.initializers.Ones": "tf.compat.v1.keras.initializers.Ones", + "tf.keras.initializers.constant": ( + "tf.compat.v1.keras.initializers.constant" + ), + "tf.keras.initializers.Constant": ( + "tf.compat.v1.keras.initializers.Constant" + ), + "tf.keras.initializers.VarianceScaling": ( + "tf.compat.v1.keras.initializers.VarianceScaling" + ), + "tf.keras.initializers.Orthogonal": ( + "tf.compat.v1.keras.initializers.Orthogonal" + ), + "tf.keras.initializers.orthogonal": ( + "tf.compat.v1.keras.initializers.orthogonal" + ), + "tf.keras.initializers.Identity": ( + "tf.compat.v1.keras.initializers.Identity" + ), + "tf.keras.initializers.identity": ( + "tf.compat.v1.keras.initializers.identity" + ), + 
"tf.keras.initializers.glorot_uniform": ( + "tf.compat.v1.keras.initializers.glorot_uniform" + ), + "tf.keras.initializers.glorot_normal": ( + "tf.compat.v1.keras.initializers.glorot_normal" + ), + "tf.keras.initializers.lecun_normal": ( + "tf.compat.v1.keras.initializers.lecun_normal" + ), + "tf.keras.initializers.lecun_uniform": ( + "tf.compat.v1.keras.initializers.lecun_uniform" + ), + "tf.keras.initializers.he_normal": ( + "tf.compat.v1.keras.initializers.he_normal" + ), + "tf.keras.initializers.he_uniform": ( + "tf.compat.v1.keras.initializers.he_uniform" + ), + "tf.keras.initializers.TruncatedNormal": ( + "tf.compat.v1.keras.initializers.TruncatedNormal" + ), + "tf.keras.initializers.truncated_normal": ( + "tf.compat.v1.keras.initializers.truncated_normal" + ), + "tf.keras.initializers.RandomUniform": ( + "tf.compat.v1.keras.initializers.RandomUniform" + ), + "tf.keras.initializers.uniform": "tf.compat.v1.keras.initializers.uniform", + "tf.keras.initializers.random_uniform": ( + "tf.compat.v1.keras.initializers.random_uniform" + ), + "tf.keras.initializers.RandomNormal": ( + "tf.compat.v1.keras.initializers.RandomNormal" + ), + "tf.keras.initializers.normal": "tf.compat.v1.keras.initializers.normal", + "tf.keras.initializers.random_normal": ( + "tf.compat.v1.keras.initializers.random_normal" + ), + "tf.zeros_initializer": "tf.compat.v1.zeros_initializer", + "tf.initializers.zeros": "tf.compat.v1.initializers.zeros", + "tf.ones_initializer": "tf.compat.v1.ones_initializer", + "tf.initializers.ones": "tf.compat.v1.initializers.ones", + "tf.constant_initializer": "tf.compat.v1.constant_initializer", + "tf.initializers.constant": "tf.compat.v1.initializers.constant", + "tf.random_uniform_initializer": "tf.compat.v1.random_uniform_initializer", + "tf.initializers.random_uniform": ( + "tf.compat.v1.initializers.random_uniform" + ), + "tf.random_normal_initializer": "tf.compat.v1.random_normal_initializer", + "tf.initializers.random_normal": 
"tf.compat.v1.initializers.random_normal", + "tf.truncated_normal_initializer": ( + "tf.compat.v1.truncated_normal_initializer" + ), + "tf.initializers.truncated_normal": ( + "tf.compat.v1.initializers.truncated_normal" + ), + "tf.variance_scaling_initializer": ( + "tf.compat.v1.variance_scaling_initializer" + ), + "tf.initializers.variance_scaling": ( + "tf.compat.v1.initializers.variance_scaling" + ), + "tf.orthogonal_initializer": "tf.compat.v1.orthogonal_initializer", + "tf.initializers.orthogonal": "tf.compat.v1.initializers.orthogonal", + "tf.glorot_uniform_initializer": "tf.compat.v1.glorot_uniform_initializer", + "tf.initializers.glorot_uniform": ( + "tf.compat.v1.initializers.glorot_uniform" + ), + "tf.glorot_normal_initializer": "tf.compat.v1.glorot_normal_initializer", + "tf.initializers.glorot_normal": "tf.compat.v1.initializers.glorot_normal", + "tf.initializers.identity": "tf.compat.v1.initializers.identity", + "tf.initializers.lecun_normal": "tf.compat.v1.initializers.lecun_normal", + "tf.initializers.lecun_uniform": "tf.compat.v1.initializers.lecun_uniform", + "tf.initializers.he_normal": "tf.compat.v1.initializers.he_normal", + "tf.initializers.he_uniform": "tf.compat.v1.initializers.he_uniform", + "tf.data.experimental.map_and_batch_with_legacy_function": ( + "tf.compat.v1.data.experimental.map_and_batch_with_legacy_function" + ), + "tf.nn.conv2d_backprop_input": "tf.nn.conv2d_transpose", + "tf.test.compute_gradient": "tf.compat.v1.test.compute_gradient", + "tf.floor_div": "tf.math.floordiv", + "tf.where": "tf.compat.v1.where", + "tf.where_v2": "tf.compat.v2.where", + "tf.app.flags": "tf.compat.v1.app.flags", +} +# pylint: enable=line-too-long + + +def add_contrib_direct_import_support(symbol_dict): + """Add support for `tf.contrib.*` alias `contrib_*.` Updates dict in place.""" + for symbol_name in list(symbol_dict.keys()): + symbol_alias = symbol_name.replace("tf.contrib.", "contrib_") + symbol_dict[symbol_alias] = symbol_dict[symbol_name] + 
+add_contrib_direct_import_support(manual_symbol_renames) + +symbol_renames = renames_v2.renames +symbol_renames.update(manual_symbol_renames) + +addons_symbol_mappings = { + "tf.contrib.layers.poincare_normalize": + "tfa.layers.PoincareNormalize", + "tf.contrib.layers.maxout": + "tfa.layers.Maxout", + "tf.contrib.layers.group_norm": + "tfa.layers.GroupNormalization", + "tf.contrib.layers.instance_norm": + "tfa.layers.InstanceNormalization", + "tf.contrib.sparsemax.sparsemax": + "tfa.activations.sparsemax", + "tf.contrib.losses.metric_learning.contrastive_loss": + "tfa.losses.ContrastiveLoss", + "tf.contrib.losses.metric_learning.lifted_struct_loss": + "tfa.losses.LiftedStructLoss", + "tf.contrib.sparsemax.sparsemax_loss": + "tfa.losses.SparsemaxLoss", + "tf.contrib.losses.metric_learning.triplet_semihard_loss": + "tfa.losses.TripletSemiHardLoss", + "tf.contrib.opt.LazyAdamOptimizer": + "tfa.optimizers.LazyAdam", + "tf.contrib.opt.MovingAverageOptimizer": + "tfa.optimizers.MovingAverage", + "tf.contrib.opt.MomentumWOptimizer": + "tfa.optimizers.SGDW", + "tf.contrib.opt.AdamWOptimizer": + "tfa.optimizers.AdamW", + "tf.contrib.opt.extend_with_decoupled_weight_decay": + "tfa.optimizers.extend_with_decoupled_weight_decay", + "tf.contrib.text.skip_gram_sample": + "tfa.text.skip_gram_sample", + "tf.contrib.text.skip_gram_sample_with_text_vocab": + "tfa.text.skip_gram_sample_with_text_vocab", + "tf.contrib.image.dense_image_warp": + "tfa.image.dense_image_warp", + "tf.contrib.image.adjust_hsv_in_yiq": + "tfa.image.adjust_hsv_in_yiq", + "tf.contrib.image.compose_transforms": + "tfa.image.compose_transforms", + "tf.contrib.image.random_hsv_in_yiq": + "tfa.image.random_hsv_in_yiq", + "tf.contrib.image.angles_to_projective_transforms": + "tfa.image.angles_to_projective_transforms", + "tf.contrib.image.matrices_to_flat_transforms": + "tfa.image.matrices_to_flat_transforms", + "tf.contrib.image.rotate": + "tfa.image.rotate", + "tf.contrib.image.transform": + 
"tfa.image.transform", + "tf.contrib.rnn.NASCell": + "tfa.rnn.NASCell", + "tf.contrib.rnn.LayerNormBasicLSTMCell": + "tfa.rnn.LayerNormLSTMCell" +} + +add_contrib_direct_import_support(addons_symbol_mappings) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/ast_edits.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/ast_edits.py new file mode 100644 index 0000000000000000000000000000000000000000..ed595ba05135b0fe74b8882be205433df72ef32e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/ast_edits.py @@ -0,0 +1,1101 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Upgrader for Python scripts according to an API change specification.""" + +import ast +import collections +import os +import re +import shutil +import sys +import tempfile +import traceback + +import pasta + + +# Some regular expressions we will need for parsing +FIND_OPEN = re.compile(r"^\s*(\[).*$") +FIND_STRING_CHARS = re.compile(r"['\"]") + + +INFO = "INFO" +WARNING = "WARNING" +ERROR = "ERROR" + + +ImportRename = collections.namedtuple( + "ImportRename", ["new_name", "excluded_prefixes"]) + + +def full_name_node(name, ctx=ast.Load()): + """Make an Attribute or Name node for name. + + Translate a qualified name into nested Attribute nodes (and a Name node). 
+ + Args: + name: The name to translate to a node. + ctx: What context this name is used in. Defaults to Load() + + Returns: + A Name or Attribute node. + """ + names = name.split(".") + names.reverse() + node = ast.Name(id=names.pop(), ctx=ast.Load()) + while names: + node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load()) + + # Change outermost ctx to the one given to us (inner ones should be Load). + node.ctx = ctx + return node + + +def get_arg_value(node, arg_name, arg_pos=None): + """Get the value of an argument from a ast.Call node. + + This function goes through the positional and keyword arguments to check + whether a given argument was used, and if so, returns its value (the node + representing its value). + + This cannot introspect *args or **args, but it safely handles *args in + Python3.5+. + + Args: + node: The ast.Call node to extract arg values from. + arg_name: The name of the argument to extract. + arg_pos: The position of the argument (in case it's passed as a positional + argument). + + Returns: + A tuple (arg_present, arg_value) containing a boolean indicating whether + the argument is present, and its value in case it is. + """ + # Check keyword args + if arg_name is not None: + for kw in node.keywords: + if kw.arg == arg_name: + return (True, kw.value) + + # Check positional args + if arg_pos is not None: + idx = 0 + for arg in node.args: + if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred): + continue # Can't parse Starred + if idx == arg_pos: + return (True, arg) + idx += 1 + + return (False, None) + + +def uses_star_args_in_call(node): + """Check if an ast.Call node uses arbitrary-length positional *args. + + This function works with the AST call node format of Python3.5+ + as well as the different AST format of earlier versions of Python. + + Args: + node: The ast.Call node to check arg values for. + + Returns: + True if the node uses starred variadic positional args or keyword args. + False if it does not. 
+ """ + if sys.version_info[:2] >= (3, 5): + # Check for an *args usage in python 3.5+ + for arg in node.args: + if isinstance(arg, ast.Starred): + return True + else: + if node.starargs: + return True + return False + + +def uses_star_kwargs_in_call(node): + """Check if an ast.Call node uses arbitrary-length **kwargs. + + This function works with the AST call node format of Python3.5+ + as well as the different AST format of earlier versions of Python. + + Args: + node: The ast.Call node to check arg values for. + + Returns: + True if the node uses starred variadic positional args or keyword args. + False if it does not. + """ + if sys.version_info[:2] >= (3, 5): + # Check for a **kwarg usage in python 3.5+ + for keyword in node.keywords: + if keyword.arg is None: + return True + else: + if node.kwargs: + return True + return False + + +def uses_star_args_or_kwargs_in_call(node): + """Check if an ast.Call node uses arbitrary-length *args or **kwargs. + + This function works with the AST call node format of Python3.5+ + as well as the different AST format of earlier versions of Python. + + Args: + node: The ast.Call node to check arg values for. + + Returns: + True if the node uses starred variadic positional args or keyword args. + False if it does not. + """ + return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node) + + +def excluded_from_module_rename(module, import_rename_spec): + """Check if this module import should not be renamed. + + Args: + module: (string) module name. + import_rename_spec: ImportRename instance. + + Returns: + True if this import should not be renamed according to the + import_rename_spec. + """ + for excluded_prefix in import_rename_spec.excluded_prefixes: + if module.startswith(excluded_prefix): + return True + return False + + +class APIChangeSpec: + """This class defines the transformations that need to happen. 
+ + This class must provide the following fields: + + * `function_keyword_renames`: maps function names to a map of old -> new + argument names + * `symbol_renames`: maps function names to new function names + * `change_to_function`: a set of function names that have changed (for + notifications) + * `function_reorders`: maps functions whose argument order has changed to the + list of arguments in the new order + * `function_warnings`: maps full names of functions to warnings that will be + printed out if the function is used. (e.g. tf.nn.convolution()) + * `function_transformers`: maps function names to custom handlers + * `module_deprecations`: maps module names to warnings that will be printed + if the module is still used after all other transformations have run + * `import_renames`: maps import name (must be a short name without '.') + to ImportRename instance. + + For an example, see `TFAPIChangeSpec`. + """ + + def preprocess(self, root_node): # pylint: disable=unused-argument + """Preprocess a parse tree. Return a preprocessed node, logs and errors.""" + return root_node, [], [] + + def clear_preprocessing(self): + """Restore this APIChangeSpec to before it preprocessed a file. + + This is needed if preprocessing a file changed any rewriting rules. + """ + pass + + +class NoUpdateSpec(APIChangeSpec): + """A specification of an API change which doesn't change anything.""" + + def __init__(self): + self.function_handle = {} + self.function_reorders = {} + self.function_keyword_renames = {} + self.symbol_renames = {} + self.function_warnings = {} + self.change_to_function = {} + self.module_deprecations = {} + self.function_transformers = {} + self.import_renames = {} + + +class _PastaEditVisitor(ast.NodeVisitor): + """AST Visitor that processes function calls. + + Updates function calls from old API version to new API version using a given + change spec. 
+ """ + + def __init__(self, api_change_spec): + self._api_change_spec = api_change_spec + self._log = [] # Holds 4-tuples: severity, line, col, msg. + self._stack = [] # Allow easy access to parents. + + # Overridden to maintain a stack of nodes to allow for parent access + def visit(self, node): + self._stack.append(node) + super(_PastaEditVisitor, self).visit(node) + self._stack.pop() + + @property + def errors(self): + return [log for log in self._log if log[0] == ERROR] + + @property + def warnings(self): + return [log for log in self._log if log[0] == WARNING] + + @property + def warnings_and_errors(self): + return [log for log in self._log if log[0] in (WARNING, ERROR)] + + @property + def info(self): + return [log for log in self._log if log[0] == INFO] + + @property + def log(self): + return self._log + + def add_log(self, severity, lineno, col, msg): + self._log.append((severity, lineno, col, msg)) + print("%s line %d:%d: %s" % (severity, lineno, col, msg)) + + def add_logs(self, logs): + """Record a log and print it. + + The log should be a tuple `(severity, lineno, col_offset, msg)`, which will + be printed and recorded. It is part of the log available in the `self.log` + property. + + Args: + logs: The logs to add. Must be a list of tuples + `(severity, lineno, col_offset, msg)`. + """ + self._log.extend(logs) + for log in logs: + print("%s line %d:%d: %s" % log) + + def _get_applicable_entries(self, transformer_field, full_name, name): + """Get all list entries indexed by name that apply to full_name or name.""" + # Transformers are indexed to full name, name, or no name + # as a performance optimization. + function_transformers = getattr(self._api_change_spec, + transformer_field, {}) + + glob_name = "*." 
+ name if name else None + transformers = [] + if full_name in function_transformers: + transformers.append(function_transformers[full_name]) + if glob_name in function_transformers: + transformers.append(function_transformers[glob_name]) + if "*" in function_transformers: + transformers.append(function_transformers["*"]) + return transformers + + def _get_applicable_dict(self, transformer_field, full_name, name): + """Get all dict entries indexed by name that apply to full_name or name.""" + # Transformers are indexed to full name, name, or no name + # as a performance optimization. + function_transformers = getattr(self._api_change_spec, + transformer_field, {}) + + glob_name = "*." + name if name else None + transformers = function_transformers.get("*", {}).copy() + transformers.update(function_transformers.get(glob_name, {})) + transformers.update(function_transformers.get(full_name, {})) + return transformers + + def _get_full_name(self, node): + """Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar". + + This is the inverse of `full_name_node`. + + Args: + node: A Node of type Attribute. + + Returns: + a '.'-delimited full-name or None if node was not Attribute or Name. + i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c". + """ + curr = node + items = [] + while not isinstance(curr, ast.Name): + if not isinstance(curr, ast.Attribute): + return None + items.append(curr.attr) + curr = curr.value + items.append(curr.id) + return ".".join(reversed(items)) + + def _maybe_add_warning(self, node, full_name): + """Adds an error to be printed about full_name at node.""" + function_warnings = self._api_change_spec.function_warnings + if full_name in function_warnings: + level, message = function_warnings[full_name] + message = message.replace("", full_name) + self.add_log(level, node.lineno, node.col_offset, + "%s requires manual check. 
%s" % (full_name, message)) + return True + else: + return False + + def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name): + """Adds a warning if full_name is a deprecated module.""" + warnings = self._api_change_spec.module_deprecations + if full_name in warnings: + level, message = warnings[full_name] + message = message.replace("", whole_name) + self.add_log(level, node.lineno, node.col_offset, + "Using member %s in deprecated module %s. %s" % (whole_name, + full_name, + message)) + return True + else: + return False + + def _maybe_add_call_warning(self, node, full_name, name): + """Print a warning when specific functions are called with selected args. + + The function _print_warning_for_function matches the full name of the called + function, e.g., tf.foo.bar(). This function matches the function name that + is called, as long as the function is an attribute. For example, + `tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`. + + Args: + node: ast.Call object + full_name: The precomputed full name of the callable, if one exists, None + otherwise. + name: The precomputed name of the callable, if one exists, None otherwise. + + Returns: + Whether an error was recorded. + """ + # Only look for *.-warnings here, the other will be handled by the Attribute + # visitor. Also, do not warn for bare functions, only if the call func is + # an attribute. + warned = False + if isinstance(node.func, ast.Attribute): + warned = self._maybe_add_warning(node, "*." 
+ name) + + # All arg warnings are handled here, since only we have the args + arg_warnings = self._get_applicable_dict("function_arg_warnings", + full_name, name) + + variadic_args = uses_star_args_or_kwargs_in_call(node) + + for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()): + present, _ = get_arg_value(node, kwarg, arg) or variadic_args + if present: + warned = True + warning_message = warning.replace("", full_name or name) + template = "%s called with %s argument, requires manual check: %s" + if variadic_args: + template = ("%s called with *args or **kwargs that may include %s, " + "requires manual check: %s") + self.add_log(level, node.lineno, node.col_offset, + template % (full_name or name, kwarg, warning_message)) + + return warned + + def _maybe_rename(self, parent, node, full_name): + """Replace node (Attribute or Name) with a node representing full_name.""" + new_name = self._api_change_spec.symbol_renames.get(full_name, None) + if new_name: + self.add_log(INFO, node.lineno, node.col_offset, + "Renamed %r to %r" % (full_name, new_name)) + new_node = full_name_node(new_name, node.ctx) + ast.copy_location(new_node, node) + pasta.ast_utils.replace_child(parent, node, new_node) + return True + else: + return False + + def _maybe_change_to_function_call(self, parent, node, full_name): + """Wraps node (typically, an Attribute or Expr) in a Call.""" + if full_name in self._api_change_spec.change_to_function: + if not isinstance(parent, ast.Call): + # ast.Call's constructor is really picky about how many arguments it + # wants, and also, it changed between Py2 and Py3. 
+ new_node = ast.Call(node, [], []) + pasta.ast_utils.replace_child(parent, node, new_node) + ast.copy_location(new_node, node) + self.add_log(INFO, node.lineno, node.col_offset, + "Changed %r to a function call" % full_name) + return True + return False + + def _maybe_add_arg_names(self, node, full_name): + """Make args into keyword args if function called full_name requires it.""" + function_reorders = self._api_change_spec.function_reorders + + if full_name in function_reorders: + if uses_star_args_in_call(node): + self.add_log(WARNING, node.lineno, node.col_offset, + "(Manual check required) upgrading %s may require " + "re-ordering the call arguments, but it was passed " + "variable-length positional *args. The upgrade " + "script cannot handle these automatically." % full_name) + + reordered = function_reorders[full_name] + new_args = [] + new_keywords = [] + idx = 0 + for arg in node.args: + if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred): + continue # Can't move Starred to keywords + keyword_arg = reordered[idx] + if keyword_arg: + new_keywords.append(ast.keyword(arg=keyword_arg, value=arg)) + else: + new_args.append(arg) + idx += 1 + + if new_keywords: + self.add_log(INFO, node.lineno, node.col_offset, + "Added keywords to args of function %r" % full_name) + node.args = new_args + node.keywords = new_keywords + (node.keywords or []) + return True + return False + + def _maybe_modify_args(self, node, full_name, name): + """Rename keyword args if the function called full_name requires it.""" + renamed_keywords = self._get_applicable_dict("function_keyword_renames", + full_name, name) + + if not renamed_keywords: + return False + + if uses_star_kwargs_in_call(node): + self.add_log(WARNING, node.lineno, node.col_offset, + "(Manual check required) upgrading %s may require " + "renaming or removing call arguments, but it was passed " + "variable-length *args or **kwargs. The upgrade " + "script cannot handle these automatically." 
% + (full_name or name)) + modified = False + new_keywords = [] + for keyword in node.keywords: + argkey = keyword.arg + if argkey in renamed_keywords: + modified = True + if renamed_keywords[argkey] is None: + lineno = getattr(keyword, "lineno", node.lineno) + col_offset = getattr(keyword, "col_offset", node.col_offset) + self.add_log(INFO, lineno, col_offset, + "Removed argument %s for function %s" % ( + argkey, full_name or name)) + else: + keyword.arg = renamed_keywords[argkey] + lineno = getattr(keyword, "lineno", node.lineno) + col_offset = getattr(keyword, "col_offset", node.col_offset) + self.add_log(INFO, lineno, col_offset, + "Renamed keyword argument for %s from %s to %s" % ( + full_name, argkey, renamed_keywords[argkey])) + new_keywords.append(keyword) + else: + new_keywords.append(keyword) + + if modified: + node.keywords = new_keywords + return modified + + def visit_Call(self, node): # pylint: disable=invalid-name + """Handle visiting a call node in the AST. + + Args: + node: Current Node + """ + assert self._stack[-1] is node + + # Get the name for this call, so we can index stuff with it. + full_name = self._get_full_name(node.func) + if full_name: + name = full_name.split(".")[-1] + elif isinstance(node.func, ast.Name): + name = node.func.id + elif isinstance(node.func, ast.Attribute): + name = node.func.attr + else: + name = None + + # Call standard transformers for this node. + # Make sure warnings come first, since args or names triggering warnings + # may be removed by the other transformations. + self._maybe_add_call_warning(node, full_name, name) + # Make all args into kwargs + self._maybe_add_arg_names(node, full_name) + # Argument name changes or deletions + self._maybe_modify_args(node, full_name, name) + + # Call transformers. These have the ability to modify the node, and if they + # do, will return the new node they created (or the same node if they just + # changed it). 
The are given the parent, but we will take care of + # integrating their changes into the parent if they return a new node. + # + # These are matched on the old name, since renaming is performed by the + # Attribute visitor, which happens later. + transformers = self._get_applicable_entries("function_transformers", + full_name, name) + + parent = self._stack[-2] + + if transformers: + if uses_star_args_or_kwargs_in_call(node): + self.add_log(WARNING, node.lineno, node.col_offset, + "(Manual check required) upgrading %s may require " + "modifying call arguments, but it was passed " + "variable-length *args or **kwargs. The upgrade " + "script cannot handle these automatically." % + (full_name or name)) + + for transformer in transformers: + logs = [] + new_node = transformer(parent, node, full_name, name, logs) + self.add_logs(logs) + if new_node and new_node is not node: + pasta.ast_utils.replace_child(parent, node, new_node) + node = new_node + self._stack[-1] = node + + self.generic_visit(node) + + def visit_Attribute(self, node): # pylint: disable=invalid-name + """Handle bare Attributes i.e. [tf.foo, tf.bar].""" + assert self._stack[-1] is node + + full_name = self._get_full_name(node) + if full_name: + parent = self._stack[-2] + + # Make sure the warning comes first, otherwise the name may have changed + self._maybe_add_warning(node, full_name) + + # Once we did a modification, node is invalid and not worth inspecting + # further. Also, we only perform modifications for simple nodes, so + # There'd be no point in descending further. + if self._maybe_rename(parent, node, full_name): + return + if self._maybe_change_to_function_call(parent, node, full_name): + return + + # The isinstance check is enough -- a bare Attribute is never root. 
+ i = 2 + while isinstance(self._stack[-i], ast.Attribute): + i += 1 + whole_name = pasta.dump(self._stack[-(i-1)]) + + self._maybe_add_module_deprecation_warning(node, full_name, whole_name) + + self.generic_visit(node) + + def visit_Import(self, node): # pylint: disable=invalid-name + """Handle visiting an import node in the AST. + + Args: + node: Current Node + """ + new_aliases = [] + import_updated = False + import_renames = getattr(self._api_change_spec, "import_renames", {}) + max_submodule_depth = getattr(self._api_change_spec, "max_submodule_depth", + 1) + inserts_after_imports = getattr(self._api_change_spec, + "inserts_after_imports", {}) + + # This loop processes imports in the format + # import foo as f, bar as b + for import_alias in node.names: + all_import_components = import_alias.name.split(".") + # Look for rename, starting with longest import levels. + found_update = False + for i in reversed(list(range(1, max_submodule_depth + 1))): + import_component = all_import_components[0] + for j in range(1, min(i, len(all_import_components))): + import_component += "." + all_import_components[j] + import_rename_spec = import_renames.get(import_component, None) + + if not import_rename_spec or excluded_from_module_rename( + import_alias.name, import_rename_spec): + continue + + new_name = ( + import_rename_spec.new_name + + import_alias.name[len(import_component):]) + + # If current import is + # import foo + # then new import should preserve imported name: + # import new_foo as foo + # This happens when module has just one component. + new_asname = import_alias.asname + if not new_asname and "." not in import_alias.name: + new_asname = import_alias.name + + new_alias = ast.alias(name=new_name, asname=new_asname) + new_aliases.append(new_alias) + import_updated = True + found_update = True + + # Insert any followup lines that should happen after this import. 
+ full_import = (import_alias.name, import_alias.asname) + insert_offset = 1 + for line_to_insert in inserts_after_imports.get(full_import, []): + assert self._stack[-1] is node + parent = self._stack[-2] + + new_line_node = pasta.parse(line_to_insert) + ast.copy_location(new_line_node, node) + parent.body.insert( + parent.body.index(node) + insert_offset, new_line_node) + insert_offset += 1 + + # Insert a newline after the import if necessary + old_suffix = pasta.base.formatting.get(node, "suffix") + if old_suffix is None: + old_suffix = os.linesep + if os.linesep not in old_suffix: + pasta.base.formatting.set(node, "suffix", old_suffix + os.linesep) + + # Apply indentation to new node. + pasta.base.formatting.set(new_line_node, "prefix", + pasta.base.formatting.get(node, "prefix")) + pasta.base.formatting.set(new_line_node, "suffix", os.linesep) + self.add_log( + INFO, node.lineno, node.col_offset, + "Adding `%s` after import of %s" % + (new_line_node, import_alias.name)) + # Find one match, break + if found_update: + break + # No rename is found for all levels + if not found_update: + new_aliases.append(import_alias) # no change needed + + # Replace the node if at least one import needs to be updated. + if import_updated: + assert self._stack[-1] is node + parent = self._stack[-2] + + new_node = ast.Import(new_aliases) + ast.copy_location(new_node, node) + pasta.ast_utils.replace_child(parent, node, new_node) + self.add_log( + INFO, node.lineno, node.col_offset, + "Changed import from %r to %r." % + (pasta.dump(node), pasta.dump(new_node))) + + self.generic_visit(node) + + def visit_ImportFrom(self, node): # pylint: disable=invalid-name + """Handle visiting an import-from node in the AST. + + Args: + node: Current Node + """ + if not node.module: + self.generic_visit(node) + return + + from_import = node.module + + # Look for rename based on first component of from-import. + # i.e. based on foo in foo.bar. 
+ from_import_first_component = from_import.split(".")[0] + import_renames = getattr(self._api_change_spec, "import_renames", {}) + import_rename_spec = import_renames.get(from_import_first_component, None) + if not import_rename_spec: + self.generic_visit(node) + return + + # Split module aliases into the ones that require import update + # and those that don't. For e.g. if we want to rename "a" to "b" + # unless we import "a.c" in the following: + # from a import c, d + # we want to update import for "d" but not for "c". + updated_aliases = [] + same_aliases = [] + for import_alias in node.names: + full_module_name = "%s.%s" % (from_import, import_alias.name) + if excluded_from_module_rename(full_module_name, import_rename_spec): + same_aliases.append(import_alias) + else: + updated_aliases.append(import_alias) + + if not updated_aliases: + self.generic_visit(node) + return + + assert self._stack[-1] is node + parent = self._stack[-2] + + # Replace first component of from-import with new name. + new_from_import = ( + import_rename_spec.new_name + + from_import[len(from_import_first_component):]) + updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level) + ast.copy_location(updated_node, node) + pasta.ast_utils.replace_child(parent, node, updated_node) + + # If some imports had to stay the same, add another import for them. + additional_import_log = "" + if same_aliases: + same_node = ast.ImportFrom(from_import, same_aliases, node.level, + col_offset=node.col_offset, lineno=node.lineno) + ast.copy_location(same_node, node) + parent.body.insert(parent.body.index(updated_node), same_node) + # Apply indentation to new node. + pasta.base.formatting.set( + same_node, "prefix", + pasta.base.formatting.get(updated_node, "prefix")) + additional_import_log = " and %r" % pasta.dump(same_node) + + self.add_log( + INFO, node.lineno, node.col_offset, + "Changed import from %r to %r%s." 
class AnalysisResult:
  """Represents an analysis result and how it should be logged.

  Subclasses must provide the following fields:

  * `log_level`: The log level to which this detection should be logged
  * `log_message`: The message that should be logged for this detection

  For an example, see `VersionedTFImport`.
  """


class APIAnalysisSpec:
  """Defines how `AnalysisResult`s should be generated.

  It specifies how to map imports and symbols to `AnalysisResult`s.

  Subclasses must provide the following fields:

  * `symbols_to_detect`: maps function names to `AnalysisResult`s
  * `imports_to_detect`: maps imports represented as
    (full module name, alias) tuples to `AnalysisResult`s

  For an example, see `TFAPIImportAnalysisSpec`.
  """


class PastaAnalyzeVisitor(_PastaEditVisitor):
  """AST Visitor that looks for specific API usage without editing anything.

  This is used before any rewriting is done to detect if any symbols are
  used that require changing imports or disabling rewriting altogether.
  """

  def __init__(self, api_analysis_spec):
    super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())
    self._api_analysis_spec = api_analysis_spec
    self._results = []  # AnalysisResult objects collected during the walk.

  @property
  def results(self):
    return self._results

  def add_result(self, analysis_result):
    self._results.append(analysis_result)

  def visit_Attribute(self, node):  # pylint: disable=invalid-name
    """Flag bare Attributes (i.e. tf.foo) that match symbols_to_detect."""
    full_name = self._get_full_name(node)
    if full_name:
      detection = self._api_analysis_spec.symbols_to_detect.get(
          full_name, None)
      if detection:
        self.add_result(detection)
        self.add_log(
            detection.log_level, node.lineno, node.col_offset,
            detection.log_message)

    self.generic_visit(node)
[tf.foo, tf.bar].""" + full_name = self._get_full_name(node) + if full_name: + detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None) + if detection: + self.add_result(detection) + self.add_log( + detection.log_level, node.lineno, node.col_offset, + detection.log_message) + + self.generic_visit(node) + + def visit_Import(self, node): # pylint: disable=invalid-name + """Handle visiting an import node in the AST. + + Args: + node: Current Node + """ + for import_alias in node.names: + # Detect based on full import name and alias) + full_import = (import_alias.name, import_alias.asname) + detection = (self._api_analysis_spec + .imports_to_detect.get(full_import, None)) + if detection: + self.add_result(detection) + self.add_log( + detection.log_level, node.lineno, node.col_offset, + detection.log_message) + + self.generic_visit(node) + + def visit_ImportFrom(self, node): # pylint: disable=invalid-name + """Handle visiting an import-from node in the AST. + + Args: + node: Current Node + """ + if not node.module: + self.generic_visit(node) + return + + from_import = node.module + + for import_alias in node.names: + # Detect based on full import name(to & as) + full_module_name = "%s.%s" % (from_import, import_alias.name) + full_import = (full_module_name, import_alias.asname) + detection = (self._api_analysis_spec + .imports_to_detect.get(full_import, None)) + if detection: + self.add_result(detection) + self.add_log( + detection.log_level, node.lineno, node.col_offset, + detection.log_message) + + self.generic_visit(node) + + +class ASTCodeUpgrader: + """Handles upgrading a set of Python files using a given API change spec.""" + + def __init__(self, api_change_spec): + if not isinstance(api_change_spec, APIChangeSpec): + raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" % + type(api_change_spec)) + self._api_change_spec = api_change_spec + + def process_file(self, + in_filename, + out_filename, + 
no_change_to_outfile_on_error=False): + """Process the given python file for incompatible changes. + + Args: + in_filename: filename to parse + out_filename: output file to write to + no_change_to_outfile_on_error: not modify the output file on errors + Returns: + A tuple representing number of files processed, log of actions, errors + """ + + # Write to a temporary file, just in case we are doing an implace modify. + # pylint: disable=g-backslash-continuation + with open(in_filename, "r") as in_file, \ + tempfile.NamedTemporaryFile("w", delete=False) as temp_file: + ret = self.process_opened_file(in_filename, in_file, out_filename, + temp_file) + # pylint: enable=g-backslash-continuation + + if no_change_to_outfile_on_error and ret[0] == 0: + os.remove(temp_file.name) + else: + shutil.move(temp_file.name, out_filename) + return ret + + def format_log(self, log, in_filename): + log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3]) + if in_filename: + return in_filename + ":" + log_string + else: + return log_string + + def update_string_pasta(self, text, in_filename): + """Updates a file using pasta.""" + try: + t = pasta.parse(text) + except (SyntaxError, ValueError, TypeError): + log = ["ERROR: Failed to parse.\n" + traceback.format_exc()] + return 0, "", log, [] + + t, preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t) + + visitor = _PastaEditVisitor(self._api_change_spec) + visitor.visit(t) + + self._api_change_spec.clear_preprocessing() + + logs = [self.format_log(log, None) for log in (preprocess_logs + + visitor.log)] + errors = [self.format_log(error, in_filename) + for error in (preprocess_errors + + visitor.warnings_and_errors)] + return 1, pasta.dump(t), logs, errors + + def _format_log(self, log, in_filename, out_filename): + text = "-" * 80 + "\n" + text += "Processing file %r\n outputting to %r\n" % (in_filename, + out_filename) + text += "-" * 80 + "\n\n" + text += "\n".join(log) + "\n" + text += "-" * 80 + "\n\n" + 
return text + + def process_opened_file(self, in_filename, in_file, out_filename, out_file): + """Process the given python file for incompatible changes. + + This function is split out to facilitate StringIO testing from + tf_upgrade_test.py. + + Args: + in_filename: filename to parse + in_file: opened file (or StringIO) + out_filename: output file to write to + out_file: opened file (or StringIO) + Returns: + A tuple representing number of files processed, log of actions, errors + """ + lines = in_file.readlines() + processed_file, new_file_content, log, process_errors = ( + self.update_string_pasta("".join(lines), in_filename)) + + if out_file and processed_file: + out_file.write(new_file_content) + + return (processed_file, + self._format_log(log, in_filename, out_filename), + process_errors) + + def process_tree(self, root_directory, output_root_directory, + copy_other_files): + """Processes upgrades on an entire tree of python files in place. + + Note that only Python files. If you have custom code in other languages, + you will need to manually upgrade those. + + Args: + root_directory: Directory to walk and process. + output_root_directory: Directory to use as base. + copy_other_files: Copy files that are not touched by this converter. + + Returns: + A tuple of files processed, the report string for all files, and a dict + mapping filenames to errors encountered in that file. + """ + + if output_root_directory == root_directory: + return self.process_tree_inplace(root_directory) + + # make sure output directory doesn't exist + if output_root_directory and os.path.exists(output_root_directory): + print("Output directory %r must not already exist." 
% + (output_root_directory)) + sys.exit(1) + + # make sure output directory does not overlap with root_directory + norm_root = os.path.split(os.path.normpath(root_directory)) + norm_output = os.path.split(os.path.normpath(output_root_directory)) + if norm_root == norm_output: + print("Output directory %r same as input directory %r" % + (root_directory, output_root_directory)) + sys.exit(1) + + # Collect list of files to process (we do this to correctly handle if the + # user puts the output directory in some sub directory of the input dir) + files_to_process = [] + files_to_copy = [] + for dir_name, _, file_list in os.walk(root_directory): + py_files = [f for f in file_list if f.endswith(".py")] + copy_files = [f for f in file_list if not f.endswith(".py")] + for filename in py_files: + fullpath = os.path.join(dir_name, filename) + fullpath_output = os.path.join(output_root_directory, + os.path.relpath(fullpath, + root_directory)) + files_to_process.append((fullpath, fullpath_output)) + if copy_other_files: + for filename in copy_files: + fullpath = os.path.join(dir_name, filename) + fullpath_output = os.path.join(output_root_directory, + os.path.relpath( + fullpath, root_directory)) + files_to_copy.append((fullpath, fullpath_output)) + + file_count = 0 + tree_errors = {} + report = "" + report += ("=" * 80) + "\n" + report += "Input tree: %r\n" % root_directory + report += ("=" * 80) + "\n" + + for input_path, output_path in files_to_process: + output_directory = os.path.dirname(output_path) + if not os.path.isdir(output_directory): + os.makedirs(output_directory) + + if os.path.islink(input_path): + link_target = os.readlink(input_path) + link_target_output = os.path.join( + output_root_directory, os.path.relpath(link_target, root_directory)) + if (link_target, link_target_output) in files_to_process: + # Create a link to the new location of the target file + os.symlink(link_target_output, output_path) + else: + report += "Copying symlink %s without modifying its 
target %s" % ( + input_path, link_target) + os.symlink(link_target, output_path) + continue + + file_count += 1 + _, l_report, l_errors = self.process_file(input_path, output_path) + tree_errors[input_path] = l_errors + report += l_report + + for input_path, output_path in files_to_copy: + output_directory = os.path.dirname(output_path) + if not os.path.isdir(output_directory): + os.makedirs(output_directory) + shutil.copy(input_path, output_path) + return file_count, report, tree_errors + + def process_tree_inplace(self, root_directory): + """Process a directory of python files in place.""" + files_to_process = [] + for dir_name, _, file_list in os.walk(root_directory): + py_files = [ + os.path.join(dir_name, f) for f in file_list if f.endswith(".py") + ] + files_to_process += py_files + + file_count = 0 + tree_errors = {} + report = "" + report += ("=" * 80) + "\n" + report += "Input tree: %r\n" % root_directory + report += ("=" * 80) + "\n" + + for path in files_to_process: + if os.path.islink(path): + report += "Skipping symlink %s.\n" % path + continue + file_count += 1 + _, l_report, l_errors = self.process_file(path, path) + tree_errors[path] = l_errors + report += l_report + + return file_count, report, tree_errors diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/ipynb.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/ipynb.py new file mode 100644 index 0000000000000000000000000000000000000000..c371baa73d8b1e667c83fb9f19b6759b44ef538b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/ipynb.py @@ -0,0 +1,170 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""A module to support operations on ipynb files"""

import collections
import copy
import json
import re
import shutil
import tempfile

CodeLine = collections.namedtuple("CodeLine", ["cell_number", "code"])


def is_python(cell):
  """Return True for non-empty code cells that are not %%-cell magics."""
  return (cell["cell_type"] == "code"        # code cells only
          and cell["source"]                 # non-empty cells
          and not cell["source"][0].startswith("%%"))  # multiline eg: %%bash


def process_file(in_filename, out_filename, upgrader):
  """Upgrade an .ipynb notebook by running `upgrader` over its code cells.

  Mirrors the flow of `upgrader.process_file` for plain .py files: code
  lines are extracted, converted as one blob, then written back cell by
  cell into a copy of the notebook.

  Raises:
    SyntaxError: if the extracted code could not be processed.
  """
  print("Extracting code lines from original notebook")
  raw_code, notebook = _get_code(in_filename)
  raw_lines = [code_line.code for code_line in raw_code]

  with tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
    processed_file, new_file_content, log, process_errors = (
        upgrader.update_string_pasta("\n".join(raw_lines), in_filename))

    if temp_file and processed_file:
      new_notebook = _update_notebook(notebook, raw_code,
                                      new_file_content.split("\n"))
      json.dump(new_notebook, temp_file)
    else:
      raise SyntaxError(
          "Was not able to process the file: \n%s\n" % "".join(log))

    files_processed = processed_file
    report_text = upgrader._format_log(log, in_filename, out_filename)  # pylint: disable=protected-access
    errors = process_errors

  shutil.move(temp_file.name, out_filename)

  return files_processed, report_text, errors
def skip_magic(code_line, magic_list):
  """Check whether a line is jupyter "magic" rather than Python code.

  Args:
    code_line: A line of Python code
    magic_list: A list of jupyter "magic" prefix strings

  Returns:
    True if the line starts with one of the magic prefixes (i.e. it is a
    jupyter "magic" line, not a Python line), False otherwise.

  >>> skip_magic('!ls -laF', ['%', '!', '?'])
  True
  """
  for magic in magic_list:
    if code_line.startswith(magic):
      return True
  return False


def check_line_split(code_line):
  r"""Check whether a line was split with `\`.

  Args:
    code_line: A line of Python code

  Returns:
    A truthy re.Match if the line ends with a `\` continuation, else None.

  >>> bool(check_line_split("!gcloud ml-engine models create ${MODEL} \\\n"))
  True
  """
  return re.search(r"\\\s*\n$", code_line)


def _get_code(input_file):
  """Load an ipynb file and return (list of CodeLine, notebook dict)."""
  raw_code = []

  with open(input_file) as in_file:
    notebook = json.load(in_file)

  cell_index = 0
  for cell in notebook["cells"]:
    if not is_python(cell):
      continue
    cell_lines = cell["source"]

    is_line_split = False
    for line_idx, code_line in enumerate(cell_lines):
      # Jupyter cells may contain more than Python code (line magics,
      # shell escapes). Comment those lines out for upgrade time; the
      # marker is stripped again in _update_notebook.
      if skip_magic(code_line, ["%", "!", "?"]) or is_line_split:
        code_line = "###!!!" + code_line
        # If this encoded line itself ends with `\`, the next line is a
        # continuation of it and must be commented out as well.
        # NOTE(review): the original text re-evaluated check_line_split a
        # second time on the same line, a no-op that has been dropped.
        is_line_split = check_line_split(code_line)

      # Sometimes people leave \n at the end of a cell; encode it so the
      # converted diff stays minimal (decoded in _update_notebook).
      if (line_idx == len(cell_lines) - 1) and code_line.endswith("\n"):
        code_line = code_line.replace("\n", "###===")

      # A line may also start with `\n` and have content after it; the
      # same encoding covers that case.
      raw_code.append(
          CodeLine(cell_index,
                   code_line.rstrip().replace("\n", "###===")))

    cell_index += 1

  return raw_code, notebook
 + code_line
+
+        # If this line ends with `\`, the next line is its continuation
+        is_line_split = check_line_split(code_line)
+
+        if is_line_split:
+          is_line_split = check_line_split(code_line)  # NOTE(review): re-checks the same line; looks redundant — confirm intent
+
+        # Sometimes, people leave \n at the end of cell
+        # in order to migrate only related things, and make the diff
+        # the smallest -> here is another hack
+        if (line_idx == len(cell_lines) - 1) and code_line.endswith("\n"):
+          code_line = code_line.replace("\n", "###===")
+
+        # Sometimes a line starts with `\n` with content after it;
+        # encode embedded newlines so each CodeLine stays single-line
+        raw_code.append(
+            CodeLine(cell_index,
+                     code_line.rstrip().replace("\n", "###===")))
+
+      cell_index += 1
+
+  return raw_code, notebook
+
+
+def _update_notebook(original_notebook, original_raw_lines, updated_code_lines):
+  """Updates notebook, once migration is done."""
+
+  new_notebook = copy.deepcopy(original_notebook)
+
+  # validate that the number of lines is the same
+  assert len(original_raw_lines) == len(updated_code_lines), \
+      ("The lengths of input and converted files are not the same: "
+       "{} vs {}".format(len(original_raw_lines), len(updated_code_lines)))
+
+  code_cell_idx = 0
+  for cell in new_notebook["cells"]:
+    if not is_python(cell):
+      continue
+
+    # Lines of the (pre-upgrade) flat code that belong to this cell
+    applicable_lines = [
+        idx for idx, code_line in enumerate(original_raw_lines)
+        if code_line.cell_number == code_cell_idx
+    ]
+
+    new_code = [updated_code_lines[idx] for idx in applicable_lines]
+
+    # Undo the "###!!!" / "###===" encodings added by _get_code
+    cell["source"] = "\n".join(new_code).replace("###!!!", "").replace(
+        "###===", "\n")
+    code_cell_idx += 1
+
+  return new_notebook
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/module_deprecations_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/module_deprecations_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c647b3653c23ee21bef83a7f5c34c8a60245f29
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/module_deprecations_v2.py
@@ -0,0
+1,66 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Module deprecation warnings for TensorFlow 2.0."""
+
+from tensorflow.tools.compatibility import ast_edits
+
+
+_CONTRIB_WARNING = (
+    ast_edits.ERROR,
+    " cannot be converted automatically. tf.contrib will not"
+    " be distributed with TensorFlow 2.0, please consider an alternative in"
+    " non-contrib TensorFlow, a community-maintained repository such as "
+    "tensorflow/addons, or fork the required code.")
+
+_FLAGS_WARNING = (
+    ast_edits.ERROR,
+    "tf.flags and tf.app.flags have been removed, please use the argparse or "
+    "absl modules if you need command line parsing.")
+
+_CONTRIB_CUDNN_RNN_WARNING = (
+    ast_edits.WARNING,
+    "(Manual edit required) tf.contrib.cudnn_rnn.* has been deprecated, "
+    "and the CuDNN kernel has been integrated with "
+    "tf.keras.layers.LSTM/GRU in TensorFlow 2.0. Please check the new API "
+    "and use that instead."
+)
+
+_CONTRIB_RNN_WARNING = (
+    ast_edits.WARNING,
+    "(Manual edit required) tf.contrib.rnn.* has been deprecated, and "
+    "widely used cells/functions will be moved to tensorflow/addons "
+    "repository. Please check it there and file Github issues if necessary."
+)
+
+_CONTRIB_DIST_STRAT_WARNING = (
+    ast_edits.WARNING,
+    "(Manual edit required) tf.contrib.distribute.* have been migrated to "
+    "tf.distribute.*. Please check out the new module for updated APIs.")
+
+_CONTRIB_SEQ2SEQ_WARNING = (
+    ast_edits.WARNING,
+    "(Manual edit required) tf.contrib.seq2seq.* have been migrated to "
+    "`tfa.seq2seq.*` in TensorFlow Addons. Please see "
+    "https://github.com/tensorflow/addons for more info.")
+
+MODULE_DEPRECATIONS = {  # maps a v1 module path to its (severity, message) tuple
+    "tf.contrib": _CONTRIB_WARNING,
+    "tf.contrib.cudnn_rnn": _CONTRIB_CUDNN_RNN_WARNING,
+    "tf.contrib.rnn": _CONTRIB_RNN_WARNING,
+    "tf.flags": _FLAGS_WARNING,
+    "tf.app.flags": _FLAGS_WARNING,
+    "tf.contrib.distribute": _CONTRIB_DIST_STRAT_WARNING,
+    "tf.contrib.seq2seq": _CONTRIB_SEQ2SEQ_WARNING
+}
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/renames_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/renames_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..debae39beb44b889e05e7849d7e4ea784fcd646c
--- /dev/null
+++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/renames_v2.py
@@ -0,0 +1,1511 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""List of renames to apply when converting from TF 1.0 to TF 2.0.
+ +THIS FILE IS AUTOGENERATED: To update, please run: + bazel run tensorflow/tools/compatibility/update:generate_v2_renames_map +This file should be updated whenever endpoints are deprecated. +""" +renames = { + 'tf.AUTO_REUSE': + 'tf.compat.v1.AUTO_REUSE', + 'tf.AttrValue': + 'tf.compat.v1.AttrValue', + 'tf.COMPILER_VERSION': + 'tf.version.COMPILER_VERSION', + 'tf.CXX11_ABI_FLAG': + 'tf.sysconfig.CXX11_ABI_FLAG', + 'tf.CXX_VERSION': + 'tf.sysconfig.CXX_VERSION', + 'tf.ConditionalAccumulator': + 'tf.compat.v1.ConditionalAccumulator', + 'tf.ConditionalAccumulatorBase': + 'tf.compat.v1.ConditionalAccumulatorBase', + 'tf.ConfigProto': + 'tf.compat.v1.ConfigProto', + 'tf.Dimension': + 'tf.compat.v1.Dimension', + 'tf.Event': + 'tf.compat.v1.Event', + 'tf.FIFOQueue': + 'tf.queue.FIFOQueue', + 'tf.FixedLenFeature': + 'tf.io.FixedLenFeature', + 'tf.FixedLenSequenceFeature': + 'tf.io.FixedLenSequenceFeature', + 'tf.FixedLengthRecordReader': + 'tf.compat.v1.FixedLengthRecordReader', + 'tf.GIT_VERSION': + 'tf.version.GIT_VERSION', + 'tf.GPUOptions': + 'tf.compat.v1.GPUOptions', + 'tf.GRAPH_DEF_VERSION': + 'tf.version.GRAPH_DEF_VERSION', + 'tf.GRAPH_DEF_VERSION_MIN_CONSUMER': + 'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER', + 'tf.GRAPH_DEF_VERSION_MIN_PRODUCER': + 'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER', + 'tf.GraphDef': + 'tf.compat.v1.GraphDef', + 'tf.GraphKeys': + 'tf.compat.v1.GraphKeys', + 'tf.GraphOptions': + 'tf.compat.v1.GraphOptions', + 'tf.HistogramProto': + 'tf.compat.v1.HistogramProto', + 'tf.IdentityReader': + 'tf.compat.v1.IdentityReader', + 'tf.InteractiveSession': + 'tf.compat.v1.InteractiveSession', + 'tf.LMDBReader': + 'tf.compat.v1.LMDBReader', + 'tf.LogMessage': + 'tf.compat.v1.LogMessage', + 'tf.MONOLITHIC_BUILD': + 'tf.sysconfig.MONOLITHIC_BUILD', + 'tf.MetaGraphDef': + 'tf.compat.v1.MetaGraphDef', + 'tf.NameAttrList': + 'tf.compat.v1.NameAttrList', + 'tf.NoGradient': + 'tf.no_gradient', + 'tf.NodeDef': + 'tf.compat.v1.NodeDef', + 
'tf.NotDifferentiable': + 'tf.no_gradient', + 'tf.OpError': + 'tf.errors.OpError', + 'tf.OptimizerOptions': + 'tf.compat.v1.OptimizerOptions', + 'tf.PaddingFIFOQueue': + 'tf.queue.PaddingFIFOQueue', + 'tf.Print': + 'tf.compat.v1.Print', + 'tf.PriorityQueue': + 'tf.queue.PriorityQueue', + 'tf.QUANTIZED_DTYPES': + 'tf.dtypes.QUANTIZED_DTYPES', + 'tf.QueueBase': + 'tf.queue.QueueBase', + 'tf.RandomShuffleQueue': + 'tf.queue.RandomShuffleQueue', + 'tf.ReaderBase': + 'tf.compat.v1.ReaderBase', + 'tf.RunMetadata': + 'tf.compat.v1.RunMetadata', + 'tf.RunOptions': + 'tf.compat.v1.RunOptions', + 'tf.Session': + 'tf.compat.v1.Session', + 'tf.SessionLog': + 'tf.compat.v1.SessionLog', + 'tf.SparseConditionalAccumulator': + 'tf.compat.v1.SparseConditionalAccumulator', + 'tf.SparseFeature': + 'tf.io.SparseFeature', + 'tf.SparseTensorValue': + 'tf.compat.v1.SparseTensorValue', + 'tf.Summary': + 'tf.compat.v1.Summary', + 'tf.SummaryMetadata': + 'tf.compat.v1.SummaryMetadata', + 'tf.TFRecordReader': + 'tf.compat.v1.TFRecordReader', + 'tf.TensorInfo': + 'tf.compat.v1.TensorInfo', + 'tf.TextLineReader': + 'tf.compat.v1.TextLineReader', + 'tf.VERSION': + 'tf.version.VERSION', + 'tf.VarLenFeature': + 'tf.io.VarLenFeature', + 'tf.VariableScope': + 'tf.compat.v1.VariableScope', + 'tf.WholeFileReader': + 'tf.compat.v1.WholeFileReader', + 'tf.accumulate_n': + 'tf.math.accumulate_n', + 'tf.add_check_numerics_ops': + 'tf.compat.v1.add_check_numerics_ops', + 'tf.add_to_collection': + 'tf.compat.v1.add_to_collection', + 'tf.add_to_collections': + 'tf.compat.v1.add_to_collections', + 'tf.all_variables': + 'tf.compat.v1.all_variables', + 'tf.angle': + 'tf.math.angle', + 'tf.app.run': + 'tf.compat.v1.app.run', + 'tf.assert_proper_iterable': + 'tf.debugging.assert_proper_iterable', + 'tf.assert_same_float_dtype': + 'tf.debugging.assert_same_float_dtype', + 'tf.assign': + 'tf.compat.v1.assign', + 'tf.assign_add': + 'tf.compat.v1.assign_add', + 'tf.assign_sub': + 'tf.compat.v1.assign_sub', + 
'tf.batch_scatter_update': + 'tf.compat.v1.batch_scatter_update', + 'tf.betainc': + 'tf.math.betainc', + 'tf.ceil': + 'tf.math.ceil', + 'tf.check_numerics': + 'tf.debugging.check_numerics', + 'tf.cholesky': + 'tf.linalg.cholesky', + 'tf.cholesky_solve': + 'tf.linalg.cholesky_solve', + 'tf.clip_by_average_norm': + 'tf.compat.v1.clip_by_average_norm', + 'tf.colocate_with': + 'tf.compat.v1.colocate_with', + 'tf.conj': + 'tf.math.conj', + 'tf.container': + 'tf.compat.v1.container', + 'tf.control_flow_v2_enabled': + 'tf.compat.v1.control_flow_v2_enabled', + 'tf.convert_to_tensor_or_indexed_slices': + 'tf.compat.v1.convert_to_tensor_or_indexed_slices', + 'tf.convert_to_tensor_or_sparse_tensor': + 'tf.compat.v1.convert_to_tensor_or_sparse_tensor', + 'tf.count_up_to': + 'tf.compat.v1.count_up_to', + 'tf.create_partitioned_variables': + 'tf.compat.v1.create_partitioned_variables', + 'tf.cross': + 'tf.linalg.cross', + 'tf.cumprod': + 'tf.math.cumprod', + 'tf.data.get_output_classes': + 'tf.compat.v1.data.get_output_classes', + 'tf.data.get_output_shapes': + 'tf.compat.v1.data.get_output_shapes', + 'tf.data.get_output_types': + 'tf.compat.v1.data.get_output_types', + 'tf.data.make_initializable_iterator': + 'tf.compat.v1.data.make_initializable_iterator', + 'tf.data.make_one_shot_iterator': + 'tf.compat.v1.data.make_one_shot_iterator', + 'tf.debugging.is_finite': + 'tf.math.is_finite', + 'tf.debugging.is_inf': + 'tf.math.is_inf', + 'tf.debugging.is_nan': + 'tf.math.is_nan', + 'tf.debugging.is_non_decreasing': + 'tf.math.is_non_decreasing', + 'tf.debugging.is_strictly_increasing': + 'tf.math.is_strictly_increasing', + 'tf.decode_base64': + 'tf.io.decode_base64', + 'tf.decode_compressed': + 'tf.io.decode_compressed', + 'tf.decode_json_example': + 'tf.io.decode_json_example', + 'tf.delete_session_tensor': + 'tf.compat.v1.delete_session_tensor', + 'tf.depth_to_space': + 'tf.nn.depth_to_space', + 'tf.dequantize': + 'tf.quantization.dequantize', + 'tf.deserialize_many_sparse': + 
'tf.io.deserialize_many_sparse', + 'tf.diag': + 'tf.linalg.tensor_diag', + 'tf.diag_part': + 'tf.linalg.tensor_diag_part', + 'tf.digamma': + 'tf.math.digamma', + 'tf.dimension_at_index': + 'tf.compat.dimension_at_index', + 'tf.dimension_value': + 'tf.compat.dimension_value', + 'tf.disable_control_flow_v2': + 'tf.compat.v1.disable_control_flow_v2', + 'tf.disable_eager_execution': + 'tf.compat.v1.disable_eager_execution', + 'tf.disable_resource_variables': + 'tf.compat.v1.disable_resource_variables', + 'tf.disable_tensor_equality': + 'tf.compat.v1.disable_tensor_equality', + 'tf.disable_v2_behavior': + 'tf.compat.v1.disable_v2_behavior', + 'tf.disable_v2_tensorshape': + 'tf.compat.v1.disable_v2_tensorshape', + 'tf.distribute.get_loss_reduction': + 'tf.compat.v1.distribute.get_loss_reduction', + 'tf.distributions.Bernoulli': + 'tf.compat.v1.distributions.Bernoulli', + 'tf.distributions.Beta': + 'tf.compat.v1.distributions.Beta', + 'tf.distributions.Categorical': + 'tf.compat.v1.distributions.Categorical', + 'tf.distributions.Dirichlet': + 'tf.compat.v1.distributions.Dirichlet', + 'tf.distributions.DirichletMultinomial': + 'tf.compat.v1.distributions.DirichletMultinomial', + 'tf.distributions.Distribution': + 'tf.compat.v1.distributions.Distribution', + 'tf.distributions.Exponential': + 'tf.compat.v1.distributions.Exponential', + 'tf.distributions.FULLY_REPARAMETERIZED': + 'tf.compat.v1.distributions.FULLY_REPARAMETERIZED', + 'tf.distributions.Gamma': + 'tf.compat.v1.distributions.Gamma', + 'tf.distributions.Laplace': + 'tf.compat.v1.distributions.Laplace', + 'tf.distributions.Multinomial': + 'tf.compat.v1.distributions.Multinomial', + 'tf.distributions.NOT_REPARAMETERIZED': + 'tf.compat.v1.distributions.NOT_REPARAMETERIZED', + 'tf.distributions.Normal': + 'tf.compat.v1.distributions.Normal', + 'tf.distributions.RegisterKL': + 'tf.compat.v1.distributions.RegisterKL', + 'tf.distributions.ReparameterizationType': + 'tf.compat.v1.distributions.ReparameterizationType', + 
'tf.distributions.StudentT': + 'tf.compat.v1.distributions.StudentT', + 'tf.distributions.Uniform': + 'tf.compat.v1.distributions.Uniform', + 'tf.distributions.kl_divergence': + 'tf.compat.v1.distributions.kl_divergence', + 'tf.div': + 'tf.compat.v1.div', + 'tf.div_no_nan': + 'tf.math.divide_no_nan', + 'tf.dtypes.as_string': + 'tf.strings.as_string', + 'tf.enable_control_flow_v2': + 'tf.compat.v1.enable_control_flow_v2', + 'tf.enable_eager_execution': + 'tf.compat.v1.enable_eager_execution', + 'tf.enable_resource_variables': + 'tf.compat.v1.enable_resource_variables', + 'tf.enable_tensor_equality': + 'tf.compat.v1.enable_tensor_equality', + 'tf.enable_v2_behavior': + 'tf.compat.v1.enable_v2_behavior', + 'tf.enable_v2_tensorshape': + 'tf.compat.v1.enable_v2_tensorshape', + 'tf.encode_base64': + 'tf.io.encode_base64', + 'tf.erf': + 'tf.math.erf', + 'tf.erfc': + 'tf.math.erfc', + 'tf.executing_eagerly_outside_functions': + 'tf.compat.v1.executing_eagerly_outside_functions', + 'tf.experimental.output_all_intermediates': + 'tf.compat.v1.experimental.output_all_intermediates', + 'tf.expm1': + 'tf.math.expm1', + 'tf.fake_quant_with_min_max_args': + 'tf.quantization.fake_quant_with_min_max_args', + 'tf.fake_quant_with_min_max_args_gradient': + 'tf.quantization.fake_quant_with_min_max_args_gradient', + 'tf.fake_quant_with_min_max_vars': + 'tf.quantization.fake_quant_with_min_max_vars', + 'tf.fake_quant_with_min_max_vars_gradient': + 'tf.quantization.fake_quant_with_min_max_vars_gradient', + 'tf.fake_quant_with_min_max_vars_per_channel': + 'tf.quantization.fake_quant_with_min_max_vars_per_channel', + 'tf.fake_quant_with_min_max_vars_per_channel_gradient': + 'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient', + 'tf.feature_column.input_layer': + 'tf.compat.v1.feature_column.input_layer', + 'tf.feature_column.linear_model': + 'tf.compat.v1.feature_column.linear_model', + 'tf.feature_column.shared_embedding_columns': + 
'tf.compat.v1.feature_column.shared_embedding_columns', + 'tf.fft': + 'tf.signal.fft', + 'tf.fft2d': + 'tf.signal.fft2d', + 'tf.fft3d': + 'tf.signal.fft3d', + 'tf.fixed_size_partitioner': + 'tf.compat.v1.fixed_size_partitioner', + 'tf.floordiv': + 'tf.math.floordiv', + 'tf.floormod': + 'tf.math.floormod', + 'tf.get_collection': + 'tf.compat.v1.get_collection', + 'tf.get_collection_ref': + 'tf.compat.v1.get_collection_ref', + 'tf.get_default_graph': + 'tf.compat.v1.get_default_graph', + 'tf.get_default_session': + 'tf.compat.v1.get_default_session', + 'tf.get_local_variable': + 'tf.compat.v1.get_local_variable', + 'tf.get_seed': + 'tf.compat.v1.get_seed', + 'tf.get_session_handle': + 'tf.compat.v1.get_session_handle', + 'tf.get_session_tensor': + 'tf.compat.v1.get_session_tensor', + 'tf.get_variable': + 'tf.compat.v1.get_variable', + 'tf.get_variable_scope': + 'tf.compat.v1.get_variable_scope', + 'tf.gfile.FastGFile': + 'tf.compat.v1.gfile.FastGFile', + 'tf.global_norm': + 'tf.linalg.global_norm', + 'tf.global_variables': + 'tf.compat.v1.global_variables', + 'tf.global_variables_initializer': + 'tf.compat.v1.global_variables_initializer', + 'tf.graph_util.convert_variables_to_constants': + 'tf.compat.v1.graph_util.convert_variables_to_constants', + 'tf.graph_util.extract_sub_graph': + 'tf.compat.v1.graph_util.extract_sub_graph', + 'tf.graph_util.must_run_on_cpu': + 'tf.compat.v1.graph_util.must_run_on_cpu', + 'tf.graph_util.remove_training_nodes': + 'tf.compat.v1.graph_util.remove_training_nodes', + 'tf.graph_util.tensor_shape_from_node_def_name': + 'tf.compat.v1.graph_util.tensor_shape_from_node_def_name', + 'tf.ifft': + 'tf.signal.ifft', + 'tf.ifft2d': + 'tf.signal.ifft2d', + 'tf.ifft3d': + 'tf.signal.ifft3d', + 'tf.igamma': + 'tf.math.igamma', + 'tf.igammac': + 'tf.math.igammac', + 'tf.imag': + 'tf.math.imag', + 'tf.image.resize_area': + 'tf.compat.v1.image.resize_area', + 'tf.image.resize_bicubic': + 'tf.compat.v1.image.resize_bicubic', + 
'tf.image.resize_bilinear': + 'tf.compat.v1.image.resize_bilinear', + 'tf.image.resize_image_with_crop_or_pad': + 'tf.image.resize_with_crop_or_pad', + 'tf.image.resize_image_with_pad': + 'tf.compat.v1.image.resize_image_with_pad', + 'tf.image.resize_nearest_neighbor': + 'tf.compat.v1.image.resize_nearest_neighbor', + 'tf.image.transpose_image': + 'tf.image.transpose', + 'tf.initialize_all_tables': + 'tf.compat.v1.initialize_all_tables', + 'tf.initialize_all_variables': + 'tf.compat.v1.initialize_all_variables', + 'tf.initialize_local_variables': + 'tf.compat.v1.initialize_local_variables', + 'tf.initialize_variables': + 'tf.compat.v1.initialize_variables', + 'tf.initializers.global_variables': + 'tf.compat.v1.initializers.global_variables', + 'tf.initializers.local_variables': + 'tf.compat.v1.initializers.local_variables', + 'tf.initializers.tables_initializer': + 'tf.compat.v1.initializers.tables_initializer', + 'tf.initializers.uniform_unit_scaling': + 'tf.compat.v1.initializers.uniform_unit_scaling', + 'tf.initializers.variables': + 'tf.compat.v1.initializers.variables', + 'tf.invert_permutation': + 'tf.math.invert_permutation', + 'tf.io.PaddingFIFOQueue': + 'tf.queue.PaddingFIFOQueue', + 'tf.io.PriorityQueue': + 'tf.queue.PriorityQueue', + 'tf.io.QueueBase': + 'tf.queue.QueueBase', + 'tf.io.RandomShuffleQueue': + 'tf.queue.RandomShuffleQueue', + 'tf.io.TFRecordCompressionType': + 'tf.compat.v1.io.TFRecordCompressionType', + 'tf.io.tf_record_iterator': + 'tf.compat.v1.io.tf_record_iterator', + 'tf.is_finite': + 'tf.math.is_finite', + 'tf.is_inf': + 'tf.math.is_inf', + 'tf.is_nan': + 'tf.math.is_nan', + 'tf.is_non_decreasing': + 'tf.math.is_non_decreasing', + 'tf.is_numeric_tensor': + 'tf.debugging.is_numeric_tensor', + 'tf.is_strictly_increasing': + 'tf.math.is_strictly_increasing', + 'tf.is_variable_initialized': + 'tf.compat.v1.is_variable_initialized', + 'tf.keras.backend.get_session': + 'tf.compat.v1.keras.backend.get_session', + 
'tf.keras.backend.set_session': + 'tf.compat.v1.keras.backend.set_session', + 'tf.keras.layers.CuDNNGRU': + 'tf.compat.v1.keras.layers.CuDNNGRU', + 'tf.keras.layers.CuDNNLSTM': + 'tf.compat.v1.keras.layers.CuDNNLSTM', + 'tf.keras.layers.disable_v2_dtype_behavior': + 'tf.compat.v1.keras.layers.disable_v2_dtype_behavior', + 'tf.keras.layers.enable_v2_dtype_behavior': + 'tf.compat.v1.keras.layers.enable_v2_dtype_behavior', + 'tf.keras.losses.cosine': + 'tf.keras.losses.cosine_similarity', + 'tf.keras.losses.cosine_proximity': + 'tf.keras.losses.cosine_similarity', + 'tf.keras.metrics.cosine': + 'tf.keras.losses.cosine_similarity', + 'tf.keras.metrics.cosine_proximity': + 'tf.keras.losses.cosine_similarity', + 'tf.keras.models.LinearModel': + 'tf.keras.experimental.LinearModel', + 'tf.keras.models.WideDeepModel': + 'tf.keras.experimental.WideDeepModel', + 'tf.keras.optimizers.Adadelta': + 'tf.keras.optimizers.legacy.Adadelta', + 'tf.keras.optimizers.Adagrad': + 'tf.keras.optimizers.legacy.Adagrad', + 'tf.keras.optimizers.Adam': + 'tf.keras.optimizers.legacy.Adam', + 'tf.keras.optimizers.Adamax': + 'tf.keras.optimizers.legacy.Adamax', + 'tf.keras.optimizers.Ftrl': + 'tf.keras.optimizers.legacy.Ftrl', + 'tf.keras.optimizers.Nadam': + 'tf.keras.optimizers.legacy.Nadam', + 'tf.keras.optimizers.Optimizer': + 'tf.keras.optimizers.legacy.Optimizer', + 'tf.keras.optimizers.RMSprop': + 'tf.keras.optimizers.legacy.RMSprop', + 'tf.keras.optimizers.SGD': + 'tf.keras.optimizers.legacy.SGD', + 'tf.keras.utils.DeterministicRandomTestTool': + 'tf.compat.v1.keras.utils.DeterministicRandomTestTool', + 'tf.keras.utils.get_or_create_layer': + 'tf.compat.v1.keras.utils.get_or_create_layer', + 'tf.keras.utils.track_tf1_style_variables': + 'tf.compat.v1.keras.utils.track_tf1_style_variables', + 'tf.layers.BatchNormalization': + 'tf.compat.v1.layers.BatchNormalization', + 'tf.layers.InputSpec': + 'tf.keras.layers.InputSpec', + 'tf.layers.batch_normalization': + 
'tf.compat.v1.layers.batch_normalization', + 'tf.lbeta': + 'tf.math.lbeta', + 'tf.lgamma': + 'tf.math.lgamma', + 'tf.lin_space': + 'tf.linspace', + 'tf.linalg.transpose': + 'tf.linalg.matrix_transpose', + 'tf.lite.OpHint': + 'tf.compat.v1.lite.OpHint', + 'tf.lite.TocoConverter': + 'tf.compat.v1.lite.TocoConverter', + 'tf.lite.constants.GRAPHVIZ_DOT': + 'tf.compat.v1.lite.constants.GRAPHVIZ_DOT', + 'tf.lite.constants.TFLITE': + 'tf.compat.v1.lite.constants.TFLITE', + 'tf.lite.experimental.convert_op_hints_to_stubs': + 'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs', + 'tf.lite.toco_convert': + 'tf.compat.v1.lite.toco_convert', + 'tf.local_variables': + 'tf.compat.v1.local_variables', + 'tf.local_variables_initializer': + 'tf.compat.v1.local_variables_initializer', + 'tf.log': + 'tf.math.log', + 'tf.log1p': + 'tf.math.log1p', + 'tf.log_sigmoid': + 'tf.math.log_sigmoid', + 'tf.logging.DEBUG': + 'tf.compat.v1.logging.DEBUG', + 'tf.logging.ERROR': + 'tf.compat.v1.logging.ERROR', + 'tf.logging.FATAL': + 'tf.compat.v1.logging.FATAL', + 'tf.logging.INFO': + 'tf.compat.v1.logging.INFO', + 'tf.logging.TaskLevelStatusMessage': + 'tf.compat.v1.logging.TaskLevelStatusMessage', + 'tf.logging.WARN': + 'tf.compat.v1.logging.WARN', + 'tf.logging.debug': + 'tf.compat.v1.logging.debug', + 'tf.logging.error': + 'tf.compat.v1.logging.error', + 'tf.logging.fatal': + 'tf.compat.v1.logging.fatal', + 'tf.logging.flush': + 'tf.compat.v1.logging.flush', + 'tf.logging.get_verbosity': + 'tf.compat.v1.logging.get_verbosity', + 'tf.logging.info': + 'tf.compat.v1.logging.info', + 'tf.logging.log': + 'tf.compat.v1.logging.log', + 'tf.logging.log_every_n': + 'tf.compat.v1.logging.log_every_n', + 'tf.logging.log_first_n': + 'tf.compat.v1.logging.log_first_n', + 'tf.logging.log_if': + 'tf.compat.v1.logging.log_if', + 'tf.logging.set_verbosity': + 'tf.compat.v1.logging.set_verbosity', + 'tf.logging.vlog': + 'tf.compat.v1.logging.vlog', + 'tf.logging.warn': + 'tf.compat.v1.logging.warn', + 
'tf.logging.warning': + 'tf.compat.v1.logging.warning', + 'tf.logical_xor': + 'tf.math.logical_xor', + 'tf.losses.Reduction': + 'tf.compat.v1.losses.Reduction', + 'tf.losses.absolute_difference': + 'tf.compat.v1.losses.absolute_difference', + 'tf.losses.add_loss': + 'tf.compat.v1.losses.add_loss', + 'tf.losses.compute_weighted_loss': + 'tf.compat.v1.losses.compute_weighted_loss', + 'tf.losses.cosine_distance': + 'tf.compat.v1.losses.cosine_distance', + 'tf.losses.get_losses': + 'tf.compat.v1.losses.get_losses', + 'tf.losses.get_regularization_loss': + 'tf.compat.v1.losses.get_regularization_loss', + 'tf.losses.get_regularization_losses': + 'tf.compat.v1.losses.get_regularization_losses', + 'tf.losses.get_total_loss': + 'tf.compat.v1.losses.get_total_loss', + 'tf.losses.hinge_loss': + 'tf.compat.v1.losses.hinge_loss', + 'tf.losses.huber_loss': + 'tf.compat.v1.losses.huber_loss', + 'tf.losses.log_loss': + 'tf.compat.v1.losses.log_loss', + 'tf.losses.mean_pairwise_squared_error': + 'tf.compat.v1.losses.mean_pairwise_squared_error', + 'tf.losses.mean_squared_error': + 'tf.compat.v1.losses.mean_squared_error', + 'tf.losses.sigmoid_cross_entropy': + 'tf.compat.v1.losses.sigmoid_cross_entropy', + 'tf.losses.softmax_cross_entropy': + 'tf.compat.v1.losses.softmax_cross_entropy', + 'tf.losses.sparse_softmax_cross_entropy': + 'tf.compat.v1.losses.sparse_softmax_cross_entropy', + 'tf.make_template': + 'tf.compat.v1.make_template', + 'tf.manip.gather_nd': + 'tf.gather_nd', + 'tf.manip.reshape': + 'tf.reshape', + 'tf.manip.reverse': + 'tf.reverse', + 'tf.manip.roll': + 'tf.roll', + 'tf.manip.scatter_nd': + 'tf.scatter_nd', + 'tf.manip.space_to_batch_nd': + 'tf.space_to_batch_nd', + 'tf.manip.tile': + 'tf.tile', + 'tf.matching_files': + 'tf.io.matching_files', + 'tf.matrix_band_part': + 'tf.linalg.band_part', + 'tf.matrix_determinant': + 'tf.linalg.det', + 'tf.matrix_diag': + 'tf.linalg.diag', + 'tf.matrix_diag_part': + 'tf.linalg.diag_part', + 'tf.matrix_inverse': + 
'tf.linalg.inv', + 'tf.matrix_set_diag': + 'tf.linalg.set_diag', + 'tf.matrix_solve': + 'tf.linalg.solve', + 'tf.matrix_solve_ls': + 'tf.linalg.lstsq', + 'tf.matrix_transpose': + 'tf.linalg.matrix_transpose', + 'tf.matrix_triangular_solve': + 'tf.linalg.triangular_solve', + 'tf.metrics.accuracy': + 'tf.compat.v1.metrics.accuracy', + 'tf.metrics.auc': + 'tf.compat.v1.metrics.auc', + 'tf.metrics.average_precision_at_k': + 'tf.compat.v1.metrics.average_precision_at_k', + 'tf.metrics.false_negatives': + 'tf.compat.v1.metrics.false_negatives', + 'tf.metrics.false_negatives_at_thresholds': + 'tf.compat.v1.metrics.false_negatives_at_thresholds', + 'tf.metrics.false_positives': + 'tf.compat.v1.metrics.false_positives', + 'tf.metrics.false_positives_at_thresholds': + 'tf.compat.v1.metrics.false_positives_at_thresholds', + 'tf.metrics.mean': + 'tf.compat.v1.metrics.mean', + 'tf.metrics.mean_absolute_error': + 'tf.compat.v1.metrics.mean_absolute_error', + 'tf.metrics.mean_cosine_distance': + 'tf.compat.v1.metrics.mean_cosine_distance', + 'tf.metrics.mean_iou': + 'tf.compat.v1.metrics.mean_iou', + 'tf.metrics.mean_per_class_accuracy': + 'tf.compat.v1.metrics.mean_per_class_accuracy', + 'tf.metrics.mean_relative_error': + 'tf.compat.v1.metrics.mean_relative_error', + 'tf.metrics.mean_squared_error': + 'tf.compat.v1.metrics.mean_squared_error', + 'tf.metrics.mean_tensor': + 'tf.compat.v1.metrics.mean_tensor', + 'tf.metrics.percentage_below': + 'tf.compat.v1.metrics.percentage_below', + 'tf.metrics.precision': + 'tf.compat.v1.metrics.precision', + 'tf.metrics.precision_at_k': + 'tf.compat.v1.metrics.precision_at_k', + 'tf.metrics.precision_at_thresholds': + 'tf.compat.v1.metrics.precision_at_thresholds', + 'tf.metrics.precision_at_top_k': + 'tf.compat.v1.metrics.precision_at_top_k', + 'tf.metrics.recall': + 'tf.compat.v1.metrics.recall', + 'tf.metrics.recall_at_k': + 'tf.compat.v1.metrics.recall_at_k', + 'tf.metrics.recall_at_thresholds': + 
'tf.compat.v1.metrics.recall_at_thresholds', + 'tf.metrics.recall_at_top_k': + 'tf.compat.v1.metrics.recall_at_top_k', + 'tf.metrics.root_mean_squared_error': + 'tf.compat.v1.metrics.root_mean_squared_error', + 'tf.metrics.sensitivity_at_specificity': + 'tf.compat.v1.metrics.sensitivity_at_specificity', + 'tf.metrics.sparse_average_precision_at_k': + 'tf.compat.v1.metrics.sparse_average_precision_at_k', + 'tf.metrics.sparse_precision_at_k': + 'tf.compat.v1.metrics.sparse_precision_at_k', + 'tf.metrics.specificity_at_sensitivity': + 'tf.compat.v1.metrics.specificity_at_sensitivity', + 'tf.metrics.true_negatives': + 'tf.compat.v1.metrics.true_negatives', + 'tf.metrics.true_negatives_at_thresholds': + 'tf.compat.v1.metrics.true_negatives_at_thresholds', + 'tf.metrics.true_positives': + 'tf.compat.v1.metrics.true_positives', + 'tf.metrics.true_positives_at_thresholds': + 'tf.compat.v1.metrics.true_positives_at_thresholds', + 'tf.min_max_variable_partitioner': + 'tf.compat.v1.min_max_variable_partitioner', + 'tf.mixed_precision.DynamicLossScale': + 'tf.compat.v1.mixed_precision.DynamicLossScale', + 'tf.mixed_precision.FixedLossScale': + 'tf.compat.v1.mixed_precision.FixedLossScale', + 'tf.mixed_precision.LossScale': + 'tf.compat.v1.mixed_precision.LossScale', + 'tf.mixed_precision.MixedPrecisionLossScaleOptimizer': + 'tf.compat.v1.mixed_precision.MixedPrecisionLossScaleOptimizer', + 'tf.mixed_precision.disable_mixed_precision_graph_rewrite': + 'tf.compat.v1.mixed_precision.disable_mixed_precision_graph_rewrite', + 'tf.mixed_precision.enable_mixed_precision_graph_rewrite': + 'tf.compat.v1.mixed_precision.enable_mixed_precision_graph_rewrite', + 'tf.mixed_precision.experimental.DynamicLossScale': + 'tf.compat.v1.mixed_precision.experimental.DynamicLossScale', + 'tf.mixed_precision.experimental.FixedLossScale': + 'tf.compat.v1.mixed_precision.experimental.FixedLossScale', + 'tf.mixed_precision.experimental.LossScale': + 
'tf.compat.v1.mixed_precision.experimental.LossScale', + 'tf.mod': + 'tf.math.floormod', + 'tf.model_variables': + 'tf.compat.v1.model_variables', + 'tf.moving_average_variables': + 'tf.compat.v1.moving_average_variables', + 'tf.nn.avg_pool_v2': + 'tf.nn.avg_pool', + 'tf.nn.bidirectional_dynamic_rnn': + 'tf.compat.v1.nn.bidirectional_dynamic_rnn', + 'tf.nn.conv2d_backprop_filter': + 'tf.compat.v1.nn.conv2d_backprop_filter', + 'tf.nn.conv3d_backprop_filter': + 'tf.compat.v1.nn.conv3d_backprop_filter', + 'tf.nn.conv3d_backprop_filter_v2': + 'tf.compat.v1.nn.conv3d_backprop_filter_v2', + 'tf.nn.ctc_beam_search_decoder_v2': + 'tf.nn.ctc_beam_search_decoder', + 'tf.nn.ctc_loss_v2': + 'tf.compat.v1.nn.ctc_loss_v2', + 'tf.nn.depthwise_conv2d_native': + 'tf.compat.v1.nn.depthwise_conv2d_native', + 'tf.nn.depthwise_conv2d_native_backprop_filter': + 'tf.nn.depthwise_conv2d_backprop_filter', + 'tf.nn.depthwise_conv2d_native_backprop_input': + 'tf.nn.depthwise_conv2d_backprop_input', + 'tf.nn.dynamic_rnn': + 'tf.compat.v1.nn.dynamic_rnn', + 'tf.nn.log_uniform_candidate_sampler': + 'tf.random.log_uniform_candidate_sampler', + 'tf.nn.max_pool_v2': + 'tf.nn.max_pool', + 'tf.nn.quantized_avg_pool': + 'tf.compat.v1.nn.quantized_avg_pool', + 'tf.nn.quantized_conv2d': + 'tf.compat.v1.nn.quantized_conv2d', + 'tf.nn.quantized_max_pool': + 'tf.compat.v1.nn.quantized_max_pool', + 'tf.nn.quantized_relu_x': + 'tf.compat.v1.nn.quantized_relu_x', + 'tf.nn.raw_rnn': + 'tf.compat.v1.nn.raw_rnn', + 'tf.nn.relu_layer': + 'tf.compat.v1.nn.relu_layer', + 'tf.nn.rnn_cell.BasicLSTMCell': + 'tf.compat.v1.nn.rnn_cell.BasicLSTMCell', + 'tf.nn.rnn_cell.BasicRNNCell': + 'tf.compat.v1.nn.rnn_cell.BasicRNNCell', + 'tf.nn.rnn_cell.DeviceWrapper': + 'tf.compat.v1.nn.rnn_cell.DeviceWrapper', + 'tf.nn.rnn_cell.DropoutWrapper': + 'tf.compat.v1.nn.rnn_cell.DropoutWrapper', + 'tf.nn.rnn_cell.GRUCell': + 'tf.compat.v1.nn.rnn_cell.GRUCell', + 'tf.nn.rnn_cell.LSTMCell': + 'tf.compat.v1.nn.rnn_cell.LSTMCell', + 
'tf.nn.rnn_cell.LSTMStateTuple': + 'tf.compat.v1.nn.rnn_cell.LSTMStateTuple', + 'tf.nn.rnn_cell.MultiRNNCell': + 'tf.compat.v1.nn.rnn_cell.MultiRNNCell', + 'tf.nn.rnn_cell.RNNCell': + 'tf.compat.v1.nn.rnn_cell.RNNCell', + 'tf.nn.rnn_cell.ResidualWrapper': + 'tf.compat.v1.nn.rnn_cell.ResidualWrapper', + 'tf.nn.static_bidirectional_rnn': + 'tf.compat.v1.nn.static_bidirectional_rnn', + 'tf.nn.static_rnn': + 'tf.compat.v1.nn.static_rnn', + 'tf.nn.static_state_saving_rnn': + 'tf.compat.v1.nn.static_state_saving_rnn', + 'tf.nn.uniform_candidate_sampler': + 'tf.random.uniform_candidate_sampler', + 'tf.nn.xw_plus_b': + 'tf.compat.v1.nn.xw_plus_b', + 'tf.no_regularizer': + 'tf.compat.v1.no_regularizer', + 'tf.op_scope': + 'tf.compat.v1.op_scope', + 'tf.parse_single_sequence_example': + 'tf.io.parse_single_sequence_example', + 'tf.parse_tensor': + 'tf.io.parse_tensor', + 'tf.placeholder': + 'tf.compat.v1.placeholder', + 'tf.placeholder_with_default': + 'tf.compat.v1.placeholder_with_default', + 'tf.polygamma': + 'tf.math.polygamma', + 'tf.profiler.AdviceProto': + 'tf.compat.v1.profiler.AdviceProto', + 'tf.profiler.GraphNodeProto': + 'tf.compat.v1.profiler.GraphNodeProto', + 'tf.profiler.MultiGraphNodeProto': + 'tf.compat.v1.profiler.MultiGraphNodeProto', + 'tf.profiler.OpLogProto': + 'tf.compat.v1.profiler.OpLogProto', + 'tf.profiler.ProfileOptionBuilder': + 'tf.compat.v1.profiler.ProfileOptionBuilder', + 'tf.profiler.Profiler': + 'tf.compat.v1.profiler.Profiler', + 'tf.profiler.advise': + 'tf.compat.v1.profiler.advise', + 'tf.profiler.profile': + 'tf.compat.v1.profiler.profile', + 'tf.profiler.write_op_log': + 'tf.compat.v1.profiler.write_op_log', + 'tf.py_func': + 'tf.compat.v1.py_func', + 'tf.python_io.TFRecordCompressionType': + 'tf.compat.v1.python_io.TFRecordCompressionType', + 'tf.python_io.TFRecordOptions': + 'tf.io.TFRecordOptions', + 'tf.python_io.TFRecordWriter': + 'tf.io.TFRecordWriter', + 'tf.python_io.tf_record_iterator': + 
'tf.compat.v1.python_io.tf_record_iterator', + 'tf.qr': + 'tf.linalg.qr', + 'tf.quantize': + 'tf.quantization.quantize', + 'tf.quantized_concat': + 'tf.quantization.quantized_concat', + 'tf.ragged.RaggedTensorValue': + 'tf.compat.v1.ragged.RaggedTensorValue', + 'tf.ragged.constant_value': + 'tf.compat.v1.ragged.constant_value', + 'tf.ragged.placeholder': + 'tf.compat.v1.ragged.placeholder', + 'tf.random.get_seed': + 'tf.compat.v1.random.get_seed', + 'tf.random.set_random_seed': + 'tf.compat.v1.random.set_random_seed', + 'tf.random_crop': + 'tf.image.random_crop', + 'tf.random_gamma': + 'tf.random.gamma', + 'tf.random_normal': + 'tf.random.normal', + 'tf.random_poisson': + 'tf.random.poisson', + 'tf.random_shuffle': + 'tf.random.shuffle', + 'tf.random_uniform': + 'tf.random.uniform', + 'tf.read_file': + 'tf.io.read_file', + 'tf.real': + 'tf.math.real', + 'tf.reciprocal': + 'tf.math.reciprocal', + 'tf.regex_replace': + 'tf.strings.regex_replace', + 'tf.report_uninitialized_variables': + 'tf.compat.v1.report_uninitialized_variables', + 'tf.reset_default_graph': + 'tf.compat.v1.reset_default_graph', + 'tf.resource_loader.get_data_files_path': + 'tf.compat.v1.resource_loader.get_data_files_path', + 'tf.resource_loader.get_path_to_datafile': + 'tf.compat.v1.resource_loader.get_path_to_datafile', + 'tf.resource_loader.get_root_dir_with_all_resources': + 'tf.compat.v1.resource_loader.get_root_dir_with_all_resources', + 'tf.resource_loader.load_resource': + 'tf.compat.v1.resource_loader.load_resource', + 'tf.resource_loader.readahead_file_path': + 'tf.compat.v1.resource_loader.readahead_file_path', + 'tf.resource_variables_enabled': + 'tf.compat.v1.resource_variables_enabled', + 'tf.reverse_v2': + 'tf.reverse', + 'tf.rint': + 'tf.math.rint', + 'tf.rsqrt': + 'tf.math.rsqrt', + 'tf.saved_model.Builder': + 'tf.compat.v1.saved_model.Builder', + 'tf.saved_model.LEGACY_INIT_OP_KEY': + 'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY', + 'tf.saved_model.MAIN_OP_KEY': + 
'tf.compat.v1.saved_model.MAIN_OP_KEY', + 'tf.saved_model.build_signature_def': + 'tf.compat.v1.saved_model.build_signature_def', + 'tf.saved_model.build_tensor_info': + 'tf.compat.v1.saved_model.build_tensor_info', + 'tf.saved_model.builder.SavedModelBuilder': + 'tf.compat.v1.saved_model.builder.SavedModelBuilder', + 'tf.saved_model.classification_signature_def': + 'tf.compat.v1.saved_model.classification_signature_def', + 'tf.saved_model.constants.ASSETS_DIRECTORY': + 'tf.saved_model.ASSETS_DIRECTORY', + 'tf.saved_model.constants.ASSETS_KEY': + 'tf.saved_model.ASSETS_KEY', + 'tf.saved_model.constants.DEBUG_DIRECTORY': + 'tf.saved_model.DEBUG_DIRECTORY', + 'tf.saved_model.constants.DEBUG_INFO_FILENAME_PB': + 'tf.saved_model.DEBUG_INFO_FILENAME_PB', + 'tf.saved_model.constants.LEGACY_INIT_OP_KEY': + 'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY', + 'tf.saved_model.constants.MAIN_OP_KEY': + 'tf.compat.v1.saved_model.constants.MAIN_OP_KEY', + 'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB': + 'tf.saved_model.SAVED_MODEL_FILENAME_PB', + 'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT': + 'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT', + 'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION': + 'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION', + 'tf.saved_model.constants.VARIABLES_DIRECTORY': + 'tf.saved_model.VARIABLES_DIRECTORY', + 'tf.saved_model.constants.VARIABLES_FILENAME': + 'tf.saved_model.VARIABLES_FILENAME', + 'tf.saved_model.experimental.save': + 'tf.saved_model.save', + 'tf.saved_model.get_tensor_from_tensor_info': + 'tf.compat.v1.saved_model.get_tensor_from_tensor_info', + 'tf.saved_model.is_valid_signature': + 'tf.compat.v1.saved_model.is_valid_signature', + 'tf.saved_model.loader.maybe_saved_model_directory': + 'tf.saved_model.contains_saved_model', + 'tf.saved_model.main_op.main_op': + 'tf.compat.v1.saved_model.main_op.main_op', + 'tf.saved_model.main_op.main_op_with_restore': + 'tf.compat.v1.saved_model.main_op.main_op_with_restore', + 
'tf.saved_model.main_op_with_restore': + 'tf.compat.v1.saved_model.main_op_with_restore', + 'tf.saved_model.maybe_saved_model_directory': + 'tf.saved_model.contains_saved_model', + 'tf.saved_model.predict_signature_def': + 'tf.compat.v1.saved_model.predict_signature_def', + 'tf.saved_model.regression_signature_def': + 'tf.compat.v1.saved_model.regression_signature_def', + 'tf.saved_model.signature_constants.CLASSIFY_INPUTS': + 'tf.saved_model.CLASSIFY_INPUTS', + 'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME': + 'tf.saved_model.CLASSIFY_METHOD_NAME', + 'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES': + 'tf.saved_model.CLASSIFY_OUTPUT_CLASSES', + 'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES': + 'tf.saved_model.CLASSIFY_OUTPUT_SCORES', + 'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY': + 'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY', + 'tf.saved_model.signature_constants.PREDICT_INPUTS': + 'tf.saved_model.PREDICT_INPUTS', + 'tf.saved_model.signature_constants.PREDICT_METHOD_NAME': + 'tf.saved_model.PREDICT_METHOD_NAME', + 'tf.saved_model.signature_constants.PREDICT_OUTPUTS': + 'tf.saved_model.PREDICT_OUTPUTS', + 'tf.saved_model.signature_constants.REGRESS_INPUTS': + 'tf.saved_model.REGRESS_INPUTS', + 'tf.saved_model.signature_constants.REGRESS_METHOD_NAME': + 'tf.saved_model.REGRESS_METHOD_NAME', + 'tf.saved_model.signature_constants.REGRESS_OUTPUTS': + 'tf.saved_model.REGRESS_OUTPUTS', + 'tf.saved_model.signature_def_utils.MethodNameUpdater': + 'tf.compat.v1.saved_model.signature_def_utils.MethodNameUpdater', + 'tf.saved_model.signature_def_utils.build_signature_def': + 'tf.compat.v1.saved_model.signature_def_utils.build_signature_def', + 'tf.saved_model.signature_def_utils.classification_signature_def': + 'tf.compat.v1.saved_model.signature_def_utils.classification_signature_def', + 'tf.saved_model.signature_def_utils.is_valid_signature': + 
'tf.compat.v1.saved_model.signature_def_utils.is_valid_signature', + 'tf.saved_model.signature_def_utils.predict_signature_def': + 'tf.compat.v1.saved_model.signature_def_utils.predict_signature_def', + 'tf.saved_model.signature_def_utils.regression_signature_def': + 'tf.compat.v1.saved_model.signature_def_utils.regression_signature_def', + 'tf.saved_model.simple_save': + 'tf.compat.v1.saved_model.simple_save', + 'tf.saved_model.tag_constants.GPU': + 'tf.saved_model.GPU', + 'tf.saved_model.tag_constants.SERVING': + 'tf.saved_model.SERVING', + 'tf.saved_model.tag_constants.TPU': + 'tf.saved_model.TPU', + 'tf.saved_model.tag_constants.TRAINING': + 'tf.saved_model.TRAINING', + 'tf.saved_model.utils.build_tensor_info': + 'tf.compat.v1.saved_model.utils.build_tensor_info', + 'tf.saved_model.utils.get_tensor_from_tensor_info': + 'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info', + 'tf.scatter_add': + 'tf.compat.v1.scatter_add', + 'tf.scatter_div': + 'tf.compat.v1.scatter_div', + 'tf.scatter_max': + 'tf.compat.v1.scatter_max', + 'tf.scatter_min': + 'tf.compat.v1.scatter_min', + 'tf.scatter_mul': + 'tf.compat.v1.scatter_mul', + 'tf.scatter_nd_add': + 'tf.compat.v1.scatter_nd_add', + 'tf.scatter_nd_sub': + 'tf.compat.v1.scatter_nd_sub', + 'tf.scatter_nd_update': + 'tf.compat.v1.scatter_nd_update', + 'tf.scatter_sub': + 'tf.compat.v1.scatter_sub', + 'tf.scatter_update': + 'tf.compat.v1.scatter_update', + 'tf.segment_max': + 'tf.math.segment_max', + 'tf.segment_mean': + 'tf.math.segment_mean', + 'tf.segment_min': + 'tf.math.segment_min', + 'tf.segment_prod': + 'tf.math.segment_prod', + 'tf.segment_sum': + 'tf.math.segment_sum', + 'tf.self_adjoint_eig': + 'tf.linalg.eigh', + 'tf.self_adjoint_eigvals': + 'tf.linalg.eigvalsh', + 'tf.serialize_many_sparse': + 'tf.io.serialize_many_sparse', + 'tf.serialize_sparse': + 'tf.io.serialize_sparse', + 'tf.serialize_tensor': + 'tf.io.serialize_tensor', + 'tf.set_random_seed': + 'tf.compat.v1.set_random_seed', + 'tf.setdiff1d': 
+ 'tf.compat.v1.setdiff1d', + 'tf.sets.set_difference': + 'tf.sets.difference', + 'tf.sets.set_intersection': + 'tf.sets.intersection', + 'tf.sets.set_size': + 'tf.sets.size', + 'tf.sets.set_union': + 'tf.sets.union', + 'tf.space_to_depth': + 'tf.nn.space_to_depth', + 'tf.sparse.SparseConditionalAccumulator': + 'tf.compat.v1.sparse.SparseConditionalAccumulator', + 'tf.sparse.matmul': + 'tf.sparse.sparse_dense_matmul', + 'tf.sparse.merge': + 'tf.compat.v1.sparse.merge', + 'tf.sparse.placeholder': + 'tf.compat.v1.sparse.placeholder', + 'tf.sparse.reduce_max_sparse': + 'tf.compat.v1.sparse.reduce_max_sparse', + 'tf.sparse.reduce_sum_sparse': + 'tf.compat.v1.sparse.reduce_sum_sparse', + 'tf.sparse_add': + 'tf.sparse.add', + 'tf.sparse_concat': + 'tf.sparse.concat', + 'tf.sparse_fill_empty_rows': + 'tf.sparse.fill_empty_rows', + 'tf.sparse_mask': + 'tf.sparse.mask', + 'tf.sparse_maximum': + 'tf.sparse.maximum', + 'tf.sparse_merge': + 'tf.compat.v1.sparse_merge', + 'tf.sparse_minimum': + 'tf.sparse.minimum', + 'tf.sparse_placeholder': + 'tf.compat.v1.sparse_placeholder', + 'tf.sparse_reduce_max': + 'tf.sparse.reduce_max', + 'tf.sparse_reduce_max_sparse': + 'tf.compat.v1.sparse_reduce_max_sparse', + 'tf.sparse_reduce_sum': + 'tf.sparse.reduce_sum', + 'tf.sparse_reduce_sum_sparse': + 'tf.compat.v1.sparse_reduce_sum_sparse', + 'tf.sparse_reorder': + 'tf.sparse.reorder', + 'tf.sparse_reset_shape': + 'tf.sparse.reset_shape', + 'tf.sparse_reshape': + 'tf.sparse.reshape', + 'tf.sparse_retain': + 'tf.sparse.retain', + 'tf.sparse_segment_mean': + 'tf.sparse.segment_mean', + 'tf.sparse_segment_sqrt_n': + 'tf.sparse.segment_sqrt_n', + 'tf.sparse_segment_sum': + 'tf.sparse.segment_sum', + 'tf.sparse_slice': + 'tf.sparse.slice', + 'tf.sparse_softmax': + 'tf.sparse.softmax', + 'tf.sparse_split': + 'tf.sparse.split', + 'tf.sparse_tensor_dense_matmul': + 'tf.sparse.sparse_dense_matmul', + 'tf.sparse_tensor_to_dense': + 'tf.sparse.to_dense', + 'tf.sparse_to_dense': + 
'tf.compat.v1.sparse_to_dense', + 'tf.sparse_to_indicator': + 'tf.sparse.to_indicator', + 'tf.sparse_transpose': + 'tf.sparse.transpose', + 'tf.spectral.dct': + 'tf.signal.dct', + 'tf.spectral.fft': + 'tf.signal.fft', + 'tf.spectral.fft2d': + 'tf.signal.fft2d', + 'tf.spectral.fft3d': + 'tf.signal.fft3d', + 'tf.spectral.idct': + 'tf.signal.idct', + 'tf.spectral.ifft': + 'tf.signal.ifft', + 'tf.spectral.ifft2d': + 'tf.signal.ifft2d', + 'tf.spectral.ifft3d': + 'tf.signal.ifft3d', + 'tf.spectral.irfft': + 'tf.signal.irfft', + 'tf.spectral.irfft2d': + 'tf.signal.irfft2d', + 'tf.spectral.irfft3d': + 'tf.signal.irfft3d', + 'tf.spectral.rfft': + 'tf.signal.rfft', + 'tf.spectral.rfft2d': + 'tf.signal.rfft2d', + 'tf.spectral.rfft3d': + 'tf.signal.rfft3d', + 'tf.squared_difference': + 'tf.math.squared_difference', + 'tf.string_join': + 'tf.strings.join', + 'tf.string_strip': + 'tf.strings.strip', + 'tf.string_to_hash_bucket_fast': + 'tf.strings.to_hash_bucket_fast', + 'tf.string_to_hash_bucket_strong': + 'tf.strings.to_hash_bucket_strong', + 'tf.summary.Event': + 'tf.compat.v1.summary.Event', + 'tf.summary.FileWriter': + 'tf.compat.v1.summary.FileWriter', + 'tf.summary.FileWriterCache': + 'tf.compat.v1.summary.FileWriterCache', + 'tf.summary.SessionLog': + 'tf.compat.v1.summary.SessionLog', + 'tf.summary.Summary': + 'tf.compat.v1.summary.Summary', + 'tf.summary.SummaryDescription': + 'tf.compat.v1.summary.SummaryDescription', + 'tf.summary.TaggedRunMetadata': + 'tf.compat.v1.summary.TaggedRunMetadata', + 'tf.summary.all_v2_summary_ops': + 'tf.compat.v1.summary.all_v2_summary_ops', + 'tf.summary.get_summary_description': + 'tf.compat.v1.summary.get_summary_description', + 'tf.summary.initialize': + 'tf.compat.v1.summary.initialize', + 'tf.summary.merge': + 'tf.compat.v1.summary.merge', + 'tf.summary.merge_all': + 'tf.compat.v1.summary.merge_all', + 'tf.summary.tensor_summary': + 'tf.compat.v1.summary.tensor_summary', + 'tf.svd': + 'tf.linalg.svd', + 'tf.tables_initializer': + 
'tf.compat.v1.tables_initializer', + 'tf.tensor_scatter_add': + 'tf.tensor_scatter_nd_add', + 'tf.tensor_scatter_sub': + 'tf.tensor_scatter_nd_sub', + 'tf.tensor_scatter_update': + 'tf.tensor_scatter_nd_update', + 'tf.test.StubOutForTesting': + 'tf.compat.v1.test.StubOutForTesting', + 'tf.test.compute_gradient_error': + 'tf.compat.v1.test.compute_gradient_error', + 'tf.test.get_temp_dir': + 'tf.compat.v1.test.get_temp_dir', + 'tf.test.mock': + 'tf.compat.v1.test.mock', + 'tf.test.test_src_dir_path': + 'tf.compat.v1.test.test_src_dir_path', + 'tf.to_bfloat16': + 'tf.compat.v1.to_bfloat16', + 'tf.to_complex128': + 'tf.compat.v1.to_complex128', + 'tf.to_complex64': + 'tf.compat.v1.to_complex64', + 'tf.to_double': + 'tf.compat.v1.to_double', + 'tf.to_float': + 'tf.compat.v1.to_float', + 'tf.to_int32': + 'tf.compat.v1.to_int32', + 'tf.to_int64': + 'tf.compat.v1.to_int64', + 'tf.tpu.CrossShardOptimizer': + 'tf.compat.v1.tpu.CrossShardOptimizer', + 'tf.tpu.PaddingSpec': + 'tf.compat.v1.tpu.PaddingSpec', + 'tf.tpu.batch_parallel': + 'tf.compat.v1.tpu.batch_parallel', + 'tf.tpu.bfloat16_scope': + 'tf.compat.v1.tpu.bfloat16_scope', + 'tf.tpu.core': + 'tf.compat.v1.tpu.core', + 'tf.tpu.cross_replica_sum': + 'tf.compat.v1.tpu.cross_replica_sum', + 'tf.tpu.experimental.AdagradParameters': + 'tf.compat.v1.tpu.experimental.AdagradParameters', + 'tf.tpu.experimental.AdamParameters': + 'tf.compat.v1.tpu.experimental.AdamParameters', + 'tf.tpu.experimental.FtrlParameters': + 'tf.compat.v1.tpu.experimental.FtrlParameters', + 'tf.tpu.experimental.StochasticGradientDescentParameters': + 'tf.compat.v1.tpu.experimental.StochasticGradientDescentParameters', + 'tf.tpu.experimental.embedding_column': + 'tf.compat.v1.tpu.experimental.embedding_column', + 'tf.tpu.experimental.shared_embedding_columns': + 'tf.compat.v1.tpu.experimental.shared_embedding_columns', + 'tf.tpu.initialize_system': + 'tf.compat.v1.tpu.initialize_system', + 'tf.tpu.outside_compilation': + 
'tf.compat.v1.tpu.outside_compilation', + 'tf.tpu.replicate': + 'tf.compat.v1.tpu.replicate', + 'tf.tpu.rewrite': + 'tf.compat.v1.tpu.rewrite', + 'tf.tpu.shard': + 'tf.compat.v1.tpu.shard', + 'tf.tpu.shutdown_system': + 'tf.compat.v1.tpu.shutdown_system', + 'tf.trace': + 'tf.linalg.trace', + 'tf.train.AdadeltaOptimizer': + 'tf.compat.v1.train.AdadeltaOptimizer', + 'tf.train.AdagradDAOptimizer': + 'tf.compat.v1.train.AdagradDAOptimizer', + 'tf.train.AdagradOptimizer': + 'tf.compat.v1.train.AdagradOptimizer', + 'tf.train.AdamOptimizer': + 'tf.compat.v1.train.AdamOptimizer', + 'tf.train.CheckpointSaverHook': + 'tf.compat.v1.train.CheckpointSaverHook', + 'tf.train.CheckpointSaverListener': + 'tf.compat.v1.train.CheckpointSaverListener', + 'tf.train.ChiefSessionCreator': + 'tf.compat.v1.train.ChiefSessionCreator', + 'tf.train.FeedFnHook': + 'tf.compat.v1.train.FeedFnHook', + 'tf.train.FinalOpsHook': + 'tf.compat.v1.train.FinalOpsHook', + 'tf.train.FtrlOptimizer': + 'tf.compat.v1.train.FtrlOptimizer', + 'tf.train.GlobalStepWaiterHook': + 'tf.compat.v1.train.GlobalStepWaiterHook', + 'tf.train.GradientDescentOptimizer': + 'tf.compat.v1.train.GradientDescentOptimizer', + 'tf.train.LoggingTensorHook': + 'tf.compat.v1.train.LoggingTensorHook', + 'tf.train.LooperThread': + 'tf.compat.v1.train.LooperThread', + 'tf.train.MomentumOptimizer': + 'tf.compat.v1.train.MomentumOptimizer', + 'tf.train.MonitoredSession': + 'tf.compat.v1.train.MonitoredSession', + 'tf.train.MonitoredTrainingSession': + 'tf.compat.v1.train.MonitoredTrainingSession', + 'tf.train.NanLossDuringTrainingError': + 'tf.compat.v1.train.NanLossDuringTrainingError', + 'tf.train.NanTensorHook': + 'tf.compat.v1.train.NanTensorHook', + 'tf.train.NewCheckpointReader': + 'tf.compat.v1.train.NewCheckpointReader', + 'tf.train.Optimizer': + 'tf.compat.v1.train.Optimizer', + 'tf.train.ProfilerHook': + 'tf.compat.v1.train.ProfilerHook', + 'tf.train.ProximalAdagradOptimizer': + 'tf.compat.v1.train.ProximalAdagradOptimizer', + 
'tf.train.ProximalGradientDescentOptimizer': + 'tf.compat.v1.train.ProximalGradientDescentOptimizer', + 'tf.train.QueueRunner': + 'tf.compat.v1.train.QueueRunner', + 'tf.train.RMSPropOptimizer': + 'tf.compat.v1.train.RMSPropOptimizer', + 'tf.train.Saver': + 'tf.compat.v1.train.Saver', + 'tf.train.SaverDef': + 'tf.compat.v1.train.SaverDef', + 'tf.train.Scaffold': + 'tf.compat.v1.train.Scaffold', + 'tf.train.SecondOrStepTimer': + 'tf.compat.v1.train.SecondOrStepTimer', + 'tf.train.Server': + 'tf.distribute.Server', + 'tf.train.SessionCreator': + 'tf.compat.v1.train.SessionCreator', + 'tf.train.SessionManager': + 'tf.compat.v1.train.SessionManager', + 'tf.train.SessionRunArgs': + 'tf.compat.v1.train.SessionRunArgs', + 'tf.train.SessionRunContext': + 'tf.compat.v1.train.SessionRunContext', + 'tf.train.SessionRunHook': + 'tf.compat.v1.train.SessionRunHook', + 'tf.train.SessionRunValues': + 'tf.compat.v1.train.SessionRunValues', + 'tf.train.SingularMonitoredSession': + 'tf.compat.v1.train.SingularMonitoredSession', + 'tf.train.StepCounterHook': + 'tf.compat.v1.train.StepCounterHook', + 'tf.train.StopAtStepHook': + 'tf.compat.v1.train.StopAtStepHook', + 'tf.train.SummarySaverHook': + 'tf.compat.v1.train.SummarySaverHook', + 'tf.train.Supervisor': + 'tf.compat.v1.train.Supervisor', + 'tf.train.SyncReplicasOptimizer': + 'tf.compat.v1.train.SyncReplicasOptimizer', + 'tf.train.VocabInfo': + 'tf.compat.v1.train.VocabInfo', + 'tf.train.WorkerSessionCreator': + 'tf.compat.v1.train.WorkerSessionCreator', + 'tf.train.add_queue_runner': + 'tf.compat.v1.train.add_queue_runner', + 'tf.train.assert_global_step': + 'tf.compat.v1.train.assert_global_step', + 'tf.train.basic_train_loop': + 'tf.compat.v1.train.basic_train_loop', + 'tf.train.batch': + 'tf.compat.v1.train.batch', + 'tf.train.batch_join': + 'tf.compat.v1.train.batch_join', + 'tf.train.checkpoint_exists': + 'tf.compat.v1.train.checkpoint_exists', + 'tf.train.cosine_decay': + 'tf.compat.v1.train.cosine_decay', + 
'tf.train.cosine_decay_restarts': + 'tf.compat.v1.train.cosine_decay_restarts', + 'tf.train.create_global_step': + 'tf.compat.v1.train.create_global_step', + 'tf.train.do_quantize_training_on_graphdef': + 'tf.compat.v1.train.do_quantize_training_on_graphdef', + 'tf.train.experimental.DynamicLossScale': + 'tf.compat.v1.train.experimental.DynamicLossScale', + 'tf.train.experimental.FixedLossScale': + 'tf.compat.v1.train.experimental.FixedLossScale', + 'tf.train.experimental.LossScale': + 'tf.compat.v1.train.experimental.LossScale', + 'tf.train.experimental.MixedPrecisionLossScaleOptimizer': + 'tf.compat.v1.train.experimental.MixedPrecisionLossScaleOptimizer', + 'tf.train.experimental.disable_mixed_precision_graph_rewrite': + 'tf.compat.v1.train.experimental.disable_mixed_precision_graph_rewrite', + 'tf.train.experimental.enable_mixed_precision_graph_rewrite': + 'tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite', + 'tf.train.exponential_decay': + 'tf.compat.v1.train.exponential_decay', + 'tf.train.export_meta_graph': + 'tf.compat.v1.train.export_meta_graph', + 'tf.train.generate_checkpoint_state_proto': + 'tf.compat.v1.train.generate_checkpoint_state_proto', + 'tf.train.get_checkpoint_mtimes': + 'tf.compat.v1.train.get_checkpoint_mtimes', + 'tf.train.get_global_step': + 'tf.compat.v1.train.get_global_step', + 'tf.train.get_or_create_global_step': + 'tf.compat.v1.train.get_or_create_global_step', + 'tf.train.global_step': + 'tf.compat.v1.train.global_step', + 'tf.train.import_meta_graph': + 'tf.compat.v1.train.import_meta_graph', + 'tf.train.init_from_checkpoint': + 'tf.compat.v1.train.init_from_checkpoint', + 'tf.train.input_producer': + 'tf.compat.v1.train.input_producer', + 'tf.train.inverse_time_decay': + 'tf.compat.v1.train.inverse_time_decay', + 'tf.train.limit_epochs': + 'tf.compat.v1.train.limit_epochs', + 'tf.train.linear_cosine_decay': + 'tf.compat.v1.train.linear_cosine_decay', + 'tf.train.match_filenames_once': + 
'tf.io.match_filenames_once', + 'tf.train.maybe_batch': + 'tf.compat.v1.train.maybe_batch', + 'tf.train.maybe_batch_join': + 'tf.compat.v1.train.maybe_batch_join', + 'tf.train.maybe_shuffle_batch': + 'tf.compat.v1.train.maybe_shuffle_batch', + 'tf.train.maybe_shuffle_batch_join': + 'tf.compat.v1.train.maybe_shuffle_batch_join', + 'tf.train.natural_exp_decay': + 'tf.compat.v1.train.natural_exp_decay', + 'tf.train.noisy_linear_cosine_decay': + 'tf.compat.v1.train.noisy_linear_cosine_decay', + 'tf.train.piecewise_constant': + 'tf.compat.v1.train.piecewise_constant', + 'tf.train.piecewise_constant_decay': + 'tf.compat.v1.train.piecewise_constant_decay', + 'tf.train.polynomial_decay': + 'tf.compat.v1.train.polynomial_decay', + 'tf.train.queue_runner.QueueRunner': + 'tf.compat.v1.train.queue_runner.QueueRunner', + 'tf.train.queue_runner.add_queue_runner': + 'tf.compat.v1.train.queue_runner.add_queue_runner', + 'tf.train.queue_runner.start_queue_runners': + 'tf.compat.v1.train.queue_runner.start_queue_runners', + 'tf.train.range_input_producer': + 'tf.compat.v1.train.range_input_producer', + 'tf.train.remove_checkpoint': + 'tf.compat.v1.train.remove_checkpoint', + 'tf.train.replica_device_setter': + 'tf.compat.v1.train.replica_device_setter', + 'tf.train.shuffle_batch': + 'tf.compat.v1.train.shuffle_batch', + 'tf.train.shuffle_batch_join': + 'tf.compat.v1.train.shuffle_batch_join', + 'tf.train.slice_input_producer': + 'tf.compat.v1.train.slice_input_producer', + 'tf.train.start_queue_runners': + 'tf.compat.v1.train.start_queue_runners', + 'tf.train.string_input_producer': + 'tf.compat.v1.train.string_input_producer', + 'tf.train.summary_iterator': + 'tf.compat.v1.train.summary_iterator', + 'tf.train.update_checkpoint_state': + 'tf.compat.v1.train.update_checkpoint_state', + 'tf.train.warm_start': + 'tf.compat.v1.train.warm_start', + 'tf.train.write_graph': + 'tf.io.write_graph', + 'tf.trainable_variables': + 'tf.compat.v1.trainable_variables', + 'tf.truncated_normal': + 
'tf.random.truncated_normal', + 'tf.uniform_unit_scaling_initializer': + 'tf.compat.v1.uniform_unit_scaling_initializer', + 'tf.unsorted_segment_max': + 'tf.math.unsorted_segment_max', + 'tf.unsorted_segment_mean': + 'tf.math.unsorted_segment_mean', + 'tf.unsorted_segment_min': + 'tf.math.unsorted_segment_min', + 'tf.unsorted_segment_prod': + 'tf.math.unsorted_segment_prod', + 'tf.unsorted_segment_sqrt_n': + 'tf.math.unsorted_segment_sqrt_n', + 'tf.unsorted_segment_sum': + 'tf.math.unsorted_segment_sum', + 'tf.variable_axis_size_partitioner': + 'tf.compat.v1.variable_axis_size_partitioner', + 'tf.variable_op_scope': + 'tf.compat.v1.variable_op_scope', + 'tf.variable_scope': + 'tf.compat.v1.variable_scope', + 'tf.variables_initializer': + 'tf.compat.v1.variables_initializer', + 'tf.verify_tensor_all_finite': + 'tf.debugging.assert_all_finite', + 'tf.wrap_function': + 'tf.compat.v1.wrap_function', + 'tf.write_file': + 'tf.io.write_file', + 'tf.zeta': + 'tf.math.zeta' +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/reorders_v2.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/reorders_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..c0ecbc6e8c0bcc6452e2ab115cf91ae02974e8b4 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/reorders_v2.py @@ -0,0 +1,136 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# pylint: disable=line-too-long +"""List of renames to apply when converting from TF 1.0 to TF 2.0. + +THIS FILE IS AUTOGENERATED: To update, please run: + bazel run tensorflow/tools/compatibility/update:generate_v2_reorders_map +This file should be updated whenever a function is added to +self.reordered_function_names in tf_upgrade_v2.py. +""" +reorders = { + 'tf.argmax': [None, None, 'name', 'dimension', 'output_type'], + 'tf.argmin': [None, None, 'name', 'dimension', 'output_type'], + 'tf.batch_to_space': [None, 'crops', 'block_size', 'name', 'block_shape'], + 'tf.boolean_mask': [None, None, 'name', 'axis'], + 'tf.cond': [None, None, None, 'strict', 'name', 'fn1', 'fn2'], + 'tf.confusion_matrix': [None, None, None, 'dtype', 'name', 'weights'], + 'tf.convert_to_tensor': [None, None, 'name', 'preferred_dtype', 'dtype_hint'], + 'tf.data.experimental.RaggedTensorStructure': ['dtype', 'shape', 'ragged_rank'], + 'tf.data.experimental.SparseTensorStructure': ['dtype', 'shape'], + 'tf.data.experimental.TensorArrayStructure': ['dtype', 'element_shape', 'dynamic_size', 'infer_shape'], + 'tf.data.experimental.TensorStructure': ['dtype', 'shape'], + 'tf.debugging.assert_all_finite': ['t', 'msg', 'name', 'x', 'message'], + 'tf.decode_csv': [None, None, None, None, 'name', 'na_value', 'select_cols'], + 'tf.depth_to_space': [None, None, 'name', 'data_format'], + 'tf.feature_column.categorical_column_with_vocabulary_file': [None, None, None, 'num_oov_buckets', 'default_value', 'dtype'], + 'tf.gather_nd': [None, None, 'name', 'batch_dims'], + 'tf.gradients': [None, None, None, None, 'colocate_gradients_with_ops', 'gate_gradients', 'aggregation_method', 'stop_gradients', 'unconnected_gradients'], + 'tf.hessians': [None, None, 'name', 'colocate_gradients_with_ops', 'gate_gradients', 
'aggregation_method'], + 'tf.image.sample_distorted_bounding_box': [None, None, None, 'seed2', 'min_object_covered', 'aspect_ratio_range', 'area_range', 'max_attempts', 'use_image_if_no_bounding_boxes', 'name'], + 'tf.initializers.uniform_unit_scaling': ['factor', 'seed', 'dtype'], + 'tf.io.decode_csv': [None, None, None, None, 'name', 'na_value', 'select_cols'], + 'tf.io.parse_example': [None, None, 'name', 'example_names'], + 'tf.io.parse_single_example': [None, None, 'name', 'example_names'], + 'tf.io.serialize_many_sparse': [None, 'name', 'out_type'], + 'tf.io.serialize_sparse': [None, 'name', 'out_type'], + 'tf.linalg.norm': [None, None, None, None, None, 'keep_dims'], + 'tf.manip.gather_nd': [None, None, 'name', 'batch_dims'], + 'tf.math.argmax': [None, None, 'name', 'dimension', 'output_type'], + 'tf.math.argmin': [None, None, 'name', 'dimension', 'output_type'], + 'tf.math.confusion_matrix': [None, None, None, 'dtype', 'name', 'weights'], + 'tf.math.in_top_k': ['predictions', 'targets', 'k', 'name'], + 'tf.math.reduce_all': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_any': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_logsumexp': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_max': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_mean': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_min': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_prod': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.math.reduce_sum': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.multinomial': [None, None, 'seed', 'name', 'output_dtype'], + 'tf.nn.avg_pool': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'], + 'tf.nn.avg_pool2d': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'], + 'tf.nn.conv1d': ['value', 'filters', 
'stride', 'padding', 'use_cudnn_on_gpu', 'data_format', 'name', 'input', 'dilations'], + 'tf.nn.conv2d': [None, 'filter', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'], + 'tf.nn.conv2d_backprop_input': ['input_sizes', 'filter', 'out_backprop', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'], + 'tf.nn.convolution': [None, 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format', 'filters', 'dilations'], + 'tf.nn.crelu': [None, 'name', 'axis'], + 'tf.nn.ctc_beam_search_decoder': ['inputs', 'sequence_length', 'beam_width', 'top_paths', 'merge_repeated'], + 'tf.nn.depth_to_space': [None, None, 'name', 'data_format'], + 'tf.nn.depthwise_conv2d': [None, None, None, None, 'rate', 'name', 'data_format', 'dilations'], + 'tf.nn.embedding_lookup': [None, None, 'partition_strategy', 'name', 'validate_indices', 'max_norm'], + 'tf.nn.embedding_lookup_sparse': [None, None, None, 'partition_strategy', 'name', 'combiner', 'max_norm', 'allow_fast_lookup'], + 'tf.nn.fractional_avg_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'], + 'tf.nn.fractional_max_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'], + 'tf.nn.in_top_k': ['predictions', 'targets', 'k', 'name'], + 'tf.nn.max_pool': ['value', 'ksize', 'strides', 'padding', 'data_format', 'name', 'input'], + 'tf.nn.moments': [None, None, None, 'name', 'keep_dims', 'keepdims'], + 'tf.nn.pool': [None, None, None, 'padding', 'dilation_rate', 'strides', 'name', 'data_format', 'dilations'], + 'tf.nn.separable_conv2d': [None, None, None, None, None, 'rate', 'name', 'data_format', 'dilations'], + 'tf.nn.softmax_cross_entropy_with_logits': ['labels', 'logits', 'dim', 'name', 'axis'], + 'tf.nn.space_to_batch': [None, 'paddings', 'block_size', 'name', 'block_shape'], + 'tf.nn.space_to_depth': [None, None, 'name', 'data_format'], + 
'tf.nn.weighted_moments': [None, None, None, 'name', 'keep_dims', 'keepdims'], + 'tf.norm': [None, None, None, None, None, 'keep_dims'], + 'tf.pad': [None, None, None, 'name', 'constant_values'], + 'tf.parse_example': [None, None, 'name', 'example_names'], + 'tf.parse_single_example': [None, None, 'name', 'example_names'], + 'tf.quantize_v2': [None, None, None, None, None, 'name', 'round_mode', 'narrow_range', 'axis', 'ensure_minimum_range'], + 'tf.random.multinomial': [None, None, 'seed', 'name', 'output_dtype'], + 'tf.random.poisson': ['lam', 'shape', 'dtype', 'seed', 'name'], + 'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'], + 'tf.reduce_all': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_any': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_join': [None, None, 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'], + 'tf.reduce_logsumexp': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_max': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_mean': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_min': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_prod': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reduce_sum': [None, None, None, None, 'reduction_indices', 'keep_dims'], + 'tf.reverse_sequence': [None, None, None, None, None, 'seq_dim', 'batch_dim'], + 'tf.serialize_many_sparse': [None, 'name', 'out_type'], + 'tf.serialize_sparse': [None, 'name', 'out_type'], + 'tf.shape': [None, 'name', 'out_type'], + 'tf.size': [None, 'name', 'out_type'], + 'tf.space_to_batch': [None, 'paddings', 'block_size', 'name', 'block_shape'], + 'tf.space_to_depth': [None, None, 'name', 'data_format'], + 'tf.sparse.add': [None, None, None, 'thresh'], + 'tf.sparse.concat': [None, None, 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'], + 'tf.sparse.reduce_max': [None, 
None, None, 'reduction_axes', 'keep_dims'], + 'tf.sparse.segment_mean': [None, None, None, 'name', 'num_segments', 'sparse_gradient'], + 'tf.sparse.segment_sqrt_n': [None, None, None, 'name', 'num_segments', 'sparse_gradient'], + 'tf.sparse.segment_sum': [None, None, None, 'name', 'num_segments', 'sparse_gradient'], + 'tf.sparse.split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'], + 'tf.sparse_add': [None, None, None, 'thresh'], + 'tf.sparse_concat': [None, None, 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'], + 'tf.sparse_matmul': [None, None, None, None, 'a_is_sparse', 'b_is_sparse', 'name'], + 'tf.sparse_reduce_max': [None, None, None, 'reduction_axes', 'keep_dims'], + 'tf.sparse_segment_mean': [None, None, None, 'name', 'num_segments', 'sparse_gradient'], + 'tf.sparse_segment_sqrt_n': [None, None, None, 'name', 'num_segments', 'sparse_gradient'], + 'tf.sparse_segment_sum': [None, None, None, 'name', 'num_segments', 'sparse_gradient'], + 'tf.sparse_split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'], + 'tf.strings.length': [None, 'name', 'unit'], + 'tf.strings.reduce_join': [None, None, 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'], + 'tf.strings.substr': [None, None, None, 'name', 'unit'], + 'tf.substr': [None, None, None, 'name', 'unit'], + 'tf.test.assert_equal_graph_def': ['actual', 'expected', 'checkpoint_v2', 'hash_table_shared_name'], + 'tf.transpose': [None, None, 'name', 'conjugate'], + 'tf.tuple': [None, 'name', 'control_inputs'], + 'tf.uniform_unit_scaling_initializer': ['factor', 'seed', 'dtype'], + 'tf.verify_tensor_all_finite': ['t', 'msg', 'name', 'x', 'message'], + 'tf.while_loop': ['cond', 'body', 'loop_vars', 'shape_invariants', 'parallel_iterations', 'back_prop', 'swap_memory', 'name', 'maximum_iterations', 'return_same_structure'] +} diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/tf_upgrade_v2.py 
b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/tf_upgrade_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..11c56830f7eb0208fb76f5be22d979adac8d452e --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/tf_upgrade_v2.py @@ -0,0 +1,2471 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow.""" + +import ast +import copy +import functools +import sys + +import pasta + +from tensorflow.tools.compatibility import all_renames_v2 +from tensorflow.tools.compatibility import ast_edits +from tensorflow.tools.compatibility import module_deprecations_v2 +from tensorflow.tools.compatibility import reorders_v2 + +# These pylint warnings are a mistake. +# pylint: disable=g-explicit-bool-comparison,g-bool-id-comparison + + +class UnaliasedTFImport(ast_edits.AnalysisResult): + + def __init__(self): + self.log_level = ast_edits.ERROR + self.log_message = ("The tf_upgrade_v2 script detected an unaliased " + "`import tensorflow`. The script can only run when " + "importing with `import tensorflow as tf`.") + + +class VersionedTFImport(ast_edits.AnalysisResult): + + def __init__(self, version): + self.log_level = ast_edits.INFO + self.log_message = ("Not upgrading symbols because `tensorflow." 
+ version + + "` was directly imported as `tf`.") + + +compat_v1_import = VersionedTFImport("compat.v1") +compat_v2_import = VersionedTFImport("compat.v2") + + +class TFAPIImportAnalysisSpec(ast_edits.APIAnalysisSpec): + + def __init__(self): + self.symbols_to_detect = {} + self.imports_to_detect = { + ("tensorflow", None): UnaliasedTFImport(), + ("tensorflow.compat.v1", "tf"): compat_v1_import, + ("tensorflow.compat.v2", "tf"): compat_v2_import, + } + + +class CompatV1ImportReplacer(ast.NodeVisitor): + """AST Visitor that replaces `import tensorflow.compat.v1 as tf`. + + Converts `import tensorflow.compat.v1 as tf` to `import tensorflow as tf` + """ + + def visit_Import(self, node): # pylint: disable=invalid-name + """Handle visiting an import node in the AST. + + Args: + node: Current Node + """ + for import_alias in node.names: + # Detect based on full import name and alias + if (import_alias.name == "tensorflow.compat.v1" and + import_alias.asname == "tf"): + import_alias.name = "tensorflow" + self.generic_visit(node) + + +class TFAPIChangeSpec(ast_edits.NoUpdateSpec): + """List of maps that describe what changed in the API.""" + + def __init__(self, import_rename=False, upgrade_compat_v1_import=False): + self.upgrade_compat_v1_import = upgrade_compat_v1_import + + # Maps from a function name to a dictionary that describes how to + # map from an old argument keyword to the new argument keyword. + # If the new argument is None, it will be removed. + # Only keyword args are handled, so make sure to also put any function in + # function_reorders to ensure that all args are made into keywords first. 
+ self.function_keyword_renames = { + # TODO(b/129398290) + # "tf.string_split": { + # "delimiter": "sep", + # }, + "tf.test.assert_equal_graph_def": { + "checkpoint_v2": None, + "hash_table_shared_name": None, + }, + "tf.autograph.to_code": { + "arg_types": None, + "arg_values": None, + "indentation": None, + }, + "tf.autograph.to_graph": { + "arg_types": None, + "arg_values": None, + }, + "tf.nn.embedding_lookup": { + "validate_indices": None, + }, + "tf.image.sample_distorted_bounding_box": { + "seed2": None, + }, + "tf.gradients": { + "colocate_gradients_with_ops": None, + }, + "tf.hessians": { + "colocate_gradients_with_ops": None, + }, + "*.minimize": { + "colocate_gradients_with_ops": None, + }, + "*.compute_gradients": { + "colocate_gradients_with_ops": None, + }, + "tf.cond": { + "strict": None, + "fn1": "true_fn", + "fn2": "false_fn" + }, + "tf.argmin": { + "dimension": "axis", + }, + "tf.argmax": { + "dimension": "axis", + }, + "tf.arg_min": { + "dimension": "axis", + }, + "tf.arg_max": { + "dimension": "axis", + }, + "tf.math.argmin": { + "dimension": "axis", + }, + "tf.math.argmax": { + "dimension": "axis", + }, + "tf.image.crop_and_resize": { + "box_ind": "box_indices", + }, + "tf.extract_image_patches": { + "ksizes": "sizes", + }, + "tf.image.extract_image_patches": { + "ksizes": "sizes", + }, + "tf.image.resize": { + "align_corners": None, + }, + "tf.image.resize_images": { + "align_corners": None, + }, + "tf.expand_dims": { + "dim": "axis", + }, + "tf.batch_to_space": { + "block_size": "block_shape", + }, + "tf.space_to_batch": { + "block_size": "block_shape", + }, + "tf.nn.space_to_batch": { + "block_size": "block_shape", + }, + "tf.constant": { + "verify_shape": "verify_shape_is_now_always_true", + }, + "tf.convert_to_tensor": { + "preferred_dtype": "dtype_hint" + }, + "tf.nn.softmax_cross_entropy_with_logits": { + "dim": "axis", + }, + "tf.nn.softmax_cross_entropy_with_logits_v2": { + "dim": "axis" + }, + "tf.linalg.l2_normalize": { + "dim": 
"axis", + }, + "tf.linalg.norm": { + "keep_dims": "keepdims", + }, + "tf.norm": { + "keep_dims": "keepdims", + }, + "tf.load_file_system_library": { + "library_filename": "library_location", + }, + "tf.count_nonzero": { + "input_tensor": "input", + "keep_dims": "keepdims", + "reduction_indices": "axis", + }, + "tf.math.count_nonzero": { + "input_tensor": "input", + "keep_dims": "keepdims", + "reduction_indices": "axis", + }, + "tf.nn.erosion2d": { + "kernel": "filters", + "rates": "dilations", + }, + "tf.math.l2_normalize": { + "dim": "axis", + }, + "tf.math.log_softmax": { + "dim": "axis", + }, + "tf.math.softmax": { + "dim": "axis" + }, + "tf.nn.l2_normalize": { + "dim": "axis", + }, + "tf.nn.log_softmax": { + "dim": "axis", + }, + "tf.nn.moments": { + "keep_dims": "keepdims", + }, + "tf.nn.pool": { + "dilation_rate": "dilations" + }, + "tf.nn.separable_conv2d": { + "rate": "dilations" + }, + "tf.nn.depthwise_conv2d": { + "rate": "dilations" + }, + "tf.nn.softmax": { + "dim": "axis" + }, + "tf.nn.sufficient_statistics": { + "keep_dims": "keepdims" + }, + "tf.debugging.assert_all_finite": { + "t": "x", + "msg": "message", + }, + "tf.verify_tensor_all_finite": { + "t": "x", + "msg": "message", + }, + "tf.sparse.add": { + "thresh": "threshold", + }, + "tf.sparse_add": { + "thresh": "threshold", + }, + "tf.sparse.concat": { + "concat_dim": "axis", + "expand_nonconcat_dim": "expand_nonconcat_dims", + }, + "tf.sparse_concat": { + "concat_dim": "axis", + "expand_nonconcat_dim": "expand_nonconcat_dims", + }, + "tf.sparse.split": { + "split_dim": "axis", + }, + "tf.sparse_split": { + "split_dim": "axis", + }, + "tf.sparse.reduce_max": { + "reduction_axes": "axis", + "keep_dims": "keepdims", + }, + "tf.sparse_reduce_max": { + "reduction_axes": "axis", + "keep_dims": "keepdims", + }, + "tf.sparse.reduce_sum": { + "reduction_axes": "axis", + "keep_dims": "keepdims", + }, + "tf.sparse_reduce_sum": { + "reduction_axes": "axis", + "keep_dims": "keepdims", + }, + 
"tf.nn.max_pool_with_argmax": { + "Targmax": "output_dtype", + }, + "tf.nn.max_pool": { + "value": "input" + }, + "tf.nn.avg_pool": { + "value": "input" + }, + "tf.nn.avg_pool2d": { + "value": "input" + }, + "tf.multinomial": { + "output_dtype": "dtype", + }, + "tf.random.multinomial": { + "output_dtype": "dtype", + }, + "tf.reverse_sequence": { + "seq_dim": "seq_axis", + "batch_dim": "batch_axis", + }, + "tf.nn.batch_norm_with_global_normalization": { + "t": "input", + "m": "mean", + "v": "variance", + }, + "tf.nn.dilation2d": { + "filter": "filters", + "rates": "dilations", + }, + "tf.nn.conv3d": { + "filter": "filters" + }, + "tf.zeros_like": { + "tensor": "input", + }, + "tf.ones_like": { + "tensor": "input", + }, + "tf.nn.conv2d_transpose": { + "value": "input", + "filter": "filters", + }, + "tf.nn.conv3d_transpose": { + "value": "input", + "filter": "filters", + }, + "tf.nn.convolution": { + "filter": "filters", + "dilation_rate": "dilations", + }, + "tf.gfile.Exists": { + "filename": "path", + }, + "tf.gfile.Remove": { + "filename": "path", + }, + "tf.gfile.Stat": { + "filename": "path", + }, + "tf.gfile.Glob": { + "filename": "pattern", + }, + "tf.gfile.MkDir": { + "dirname": "path", + }, + "tf.gfile.MakeDirs": { + "dirname": "path", + }, + "tf.gfile.DeleteRecursively": { + "dirname": "path", + }, + "tf.gfile.IsDirectory": { + "dirname": "path", + }, + "tf.gfile.ListDirectory": { + "dirname": "path", + }, + "tf.gfile.Copy": { + "oldpath": "src", + "newpath": "dst", + }, + "tf.gfile.Rename": { + "oldname": "src", + "newname": "dst", + }, + "tf.gfile.Walk": { + "in_order": "topdown", + }, + "tf.random.stateless_multinomial": { + "output_dtype": "dtype", + }, + "tf.string_to_number": { + "string_tensor": "input", + }, + "tf.strings.to_number": { + "string_tensor": "input", + }, + "tf.string_to_hash_bucket": { + "string_tensor": "input", + }, + "tf.strings.to_hash_bucket": { + "string_tensor": "input", + }, + "tf.reduce_all": { + "reduction_indices": "axis", + 
"keep_dims": "keepdims", + }, + "tf.math.reduce_all": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_any": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_any": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_min": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_min": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_max": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_max": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_sum": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_sum": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_mean": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_mean": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_prod": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_prod": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_logsumexp": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.math.reduce_logsumexp": { + "reduction_indices": "axis", + "keep_dims": "keepdims", + }, + "tf.reduce_join": { + "keep_dims": "keepdims", + "reduction_indices": "axis" + }, + "tf.strings.reduce_join": { + "keep_dims": "keepdims", + "reduction_indices": "axis" + }, + "tf.squeeze": { + "squeeze_dims": "axis", + }, + "tf.nn.weighted_moments": { + "keep_dims": "keepdims" + }, + "tf.nn.conv1d": { + "value": "input", + "use_cudnn_on_gpu": None, + }, + "tf.nn.conv2d": { + "filter": "filters", + "use_cudnn_on_gpu": None, + }, + "tf.nn.conv2d_backprop_input": { + "use_cudnn_on_gpu": None, + "input_sizes": "output_shape", + "out_backprop": "input", + "filter": "filters", + }, + "tf.contrib.summary.audio": { + "tensor": "data", + "family": 
None, + }, + "tf.contrib.summary.create_file_writer": { + "name": None, + }, + "tf.contrib.summary.generic": { + "name": "tag", + "tensor": "data", + "family": None, + }, + "tf.contrib.summary.histogram": { + "tensor": "data", + "family": None, + }, + "tf.contrib.summary.image": { + "tensor": "data", + "bad_color": None, + "max_images": "max_outputs", + "family": None, + }, + "tf.contrib.summary.scalar": { + "tensor": "data", + "family": None, + }, + "tf.nn.weighted_cross_entropy_with_logits": { + "targets": "labels", + }, + "tf.decode_raw": { + "bytes": "input_bytes", + }, + "tf.io.decode_raw": { + "bytes": "input_bytes", + }, + "tf.contrib.framework.load_variable": { + "checkpoint_dir": "ckpt_dir_or_file", + } + } + all_renames_v2.add_contrib_direct_import_support( + self.function_keyword_renames) + + # Mapping from function to the new name of the function + # Add additional renames not in renames_v2.py to all_renames_v2.py. + self.symbol_renames = all_renames_v2.symbol_renames + self.import_rename = import_rename + if self.import_rename: + self.import_renames = { + "tensorflow": + ast_edits.ImportRename( + "tensorflow.compat.v2", + excluded_prefixes=[ + "tensorflow.contrib", "tensorflow.flags", + "tensorflow.compat.v1", "tensorflow.compat.v2", + "tensorflow.google" + ], + ) + } + else: + self.import_renames = {} + + # Variables that should be changed to functions. + self.change_to_function = {} + + # pylint: disable=line-too-long + # This list contains names of functions that had their arguments reordered. 
+ # After modifying this list, run the following to update reorders_v2.py: + # bazel run tensorflow/tools/compatibility/update:generate_v2_reorders_map + # pylint: enable=line-too-long + self.reordered_function_names = { + "tf.io.serialize_sparse", + "tf.io.serialize_many_sparse", + "tf.argmax", + "tf.argmin", + "tf.batch_to_space", + "tf.cond", + "tf.nn.space_to_batch", + "tf.boolean_mask", + "tf.convert_to_tensor", + "tf.nn.conv1d", + "tf.nn.conv2d", + "tf.nn.conv2d_backprop_input", + "tf.nn.ctc_beam_search_decoder", + "tf.nn.moments", + "tf.nn.convolution", + "tf.nn.crelu", + "tf.nn.weighted_moments", + "tf.nn.pool", + "tf.nn.separable_conv2d", + "tf.nn.depthwise_conv2d", + "tf.multinomial", + "tf.random.multinomial", + "tf.pad", + "tf.quantize_v2", + "tf.feature_column.categorical_column_with_vocabulary_file", + "tf.shape", + "tf.size", + # TODO(b/129398290) + # "tf.string_split", + "tf.random.poisson", + "tf.sparse.add", + "tf.sparse_add", + "tf.sparse.concat", + "tf.sparse_concat", + "tf.sparse.segment_mean", + "tf.sparse.segment_sqrt_n", + "tf.sparse.segment_sum", + "tf.sparse_matmul", + "tf.sparse.reduce_max", + "tf.sparse_reduce_max", + "tf.io.decode_csv", + "tf.strings.length", + "tf.strings.reduce_join", + "tf.strings.substr", + "tf.substr", + "tf.transpose", + "tf.tuple", + "tf.parse_example", + "tf.parse_single_example", + "tf.io.parse_example", + "tf.io.parse_single_example", + "tf.while_loop", + "tf.reduce_all", + "tf.math.reduce_all", + "tf.reduce_any", + "tf.math.reduce_any", + "tf.reduce_min", + "tf.math.reduce_min", + "tf.reduce_max", + "tf.math.reduce_max", + "tf.reduce_sum", + "tf.math.reduce_sum", + "tf.reduce_mean", + "tf.math.reduce_mean", + "tf.reduce_prod", + "tf.math.reduce_prod", + "tf.reduce_logsumexp", + "tf.math.reduce_logsumexp", + "tf.reduce_join", + "tf.confusion_matrix", + "tf.math.confusion_matrix", + "tf.math.in_top_k", + "tf.nn.depth_to_space", + "tf.nn.embedding_lookup", + "tf.nn.embedding_lookup_sparse", + "tf.nn.in_top_k", + 
"tf.nn.space_to_depth", + "tf.test.assert_equal_graph_def", + "tf.linalg.norm", + "tf.norm", + "tf.reverse_sequence", + "tf.sparse_split", + # tf.nn.softmax_cross_entropy_with_logits *must* be called with + # keyword arguments. Add keyword arguments in rare case when they + # are not specified. + "tf.nn.softmax_cross_entropy_with_logits", + "tf.nn.fractional_avg_pool", + "tf.nn.fractional_max_pool", + "tf.image.sample_distorted_bounding_box", + "tf.gradients", + "tf.hessians", + "tf.nn.max_pool", + "tf.nn.avg_pool", + "tf.initializers.uniform_unit_scaling", + "tf.uniform_unit_scaling_initializer", + "tf.data.experimental.TensorStructure", + "tf.data.experimental.SparseTensorStructure", + "tf.data.experimental.RaggedTensorStructure", + "tf.data.experimental.TensorArrayStructure", + "tf.debugging.assert_all_finite", + "tf.gather_nd", + } + + # Manual mapping of function names to be reordered to their list of argument + # names, in order. Only use this if argument names cannot be autodetected, + # e.g. if the functions are in contrib. + self.manual_function_reorders = { + "tf.contrib.summary.audio": [ + "name", "tensor", "sample_rate", "max_outputs", "family", "step"], + "tf.contrib.summary.create_file_writer": [ + "logdir", "max_queue", "flush_millis", "filename_suffix", "name"], + "tf.contrib.summary.generic": [ + "name", "tensor", "metadata", "family", "step"], + "tf.contrib.summary.histogram": [ + "name", "tensor", "family", "step"], + "tf.contrib.summary.image": [ + "name", "tensor", "bad_color", "max_images", "family", "step"], + "tf.contrib.summary.scalar": [ + "name", "tensor", "family", "step"], + } + # Functions that were reordered should be changed to the new keyword args + # for safety, if positional arguments are used. If you have reversed the + # positional arguments yourself, this could do the wrong thing. 
+ self.function_reorders = dict(reorders_v2.reorders) + self.function_reorders.update(self.manual_function_reorders) + + decay_function_comment = ( + ast_edits.INFO, + "To use learning rate decay schedules with TensorFlow 2.0, switch to " + "the schedules in `tf.keras.optimizers.schedules`.\n" + ) + + assert_return_type_comment = ( + ast_edits.INFO, + " has been changed to return None, the " + "data argument has been removed, and arguments have been reordered." + "\nThe calls have been converted to compat.v1 for safety (even though " + " they may already have been correct)." + ) + + assert_rank_comment = ( + ast_edits.INFO, + " has been changed to return None, and" + " the data and summarize arguments have been removed." + "\nThe calls have been converted to compat.v1 for safety (even though " + " they may already have been correct)." + ) + + contrib_layers_layer_norm_comment = ( + ast_edits.WARNING, + "(Manual edit required) `tf.contrib.layers.layer_norm` has been " + "deprecated, and its implementation has been integrated with " + "`tf.keras.layers.LayerNormalization` in TensorFlow 2.0. " + "Note that, the default value of `epsilon` is changed to `1e-3` in the " + "new API from `1e-12`, and this may introduce numerical differences. " + "Please check the new API and use that instead." + ) + + initializers_no_dtype_comment = ( + ast_edits.INFO, "Initializers no longer have the " + "dtype argument in the constructor or partition_info argument in the " + "__call__ method.\nThe calls have been converted to compat.v1 for " + "safety (even though they may already have been correct).") + + metrics_comment = ( + ast_edits.INFO, + "tf.metrics have been replaced with object oriented versions in" + " TF 2.0 and after. The metric function calls have been converted to " + "compat.v1 for backward compatibility. 
Please update these calls to " + "the TF 2.0 versions.") + + losses_comment = ( + ast_edits.INFO, + "tf.losses have been replaced with object oriented versions in" + " TF 2.0 and after. The loss function calls have been converted to " + "compat.v1 for backward compatibility. Please update these calls to " + "the TF 2.0 versions.") + + # This could be done with a _rename_if_arg_not_found_transformer + deprecate_partition_strategy_comment = ( + ast_edits.WARNING, + "`partition_strategy` has been removed from . " + " The 'div' strategy will be used by default.") + + # make change instead + uniform_unit_scaling_initializer_comment = ( + ast_edits.ERROR, + "uniform_unit_scaling_initializer has been removed. Please use" + " tf.initializers.variance_scaling instead with distribution=uniform " + "to get equivalent behaviour.") + + summary_api_comment = ( + ast_edits.INFO, + "The TF 1.x summary API cannot be automatically migrated to TF 2.0, so " + "symbols have been converted to tf.compat.v1.summary.* and must be " + "migrated manually. Typical usage will only require changes to the " + "summary writing logic, not to individual calls like scalar(). " + "For examples of the new summary API, see the Effective TF 2.0 " + "migration document or check the TF 2.0 TensorBoard tutorials.") + + contrib_summary_comment = ( + ast_edits.WARNING, + "tf.contrib.summary.* functions have been migrated best-effort to " + "tf.compat.v2.summary.* equivalents where possible, but the resulting " + "code is not guaranteed to work, so please check carefully. For more " + "information about the new summary API, see the Effective TF 2.0 " + "migration document or check the updated TensorBoard tutorials.") + + contrib_summary_family_arg_comment = ( + ast_edits.WARNING, + " replacement does not accept a 'family' argument; " + "instead regular name scoping should be used. 
This call site specifies " + "a family argument that has been removed on conversion, so the emitted " + "tag names may be incorrect without manual editing.") + + contrib_create_file_writer_comment = ( + ast_edits.WARNING, + "tf.contrib.summary.create_file_writer() has been ported to the new " + "tf.compat.v2.summary.create_file_writer(), which no longer re-uses " + "existing event files for the same logdir; instead it always opens a " + "new writer/file. The python writer objects must be re-used explicitly " + "if the reusing behavior is desired.") + + contrib_summary_record_every_n_comment = ( + ast_edits.ERROR, + "(Manual edit required) " + "tf.contrib.summary.record_summaries_every_n_global_steps(n, step) " + "should be replaced by a call to tf.compat.v2.summary.record_if() with " + "the argument `lambda: tf.math.equal(0, global_step % n)` (or in graph " + "mode, the lambda body can be used directly). If no global step was " + "passed, instead use tf.compat.v1.train.get_or_create_global_step().") + + contrib_summary_graph_comment = ( + ast_edits.ERROR, + "(Manual edit required) tf.contrib.summary.graph() has no direct " + "equivalent in TF 2.0 because manual graph construction has been " + "superseded by use of tf.function. To log tf.function execution graphs " + "to the summary writer, use the new tf.compat.v2.summary.trace_* " + "functions instead.") + + contrib_summary_import_event_comment = ( + ast_edits.ERROR, + "(Manual edit required) tf.contrib.summary.import_event() has no " + "direct equivalent in TF 2.0. For a similar experimental feature, try " + "tf.compat.v2.summary.experimental.write_raw_pb() which also accepts " + "serialized summary protocol buffer input, but for tf.Summary " + "protobufs rather than tf.Events.") + + keras_default_save_format_comment = ( + ast_edits.WARNING, + "(This warning is only applicable if the code saves a tf.Keras model) " + "Keras model.save now saves to the Tensorflow SavedModel format by " + "default, instead of HDF5. 
To continue saving to HDF5, add the " + "argument save_format='h5' to the save() function.") + + distribute_strategy_api_changes = ( + "If you're using the strategy with a " + "custom training loop, note the following changes in methods: " + "make_dataset_iterator->experimental_distribute_dataset, " + "experimental_make_numpy_iterator->experimental_make_numpy_dataset, " + "extended.call_for_each_replica->run, " + "reduce requires an axis argument, " + "unwrap->experimental_local_results " + "experimental_initialize and experimental_finalize no longer needed ") + + contrib_mirrored_strategy_warning = ( + ast_edits.ERROR, + "(Manual edit required) tf.contrib.distribute.MirroredStrategy has " + "been migrated to tf.distribute.MirroredStrategy. Things to note: " + "Constructor arguments have changed. If you are using " + "MirroredStrategy with Keras training framework, the input provided to " + "`model.fit` will be assumed to have global batch size and split " + "across the replicas. " + distribute_strategy_api_changes) + + core_mirrored_strategy_warning = ( + ast_edits.WARNING, + "(Manual edit may be required) tf.distribute.MirroredStrategy API has " + "changed. " + distribute_strategy_api_changes) + + contrib_one_device_strategy_warning = ( + ast_edits.ERROR, + "(Manual edit required) tf.contrib.distribute.OneDeviceStrategy has " + "been migrated to tf.distribute.OneDeviceStrategy. " + + distribute_strategy_api_changes) + + contrib_tpu_strategy_warning = ( + ast_edits.ERROR, + "(Manual edit required) tf.contrib.distribute.TPUStrategy has " + "been migrated to tf.distribute.TPUStrategy. Note the " + "slight changes in constructor. " + distribute_strategy_api_changes) + + contrib_collective_strategy_warning = ( + ast_edits.ERROR, + "(Manual edit required) " + "tf.contrib.distribute.CollectiveAllReduceStrategy has " + "been migrated to " + "tf.distribute.experimental.MultiWorkerMirroredStrategy. Note the " + "changes in constructor. 
" + distribute_strategy_api_changes) + + contrib_ps_strategy_warning = ( + ast_edits.ERROR, "(Manual edit required) " + "tf.contrib.distribute.ParameterServerStrategy has " + "been migrated to " + "tf.compat.v1.distribute.experimental.ParameterServerStrategy (multi " + "machine) and tf.distribute.experimental.CentralStorageStrategy (one " + "machine). Note the changes in constructors. " + + distribute_strategy_api_changes) + + keras_experimental_export_comment = ( + ast_edits.WARNING, + "tf.keras.experimental.export_saved_model and " + "tf.keras.experimental.load_from_saved_model have been deprecated." + "Please use model.save(path, save_format='tf') " + "(or alternatively tf.keras.models.save_model), and " + "tf.keras.models.load_model(path) instead.") + + saved_model_load_warning = ( + ast_edits.WARNING, + "tf.saved_model.load works differently in 2.0 compared to 1.0. See " + "migration information in the documentation of " + "tf.compat.v1.saved_model.load." + "\nThe calls have been converted to compat.v1.") + + # Function warnings. placeholder inside warnings will be + # replaced by function name. + # You can use *. to add items which do not check the FQN, and apply to e.g., + # methods. 
+ self.function_warnings = { + "*.save": + keras_default_save_format_comment, + "tf.assert_equal": + assert_return_type_comment, + "tf.assert_none_equal": + assert_return_type_comment, + "tf.assert_negative": + assert_return_type_comment, + "tf.assert_positive": + assert_return_type_comment, + "tf.assert_non_negative": + assert_return_type_comment, + "tf.assert_non_positive": + assert_return_type_comment, + "tf.assert_near": + assert_return_type_comment, + "tf.assert_less": + assert_return_type_comment, + "tf.assert_less_equal": + assert_return_type_comment, + "tf.assert_greater": + assert_return_type_comment, + "tf.assert_greater_equal": + assert_return_type_comment, + "tf.assert_integer": + assert_return_type_comment, + "tf.assert_type": + assert_return_type_comment, + "tf.assert_scalar": + assert_return_type_comment, + "tf.assert_rank": + assert_rank_comment, + "tf.assert_rank_at_least": + assert_rank_comment, + "tf.assert_rank_in": + assert_rank_comment, + "tf.contrib.layers.layer_norm": + contrib_layers_layer_norm_comment, + "tf.contrib.saved_model.load_keras_model": + keras_experimental_export_comment, + "tf.contrib.saved_model.save_keras_model": + keras_experimental_export_comment, + "tf.contrib.summary.all_summary_ops": + contrib_summary_comment, + "tf.contrib.summary.audio": + contrib_summary_comment, + "tf.contrib.summary.create_file_writer": + contrib_create_file_writer_comment, + "tf.contrib.summary.generic": + contrib_summary_comment, + "tf.contrib.summary.graph": + contrib_summary_graph_comment, + "tf.contrib.summary.histogram": + contrib_summary_comment, + "tf.contrib.summary.import_event": + contrib_summary_import_event_comment, + "tf.contrib.summary.image": + contrib_summary_comment, + "tf.contrib.summary.record_summaries_every_n_global_steps": + contrib_summary_record_every_n_comment, + "tf.contrib.summary.scalar": + contrib_summary_comment, + "tf.debugging.assert_equal": + assert_return_type_comment, + "tf.debugging.assert_greater": + 
assert_return_type_comment, + "tf.debugging.assert_greater_equal": + assert_return_type_comment, + "tf.debugging.assert_integer": + assert_return_type_comment, + "tf.debugging.assert_less": + assert_return_type_comment, + "tf.debugging.assert_less_equal": + assert_return_type_comment, + "tf.debugging.assert_near": + assert_return_type_comment, + "tf.debugging.assert_negative": + assert_return_type_comment, + "tf.debugging.assert_non_negative": + assert_return_type_comment, + "tf.debugging.assert_non_positive": + assert_return_type_comment, + "tf.debugging.assert_none_equal": + assert_return_type_comment, + "tf.debugging.assert_positive": + assert_return_type_comment, + "tf.debugging.assert_type": + assert_return_type_comment, + "tf.debugging.assert_scalar": + assert_return_type_comment, + "tf.debugging.assert_rank": + assert_rank_comment, + "tf.debugging.assert_rank_at_least": + assert_rank_comment, + "tf.debugging.assert_rank_in": + assert_rank_comment, + "tf.train.exponential_decay": + decay_function_comment, + "tf.train.piecewise_constant_decay": + decay_function_comment, + "tf.train.polynomial_decay": + decay_function_comment, + "tf.train.natural_exp_decay": + decay_function_comment, + "tf.train.inverse_time_decay": + decay_function_comment, + "tf.train.cosine_decay": + decay_function_comment, + "tf.train.cosine_decay_restarts": + decay_function_comment, + "tf.train.linear_cosine_decay": + decay_function_comment, + "tf.train.noisy_linear_cosine_decay": + decay_function_comment, + "tf.nn.embedding_lookup": + deprecate_partition_strategy_comment, + "tf.nn.embedding_lookup_sparse": + deprecate_partition_strategy_comment, + "tf.nn.nce_loss": + deprecate_partition_strategy_comment, + "tf.nn.safe_embedding_lookup_sparse": + deprecate_partition_strategy_comment, + "tf.nn.sampled_softmax_loss": + deprecate_partition_strategy_comment, + "tf.keras.experimental.export_saved_model": + keras_experimental_export_comment, + "tf.keras.experimental.load_from_saved_model": + 
keras_experimental_export_comment, + "tf.keras.initializers.Zeros": + initializers_no_dtype_comment, + "tf.keras.initializers.zeros": + initializers_no_dtype_comment, + "tf.keras.initializers.Ones": + initializers_no_dtype_comment, + "tf.keras.initializers.ones": + initializers_no_dtype_comment, + "tf.keras.initializers.Constant": + initializers_no_dtype_comment, + "tf.keras.initializers.constant": + initializers_no_dtype_comment, + "tf.keras.initializers.VarianceScaling": + initializers_no_dtype_comment, + "tf.keras.initializers.Orthogonal": + initializers_no_dtype_comment, + "tf.keras.initializers.orthogonal": + initializers_no_dtype_comment, + "tf.keras.initializers.Identity": + initializers_no_dtype_comment, + "tf.keras.initializers.identity": + initializers_no_dtype_comment, + "tf.keras.initializers.glorot_uniform": + initializers_no_dtype_comment, + "tf.keras.initializers.glorot_normal": + initializers_no_dtype_comment, + "tf.initializers.zeros": + initializers_no_dtype_comment, + "tf.zeros_initializer": + initializers_no_dtype_comment, + "tf.initializers.ones": + initializers_no_dtype_comment, + "tf.ones_initializer": + initializers_no_dtype_comment, + "tf.initializers.constant": + initializers_no_dtype_comment, + "tf.constant_initializer": + initializers_no_dtype_comment, + "tf.initializers.random_uniform": + initializers_no_dtype_comment, + "tf.random_uniform_initializer": + initializers_no_dtype_comment, + "tf.initializers.random_normal": + initializers_no_dtype_comment, + "tf.random_normal_initializer": + initializers_no_dtype_comment, + "tf.initializers.truncated_normal": + initializers_no_dtype_comment, + "tf.truncated_normal_initializer": + initializers_no_dtype_comment, + "tf.initializers.variance_scaling": + initializers_no_dtype_comment, + "tf.variance_scaling_initializer": + initializers_no_dtype_comment, + "tf.initializers.orthogonal": + initializers_no_dtype_comment, + "tf.orthogonal_initializer": + initializers_no_dtype_comment, + 
"tf.initializers.identity": + initializers_no_dtype_comment, + "tf.glorot_uniform_initializer": + initializers_no_dtype_comment, + "tf.initializers.glorot_uniform": + initializers_no_dtype_comment, + "tf.glorot_normal_initializer": + initializers_no_dtype_comment, + "tf.initializers.glorot_normal": + initializers_no_dtype_comment, + "tf.losses.absolute_difference": + losses_comment, + "tf.losses.add_loss": + losses_comment, + "tf.losses.compute_weighted_loss": + losses_comment, + "tf.losses.cosine_distance": + losses_comment, + "tf.losses.get_losses": + losses_comment, + "tf.losses.get_regularization_loss": + losses_comment, + "tf.losses.get_regularization_losses": + losses_comment, + "tf.losses.get_total_loss": + losses_comment, + "tf.losses.hinge_loss": + losses_comment, + "tf.losses.huber_loss": + losses_comment, + "tf.losses.log_loss": + losses_comment, + "tf.losses.mean_pairwise_squared_error": + losses_comment, + "tf.losses.mean_squared_error": + losses_comment, + "tf.losses.sigmoid_cross_entropy": + losses_comment, + "tf.losses.softmax_cross_entropy": + losses_comment, + "tf.losses.sparse_softmax_cross_entropy": + losses_comment, + "tf.metrics.accuracy": + metrics_comment, + "tf.metrics.auc": + metrics_comment, + "tf.metrics.average_precision_at_k": + metrics_comment, + "tf.metrics.false_negatives": + metrics_comment, + "tf.metrics.false_negatives_at_thresholds": + metrics_comment, + "tf.metrics.false_positives": + metrics_comment, + "tf.metrics.false_positives_at_thresholds": + metrics_comment, + "tf.metrics.mean": + metrics_comment, + "tf.metrics.mean_absolute_error": + metrics_comment, + "tf.metrics.mean_cosine_distance": + metrics_comment, + "tf.metrics.mean_iou": + metrics_comment, + "tf.metrics.mean_per_class_accuracy": + metrics_comment, + "tf.metrics.mean_relative_error": + metrics_comment, + "tf.metrics.mean_squared_error": + metrics_comment, + "tf.metrics.mean_tensor": + metrics_comment, + "tf.metrics.percentage_below": + metrics_comment, + 
"tf.metrics.precision": + metrics_comment, + "tf.metrics.precision_at_k": + metrics_comment, + "tf.metrics.precision_at_thresholds": + metrics_comment, + "tf.metrics.precision_at_top_k": + metrics_comment, + "tf.metrics.recall": + metrics_comment, + "tf.metrics.recall_at_k": + metrics_comment, + "tf.metrics.recall_at_thresholds": + metrics_comment, + "tf.metrics.recall_at_top_k": + metrics_comment, + "tf.metrics.root_mean_squared_error": + metrics_comment, + "tf.metrics.sensitivity_at_specificity": + metrics_comment, + "tf.metrics.sparse_average_precision_at_k": + metrics_comment, + "tf.metrics.sparse_precision_at_k": + metrics_comment, + "tf.metrics.specificity_at_sensitivity": + metrics_comment, + "tf.metrics.true_negatives": + metrics_comment, + "tf.metrics.true_negatives_at_thresholds": + metrics_comment, + "tf.metrics.true_positives": + metrics_comment, + "tf.metrics.true_positives_at_thresholds": + metrics_comment, + "tf.get_variable": + (ast_edits.WARNING, + " returns ResourceVariables by default in 2.0, " + "which have well-defined semantics and are stricter about shapes. " + "You can disable this behavior by passing use_resource=False, or " + "by calling tf.compat.v1.disable_resource_variables()."), + "tf.pywrap_tensorflow": + (ast_edits.ERROR, + " cannot be converted automatically. 
" + "`tf.pywrap_tensorflow` will not be distributed with " + "TensorFlow 2.0, please consider an alternative in public " + "TensorFlow APIs."), + "tf.contrib.distribute.MirroredStrategy": + contrib_mirrored_strategy_warning, + "tf.distribute.MirroredStrategy": + core_mirrored_strategy_warning, + "tf.contrib.distribute.OneDeviceStrategy": + contrib_one_device_strategy_warning, + "tf.contrib.distribute.TPUStrategy": + contrib_tpu_strategy_warning, + "tf.contrib.distribute.CollectiveAllReduceStrategy": + contrib_collective_strategy_warning, + "tf.contrib.distribute.ParameterServerStrategy": + contrib_ps_strategy_warning, + "tf.summary.FileWriter": summary_api_comment, + "tf.summary.FileWriterCache": summary_api_comment, + "tf.summary.Summary": summary_api_comment, + "tf.summary.audio": summary_api_comment, + "tf.summary.histogram": summary_api_comment, + "tf.summary.image": summary_api_comment, + "tf.summary.merge": summary_api_comment, + "tf.summary.merge_all": summary_api_comment, + "tf.summary.scalar": summary_api_comment, + "tf.summary.tensor_summary": summary_api_comment, + "tf.summary.text": summary_api_comment, + "tf.saved_model.load": saved_model_load_warning, + "tf.saved_model.loader.load": saved_model_load_warning, + } + all_renames_v2.add_contrib_direct_import_support(self.function_warnings) + + for symbol, replacement in all_renames_v2.addons_symbol_mappings.items(): + warning = ( + ast_edits.WARNING, ( + "(Manual edit required) `{}` has been migrated to `{}` in " + "TensorFlow Addons. The API spec may have changed during the " + "migration. Please see https://github.com/tensorflow/addons " + "for more info.").format(symbol, replacement)) + self.function_warnings[symbol] = warning + + # Warnings that are emitted only if a specific arg is found. 
+ self.function_arg_warnings = { + "tf.nn.conv1d": { + ("use_cudnn_on_gpu", 4): + (ast_edits.WARNING, + "use_cudnn_on_gpu has been removed, behavior is now equivalent" + "to setting it to True."), + }, + "tf.nn.conv2d": { + ("use_cudnn_on_gpu", 4): + (ast_edits.WARNING, + "use_cudnn_on_gpu has been removed, behavior is now equivalent" + "to setting it to True."), + }, + "tf.nn.conv2d_backprop_filter": { + ("use_cudnn_on_gpu", 5): + (ast_edits.WARNING, + "use_cudnn_on_gpu has been removed, behavior is now equivalent" + "to setting it to True."), + }, + "tf.nn.conv2d_backprop_input": { + ("use_cudnn_on_gpu", 5): + (ast_edits.WARNING, + "use_cudnn_on_gpu has been removed, behavior is now equivalent" + "to setting it to True."), + }, + "tf.gradients": { + ("colocate_gradients_with_ops", 4): + (ast_edits.INFO, "tf.gradients no longer takes " + "'colocate_gradients_with_ops' argument, it behaves as if it " + "was set to True."), + }, + "tf.hessians": { + ("colocate_gradients_with_ops", 3): + (ast_edits.INFO, "tf.hessians no longer takes " + "'colocate_gradients_with_ops' argument, it behaves as if it " + "was set to True."), + }, + "*.minimize": { + ("colocate_gradients_with_ops", 5): + (ast_edits.INFO, "Optimizer.minimize no longer takes " + "'colocate_gradients_with_ops' argument, it behaves as if it " + "was set to True."), + }, + "*.compute_gradients": { + ("colocate_gradients_with_ops", 4): + (ast_edits.INFO, "Optimizer.compute_gradients no " + "longer takes 'colocate_gradients_with_ops' argument, it " + "behaves as if it was set to True."), + }, + "tf.cond": { + ("strict", 3): + (ast_edits.WARNING, + "tf.cond no longer takes 'strict' argument, it behaves as " + "if was set to True.") + }, + "tf.contrib.summary.audio": { + ("family", 4): contrib_summary_family_arg_comment, + }, + "tf.contrib.summary.create_file_writer": { + ("name", 4): + (ast_edits.WARNING, + "tf.contrib.summary.create_file_writer() no longer supports " + "implicit writer re-use based on shared 
logdirs or resource " + "names; this call site passed a 'name' argument that has been " + "removed. The new tf.compat.v2.summary.create_file_writer() " + "replacement has a 'name' parameter but the semantics are " + "the usual ones to name the op itself and do not control " + "writer re-use; writers must be manually re-used if desired.") + }, + "tf.contrib.summary.generic": { + ("name", 0): ( + ast_edits.WARNING, + "tf.contrib.summary.generic() takes a 'name' argument for the " + "op name that also determines the emitted tag (prefixed by any " + "active name scopes), but tf.compat.v2.summary.write(), which " + "replaces it, separates these into 'tag' and 'name' arguments. " + "The 'name' argument here has been converted to 'tag' to " + "preserve a meaningful tag, but any name scopes will not be " + "reflected in the tag without manual editing."), + ("family", 3): contrib_summary_family_arg_comment, + }, + "tf.contrib.summary.histogram": { + ("family", 2): contrib_summary_family_arg_comment, + }, + "tf.contrib.summary.image": { + ("bad_color", 2): ( + ast_edits.WARNING, + "tf.contrib.summary.image no longer takes the 'bad_color' " + "argument; caller must now preprocess if needed. This call " + "site specifies a bad_color argument so it cannot be converted " + "safely."), + ("family", 4): contrib_summary_family_arg_comment, + }, + "tf.contrib.summary.scalar": { + ("family", 2): contrib_summary_family_arg_comment, + }, + "tf.image.resize": { + ("align_corners", 3): + (ast_edits.WARNING, + "align_corners is not supported by tf.image.resize, the new " + "default transformation is close to what v1 provided. If you " + "require exactly the same transformation as before, use " + "compat.v1.image.resize."), + }, + "tf.image.resize_bilinear": { + ("align_corners", 2): + (ast_edits.WARNING, + "align_corners is not supported by tf.image.resize, the new " + "default transformation is close to what v1 provided. 
If you " + "require exactly the same transformation as before, use " + "compat.v1.image.resize_bilinear."), + }, + "tf.image.resize_area": { + ("align_corners", 2): + (ast_edits.WARNING, + "align_corners is not supported by tf.image.resize, the new " + "default transformation is close to what v1 provided. If you " + "require exactly the same transformation as before, use " + "compat.v1.image.resize_area."), + }, + "tf.image.resize_bicubic": { + ("align_corners", 2): + (ast_edits.WARNING, + "align_corners is not supported by tf.image.resize, the new " + "default transformation is close to what v1 provided. If you " + "require exactly the same transformation as before, use " + "compat.v1.image.resize_bicubic."), + }, + "tf.image.resize_nearest_neighbor": { + ("align_corners", 2): + (ast_edits.WARNING, + "align_corners is not supported by tf.image.resize, the new " + "default transformation is close to what v1 provided. If you " + "require exactly the same transformation as before, use " + "compat.v1.image.resize_nearest_neighbor."), + }, + } + all_renames_v2.add_contrib_direct_import_support(self.function_arg_warnings) + + # pylint: disable=line-too-long + # Specially handled functions + # Each transformer is a callable which will be called with the arguments + # transformer(parent, node, full_name, name, logs) + # Where logs is a list to which (level, line, col, msg) tuples can be + # appended, full_name is the FQN of the function called (or None if that is + # unknown), name is the name of the function called (or None is that is + # unknown). node is an ast.Call node representing this function call, and + # parent is its parent in the AST. + # The function may modify node (but not parent), and must return + # - none, if nothing was modified + # - node, if node was modified in place (make sure to use + # pasta.ast_utils.replace_child to swap out children, otherwise formatting + # may get messy) + # - a replacement for node, if the whole call node was replaced. 
The caller + # will take care of changing parent. + # After modifying this dict, run the following to update reorders_v2.py: + # bazel run tensorflow/tools/compatibility/update:generate_v2_reorders_map + # pylint: enable=line-too-long + self.function_transformers = { + "*.make_initializable_iterator": _iterator_transformer, + "*.make_one_shot_iterator": _iterator_transformer, + "tf.nn.dropout": _dropout_transformer, + "tf.to_bfloat16": _cast_transformer, + "tf.to_complex128": _cast_transformer, + "tf.to_complex64": _cast_transformer, + "tf.to_double": _cast_transformer, + "tf.to_float": _cast_transformer, + "tf.to_int32": _cast_transformer, + "tf.to_int64": _cast_transformer, + "tf.nn.softmax_cross_entropy_with_logits": + _softmax_cross_entropy_with_logits_transformer, + "tf.image.extract_glimpse": _extract_glimpse_transformer, + "tf.image.resize_area": _image_resize_transformer, + "tf.image.resize_bicubic": _image_resize_transformer, + "tf.image.resize_bilinear": _image_resize_transformer, + "tf.image.resize_nearest_neighbor": _image_resize_transformer, + "tf.nn.fractional_avg_pool": _pool_seed_transformer, + "tf.nn.fractional_max_pool": _pool_seed_transformer, + "tf.name_scope": _name_scope_transformer, + # TODO(b/129398290) + # "tf.string_split": _string_split_transformer, + "tf.strings.split": _string_split_rtype_transformer, + "tf.device": functools.partial( + _rename_if_arg_found_transformer, arg_name="device_name", + arg_ok_predicate=_is_ast_str, remove_if_ok=False, + message="tf.device no longer takes functions as an argument. " + "We could not determine that the argument value is a string, so " + "the call was converted to compat.v1."), + "tf.zeros_like": functools.partial( + _rename_if_arg_found_transformer, arg_name="optimize", + arg_ok_predicate=_is_ast_true, remove_if_ok=True, + message="tf.zeros_like no longer takes an optimize argument, and " + "behaves as if optimize=True. 
This call site specifies something " + "other than optimize=True, so it was converted to compat.v1."), + "tf.ones_like": functools.partial( + _rename_if_arg_found_transformer, arg_name="optimize", + arg_ok_predicate=_is_ast_true, remove_if_ok=True, + message="tf.ones_like no longer takes an optimize argument, and " + "behaves as if optimize=True. This call site specifies something " + "other than optimize=True, so it was converted to compat.v1."), + "tf.while_loop": functools.partial( + _rename_if_arg_found_transformer, + arg_name="return_same_structure", + arg_ok_predicate=_is_ast_true, remove_if_ok=True, + message="tf.while_loop no longer takes 'return_same_structure' " + "argument and behaves as if return_same_structure=True. This call " + "site specifies something other than return_same_structure=True, " + "so it was converted to compat.v1."), + "tf.nn.ctc_beam_search_decoder": functools.partial( + _rename_if_arg_found_transformer, + arg_name="merge_repeated", + arg_ok_predicate=_is_ast_false, remove_if_ok=True, + message="tf.nn.ctc_beam_search_decoder no longer takes the " + "'merge_repeated' argument and behaves as if merge_repeated=False. 
" + "This call site specifies something other than " + "merge_repeated=False, so it was converted to compat.v1."), + "tf.nn.dilation2d": functools.partial( + _add_argument_transformer, + arg_name="data_format", + arg_value_ast=ast.Str("NHWC")), + "tf.nn.erosion2d": functools.partial( + _add_argument_transformer, + arg_name="data_format", + arg_value_ast=ast.Str("NHWC")), + "tf.contrib.summary.always_record_summaries": functools.partial( + _add_summary_recording_cond_transformer, cond="True"), + "tf.contrib.summary.audio": _add_summary_step_transformer, + "tf.contrib.summary.generic": _add_summary_step_transformer, + "tf.contrib.summary.histogram": _add_summary_step_transformer, + "tf.contrib.summary.image": _add_summary_step_transformer, + "tf.contrib.summary.never_record_summaries": functools.partial( + _add_summary_recording_cond_transformer, cond="False"), + "tf.contrib.summary.scalar": _add_summary_step_transformer, + "tf.contrib.layers.l1_regularizer": + _contrib_layers_l1_regularizer_transformer, + "tf.contrib.layers.l2_regularizer": + _contrib_layers_l2_regularizer_transformer, + "tf.contrib.layers.xavier_initializer": + _contrib_layers_xavier_initializer_transformer, + "tf.contrib.layers.xavier_initializer_conv2d": + _contrib_layers_xavier_initializer_transformer, + "tf.contrib.layers.variance_scaling_initializer": + _contrib_layers_variance_scaling_initializer_transformer, + "tf.initializers.uniform_unit_scaling": + _add_uniform_scaling_initializer_transformer, + "tf.uniform_unit_scaling_initializer": + _add_uniform_scaling_initializer_transformer, + "slim.l1_regularizer": + _contrib_layers_l1_regularizer_transformer, + "slim.l2_regularizer": + _contrib_layers_l2_regularizer_transformer, + "slim.xavier_initializer": + _contrib_layers_xavier_initializer_transformer, + "slim.xavier_initializer_conv2d": + _contrib_layers_xavier_initializer_transformer, + "slim.variance_scaling_initializer": + _contrib_layers_variance_scaling_initializer_transformer, + 
"tf.keras.models.save_model": functools.partial( + _add_argument_transformer, + arg_name="save_format", + arg_value_ast=ast.Str("h5")), + } + all_renames_v2.add_contrib_direct_import_support(self.function_transformers) + + self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS + + def preprocess(self, root_node, after_compat_v1_upgrade=False): + visitor = ast_edits.PastaAnalyzeVisitor(TFAPIImportAnalysisSpec()) + visitor.visit(root_node) + detections = set(visitor.results) + + # Upgrade explicit compat v1 imports if `upgrade_compat_v1_import` is + # enabled. Then preprocess the updated root node. + # We only do this upgrading once, because some forms of the import may + # still cause errors but aren't trivially upgradeable, and we don't want + # to enter an infinite loop. E.g. `from tensorflow.compat import v1, v2`. + if (compat_v1_import in detections and self.upgrade_compat_v1_import and + not after_compat_v1_upgrade): + CompatV1ImportReplacer().visit(root_node) + return self.preprocess(root_node, after_compat_v1_upgrade=True) + + # If we have detected the presence of imports of specific TF versions, + # We want to modify the update spec to check only module deprecations + # and skip all other conversions. 
+ if detections: + self.function_handle = {} + self.function_reorders = {} + self.function_keyword_renames = {} + self.symbol_renames = {} + self.function_warnings = {} + self.change_to_function = {} + self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS + self.function_transformers = {} + self.import_renames = {} + return root_node, visitor.log, visitor.warnings_and_errors + + def clear_preprocessing(self): + self.__init__(import_rename=self.import_rename, + upgrade_compat_v1_import=self.upgrade_compat_v1_import) + + +def _is_ast_str(node): + """Determine whether this node represents a string.""" + allowed_types = [ast.Str] + if hasattr(ast, "Bytes"): + allowed_types += [ast.Bytes] + if hasattr(ast, "JoinedStr"): + allowed_types += [ast.JoinedStr] + if hasattr(ast, "FormattedValue"): + allowed_types += [ast.FormattedValue] + return isinstance(node, allowed_types) + + +def _is_ast_true(node): + if hasattr(ast, "NameConstant"): + return isinstance(node, ast.NameConstant) and node.value is True + else: + return isinstance(node, ast.Name) and node.id == "True" + + +def _is_ast_false(node): + if hasattr(ast, "NameConstant"): + return isinstance(node, ast.NameConstant) and node.value is False + else: + return isinstance(node, ast.Name) and node.id == "False" + + +# Lots of unused arguments below, since these are called in a standard manner. +# pylint: disable=unused-argument + + +def _rename_if_arg_found_transformer(parent, node, full_name, name, logs, + arg_name=None, + arg_ok_predicate=None, + remove_if_ok=False, + message=None): + """Replaces the given call with tf.compat.v1 if the given arg is found. + + This requires the function to be called with all named args, so for using + this transformer, the function should also be added to renames. + + If the arg is not found, the call site is left alone. + + If the arg is found, and if arg_ok_predicate is given, it is called with + the ast Expression representing the argument value found. 
If it returns + True, the function is left alone. + + If the arg is found, arg_ok_predicate is not None and returns ok, and + remove_if_ok is True, the argument is removed from the call. + + Otherwise, `compat.v1` is inserted between tf and the function name. + + Args: + parent: Parent of node. + node: ast.Call node to maybe modify. + full_name: full name of function to modify + name: name of function to modify + logs: list of logs to append to + arg_name: name of the argument to look for + arg_ok_predicate: predicate callable with the ast of the argument value, + returns whether the argument value is allowed. + remove_if_ok: remove the argument if present and ok as determined by + arg_ok_predicate. + message: message to print if a non-ok arg is found (and hence, the function + is renamed to its compat.v1 version). + + Returns: + node, if it was modified, else None. + """ + # Check whether arg is there. + arg_present, arg_value = ast_edits.get_arg_value(node, arg_name) + if not arg_present: + return + + # Check whether arg is problematic (and if not, maybe remove it). + if arg_ok_predicate and arg_ok_predicate(arg_value): + if remove_if_ok: + for i, kw in enumerate(node.keywords): + if kw.arg == arg_name: + node.keywords.pop(i) + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Removed argument %s for function %s" % ( + arg_name, full_name or name))) + break + return node + else: + return + + # All conditions met, insert v1 and log what we did. + # We must have a full name, so the func is an attribute. + new_name = full_name.replace("tf.", "tf.compat.v1.", 1) + node.func = ast_edits.full_name_node(new_name) + logs.append(( + ast_edits.INFO, node.lineno, node.col_offset, + "Renaming %s to %s because argument %s is present. 
%s" % + (full_name, new_name, arg_name, message if message is not None else "") + )) + return node + + +def _add_argument_transformer(parent, node, full_name, name, logs, + arg_name, arg_value_ast): + """Adds an argument (as a final kwarg arg_name=arg_value_ast).""" + node.keywords.append(ast.keyword(arg=arg_name, value=arg_value_ast)) + logs.append(( + ast_edits.INFO, node.lineno, node.col_offset, + "Adding argument '%s' to call to %s." % (pasta.dump(node.keywords[-1]), + full_name or name) + )) + return node + + +def _iterator_transformer(parent, node, full_name, name, logs): + """Transform iterator methods to compat function calls.""" + # First, check that node.func.value is not already something we like + # (tf.compat.v1.data), or something which is handled in the rename + # (tf.data). This transformer only handles the method call to function call + # conversion. + if full_name and (full_name.startswith("tf.compat.v1.data") or + full_name.startswith("tf.data")): + return + + # This should never happen, since we're only called for Attribute nodes. + if not isinstance(node.func, ast.Attribute): + return + + # Transform from x.f(y) to tf.compat.v1.data.f(x, y) + # Fortunately, node.func.value should already have valid position info + node.args = [node.func.value] + node.args + node.func.value = ast_edits.full_name_node("tf.compat.v1.data") + + logs.append((ast_edits.WARNING, node.lineno, node.col_offset, + "Changing dataset.%s() to tf.compat.v1.data.%s(dataset). " + "Please check this transformation.\n" % (name, name))) + + return node + + +def _dropout_transformer(parent, node, full_name, name, logs): + """Replace keep_prob with 1-rate.""" + def _replace_keep_prob_node(parent, old_value): + """Replaces old_value with 1-(old_value).""" + one = ast.Num(n=1) + one.lineno = 0 + one.col_offset = 0 + new_value = ast.BinOp(left=one, op=ast.Sub(), + right=old_value) + # This copies the prefix and suffix on old_value to new_value. 
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    # Put parentheses around keep_prob.value (and remove the old prefix/
    # suffix, they should only be around new_value).
    pasta.base.formatting.set(old_value, "prefix", "(")
    pasta.base.formatting.set(old_value, "suffix", ")")

  # Check if we have a keep_prob keyword arg
  for keep_prob in node.keywords:
    if keep_prob.arg == "keep_prob":
      logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                   "Changing keep_prob arg of tf.nn.dropout to rate\n"))
      keep_prob.arg = "rate"
      _replace_keep_prob_node(keep_prob, keep_prob.value)
      return node

  # Maybe it was a positional arg: keep_prob is the second positional
  # argument (node.args[1]) of the v1 tf.nn.dropout signature.
  if len(node.args) < 2:
    # NOTE(review): this ERROR also fires for a one-argument call, not only
    # a zero-argument one; the message text slightly overstates the case.
    logs.append((ast_edits.ERROR, node.lineno, node.col_offset,
                 "tf.nn.dropout called without arguments, so "
                 "automatic fix was disabled. tf.nn.dropout has changed "
                 "the semantics of the second argument."))
  else:
    # Move the positional keep_prob into a rate= kwarg wrapping it as 1-(x).
    rate_arg = ast.keyword(arg="rate", value=node.args[1])
    _replace_keep_prob_node(rate_arg, rate_arg.value)
    node.keywords.append(rate_arg)
    del node.args[1]
    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                 "Changing keep_prob arg of tf.nn.dropout to rate, and "
                 "recomputing value.\n"))

  return node


def _cast_transformer(parent, node, full_name, name, logs):
  """Transforms to_int and to_float to cast(..., dtype=...)."""

  # Find out the dtype to cast to from the function name, e.g. "to_int32"
  # yields "int32" (name[3:] strips the "to_" prefix).
  dtype_str = name[3:]
  # Special cases where the full dtype is not given
  if dtype_str == "float":
    dtype_str = "float32"
  elif dtype_str == "double":
    dtype_str = "float64"
  new_arg = ast.keyword(arg="dtype",
                        value=ast.Attribute(value=ast.Name(id="tf",
                                                           ctx=ast.Load()),
                                            attr=dtype_str, ctx=ast.Load()))
  # Ensures a valid transformation when a positional name arg is given:
  # v1 to_* took (x, name), so a second positional must become name=.
  if len(node.args) == 2:
    name_arg = ast.keyword(arg="name",
                           value=node.args[-1])
    node.args = node.args[:-1]
    node.keywords.append(name_arg)

  # Python3 ast requires the args for the Attribute, but codegen will mess up
  # the arg order if we just set them to 0. The +100 keeps the synthetic
  # dtype= keyword sorted after the call's real tokens when re-printed.
  new_arg.value.lineno = node.lineno
  new_arg.value.col_offset = node.col_offset+100

  node.keywords.append(new_arg)
  if isinstance(node.func, ast.Attribute):
    node.func.attr = "cast"
  else:
    assert isinstance(node.func, ast.Name)
    node.func.id = "cast"

  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
               "Changed %s call to tf.cast(..., dtype=tf.%s)." % (full_name,
                                                                  dtype_str)))
  return node


def _softmax_cross_entropy_with_logits_transformer(
    parent, node, full_name, name, logs):
  """Wrap labels argument with stop_gradients."""
  def _wrap_label(parent, old_value):
    """Wrap labels with tf.stop_gradient; returns False if already wrapped."""
    already_stop_grad = (isinstance(old_value, ast.Call) and
                         isinstance(old_value.func, ast.Attribute) and
                         old_value.func.attr == "stop_gradient" and
                         isinstance(old_value.func.value, ast.Name) and
                         old_value.func.value.id == "tf")
    if already_stop_grad:
      return False
    # ast.Call arity differs across Python versions; fall back to the
    # 5-argument form when the 3-argument form raises.
    try:
      new_value = ast.Call(
          ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
          [old_value], [])
    except TypeError:
      new_value = ast.Call(
          ast.Name(id="tf.stop_gradient", ctx=ast.Load()),
          [old_value], [], None, None)

    # This copies the prefix and suffix on old_value to new_value.
    pasta.ast_utils.replace_child(parent, old_value, new_value)
    ast.copy_location(new_value, old_value)
    return True

  # Check if we have a labels keyword arg
  for karg in node.keywords:
    if karg.arg == "labels":
      if _wrap_label(karg, karg.value):
        logs.append((ast_edits.INFO, node.lineno, node.col_offset,
                     "Changing labels arg of "
                     "tf.nn.softmax_cross_entropy_with_logits to "
                     "tf.stop_gradient(labels). 
Please check this " + "transformation.\n")) + return node + return node + + +def _image_resize_transformer(parent, node, full_name, name, logs): + """Transforms image.resize_* to image.resize(..., method=*, ...).""" + resize_method = name[7:].upper() + new_arg = ast.keyword(arg="method", + value=ast.Attribute( + value=ast.Attribute( + value=ast.Attribute( + value=ast.Name(id="tf", ctx=ast.Load()), + attr="image", ctx=ast.Load()), + attr="ResizeMethod", ctx=ast.Load()), + attr=resize_method, ctx=ast.Load())) + + # Ensures a valid transformation when a positional name arg is given + if len(node.args) == 4: + pos_arg = ast.keyword(arg="preserve_aspect_ratio", + value=node.args[-1]) + node.args = node.args[:-1] + node.keywords.append(pos_arg) + if len(node.args) == 3: + pos_arg = ast.keyword(arg="align_corners", + value=node.args[-1]) + node.args = node.args[:-1] + + new_keywords = [] + for kw in node.keywords: + if kw.arg != "align_corners": + new_keywords.append(kw) + node.keywords = new_keywords + + # Python3 ast requires the args for the Attribute, but codegen will mess up + # the arg order if we just set them to 0. + new_arg.value.lineno = node.lineno + new_arg.value.col_offset = node.col_offset+100 + + node.keywords.append(new_arg) + if isinstance(node.func, ast.Attribute): + node.func.attr = "resize" + else: + assert isinstance(node.func, ast.Name) + node.func.id = "resize" + + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Changed %s call to tf.image.resize(..., " + "method=tf.image.ResizeMethod.%s)." % (full_name, + resize_method))) + return node + + +def _pool_seed_transformer(parent, node, full_name, name, logs): + """Removes seed2 and deterministic, and adds non-zero seed if needed.""" + # This requires that this function uses all kwargs (add to renames!). 
+ seed_arg = None + deterministic = False + modified = False + new_keywords = [] + + for kw in node.keywords: + if sys.version_info[:2] >= (3, 5) and isinstance(kw, ast.Starred): + pass + elif kw.arg == "seed": + seed_arg = kw + elif kw.arg == "seed2" or kw.arg == "deterministic": + lineno = getattr(kw, "lineno", node.lineno) + col_offset = getattr(kw, "col_offset", node.col_offset) + logs.append((ast_edits.INFO, lineno, col_offset, + "Removed argument %s for function %s" % ( + kw.arg, full_name or name))) + if kw.arg == "deterministic": + if not _is_ast_false(kw.value): + deterministic = True + modified = True + continue + new_keywords.append(kw) + + if deterministic: + if seed_arg is None: + new_keywords.append(ast.keyword(arg="seed", value=ast.Num(42))) + logs.add(( + ast_edits.INFO, node.lineno, node.col_offset, + "Adding seed=42 to call to %s since determinism was requested" % ( + full_name or name) + )) + else: + logs.add(( + ast_edits.WARNING, node.lineno, node.col_offset, + "The deterministic argument is deprecated for %s, pass a " + "non-zero seed for determinism. The deterministic argument is " + "present, possibly not False, and the seed is already set. The " + "converter cannot determine whether it is nonzero, please check." + )) + + if modified: + node.keywords = new_keywords + return node + else: + return + + +def _extract_glimpse_transformer(parent, node, full_name, name, logs): + + def _replace_uniform_noise_node(parent, old_value): + """Replaces old_value with 'uniform' or 'gaussian'.""" + uniform = ast.Str(s="uniform") + gaussian = ast.Str(s="gaussian") + new_value = ast.IfExp(body=uniform, test=old_value, orelse=gaussian) + # This copies the prefix and suffix on old_value to new_value. 
+ pasta.ast_utils.replace_child(parent, old_value, new_value) + ast.copy_location(new_value, old_value) + # Put parentheses around noise.value.test (and remove the old prefix/ + # suffix, they should only be around new_value.test), so that: + # "uniform" if (a if b else c) else "gaussian" is valid. + pasta.base.formatting.set(new_value.test, "prefix", "(") + pasta.base.formatting.set(new_value.test, "suffix", ")") + + # Check if we have a uniform_noise keyword arg + for uniform_noise in node.keywords: + if uniform_noise.arg == "uniform_noise": + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Changing uniform_noise arg of tf.image.extract_glimpse " + "to noise, and recomputing value. Please check this " + "transformation.\n")) + uniform_noise.arg = "noise" + value = "uniform" if uniform_noise.value else "gaussian" + _replace_uniform_noise_node(uniform_noise, uniform_noise.value) + return node + + # Since `noise`/`uniform_noise` is optional arg, nothing needs to be + # done if len(node.args) < 5. + if len(node.args) >= 5: + _replace_uniform_noise_node(node, node.args[5]) + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Changing uniform_noise arg of tf.image.extract_glimpse to " + "noise, and recomputing value.\n")) + return node + +def _add_summary_step_transformer(parent, node, full_name, name, logs): + """Adds a step argument to the summary API call if not specified. + + The inserted argument value is tf.compat.v1.train.get_or_create_global_step(). 
+ """ + for keyword_arg in node.keywords: + if keyword_arg.arg == "step": + return node + default_value = "tf.compat.v1.train.get_or_create_global_step()" + ast_value = ast.parse(default_value).body[0].value + del ast_value.lineno # hack to prevent spurious reordering of call args + node.keywords.append(ast.keyword(arg="step", value=ast_value)) + logs.append(( + ast_edits.WARNING, node.lineno, node.col_offset, + "Summary API writing function %s now requires a 'step' argument; " + "inserting default of %s." % (full_name or name, default_value))) + return node + + +def _add_summary_recording_cond_transformer(parent, node, full_name, name, logs, + cond): + """Adds cond argument to tf.contrib.summary.xxx_record_summaries(). + + This is in anticipation of them being renamed to tf.summary.record_if(), which + requires the cond argument. + """ + node.args.append(pasta.parse(cond)) + logs.append(( + ast_edits.INFO, node.lineno, node.col_offset, + "Adding `%s` argument to %s in anticipation of it being renamed to " + "tf.compat.v2.summary.record_if()" % (cond, full_name or name))) + return node + + +def _rename_if_any_arg_found_transformer( + parent, + node, + full_name, + name, + logs, + arg_names=None, + arg_ok_predicate=None, + remove_if_ok=False, + message=None): + """Replaces the given call with tf.compat.v1 if any of the arg_names is found. + + Args: + parent: Parent of node. + node: ast.Call node to modify. + full_name: full name of function to modify. + name: name of function to modify. + logs: list of logs to append to. + arg_names: list of names of the argument to look for. + arg_ok_predicate: predicate callable with the ast of the argument value, + returns whether the argument value is allowed. + remove_if_ok: remove the argument if present and ok as determined by + arg_ok_predicate. + message: message to print if a non-ok arg is found (and hence, the function + is renamed to its compat.v1 version). + + Returns: + node, if it was modified, else None. 
+ """ + for arg_name in arg_names: + rename_node = _rename_if_arg_found_transformer(parent, node, + full_name, name, logs, + arg_name, arg_ok_predicate, + remove_if_ok, message) + node = rename_node if rename_node else node + + return node + + +def _rename_if_arg_found_and_add_loss_reduction_transformer( + parent, + node, + full_name, + name, + logs, + arg_names=None, + arg_ok_predicate=None, + remove_if_ok=False, + message=None): + """Combination of _rename_if_arg_found and _add_loss_reduction transformers. + + Args: + parent: Parent of node. + node: ast.Call node to maybe modify. + full_name: full name of function to modify + name: name of function to modify + logs: list of logs to append to + arg_names: list of names of the argument to look for + arg_ok_predicate: predicate callable with the ast of the argument value, + returns whether the argument value is allowed. + remove_if_ok: remove the argument if present and ok as determined by + arg_ok_predicate. + message: message to print if a non-ok arg is found (and hence, the function + is renamed to its compat.v1 version). + + Returns: + node, if it was modified, else None. + """ + + for arg_name in arg_names: + rename_node = _rename_if_arg_found_transformer(parent, node, full_name, + name, logs, arg_name, + arg_ok_predicate, + remove_if_ok, message) + node = rename_node if rename_node else node + + return node + + +def _add_uniform_scaling_initializer_transformer( + parent, node, full_name, name, logs): + """Updates references to uniform_unit_scaling_initializer. + + Transforms: + tf.uniform_unit_scaling_initializer(factor, seed, dtype) to + tf.compat.v1.keras.initializers.VarianceScaling( + scale=factor, distribution="uniform", seed=seed) + + Note: to apply this transformation, symbol must be added + to reordered_function_names above. 
+ """ + for keyword_arg in node.keywords: + if keyword_arg.arg == "factor": + keyword_arg.arg = "scale" + + distribution_value = "\"uniform\"" + # Parse with pasta instead of ast to avoid emitting a spurious trailing \n. + ast_value = pasta.parse(distribution_value) + node.keywords.append(ast.keyword(arg="distribution", value=ast_value)) + + lineno = node.func.value.lineno + col_offset = node.func.value.col_offset + node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers") + node.func.value.lineno = lineno + node.func.value.col_offset = col_offset + node.func.attr = "VarianceScaling" + return node + + +def _contrib_layers_xavier_initializer_transformer( + parent, node, full_name, name, logs): + """Updates references to contrib.layers.xavier_initializer. + + Transforms: + tf.contrib.layers.xavier_initializer(uniform, seed, dtype) to + tf.compat.v1.keras.initializers.VarianceScaling( + scale=1.0, mode="fan_avg", + distribution=("uniform" if uniform else "truncated_normal"), + seed=seed, dtype=dtype) + + Returns: The new node + """ + def _get_distribution(old_value): + """Returns an AST matching the following: + ("uniform" if (old_value) else "truncated_normal") + """ + dist = pasta.parse("\"uniform\" if old_value else \"truncated_normal\"") + ifexpr = dist.body[0].value + pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value) + + pasta.base.formatting.set(dist, "prefix", "(") + pasta.base.formatting.set(dist, "suffix", ")") + + return dist + + found_distribution = False + for keyword_arg in node.keywords: + if keyword_arg.arg == "uniform": + found_distribution = True + keyword_arg.arg = "distribution" + + old_value = keyword_arg.value + new_value = _get_distribution(keyword_arg.value) + + pasta.ast_utils.replace_child(keyword_arg, old_value, new_value) + + pasta.base.formatting.set(keyword_arg.value, "prefix", "(") + pasta.base.formatting.set(keyword_arg.value, "suffix", ")") + + new_keywords = [] + scale = pasta.parse("1.0") + 
new_keywords.append(ast.keyword(arg="scale", value=scale)) + + mode = pasta.parse("\"fan_avg\"") + new_keywords.append(ast.keyword(arg="mode", value=mode)) + + if len(node.args) >= 1: + found_distribution = True + dist = _get_distribution(node.args[0]) + new_keywords.append(ast.keyword(arg="distribution", value=dist)) + if not found_distribution: + # Parse with pasta instead of ast to avoid emitting a spurious trailing \n. + uniform_dist = pasta.parse("\"uniform\"") + new_keywords.append(ast.keyword(arg="distribution", value=uniform_dist)) + if len(node.args) >= 2: + new_keywords.append(ast.keyword(arg="seed", value=node.args[1])) + if len(node.args) >= 3: + new_keywords.append(ast.keyword(arg="dtype", value=node.args[2])) + node.args = [] + + node.keywords = new_keywords + node.keywords + + lineno = node.func.value.lineno + col_offset = node.func.value.col_offset + node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers") + node.func.value.lineno = lineno + node.func.value.col_offset = col_offset + node.func.attr = "VarianceScaling" + + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Changing tf.contrib.layers xavier initializer" + " to a tf.compat.v1.keras.initializers.VarianceScaling and" + " converting arguments.\n")) + + return node + + +def _contrib_layers_variance_scaling_initializer_transformer( + parent, node, full_name, name, logs): + """Updates references to contrib.layers.variance_scaling_initializer. 
+ + Transforms: + tf.contrib.layers.variance_scaling_initializer( + factor, mode, uniform, seed, dtype + ) to + tf.compat.v1.keras.initializers.VarianceScaling( + scale=factor, mode=mode.lower(), + distribution=("uniform" if uniform else "truncated_normal"), + seed=seed, dtype=dtype) + + And handles the case where no factor is provided and scale needs to be + set to 2.0 to match contrib's default instead of tf.keras.initializer's + default of 1.0 + """ + def _replace_distribution(parent, old_value): + """Replaces old_value: ("uniform" if (old_value) else "truncated_normal")""" + new_value = pasta.parse( + "\"uniform\" if old_value else \"truncated_normal\"") + ifexpr = new_value.body[0].value + pasta.ast_utils.replace_child(ifexpr, ifexpr.test, old_value) + + pasta.ast_utils.replace_child(parent, old_value, new_value) + + pasta.base.formatting.set(new_value, "prefix", "(") + pasta.base.formatting.set(new_value, "suffix", ")") + + def _replace_mode(parent, old_value): + """Replaces old_value with (old_value).lower().""" + new_value = pasta.parse("mode.lower()") + mode = new_value.body[0].value.func + pasta.ast_utils.replace_child(mode, mode.value, old_value) + + # This copies the prefix and suffix on old_value to new_value. + pasta.ast_utils.replace_child(parent, old_value, new_value) + + # Put parentheses around keep_prob.value (and remove the old prefix/ + # suffix, they should only be around new_value). 
+ pasta.base.formatting.set(old_value, "prefix", "(") + pasta.base.formatting.set(old_value, "suffix", ")") + + # Need to keep track of scale because slim & keras + # have different defaults + found_scale = False + for keyword_arg in node.keywords: + if keyword_arg.arg == "factor": + keyword_arg.arg = "scale" + found_scale = True + if keyword_arg.arg == "mode": + _replace_mode(keyword_arg, keyword_arg.value) + if keyword_arg.arg == "uniform": + keyword_arg.arg = "distribution" + _replace_distribution(keyword_arg, keyword_arg.value) + + # Handle any detected positional arguments + if len(node.args) >= 1: + found_scale = True + if len(node.args) >= 2: + _replace_mode(node, node.args[1]) + if len(node.args) >= 3: + _replace_distribution(node, node.args[2]) + + # If no scale was provided, make tf 2.0 use slim's default factor + if not found_scale: + # Parse with pasta instead of ast to avoid emitting a spurious trailing \n. + scale_value = pasta.parse("2.0") + node.keywords = ([ast.keyword(arg="scale", value=scale_value)] + + node.keywords) + + lineno = node.func.value.lineno + col_offset = node.func.value.col_offset + node.func.value = ast_edits.full_name_node("tf.compat.v1.keras.initializers") + node.func.value.lineno = lineno + node.func.value.col_offset = col_offset + node.func.attr = "VarianceScaling" + + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Changing tf.contrib.layers.variance_scaling_initializer" + " to a tf.compat.v1.keras.initializers.VarianceScaling and" + " converting arguments.\n")) + + return node + + +def _contrib_layers_l1_regularizer_transformer( + parent, node, full_name, name, logs): + """Replace slim l1 regularizer with Keras one. + + This entails renaming the 'scale' arg to 'l' and dropping any + provided scope arg. 
+ """ + # Check if we have a scale or scope keyword arg + scope_keyword = None + for keyword in node.keywords: + if keyword.arg == "scale": + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Renaming scale arg of regularizer\n")) + keyword.arg = "l" + if keyword.arg == "scope": + scope_keyword = keyword + + # Remove the scope keyword or arg if it is present + if scope_keyword: + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Dropping scope arg from tf.contrib.layers.l1_regularizer," + " because it is unsupported in tf.keras.regularizers.l1\n")) + node.keywords.remove(scope_keyword) + if len(node.args) > 1: + node.args = node.args[:1] + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Dropping scope arg from tf.contrib.layers.l1_regularizer," + " because it is unsupported in tf.keras.regularizers.l1\n")) + + lineno = node.func.value.lineno + col_offset = node.func.value.col_offset + node.func.value = ast_edits.full_name_node("tf.keras.regularizers") + node.func.value.lineno = lineno + node.func.value.col_offset = col_offset + node.func.attr = "l1" + + return node + + +def _contrib_layers_l2_regularizer_transformer( + parent, node, full_name, name, logs): + """Replace slim l2 regularizer with Keras one, with l=0.5*scale. + + Also drops the scope argument. + """ + def _replace_scale_node(parent, old_value): + """Replaces old_value with 0.5*(old_value).""" + half = ast.Num(n=0.5) + half.lineno = 0 + half.col_offset = 0 + new_value = ast.BinOp(left=half, op=ast.Mult(), + right=old_value) + # This copies the prefix and suffix on old_value to new_value. + pasta.ast_utils.replace_child(parent, old_value, new_value) + + # Put parentheses around scale.value (and remove the old prefix/ + # suffix, they should only be around new_value). 
+ pasta.base.formatting.set(old_value, "prefix", "(") + pasta.base.formatting.set(old_value, "suffix", ")") + + # Check if we have a scale or scope keyword arg + scope_keyword = None + for keyword in node.keywords: + if keyword.arg == "scale": + keyword.arg = "l" + _replace_scale_node(keyword, keyword.value) + if keyword.arg == "scope": + scope_keyword = keyword + + # Maybe it was a positional arg + if len(node.args) >= 1: + _replace_scale_node(node, node.args[0]) + + # Remove the scope keyword or arg if it is present + if scope_keyword: + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Dropping scope arg from tf.contrib.layers.l2_regularizer," + " because it is unsupported in tf.keras.regularizers.l2\n")) + node.keywords.remove(scope_keyword) + if len(node.args) > 1: + node.args = node.args[:1] + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Dropping scope arg from tf.contrib.layers.l2_regularizer," + " because it is unsupported in tf.keras.regularizers.l2\n")) + + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Multiplying scale arg of tf.contrib.layers.l2_regularizer" + " by half to what tf.keras.regularizers.l2 expects.\n")) + + lineno = node.func.value.lineno + col_offset = node.func.value.col_offset + node.func.value = ast_edits.full_name_node("tf.keras.regularizers") + node.func.value.lineno = lineno + node.func.value.col_offset = col_offset + node.func.attr = "l2" + + return node + + +def _name_scope_transformer(parent, node, full_name, name, logs): + """Fix name scope invocation to use 'default_name' and omit 'values' args.""" + + name_found, name = ast_edits.get_arg_value(node, "name", 0) + default_found, default_name = ast_edits.get_arg_value(node, "default_name", 1) + + # If an actual name was given... + if name_found and pasta.dump(name) != "None": + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "`name` passed to `name_scope`. 
Because you may be re-entering" + " an existing scope, it is not safe to convert automatically, " + " the v2 name_scope does not support re-entering scopes by" + " name.\n")) + # Rename to compat.v1 + new_name = "tf.compat.v1.name_scope" + logs.append((ast_edits.INFO, node.func.lineno, node.func.col_offset, + "Renamed %r to %r" % (full_name, new_name))) + new_name_node = ast_edits.full_name_node(new_name, node.func.ctx) + ast.copy_location(new_name_node, node.func) + pasta.ast_utils.replace_child(node, node.func, new_name_node) + return node + + if default_found: + # New name scope doesn't have name, but it has a default name. We use + # name=default_name, and values can be dropped (it's only for + # error reporting and useless outside of graph mode). + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Using default_name as name in call to name_scope.\n")) + # Remove all args other than name + node.args = [] + node.keywords = [ast.keyword(arg="name", value=default_name)] + return node + + logs.append((ast_edits.ERROR, node.lineno, node.col_offset, + "name_scope call with neither name nor default_name cannot be " + "converted properly.")) + + +def _rename_to_compat_v1(node, full_name, logs, reason): + new_name = full_name.replace("tf.", "tf.compat.v1.", 1) + return _rename_func(node, full_name, new_name, logs, reason) + + +def _rename_func(node, full_name, new_name, logs, reason): + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Renamed %r to %r: %s" % (full_name, new_name, reason))) + new_name_node = ast_edits.full_name_node(new_name, node.func.ctx) + ast.copy_location(new_name_node, node.func) + pasta.ast_utils.replace_child(node, node.func, new_name_node) + return node + + +def _string_split_transformer(parent, node, full_name, name, logs): + """Update tf.string_split arguments: skip_empty, sep, result_type, source.""" + # Check the skip_empty parameter: if not false, then use compat.v1. 
+ for i, kw in enumerate(node.keywords): + if kw.arg == "skip_empty": + if _is_ast_false(kw.value): + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "removed argument skip_empty for tf.string_split.")) + node.keywords.pop(i) + break + else: + return _rename_to_compat_v1( + node, full_name, logs, "tf.string_split's replacement no longer " + "takes the skip_empty argument.") + + # Check the sep parameter: if it's definitely an empty string, use + # tf.strings.bytes_split(). If we can't tell, then use compat.v1. + found_sep = False + for i, kw in enumerate(node.keywords): + if kw.arg == "sep": + found_sep = True + if isinstance(kw.value, ast.Str): + if kw.value.s == "": + node = _rename_func( + node, full_name, "tf.strings.bytes_split", logs, + "Splitting bytes is not handled by tf.strings.bytes_split().") + node.keywords.pop(i) + else: + return _rename_to_compat_v1( + node, full_name, logs, + "The semantics for tf.string_split's sep parameter have changed " + "when sep is the empty string; but sep is not a string literal, " + "so we can't tell if it's an empty string.") + if not found_sep: + return _rename_to_compat_v1( + node, full_name, logs, + "The semantics for tf.string_split's sep parameter have changed " + "when sep unspecified: it now splits on all whitespace, not just " + "the space character.") + # Check the result_type parameter + return _string_split_rtype_transformer(parent, node, full_name, name, logs) + + +def _string_split_rtype_transformer(parent, node, full_name, name, logs): + """Update tf.strings.split arguments: result_type, source.""" + # Remove the "result_type" argument. 
+ need_to_sparse = True + for i, kw in enumerate(node.keywords): + if kw.arg == "result_type": + if (isinstance(kw.value, ast.Str) and + kw.value.s in ("RaggedTensor", "SparseTensor")): + logs.append((ast_edits.INFO, node.lineno, node.col_offset, + "Removed argument result_type=%r for function %s" % + (kw.value.s, full_name or name))) + node.keywords.pop(i) + if kw.value.s == "RaggedTensor": + need_to_sparse = False + else: + return _rename_to_compat_v1( + node, full_name, logs, + "%s no longer takes the result_type parameter." % full_name) + break + + for i, kw in enumerate(node.keywords): + if kw.arg == "source": + kw.arg = "input" + + # If necessary, add a call to .to_sparse() to convert the output of + # strings.split from a RaggedTensor to a SparseTensor. + if need_to_sparse: + if (isinstance(parent, ast.Attribute) and parent.attr == "to_sparse"): + return # Prevent infinite recursion (since child nodes are transformed) + logs.append( + (ast_edits.INFO, node.lineno, node.col_offset, + "Adding call to RaggedTensor.to_sparse() to result of strings.split, " + "since it now returns a RaggedTensor.")) + node = ast.Attribute(value=copy.deepcopy(node), attr="to_sparse") + try: + node = ast.Call(node, [], []) + except TypeError: + node = ast.Call(node, [], [], None, None) + + return node diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/tf_upgrade_v2_main.py b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/tf_upgrade_v2_main.py new file mode 100644 index 0000000000000000000000000000000000000000..5ac3bf6e875c7dbbdde6a240dbb28abacd9a32c9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/tools/compatibility/tf_upgrade_v2_main.py @@ -0,0 +1,206 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.x TensorFlow to 2.0 TensorFlow."""

import argparse

from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import ipynb
from tensorflow.tools.compatibility import tf_upgrade_v2
from tensorflow.tools.compatibility import tf_upgrade_v2_safety

# Make straightforward changes to convert to 2.0. In harder cases,
# use compat.v1.
_DEFAULT_MODE = "DEFAULT"

# Convert to use compat.v1.
_SAFETY_MODE = "SAFETY"

# Whether to rename to compat.v2
_IMPORT_RENAME_DEFAULT = False


def _str_to_bool(value):
  """Parses a command-line boolean flag value by its text.

  argparse's `type=bool` is a known trap: `bool("False")` is True because any
  non-empty string is truthy, so `--copyotherfiles False` used to silently
  enable copying. This parser interprets the text instead.

  Args:
    value: the raw command-line string (or an already-parsed bool).

  Returns:
    The parsed boolean.

  Raises:
    argparse.ArgumentTypeError: if the text is not a recognizable boolean.
  """
  if isinstance(value, bool):
    return value
  lowered = value.strip().lower()
  if lowered in ("true", "t", "yes", "y", "1"):
    return True
  if lowered in ("false", "f", "no", "n", "0"):
    return False
  raise argparse.ArgumentTypeError("Expected a boolean value, got %r" % value)


def process_file(in_filename, out_filename, upgrader):
  """Process a file of type `.py` or `.ipynb`.

  Args:
    in_filename: input file path; dispatch is by extension.
    out_filename: output file path.
    upgrader: an ast_edits.ASTCodeUpgrader instance.

  Returns:
    A (files_processed, report_text, errors) tuple.

  Raises:
    NotImplementedError: for any extension other than .py or .ipynb.
  """
  if in_filename.endswith(".py"):
    files_processed, report_text, errors = \
        upgrader.process_file(in_filename, out_filename)
  elif in_filename.endswith(".ipynb"):
    files_processed, report_text, errors = \
        ipynb.process_file(in_filename, out_filename, upgrader)
  else:
    raise NotImplementedError(
        "Currently converter only supports python or ipynb")

  return files_processed, report_text, errors


def main():
  """Command-line entry point: parses flags, runs the upgrade, writes report."""
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description="""Convert a TensorFlow Python file from 1.x to 2.0

Simple usage:
  tf_upgrade_v2.py --infile foo.py --outfile bar.py
  tf_upgrade_v2.py --infile foo.ipynb --outfile bar.ipynb
  tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
  parser.add_argument(
      "--infile",
      dest="input_file",
      help="If converting a single file, the name of the file "
      "to convert")
  parser.add_argument(
      "--outfile",
      dest="output_file",
      help="If converting a single file, the output filename.")
  parser.add_argument(
      "--intree",
      dest="input_tree",
      help="If converting a whole tree of files, the directory "
      "to read from (relative or absolute).")
  parser.add_argument(
      "--outtree",
      dest="output_tree",
      help="If converting a whole tree of files, the output "
      "directory (relative or absolute).")
  parser.add_argument(
      "--copyotherfiles",
      dest="copy_other_files",
      help=("If converting a whole tree of files, whether to "
            "copy the other files."),
      # BUG FIX: was `type=bool`, under which any non-empty string (including
      # "False") parsed as True. _str_to_bool interprets the text properly.
      type=_str_to_bool,
      default=True)
  parser.add_argument(
      "--inplace",
      dest="in_place",
      help=("If converting a set of files, whether to "
            "allow the conversion to be performed on the "
            "input files."),
      action="store_true")
  parser.add_argument(
      "--no_import_rename",
      dest="no_import_rename",
      help=("Not to rename import to compat.v2 explicitly."),
      action="store_true")
  parser.add_argument(
      "--no_upgrade_compat_v1_import",
      dest="no_upgrade_compat_v1_import",
      help=("If specified, don't upgrade explicit imports of "
            "`tensorflow.compat.v1 as tf` to the v2 APIs. Otherwise, "
            "explicit imports of the form `tensorflow.compat.v1 as tf` will "
            "be upgraded."),
      action="store_true")
  parser.add_argument(
      "--reportfile",
      dest="report_filename",
      help=("The name of the file where the report log is "
            "stored."
            "(default: %(default)s)"),
      default="report.txt")
  parser.add_argument(
      "--mode",
      dest="mode",
      choices=[_DEFAULT_MODE, _SAFETY_MODE],
      help=("Upgrade script mode. Supported modes:\n"
            "%s: Perform only straightforward conversions to upgrade to "
            "2.0. In more difficult cases, switch to use compat.v1.\n"
            "%s: Keep 1.* code intact and import compat.v1 "
            "module." %
            (_DEFAULT_MODE, _SAFETY_MODE)),
      default=_DEFAULT_MODE)
  parser.add_argument(
      "--print_all",
      dest="print_all",
      help="Print full log to stdout instead of just printing errors",
      action="store_true")
  args = parser.parse_args()

  # Choose the change spec according to the requested mode.
  if args.mode == _SAFETY_MODE:
    change_spec = tf_upgrade_v2_safety.TFAPIChangeSpec()
  else:
    if args.no_import_rename:
      change_spec = tf_upgrade_v2.TFAPIChangeSpec(
          import_rename=False,
          upgrade_compat_v1_import=not args.no_upgrade_compat_v1_import)
    else:
      change_spec = tf_upgrade_v2.TFAPIChangeSpec(
          import_rename=_IMPORT_RENAME_DEFAULT,
          upgrade_compat_v1_import=not args.no_upgrade_compat_v1_import)
  upgrade = ast_edits.ASTCodeUpgrader(change_spec)

  report_text = None
  report_filename = args.report_filename
  files_processed = 0
  if args.input_file:
    if not args.in_place and not args.output_file:
      raise ValueError(
          "--outfile= argument is required when converting a "
          "single file.")
    if args.in_place and args.output_file:
      raise ValueError("--outfile argument is invalid when converting in place")
    output_file = args.input_file if args.in_place else args.output_file
    files_processed, report_text, errors = process_file(
        args.input_file, output_file, upgrade)
    errors = {args.input_file: errors}
    files_processed = 1
  elif args.input_tree:
    if not args.in_place and not args.output_tree:
      raise ValueError(
          "--outtree= argument is required when converting a "
          "file tree.")
    if args.in_place and args.output_tree:
      raise ValueError("--outtree argument is invalid when converting in place")
    output_tree = args.input_tree if args.in_place else args.output_tree
    files_processed, report_text, errors = upgrade.process_tree(
        args.input_tree, output_tree, args.copy_other_files)
  else:
    parser.print_help()
  if report_text:
    num_errors = 0
    report = []
    for f in errors:
      if errors[f]:
        num_errors += len(errors[f])
        report.append("-" * 80 + "\n")
        report.append("File: %s\n" % f)
        report.append("-" * 80 + "\n")
        report.append("\n".join(errors[f]) + "\n")

    report = ("TensorFlow 2.0 Upgrade Script\n"
              "-----------------------------\n"
              "Converted %d files\n" % files_processed +
              "Detected %d issues that require attention" % num_errors + "\n" +
              "-" * 80 + "\n") + "".join(report)
    detailed_report_header = "=" * 80 + "\n"
    detailed_report_header += "Detailed log follows:\n\n"
    detailed_report_header += "=" * 80 + "\n"

    with open(report_filename, "w") as report_file:
      report_file.write(report)
      report_file.write(detailed_report_header)
      report_file.write(report_text)

    if args.print_all:
      print(report)
      print(detailed_report_header)
      print(report_text)
    else:
      print(report)
      print("\nMake sure to read the detailed log %r\n" % report_filename)

if __name__ == "__main__":
  main()
# ==============================================================================
"""Upgrader for Python scripts from 1.* to 2.0 TensorFlow using SAFETY mode."""

from tensorflow.tools.compatibility import all_renames_v2
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import module_deprecations_v2


class TFAPIChangeSpec(ast_edits.APIChangeSpec):
  """List of maps that describe what changed in the API.

  In SAFETY mode all rename/reorder/transform tables are left empty; only
  module deprecations, addons warnings, and the tensorflow -> compat.v1
  import rename are populated, so 1.x code is kept intact.
  """

  def __init__(self):
    # SAFETY mode performs no symbol-level rewrites, so these stay empty.
    self.function_keyword_renames = {}
    self.symbol_renames = {}
    self.change_to_function = {}
    self.function_reorders = {}
    self.function_warnings = {}
    self.function_transformers = {}
    self.module_deprecations = module_deprecations_v2.MODULE_DEPRECATIONS

    ## Inform about the addons mappings
    for symbol, replacement in all_renames_v2.addons_symbol_mappings.items():
      warning = (
          ast_edits.WARNING, (
              "(Manual edit required) `{}` has been migrated to `{}` in "
              "TensorFlow Addons. The API spec may have changed during the "
              "migration. Please see https://github.com/tensorflow/addons "
              "for more info.").format(symbol, replacement))
      self.function_warnings[symbol] = warning

    # List module renames. If changed, please update max_submodule_depth.
    self.import_renames = {
        "tensorflow":
            ast_edits.ImportRename(
                "tensorflow.compat.v1",
                excluded_prefixes=[
                    "tensorflow.contrib", "tensorflow.flags",
                    "tensorflow.compat",
                    "tensorflow.compat.v1", "tensorflow.compat.v2",
                    "tensorflow.google"
                ],
            )
    }
    # Needs to be updated if self.import_renames is changed.
    self.max_submodule_depth = 2
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Run doctests for tensorflow.""" + +import doctest +import re +import textwrap + +import numpy as np + + +class _FloatExtractor(object): + """Class for extracting floats from a string. + + For example: + + >>> text_parts, floats = _FloatExtractor()("Text 1.0 Text") + >>> text_parts + ["Text ", " Text"] + >>> floats + np.array([1.0]) + """ + + # Note: non-capturing groups "(?" are not returned in matched groups, or by + # re.split. + _FLOAT_RE = re.compile( + r""" + ( # Captures the float value. + (?: + [-+]| # Start with a sign is okay anywhere. + (?: # Otherwise: + ^| # Start after the start of string + (?<=[^\w.]) # Not after a word char, or a . + ) + ) + (?: # Digits and exponent - something like: + {digits_dot_maybe_digits}{exponent}?| # "1.0" "1." "1.0e3", "1.e3" + {dot_digits}{exponent}?| # ".1" ".1e3" + {digits}{exponent}| # "1e3" + {digits}(?=j) # "300j" + ) + ) + j? # Optional j for cplx numbers, not captured. + (?= # Only accept the match if + $| # * At the end of the string, or + [^\w.] # * Next char is not a word char or "." + ) + """.format( + # Digits, a "." and optional more digits: "1.1". + digits_dot_maybe_digits=r'(?:[0-9]+\.(?:[0-9]*))', + # A "." with trailing digits ".23" + dot_digits=r'(?:\.[0-9]+)', + # digits: "12" + digits=r'(?:[0-9]+)', + # The exponent: An "e" or "E", optional sign, and at least one digit. 
+ # "e-123", "E+12", "e12" + exponent=r'(?:[eE][-+]?[0-9]+)'), + re.VERBOSE) + + def __call__(self, string): + """Extracts floats from a string. + + >>> text_parts, floats = _FloatExtractor()("Text 1.0 Text") + >>> text_parts + ["Text ", " Text"] + >>> floats + np.array([1.0]) + + Args: + string: the string to extract floats from. + + Returns: + A (string, array) pair, where `string` has each float replaced by "..." + and `array` is a `float32` `numpy.array` containing the extracted floats. + """ + texts = [] + floats = [] + for i, part in enumerate(self._FLOAT_RE.split(string)): + if i % 2 == 0: + texts.append(part) + else: + floats.append(float(part)) + + return texts, np.array(floats) + + +class TfDoctestOutputChecker(doctest.OutputChecker, object): + """Customizes how `want` and `got` are compared, see `check_output`.""" + + def __init__(self, *args, **kwargs): + super(TfDoctestOutputChecker, self).__init__(*args, **kwargs) + self.extract_floats = _FloatExtractor() + self.text_good = None + self.float_size_good = None + + _ADDRESS_RE = re.compile(r'\bat 0x[0-9a-f]*?>') + # TODO(yashkatariya): Add other tensor's string substitutions too. + # tf.RaggedTensor doesn't need one. + _NUMPY_OUTPUT_RE = re.compile(r'', re.DOTALL) + + def _allclose(self, want, got, rtol=1e-3, atol=1e-3): + return np.allclose(want, got, rtol=rtol, atol=atol) + + def _tf_tensor_numpy_output(self, string): + modified_string = self._NUMPY_OUTPUT_RE.sub(r'\1', string) + return modified_string, modified_string != string + + MESSAGE = textwrap.dedent("""\n + ############################################################# + Check the documentation (https://www.tensorflow.org/community/contribute/docs_ref) on how to + write testable docstrings. + #############################################################""") + + def check_output(self, want, got, optionflags): + """Compares the docstring output to the output gotten by running the code. 
+ + Python addresses in the output are replaced with wildcards. + + Float values in the output compared as using `np.allclose`: + + * Float values are extracted from the text and replaced with wildcards. + * The wildcard text is compared to the actual output. + * The float values are compared using `np.allclose`. + + The method returns `True` if both the text comparison and the numeric + comparison are successful. + + The numeric comparison will fail if either: + + * The wrong number of floats are found. + * The float values are not within tolerence. + + Args: + want: The output in the docstring. + got: The output generated after running the snippet. + optionflags: Flags passed to the doctest. + + Returns: + A bool, indicating if the check was successful or not. + """ + + # If the docstring's output is empty and there is some output generated + # after running the snippet, return True. This is because if the user + # doesn't want to display output, respect that over what the doctest wants. + if got and not want: + return True + + if want is None: + want = '' + + if want == got: + return True + + # Replace python's addresses with ellipsis (`...`) since it can change on + # each execution. + want = self._ADDRESS_RE.sub('at ...>', want) + + # Replace tf.Tensor strings with only their numpy field values. + want, want_changed = self._tf_tensor_numpy_output(want) + if want_changed: + got, _ = self._tf_tensor_numpy_output(got) + + # Separate out the floats, and replace `want` with the wild-card version + # "result=7.0" => "result=..." + want_text_parts, self.want_floats = self.extract_floats(want) + # numpy sometimes pads floats in arrays with spaces + # got: [1.2345, 2.3456, 3.0 ] want: [1.2345, 2.3456, 3.0001] + # And "normalize whitespace" only works when there's at least one space, + # so strip them and let the wildcard handle it. + want_text_parts = [part.strip(' ') for part in want_text_parts] + want_text_wild = '...'.join(want_text_parts) + if '....' 
in want_text_wild: + # If a float comes just after a period you'll end up four dots and the + # first three count as the ellipsis. Replace it with three dots. + want_text_wild = re.sub(r'\.\.\.\.+', '...', want_text_wild) + + # Find the floats in the string returned by the test + _, self.got_floats = self.extract_floats(got) + + self.text_good = super(TfDoctestOutputChecker, self).check_output( + want=want_text_wild, got=got, optionflags=optionflags) + if not self.text_good: + return False + + if self.want_floats.size == 0: + # If there are no floats in the "want" string, ignore all the floats in + # the result. "np.array([ ... ])" matches "np.array([ 1.0, 2.0 ])" + return True + + self.float_size_good = (self.want_floats.size == self.got_floats.size) + + if self.float_size_good: + return self._allclose(self.want_floats, self.got_floats) + else: + return False + + def output_difference(self, example, got, optionflags): + got = [got] + + # If the some of the float output is hidden with `...`, `float_size_good` + # will be False. This is because the floats extracted from the string is + # converted into a 1-D numpy array. Hence hidding floats is not allowed + # anymore. + if self.text_good: + if not self.float_size_good: + got.append("\n\nCAUTION: tf_doctest doesn't work if *some* of the " + "*float output* is hidden with a \"...\".") + + got.append(self.MESSAGE) + got = '\n'.join(got) + return (super(TfDoctestOutputChecker, + self).output_difference(example, got, optionflags))