diff --git a/.gitattributes b/.gitattributes
index 09a2135e5bff4c452e2b19c0b5f30004d8e45862..983805859fbc32d503296808baf90aaf24149abb 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -699,3 +699,4 @@ deepseek/lib/python3.10/site-packages/numpy/random/mtrand.cpython-310-x86_64-lin
 deepseek/lib/python3.10/site-packages/numpy/core/__pycache__/fromnumeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 deepseek/lib/python3.10/site-packages/numpy/lib/__pycache__/function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 deepseek/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+deepseek/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
diff --git a/deepseek/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-310.pyc b/deepseek/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9cfe31ecdc4442e35c44083260b3f8ad7b597b4
--- /dev/null
+++ b/deepseek/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_numeric.cpython-310.pyc
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37e00abe340d625cc21f2794e11f2c08e4679f553ca45593e531d017f7cda903
+size 118193
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Azores b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Azores
new file mode 100644
index 0000000000000000000000000000000000000000..dd2c235bf9c4cc3c2ec4c2725a7cd5deac128fea
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Azores differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda
new file mode 100644
index 0000000000000000000000000000000000000000..527524ed295aba41b9a0448ffd7993c489a2cb99
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Bermuda differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary
new file mode 100644
index 0000000000000000000000000000000000000000..f3192156ff043a529461aa9004a8de9dda326f7d
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Canary differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde
new file mode 100644
index 0000000000000000000000000000000000000000..0d0d31a2f092d03f8512ed9c34f36a3f3f21209b
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Cape_Verde differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe
new file mode 100644
index 0000000000000000000000000000000000000000..4dab7ef0859c244b916d61b7489d7371881e0ca2
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faeroe differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe
new file mode 100644
index 0000000000000000000000000000000000000000..4dab7ef0859c244b916d61b7489d7371881e0ca2
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Faroe differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Madeira b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Madeira
new file mode 100644
index 0000000000000000000000000000000000000000..6725a0ffc1f7cfc6cad54361ca91500e087e2b64
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/Madeira differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/St_Helena b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/St_Helena
new file mode 100644
index 0000000000000000000000000000000000000000..28b32ab2e0b9053f39a91d9f28b6072e41423954
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Atlantic/St_Helena differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/ACT b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/ACT
new file mode 100644
index 0000000000000000000000000000000000000000..0aea4c3d43e504dafabc031d7ca9cbe8db46163c
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/ACT differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide
new file mode 100644
index 0000000000000000000000000000000000000000..f5dedca59e2b220f7395c73f60ff26e610373e8b
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Adelaide differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Brisbane b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Brisbane
new file mode 100644
index 0000000000000000000000000000000000000000..7ff9949ffa93e44835ab133998b89e440094f909
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Brisbane differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Currie b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Currie
new file mode 100644
index 0000000000000000000000000000000000000000..3adb8e1bf7c6ec51f1c100538799271d7d7a6e6f
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Currie differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Darwin b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Darwin
new file mode 100644
index 0000000000000000000000000000000000000000..74a30879bc6180d588a706451226cb4c95faf79d
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Darwin differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Eucla b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Eucla
new file mode 100644
index 0000000000000000000000000000000000000000..1551e96cbc3de5565356954b61aac3c4388e90db
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Eucla differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Hobart b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Hobart
new file mode 100644
index 0000000000000000000000000000000000000000..3adb8e1bf7c6ec51f1c100538799271d7d7a6e6f
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Hobart differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI
new file mode 100644
index 0000000000000000000000000000000000000000..069a95ad686c1139e2ff2b9ce94dc5ef5bc98c67
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/LHI differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman
new file mode 100644
index 0000000000000000000000000000000000000000..4ee1825abfe65887069dcbd10bcf786d50ba0702
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Lindeman differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Perth b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Perth
new file mode 100644
index 0000000000000000000000000000000000000000..f8ddbdf215d34b022af11c3d1930dd6ea4dca87e
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Perth differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland
new file mode 100644
index 0000000000000000000000000000000000000000..7ff9949ffa93e44835ab133998b89e440094f909
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Queensland differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Tasmania b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Tasmania
new file mode 100644
index 0000000000000000000000000000000000000000..3adb8e1bf7c6ec51f1c100538799271d7d7a6e6f
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Tasmania differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Victoria b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Victoria
new file mode 100644
index 0000000000000000000000000000000000000000..ee903f4b1fc292bc9cbec7b501a266030ef3510e
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Victoria differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna
new file mode 100644
index 0000000000000000000000000000000000000000..698c76e30e91f568a29daca12993cfacbfdbf83e
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Australia/Yancowinna differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belgrade b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belgrade
new file mode 100644
index 0000000000000000000000000000000000000000..27de456f16ab549627b284a39e2265cbdb4ad8e9
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Belgrade differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Bratislava b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Bratislava
new file mode 100644
index 0000000000000000000000000000000000000000..ce8f433ece44f0b96b18d3b5780730e7f9cad9f5
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Bratislava differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Istanbul b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Istanbul
new file mode 100644
index 0000000000000000000000000000000000000000..7c2336dd80c3c9cbf71cb53d2b2c1f89a65a8ba5
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Istanbul differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Mariehamn b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Mariehamn
new file mode 100644
index 0000000000000000000000000000000000000000..b4f8f9cbb57450549933f83ac90dd56a2ca75344
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Mariehamn differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Moscow b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Moscow
new file mode 100644
index 0000000000000000000000000000000000000000..ddb3f4e99a1030f33b56fad986c8d9c16e59eb32
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Moscow differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Simferopol b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Simferopol
new file mode 100644
index 0000000000000000000000000000000000000000..4bf24de1d9f8ebc410f120aa83d98b7e41d1e6c4
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Simferopol differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vaduz b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vaduz
new file mode 100644
index 0000000000000000000000000000000000000000..ad6cf59281a1046d9dcd045fda521585e3e33e06
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Europe/Vaduz differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Chagos b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Chagos
new file mode 100644
index 0000000000000000000000000000000000000000..a5554816e2928c2bd5d02e032bbeb1e1cb101009
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Chagos differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Cocos b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Cocos
new file mode 100644
index 0000000000000000000000000000000000000000..eef37b42e8a0e7179f8113bea01f4a71d668e8ef
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Cocos differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Comoro b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Comoro
new file mode 100644
index 0000000000000000000000000000000000000000..9dcfc19c56e62b12b730f4335b34479695f273f5
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Comoro differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mahe b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mahe
new file mode 100644
index 0000000000000000000000000000000000000000..b3ac791aef4e73d6d644c40c614f37f15d462cdd
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mahe differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Maldives b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Maldives
new file mode 100644
index 0000000000000000000000000000000000000000..555728b1a0187cc0ac63b8fe45c44bd1e0957918
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Maldives differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mauritius b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mauritius
new file mode 100644
index 0000000000000000000000000000000000000000..212d4b2e2afaed06110a1acff4fdb6bd6103b4ff
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mauritius differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mayotte b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mayotte
new file mode 100644
index 0000000000000000000000000000000000000000..9dcfc19c56e62b12b730f4335b34479695f273f5
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Mayotte differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Reunion b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Reunion
new file mode 100644
index 0000000000000000000000000000000000000000..b3ac791aef4e73d6d644c40c614f37f15d462cdd
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/Indian/Reunion differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Aleutian b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Aleutian
new file mode 100644
index 0000000000000000000000000000000000000000..43236498f681cc06f64ca2afa613880331fe6fbb
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Aleutian differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Arizona b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Arizona
new file mode 100644
index 0000000000000000000000000000000000000000..ab37e845566aa95659b7b85be0051d0c67a7e53a
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Arizona differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Central b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Central
new file mode 100644
index 0000000000000000000000000000000000000000..c6981a06b1d9c26f447518efe265a6454726eae7
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Central differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern
new file mode 100644
index 0000000000000000000000000000000000000000..a8b9ab1992257d721ad627b14f535c3d4b020888
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Eastern differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Hawaii b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Hawaii
new file mode 100644
index 0000000000000000000000000000000000000000..c7cd060159bd22fc5e6f10ac5a2089afb2c19c6a
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Hawaii differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke
new file mode 100644
index 0000000000000000000000000000000000000000..025d132dd48ba978c6fedf86d70173127be49d49
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Indiana-Starke differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Michigan b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Michigan
new file mode 100644
index 0000000000000000000000000000000000000000..e104faa46545ee873295cde34e1d46bccad8647c
Binary files /dev/null and b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Michigan differ
diff --git a/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Pacific b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Pacific
new file mode 100644
index 0000000000000000000000000000000000000000..610e7af5fc13d9784de30d272c7c39d7938873a0
Binary files /dev/null and
b/deepseek/lib/python3.10/site-packages/pytz/zoneinfo/US/Pacific differ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h new file mode 100644 index 0000000000000000000000000000000000000000..1fe60817f8e2d278da49e383ae4420c1cd5cee9b --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Allocator.h @@ -0,0 +1,269 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +namespace c10 { + +// A DataPtr is a unique pointer (with an attached deleter and some +// context for the deleter) to some memory, which also records what +// device is for its data. +// +// nullptr DataPtrs can still have a nontrivial device; this allows +// us to treat zero-size allocations uniformly with non-zero allocations. +// +class C10_API DataPtr { + private: + c10::detail::UniqueVoidPtr ptr_; + Device device_; + + public: + // Choice of CPU here is arbitrary; if there's an "undefined" device + // we could use that too + DataPtr() : ptr_(), device_(DeviceType::CPU) {} + DataPtr(void* data, Device device) : ptr_(data), device_(device) {} + DataPtr(void* data, void* ctx, DeleterFnPtr ctx_deleter, Device device) + : ptr_(data, ctx, ctx_deleter), device_(device) {} + void* operator->() const { + return ptr_.get(); + } + void clear() { + ptr_.clear(); + } + void* get() const { + return ptr_.get(); + } + void* get_context() const { + return ptr_.get_context(); + } + void* release_context() { + return ptr_.release_context(); + } + std::unique_ptr&& move_context() { + return ptr_.move_context(); + } + operator bool() const { + return static_cast(ptr_); + } + template + T* cast_context(DeleterFnPtr expected_deleter) const { + return ptr_.cast_context(expected_deleter); + } + DeleterFnPtr get_deleter() const { + return ptr_.get_deleter(); + } + /** + * Compare the deleter in a DataPtr to expected_deleter. + * If it matches, replace the deleter with new_deleter + * and return true; otherwise, does nothing and returns + * false. + * + * In general, it is not safe to unconditionally set the + * deleter on a DataPtr, because you don't know what + * the deleter is, and thus will have a hard time properly + * disposing of the deleter without storing the original + * deleter (this is difficult to do, because DeleterFnPtr + * is not a closure, and because the context on DataPtr is + * only a single word, you generally don't have enough + * space to store both the original deleter and its context). + * However, in some cases, you know /exactly/ what the deleter + * is, and you have a new deleter that manually wraps + * the old one. In this case, you can safely swap the deleter + * after asserting that the deleters line up. + * + * What are the requirements on new_deleter? It must still + * properly dispose of the void* pointer passed in as its argument, + * where void* is whatever the context of the original deleter + * is. So in general, you expect the new deleter to look something + * like this: + * + * [](void* ptr) { + * some_new_stuff(ptr); + * get_orig_allocator()->raw_deleter(ptr); + * } + * + * Note that it won't work to close over the original + * allocator; you don't have enough space to do that! Also, + * it's unsafe to assume that the passed in pointer in + * question is the memory pointer in question; it might not + * be; be sure to read the source code of the Allocator + * in question to confirm this. 
+ */ + C10_NODISCARD bool compare_exchange_deleter( + DeleterFnPtr expected_deleter, + DeleterFnPtr new_deleter) { + return ptr_.compare_exchange_deleter(expected_deleter, new_deleter); + } + Device device() const { + return device_; + } + // Unsafely mutates the device on a DataPtr. Under normal use, + // you should never actually need to call this function. + // We need this for the implementation of the hack detailed + // in Note [Masquerading as CUDA] + void unsafe_set_device(Device device) { + device_ = device; + } +}; + +// NB: Device is NOT tested for here; a CUDA nullptr is as much a nullptr as a +// CPU nullptr + +inline bool operator==(const DataPtr& dp, std::nullptr_t) noexcept { + return !dp; +} +inline bool operator==(std::nullptr_t, const DataPtr& dp) noexcept { + return !dp; +} +inline bool operator!=(const DataPtr& dp, std::nullptr_t) noexcept { + return dp; +} +inline bool operator!=(std::nullptr_t, const DataPtr& dp) noexcept { + return dp; +} + +// Note [raw_allocate/raw_deallocate and Thrust] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Thrust's support for custom allocators requires us to write something +// like this: +// +// class ThrustAllocator { +// char* allocate(size_t); +// void deallocate(char*, size_t); +// }; +// +// This is not good for our unique_ptr based allocator interface, as +// there is no way to get to the context when we free. +// +// However, in some cases the context is exactly the same as +// the data pointer. In this case, we can support the "raw" +// allocate and deallocate interface. This is what +// raw_deleter signifies. By default, it returns a nullptr, which means that +// the raw interface is not implemented. Be sure to implement it whenever +// possible, or the raw interface will incorrectly reported as unsupported, +// when it is actually possible. + +struct C10_API Allocator { + virtual ~Allocator() = default; + + virtual DataPtr allocate(size_t n) const = 0; + + // If this returns a non nullptr, it means that allocate() + // is guaranteed to return a unique_ptr with this deleter attached; + // it means the rawAllocate and rawDeallocate APIs are safe to use. + // This function MUST always return the same BoundDeleter. + virtual DeleterFnPtr raw_deleter() const { + return nullptr; + } + void* raw_allocate(size_t n) { + auto dptr = allocate(n); + AT_ASSERT(dptr.get() == dptr.get_context()); + return dptr.release_context(); + } + void raw_deallocate(void* ptr) { + auto d = raw_deleter(); + AT_ASSERT(d); + d(ptr); + } +}; + +// This context is used to generate DataPtr which have arbitrary +// std::function deleters associated with them. In some user facing +// functions, we give a (user-friendly) interface for constructing +// tensors from external data which take an arbitrary std::function +// deleter. Grep for InefficientStdFunctionContext to find these +// occurrences. +// +// This context is inefficient because we have to do a dynamic +// allocation InefficientStdFunctionContext, on top of the dynamic +// allocation which is implied by std::function itself. +struct C10_API InefficientStdFunctionContext { + std::unique_ptr> ptr_; + InefficientStdFunctionContext( + std::unique_ptr>&& ptr) + : ptr_(std::move(ptr)) {} + static DataPtr makeDataPtr( + void* ptr, + const std::function& deleter, + Device device); +}; + +/** Set the allocator for DeviceType `t`. The passed in allocator pointer is + * expected to have static lifetime; this function does NOT take ownership + * of the raw pointer. 
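[Editor's note: DataPtr plus compare_exchange_deleter, both added above, support the deleter-wrapping pattern the preceding comment describes. A minimal sketch, not part of the patch, assuming a PyTorch build with the c10 headers on the include path; all function names here are invented for illustration:]

```cpp
#include <c10/core/Allocator.h>
#include <cstdio>
#include <cstdlib>

static void orig_deleter(void* ctx) { std::free(ctx); }

// The new deleter manually wraps the old one, as the comment above requires.
static void logging_deleter(void* ctx) {
  std::printf("freeing %p\n", ctx);
  orig_deleter(ctx);
}

void swap_deleter_demo() {
  void* mem = std::malloc(16);
  // Here context == data, so the deleter receives the memory pointer itself.
  c10::DataPtr dp(mem, mem, &orig_deleter, c10::Device(c10::DeviceType::CPU));
  // Succeeds only if the current deleter is exactly orig_deleter.
  if (dp.compare_exchange_deleter(&orig_deleter, &logging_deleter)) {
    // logging_deleter now runs when dp is destroyed at end of scope.
  }
}
```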
(The reason for this is to prevent existing pointers + * to an allocator of a particular device from being invalidated when + * SetAllocator is called.) + * + * Also note that this is not thread-safe, and we assume this function will + * only be called during initialization. + * + * The 'priority' flag is introduced when we want to overwrite the default + * allocator, since the allocators are set statically. The default priority + * is 0, which means the lowest. Only higher or equal priority can overwrite + * existing ones. + */ +C10_API void SetAllocator(DeviceType t, Allocator* alloc, uint8_t priority = 0); +C10_API Allocator* GetAllocator(const DeviceType& t); + +template +struct AllocatorRegisterer { + explicit AllocatorRegisterer(Allocator* alloc) { + SetAllocator(t, alloc); + } +}; + +#define REGISTER_ALLOCATOR(t, f) \ + namespace { \ + static c10::AllocatorRegisterer g_allocator_d(f); \ + } + +// An interface for reporting thread local memory usage +// per device +struct C10_API MemoryReportingInfoBase : public c10::DebugInfoBase { + MemoryReportingInfoBase(); + ~MemoryReportingInfoBase() override = default; + + /** + * alloc_size corresponds to the size of the ptr. + * + * total_allocated corresponds to total allocated memory. + * + * total_reserved corresponds to total size of memory pool, both used and + * unused, if applicable. + */ + virtual void reportMemoryUsage( + void* ptr, + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + Device device) = 0; + + virtual void reportOutOfMemory( + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + Device device); + + virtual bool memoryProfilingEnabled() const = 0; +}; + +C10_API bool memoryProfilingEnabled(); +C10_API void reportMemoryUsageToProfiler( + void* ptr, + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + Device device); + +C10_API void reportOutOfMemoryToProfiler( + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + Device device); + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Backend.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Backend.h new file mode 100644 index 0000000000000000000000000000000000000000..5f3d07797748c1aacffa21361fa4b3e4332b9452 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Backend.h @@ -0,0 +1,338 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10 { + +/** + * This legacy enum class defines the set of backends supported by old school, + * code generated Type-based ATen. A "backend" in this sense roughly + * corresponds to the cartesian product of (device type, layout), but restricted + * only to combinations which we actually have kernels for. Backend does NOT + * include dtype. + * + * The reason we are sunsetting this enum class is because it doesn't allow for + * open registration; e.g., if you want to add SparseXLA, you'd have to + * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is + * the replacement for Backend which supports open registration. + * + * NB: The concept of 'Backend' here disagrees with the notion of backend + * exposed to users in torch.backends. Backend here is something like "CPU" + * or "SparseCUDA"; backend in torch.backends is something like "MKL" or + * "CUDNN". 
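[Editor's note: for reference, a toy implementation of the Allocator interface added above might look as follows. This is a hedged sketch, not part of the patch; `TrivialCPUAllocator` is an invented name, and the registration is shown only as a comment because it is a global side effect:]

```cpp
#include <c10/core/Allocator.h>
#include <cstdlib>

// Context == data, so the raw_allocate()/raw_deallocate() interface is usable.
struct TrivialCPUAllocator final : c10::Allocator {
  static void deleter(void* ptr) { std::free(ptr); }

  c10::DataPtr allocate(size_t n) const override {
    void* data = (n == 0) ? nullptr : std::malloc(n);
    return {data, data, &deleter, c10::Device(c10::DeviceType::CPU)};
  }

  // A non-null return advertises that the raw interface is supported.
  c10::DeleterFnPtr raw_deleter() const override { return &deleter; }
};

// Hypothetical registration via the REGISTER_ALLOCATOR macro defined above:
// static TrivialCPUAllocator g_trivial_alloc;
// REGISTER_ALLOCATOR(c10::DeviceType::CPU, &g_trivial_alloc);
```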
+ */ +enum class Backend { + CPU, + CUDA, + HIP, + VE, + FPGA, + IPU, + XPU, + SparseCPU, + SparseCUDA, + SparseCsrCPU, + SparseCsrCUDA, + SparseHIP, + SparseVE, + SparseXPU, + ORT, + XLA, + Vulkan, + Metal, + Meta, + QuantizedCPU, + QuantizedCUDA, + QuantizedXPU, + Undefined, + MkldnnCPU, + MPS, + HPU, + Lazy, + MTIA, + PrivateUse1, + NumOptions +}; + +static inline Backend dispatchKeyToBackend(DispatchKey t) { + if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) { + return Backend::CPU; + } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) { + return Backend::CUDA; + } else if (t == DispatchKey::HIP) { + return Backend::HIP; + } else if (t == DispatchKey::VE) { + return Backend::VE; + } else if (t == DispatchKey::FPGA) { + return Backend::FPGA; + } else if (t == DispatchKey::ORT) { + return Backend::ORT; + } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) { + return Backend::XLA; + } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) { + return Backend::Lazy; + } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) { + return Backend::MPS; + } else if (t == DispatchKey::Vulkan) { + return Backend::Vulkan; + } else if (t == DispatchKey::Metal) { + return Backend::Metal; + } else if (t == DispatchKey::Meta) { + return Backend::Meta; + } else if (t == DispatchKey::SparseCPU) { + return Backend::SparseCPU; + } else if (t == DispatchKey::SparseCUDA) { + return Backend::SparseCUDA; + } else if (t == DispatchKey::SparseHIP) { + return Backend::SparseHIP; + } else if (t == DispatchKey::SparseVE) { + return Backend::SparseVE; + } else if (t == DispatchKey::SparseCsrCPU) { + return Backend::SparseCsrCPU; + } else if (t == DispatchKey::SparseCsrCUDA) { + return Backend::SparseCsrCUDA; + } else if (t == DispatchKey::MkldnnCPU) { + return Backend::MkldnnCPU; + } else if (t == DispatchKey::QuantizedCPU) { + return Backend::QuantizedCPU; + } else if (t == DispatchKey::QuantizedCUDA) { + return Backend::QuantizedCUDA; + } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) { + return Backend::IPU; + } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) { + return Backend::XPU; + } else if (t == DispatchKey::SparseXPU) { + return Backend::SparseXPU; + } else if (t == DispatchKey::QuantizedXPU) { + return Backend::QuantizedXPU; + } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) { + return Backend::HPU; + } else if (t == DispatchKey::MTIA) { + return Backend::MTIA; + } else if (t == DispatchKey::PrivateUse1) { + return Backend::PrivateUse1; + } else if (t == DispatchKey::Undefined) { + return Backend::Undefined; + } else { + TORCH_CHECK(false, "Unrecognized tensor type ID: ", t); + } +} + +static inline DispatchKey backendToDispatchKey(Backend b) { + switch (b) { + case Backend::CPU: + return DispatchKey::CPU; + case Backend::CUDA: + return DispatchKey::CUDA; + case Backend::HIP: + return DispatchKey::HIP; + case Backend::VE: + return DispatchKey::VE; + case Backend::FPGA: + return DispatchKey::FPGA; + case Backend::ORT: + return DispatchKey::ORT; + case Backend::XLA: + return DispatchKey::XLA; + case Backend::Lazy: + return DispatchKey::Lazy; + case Backend::IPU: + return DispatchKey::IPU; + case Backend::XPU: + return DispatchKey::XPU; + case Backend::SparseXPU: + return DispatchKey::SparseXPU; + case Backend::SparseCPU: + return DispatchKey::SparseCPU; + case Backend::SparseCUDA: + return DispatchKey::SparseCUDA; + case Backend::SparseHIP: + return DispatchKey::SparseHIP; + case 
Backend::SparseVE: + return DispatchKey::SparseVE; + case Backend::SparseCsrCPU: + return DispatchKey::SparseCsrCPU; + case Backend::SparseCsrCUDA: + return DispatchKey::SparseCsrCUDA; + case Backend::MkldnnCPU: + return DispatchKey::MkldnnCPU; + case Backend::Vulkan: + return DispatchKey::Vulkan; + case Backend::Metal: + return DispatchKey::Metal; + case Backend::Meta: + return DispatchKey::Meta; + case Backend::QuantizedCPU: + return DispatchKey::QuantizedCPU; + case Backend::QuantizedCUDA: + return DispatchKey::QuantizedCUDA; + case Backend::Undefined: + return DispatchKey::Undefined; + case Backend::MPS: + return DispatchKey::MPS; + case Backend::HPU: + return DispatchKey::HPU; + case Backend::MTIA: + return DispatchKey::MTIA; + case Backend::PrivateUse1: + return DispatchKey::PrivateUse1; + default: + throw std::runtime_error("Unknown backend"); + } +} + +static inline DeviceType backendToDeviceType(Backend b) { + switch (b) { + case Backend::CPU: + return DeviceType::CPU; + case Backend::CUDA: + return DeviceType::CUDA; + case Backend::HIP: + return DeviceType::HIP; + case Backend::VE: + return DeviceType::VE; + case Backend::FPGA: + return DeviceType::FPGA; + case Backend::ORT: + return DeviceType::ORT; + case Backend::XLA: + return DeviceType::XLA; + case Backend::Lazy: + return DeviceType::Lazy; + case Backend::SparseCPU: + return DeviceType::CPU; + case Backend::SparseCUDA: + return DeviceType::CUDA; + case Backend::SparseHIP: + return DeviceType::HIP; + case Backend::SparseVE: + return DeviceType::VE; + case Backend::SparseCsrCPU: + return DeviceType::CPU; + case Backend::SparseCsrCUDA: + return DeviceType::CUDA; + case Backend::IPU: + return DeviceType::IPU; + case Backend::XPU: + case Backend::SparseXPU: + case Backend::QuantizedXPU: + return DeviceType::XPU; + case Backend::MkldnnCPU: + case Backend::QuantizedCPU: + return DeviceType::CPU; + case Backend::QuantizedCUDA: + return DeviceType::CUDA; + case Backend::Vulkan: + return DeviceType::Vulkan; + case Backend::Metal: + return DeviceType::Metal; + case Backend::Meta: + return DeviceType::Meta; + case Backend::MPS: + return DeviceType::MPS; + case Backend::HPU: + return DeviceType::HPU; + case Backend::MTIA: + return DeviceType::MTIA; + case Backend::PrivateUse1: + return DeviceType::PrivateUse1; + case Backend::Undefined: + TORCH_CHECK(false, "Undefined backend is not a valid device type"); + default: + TORCH_CHECK(false, "Unknown backend"); + } +} + +// TODO: This probably shouldn't actually be static inline +static inline const char* toString(Backend b) { + switch (b) { + case Backend::CPU: + return "CPU"; + case Backend::CUDA: + return "CUDA"; + case Backend::HIP: + return "HIP"; + case Backend::VE: + return "VE"; + case Backend::FPGA: + return "FPGA"; + case Backend::XPU: + return "XPU"; + case Backend::IPU: + return "IPU"; + case Backend::ORT: + return "ORT"; + case Backend::XLA: + return "XLA"; + case Backend::Lazy: + return "Lazy"; + case Backend::MPS: + return "MPS"; + case Backend::SparseCPU: + return "SparseCPU"; + case Backend::SparseCUDA: + return "SparseCUDA"; + case Backend::SparseHIP: + return "SparseHIP"; + case Backend::SparseVE: + return "SparseVE"; + case Backend::SparseXPU: + return "SparseXPU"; + case Backend::SparseCsrCPU: + return "SparseCsrCPU"; + case Backend::SparseCsrCUDA: + return "SparseCsrCUDA"; + case Backend::MkldnnCPU: + return "MkldnnCPU"; + case Backend::Vulkan: + return "Vulkan"; + case Backend::Metal: + return "Metal"; + case Backend::Meta: + return "Meta"; + case Backend::QuantizedCPU: 
+ return "QuantizedCPU"; + case Backend::QuantizedCUDA: + return "QuantizedCUDA"; + case Backend::QuantizedXPU: + return "QuantizedXPU"; + case Backend::HPU: + return "HPU"; + case Backend::MTIA: + return "MTIA"; + case Backend::PrivateUse1: + return "PrivateUseOne"; + default: + return "UNKNOWN_BACKEND"; + } +} + +static inline bool isSparse(Backend b) { + switch (b) { + case Backend::SparseXPU: + case Backend::SparseCPU: + case Backend::SparseCUDA: + case Backend::SparseHIP: + case Backend::SparseVE: + return true; + default: + return false; + } +} + +static inline bool isSparseCsr(Backend b) { + switch (b) { + case Backend::SparseCsrCPU: + case Backend::SparseCsrCUDA: + return true; + default: + return false; + } +} + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h new file mode 100644 index 0000000000000000000000000000000000000000..14fe876008d0e245127e84888d0d921a3f204667 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h @@ -0,0 +1,57 @@ +#pragma once + +#include +#include +#include + +#include +#include + +// TODO: rename to c10 +C10_DECLARE_bool(caffe2_report_cpu_memory_usage); + +namespace c10 { + +using MemoryDeleter = void (*)(void*); + +// A helper function that is basically doing nothing. +C10_API void NoDelete(void*); + +// A simple struct that is used to report C10's memory allocation, +// deallocation status and out-of-memory events to the profiler +class C10_API ProfiledCPUMemoryReporter { + public: + ProfiledCPUMemoryReporter() = default; + void New(void* ptr, size_t nbytes); + void OutOfMemory(size_t nbytes); + void Delete(void* ptr); + + private: + std::mutex mutex_; + std::unordered_map size_table_; + size_t allocated_ = 0; + size_t log_cnt_ = 0; +}; + +C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter(); + +// Get the CPU Allocator. +C10_API at::Allocator* GetCPUAllocator(); +// Sets the CPU allocator to the given allocator: the caller gives away the +// ownership of the pointer. +C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0); + +// Get the Default CPU Allocator +C10_API at::Allocator* GetDefaultCPUAllocator(); + +// Get the Default Mobile CPU Allocator +C10_API at::Allocator* GetDefaultMobileCPUAllocator(); + +// The CPUCachingAllocator is experimental and might disappear in the future. +// The only place that uses it is in StaticRuntime. +// Set the CPU Caching Allocator +C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0); +// Get the CPU Caching Allocator +C10_API Allocator* GetCPUCachingAllocator(); + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h new file mode 100644 index 0000000000000000000000000000000000000000..6314e3e77082918fa2529d71ecdb9f886dcabc7e --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h @@ -0,0 +1,56 @@ +#pragma once + +#include + +namespace c10 { + +/** + * Represent a function pointer as a C++ type. + * This allows using the function pointer as a type + * in a template and calling it from inside the template + * allows the compiler to inline the call because it + * knows the function pointer at compile time. 
+ * + * Example 1: + * int add(int a, int b) {return a + b;} + * using Add = TORCH_FN_TYPE(add); + * template struct Executor { + * int execute(int a, int b) { + * return Func::func_ptr()(a, b); + * } + * }; + * Executor executor; + * EXPECT_EQ(3, executor.execute(1, 2)); + * + * Example 2: + * int add(int a, int b) {return a + b;} + * template int execute(Func, int a, int b) { + * return Func::func_ptr()(a, b); + * } + * EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2)); + */ +template +struct CompileTimeFunctionPointer final { + static_assert( + guts::is_function_type::value, + "TORCH_FN can only wrap function types."); + using FuncType = FuncType_; + + static constexpr FuncType* func_ptr() { + return func_ptr_; + } +}; + +template +struct is_compile_time_function_pointer : std::false_type {}; +template +struct is_compile_time_function_pointer< + CompileTimeFunctionPointer> : std::true_type {}; + +} // namespace c10 + +#define TORCH_FN_TYPE(func) \ + ::c10::CompileTimeFunctionPointer< \ + std::remove_pointer_t>, \ + func> +#define TORCH_FN(func) TORCH_FN_TYPE(func)() diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h new file mode 100644 index 0000000000000000000000000000000000000000..bfe9af8fd5308a9cf1b9b05968df6f37f92eee69 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h @@ -0,0 +1,195 @@ +#pragma once + +#include + +namespace c10 { + +/// RAII guard that sets a certain default device in its constructor, and +/// changes it back to the device that was originally active upon destruction. +/// +/// The device is always reset to the one that was active at the time of +/// construction of the guard. Even if you `set_device` after construction, the +/// destructor will still reset the device to the one that was active at +/// construction time. +/// +/// This device guard does NOT have an uninitialized state; it is guaranteed +/// to reset a device on exit. If you are in a situation where you *might* +/// want to setup a guard (i.e., are looking for the moral equivalent +/// of optional), see OptionalDeviceGuard. +class DeviceGuard { + public: + /// No default constructor; see Note [Omitted default constructor from RAII] + explicit DeviceGuard() = delete; + + /// Set the current device to the passed Device. + explicit DeviceGuard(Device device) : guard_(device) {} + + /// This constructor is for testing only. + explicit DeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + DeviceGuard(const DeviceGuard&) = delete; + DeviceGuard& operator=(const DeviceGuard&) = delete; + + /// Move is disallowed, as DeviceGuard does not have an uninitialized state, + /// which is required for moves on types with nontrivial destructors. + DeviceGuard(DeviceGuard&& other) = delete; + DeviceGuard& operator=(DeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + /// + /// TODO: The consistency check here is inconsistent with StreamGuard's + /// behavior with set_stream, where a stream on a different device than + /// the original one isn't an error; we just reset the stream and then + /// switch devices. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// This method is for testing only. 
+ void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Sets the device index to the given one. The device type is inferred + /// from the original device type the guard was constructed with. + void set_index(DeviceIndex index) { + guard_.set_index(index); + } + + /// Returns the device that was set at the time the guard was constructed. + Device original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via set_device. + Device current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineDeviceGuard guard_; +}; + +/** + * A OptionalDeviceGuard is an RAII class that sets a device to some value on + * initialization, and resets the device to its original value on destruction. + * Morally, a OptionalDeviceGuard is equivalent to optional, but + * with extra constructors and methods as appropriate. + * + * Besides its obvious use (optionally applying a DeviceGuard), + * OptionalDeviceGuard is often also used for the following idiom: + * + * OptionalDeviceGuard g; + * for (const auto& t : tensors) { + * g.set_device(t.device()); + * do_something_with(t); + * } + * + * This usage is marginally more efficient than constructing a DeviceGuard every + * iteration of the for loop, as it avoids an unnecessary device reset. + * + * Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs + * when you use the nullary constructor, or pass a nullopt to the constructor. + * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the + * original device was and they do not reset on destruction. This is why + * original_device() and current_device() return optional rather than + * Device (as they do in DeviceGuard), and also is why we didn't just + * provide OptionalDeviceGuard by default and hide DeviceGuard from users. + * + * The semantics of an OptionalDeviceGuard are exactly explained by thinking + * of it as an optional. In particular, an initialized + * OptionalDeviceGuard doesn't restore device to its value at construction; it + * restores device to its value *at initialization*. So if you have the + * program: + * + * setDevice(1); + * OptionalDeviceGuard g; + * setDevice(2); + * g.reset_device(Device(DeviceType::CUDA, 3)); // initializes! + * + * On destruction, g will reset device to 2, rather than 1. + * + * An uninitialized OptionalDeviceGuard is distinct from a (initialized) + * DeviceGuard whose original_device_ and current_device_ match, since the + * DeviceGuard will still reset the device to original_device_. + */ +class OptionalDeviceGuard { + public: + /// Create an uninitialized guard. Set the guard later using reset_device. + explicit OptionalDeviceGuard() = default; + + /// Initialize the guard, setting the current device to the passed Device. + explicit OptionalDeviceGuard(Device device) : guard_(device) {} + + /// Initialize the guard if a Device is passed; otherwise leave the + /// guard uninitialized. + explicit OptionalDeviceGuard(optional device) : guard_(device) {} + + /// Constructor for testing only. 
+ explicit OptionalDeviceGuard( + Device device, + const impl::DeviceGuardImplInterface* impl) + : guard_(device, impl) {} + + /// Copy is disallowed + OptionalDeviceGuard(const OptionalDeviceGuard&) = delete; + OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete; + + /// Move is disallowed + /// See Note [Explicit initialization of optional fields] + /// and // Note [Move construction for RAII guards is tricky] + /// for rationale. + OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete; + OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete; + + /// Sets the device to the given one. The specified device must be consistent + /// with the device type originally specified during guard construction. + void reset_device(at::Device device) { + guard_.reset_device(device); + } + + /// For testing only + void reset_device( + at::Device device, + const impl::DeviceGuardImplInterface* impl) { + guard_.reset_device(device, impl); + } + + /// Returns the device that was set at the time the guard was constructed. + optional original_device() const { + return guard_.original_device(); + } + + /// Returns the most recent device that was set using this device guard, + /// either from construction, or via reset_device. + optional current_device() const { + return guard_.current_device(); + } + + private: + impl::InlineOptionalDeviceGuard guard_{}; +}; + +// Note [Whither the DeviceGuard boilerplate] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// Design note: in principle, we could avoid these wrappers using: +// +// using DeviceGuard = impl::InlineDeviceGuard; +// using OptionalDeviceGuard = +// impl::InlineOptionalDeviceGuard; +// +// But the error messages are worse, and our users can't just look at the +// header file to find out what's going on. Furthermore, for specializations +// like CUDAStreamGuard, it can be profitable to replace some interfaces with +// refined types (e.g., return CUDAStream instead of Stream). So, we eat +// the boilerplate and write out the API explicitly. + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h new file mode 100644 index 0000000000000000000000000000000000000000..06eca223dd053ab3a6819056f162efb347f94829 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h @@ -0,0 +1,117 @@ +#pragma once + +// This is directly synchronized with caffe2/proto/caffe2.proto, but +// doesn't require me to figure out how to get Protobuf headers into +// ATen/core (which would require a lot more build system hacking.) +// If you modify me, keep me synchronized with that file. + +#include + +#include +#include + +namespace c10 { + +// These contains all device types that also have a BackendComponent +// and therefore participate in per-backend functionality dispatch keys. +// This is most backends except PrivateUse2 and PrivateUse3 +#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(Meta, extra) \ + _(MTIA, extra) \ + _(PrivateUse1, extra) + +enum class DeviceType : int8_t { + CPU = 0, + CUDA = 1, // CUDA. + MKLDNN = 2, // Reserved for explicit MKLDNN + OPENGL = 3, // OpenGL + OPENCL = 4, // OpenCL + IDEEP = 5, // IDEEP. 
+ HIP = 6, // AMD HIP + FPGA = 7, // FPGA + ORT = 8, // ONNX Runtime / Microsoft + XLA = 9, // XLA / TPU + Vulkan = 10, // Vulkan + Metal = 11, // Metal + XPU = 12, // XPU + MPS = 13, // MPS + Meta = 14, // Meta (tensors with no data) + HPU = 15, // HPU / HABANA + VE = 16, // SX-Aurora / NEC + Lazy = 17, // Lazy Tensors + IPU = 18, // Graphcore IPU + MTIA = 19, // Meta training and inference devices + PrivateUse1 = 20, // PrivateUse1 device + // NB: If you add more devices: + // - Change the implementations of DeviceTypeName and isValidDeviceType + // in DeviceType.cpp + // - Change the number below + COMPILE_TIME_MAX_DEVICE_TYPES = 21, +}; + +constexpr DeviceType kCPU = DeviceType::CPU; +constexpr DeviceType kCUDA = DeviceType::CUDA; +constexpr DeviceType kHIP = DeviceType::HIP; +constexpr DeviceType kFPGA = DeviceType::FPGA; +constexpr DeviceType kORT = DeviceType::ORT; +constexpr DeviceType kXLA = DeviceType::XLA; +constexpr DeviceType kMPS = DeviceType::MPS; +constexpr DeviceType kMeta = DeviceType::Meta; +constexpr DeviceType kVulkan = DeviceType::Vulkan; +constexpr DeviceType kMetal = DeviceType::Metal; +constexpr DeviceType kXPU = DeviceType::XPU; +constexpr DeviceType kHPU = DeviceType::HPU; +constexpr DeviceType kVE = DeviceType::VE; +constexpr DeviceType kLazy = DeviceType::Lazy; +constexpr DeviceType kIPU = DeviceType::IPU; +constexpr DeviceType kMTIA = DeviceType::MTIA; +constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1; + +// define explicit int constant +constexpr int COMPILE_TIME_MAX_DEVICE_TYPES = + static_cast(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES); + +static_assert( + COMPILE_TIME_MAX_DEVICE_TYPES <= 21, + "Hey! You seem to be adding a lot of new DeviceTypes. The intent was " + "for this constant to reflect the actual number of DeviceTypes we support " + "in PyTorch; it's important that this number is not too large as we " + "use this to allocate stack arrays in some places in our code. If you " + "are indeed just adding the 20th device type, feel free to change " + "the check to 32; but if you are adding some sort of extensible device " + "types registration, please be aware that you are affecting code that " + "this number is small. Try auditing uses of this constant."); + +C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false); + +C10_API bool isValidDeviceType(DeviceType d); + +C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type); + +C10_API void register_privateuse1_backend(std::string backend_name); +C10_API std::string get_privateuse1_backend(bool lower_case = true); + +} // namespace c10 + +namespace std { +template <> +struct hash { + std::size_t operator()(c10::DeviceType k) const { + return std::hash()(static_cast(k)); + } +}; +} // namespace std + +namespace torch { +using c10::DeviceType; +} diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h new file mode 100644 index 0000000000000000000000000000000000000000..abc4ab7e9852a6a896a7ddeb307b0c4e51d48ad4 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKey.h @@ -0,0 +1,716 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +// Semantically, each value of BackendComponent identifies a "backend" for our +// dispatch. Some functionalities that we may dispatch to are allowed to +// register different handlers for each backend. 
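[Editor's note: the DeviceType utilities declared in the DeviceType.h hunk above (DeviceTypeName and the PrivateUse1 renaming hooks) can be exercised directly. A brief sketch, not part of the patch; the backend name is chosen here purely for illustration:]

```cpp
#include <c10/core/DeviceType.h>
#include <iostream>

int main() {
  c10::DeviceType d = c10::kCUDA;
  std::cout << c10::DeviceTypeName(d) << "\n";                       // CUDA
  std::cout << c10::DeviceTypeName(d, /*lower_case=*/true) << "\n";  // cuda
  // Out-of-tree backends can claim the PrivateUse1 slot under a custom name:
  c10::register_privateuse1_backend("my_accelerator");
  std::cout << c10::get_privateuse1_backend() << "\n";               // my_accelerator
  return 0;
}
```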
The BackendComponent is then +// used to figure out which backend implementation to dispatch to. + +// In implementation terms, the backend component identifies a specific "bit" in +// a DispatchKeySet. The bits in the DispatchKeySet are split between the bottom +// ~12 "BackendComponent" bits, while the remaining upper bits are assigned to +// functionalities. When we encounter a functionality bit that is known to be +// customizeable per-backend, then we also look at the lower BackendComponent +// bits and take the highest bit to determine which backend's implementation to +// use. + +// WARNING! If you add a new backend component to the end of this list, +// make sure you update PrivateUse3Bit. (But you shouldn't: private use +// keys should have higher precedence than all built-in keys) + +// If you add a new (non-privateuse) backend here, +// make sure to add an Autograd fallthrough kernel +// in aten/src/ATen/core/VariableFallbackKernel.cpp + +#define C10_FORALL_BACKEND_COMPONENTS(_, extra) \ + _(CPU, extra) \ + _(CUDA, extra) \ + _(HIP, extra) \ + _(XLA, extra) \ + _(MPS, extra) \ + _(IPU, extra) \ + _(XPU, extra) \ + _(HPU, extra) \ + _(VE, extra) \ + _(Lazy, extra) \ + _(Meta, extra) \ + _(MTIA, extra) \ + _(PrivateUse1, extra) \ + _(PrivateUse2, extra) \ + _(PrivateUse3, extra) + +// WARNING! If we add a new per-backend functionality key that has higher +// priority than Autograd, then make sure you update EndOfRuntimeBackendKeys + +#define C10_FORALL_FUNCTIONALITY_KEYS(_) \ + _(Dense, ) \ + _(Quantized, Quantized) \ + _(Sparse, Sparse) \ + _(NestedTensor, NestedTensor) \ + _(AutogradFunctionality, Autograd) + +enum class BackendComponent : uint8_t { + + // A "backend" is colloquially used to refer to handlers for dispatch + // which actually implement the numerics of an operation in question. + // + // Due to the nature of the enum, these backends are specified in + // an ordered way, but for most backends this order is not semantically + // meaningful (e.g., it's valid to reorder these backends without changing + // semantics). The only situation when backend ordering is meaningful + // is when the backend participates in multiple dispatch with another + // backend; e.g., CPU and CUDA (cuda must have higher priority). + + // These keys don't correspond to individual kernels. + // Instead, they represent the backends that are allowed to override specific + // pieces of functionality: + // - dense kernels (e.g. DispatchKey::CPU) + // - sparse kernels (e.g. DispatchKey::SparseCPU) + // - quantized kernels (e.g. DispatchKey::QuantizedCPU) + // - autograd kernels (e.g. DispatchKey::AutogradCPU) + // We reserve space in the runtime operator table for this full cross product + // of + // [backends in this enum] x [keys below that are explicitly marked as having + // per-backend functionality] + // + // A meta tensor is a tensor without any data associated with it. (They + // have also colloquially been referred to as tensors on the "null" device). + // A meta tensor can be used to dry run operators without actually doing any + // computation, e.g., add on two meta tensors would give you another meta + // tensor with the output shape and dtype, but wouldn't actually add anything. + + InvalidBit = 0, +#define DEFINE_BACKEND_COMPONENT(n, _) n##Bit, + C10_FORALL_BACKEND_COMPONENTS(DEFINE_BACKEND_COMPONENT, unused) +#undef DEFINE_BACKEND_COMPONENT + + // Define an alias to represent end of backend dispatch keys. 
+ // If you add new backend keys after PrivateUse3, please also update it here. + EndOfBackendKeys = PrivateUse3Bit, +}; + +// Semantically, a dispatch key identifies a possible "level" in our +// dispatch, for which a handler may be registered. Each handler corresponds +// to a type of functionality. +// +// In implementation terms, the dispatch key identifies a specific "bit" in a +// DispatchKeySet. Higher bit indexes get handled by dispatching first (because +// we "count leading zeros" when we extract the highest priority dispatch +// key.) +// +// Note [DispatchKey Classification] +// This enum actually contains several types of keys, which are explained +// in more detail further down: +// (1) non-customizable backends (e.g. FPGA) +// (2) non-customizable functionalities (e.g. Functionalize) +// (3) functionalized that are customizable per backend (e.g. Dense, Sparse, +// AutogradFunctionality) (4) per-backend instances of customizable +// functionalities (e.g. CPU, SparseCPU, AutogradCPU) (5) alias keys (e.g. +// CompositeImplicitAutograd) +// +// Of the categories above, it's important to note: +// (a) which keys are assigned individual bits in a DispatchKeySet +// (b) which keys are assigned individual slots in the runtime operator table +// ("Runtime keys") +// +// (1), (2) and (3) all get their own dedicated bits in the DispatchKeySet. +// (1), (2) and (4) all get their own dedicated slots in the runtime operator +// table. + +// See Note [DispatchKeySet Internal Representation] for more details. +// +// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py +enum class DispatchKey : uint16_t { + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // + // This is not a "real" functionality, but it exists to give us a "nullopt" + // element we can return for cases when a DispatchKeySet contains no elements. + // You can think a more semantically accurate definition of DispatchKey is: + // + // using DispatchKey = optional + // + // and Undefined == nullopt. We didn't actually represent + // it this way because optional would take two + // words, when DispatchKey fits in eight bits. + + Undefined = 0, + + // Define an alias for Undefined to represent CatchAll (long term + // this will get eliminated, but for now it's convenient) + CatchAll = Undefined, + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Functionality Keys ~~~~~~~~~~~~~~~~~~~~~~ // + // Every value in the enum (up to EndOfFunctionalityKeys) + // corresponds to an individual "functionality" that can be dispatched to. + // This is represented in the DispatchKeySet by assigning each of these enum + // values + // to each of the remaining (64 - len(BackendComponent)) bits. + // + // Most of these functionalities have a single handler assigned to them, + // making them "runtime keys". + // That map to a single slot in the runtime operator table. + // + // A few functionalities are allowed to be customizable per backend. + // See [Note: Per-Backend Functionality Dispatch Keys] for details. + + // See [Note: Per-Backend Functionality Dispatch Keys] + Dense, + + // Below are non-extensible backends. + // These are backends that currently don't have their own overrides for + // Autograd/Sparse/Quantized kernels, + // and we therefore don't waste space in the runtime operator table allocating + // space for them. + // If any of these backends ever need to customize, e.g., Autograd, then we'll + // need to add a DispatchKey::*Bit for them. 
+
+  // TODO: put this in BackendComponents
+  FPGA, // Xilinx support lives out of tree at
+        // https://gitlab.com/pytorch-complex/vitis_kernels
+
+  // TODO: put this in BackendComponents
+  // ONNX Runtime, lives out of tree at https://github.com/pytorch/ort and
+  // https://github.com/microsoft/onnxruntime, and is also used to test general
+  // backend/extension machinery in the core. cf:
+  // - test/cpp_extensions/ort_extension.cpp
+  // - test/test_torch.py
+  // - aten/src/ATen/test/extension_backend_test.cpp
+  ORT,
+
+  Vulkan, // TODO: put this in BackendComponents
+  Metal, // TODO: put this in BackendComponents
+
+  // See [Note: Per-Backend Functionality Dispatch Keys]
+  Quantized,
+
+  // This backend is to support custom RNGs; it lets you go
+  // to a different kernel if you pass in a generator that is not a
+  // traditional CPUGeneratorImpl/CUDAGeneratorImpl. To make use of this
+  // key:
+  // 1) set it as a second parameter of at::Generator constructor call in
+  //    the user-defined PRNG class.
+  // 2) use it as a dispatch key while registering custom kernels
+  //    (templatized kernels specialized for user-defined PRNG class)
+  // Intended for out-of-tree use; tested by aten/src/ATen/test/rng_test.cpp
+  CustomRNGKeyId,
+
+  // TODO: Make Mkldnn a functionality key, so we can give it Meta
+  // support
+  // Here are backends which specify more specialized operators
+  // based on the layout of the tensor. Note that the sparse backends
+  // are one case where ordering matters: sparse multi-dispatches with
+  // the corresponding dense tensors, and must be handled before them.
+  MkldnnCPU, // registered at build/aten/src/ATen/RegisterMkldnnCPU.cpp
+             // NB: not to be confused with MKLDNN, which is Caffe2 only
+
+  // See [Note: Per-Backend Functionality Dispatch Keys]
+  Sparse,
+
+  // TODO: Make SparseCsr a functionality key
+  SparseCsrCPU,
+  SparseCsrCUDA,
+
+  NestedTensor,
+
+  // In some situations, it is not immediately obvious what the correct
+  // backend for a function is, because the function in question doesn't
+  // have any "tensor" arguments. In this case, a BackendSelect function
+  // can be registered to implement the custom determination of the
+  // correct backend.
+  BackendSelect,
+
+  Python,
+
+  // Out-of-core key for Fake Tensor in torchdistx.
+  // See https://pytorch.org/torchdistx/latest/fake_tensor.html
+  // TODO: delete this in favor of Python-implemented fake tensor
+  Fake,
+  // See Note [Out-of-tree vmap+grad prototype]. The purpose of this key
+  // is to insert code after the "autograd subsystem" runs, so this key should
+  // be directly after ADInplaceOrView and all of the autograd keys.
+  FuncTorchDynamicLayerBackMode,
+
+  // Alias and mutation removal.
+  // If some backends want to opt into only alias removal or only mutation
+  // removal,
+  // we can consider adding separate keys dedicated to those individual passes.
+  // See Note [Functionalization Pass In Core] for details.
+  Functionalize,
+
+  // The named dispatch key is set for any tensors with named dimensions.
+  // Although we have a dispatch key for named tensors, for historical reasons,
+  // this dispatch key doesn't do any of the substantive functionality for named
+  // tensors (though, hypothetically, it could!) At the moment, it's just
+  // responsible for letting us give good error messages when operations
+  // don't support named tensors.
+  //
+  // NB: If you ever consider moving named tensor functionality into
+  // this dispatch key, note that it might be necessary to add another dispatch
+  // key that triggers before composite operators, in case a composite operator
+  // has named dimension propagation that doesn't match that of its
+  // constituent parts.
+  // TODO: delete this once torchdim lands in functorch
+  Named,
+
+  // The Conjugate dispatch key is set for any tensors that need to perform
+  // conjugation.
+  // This is implemented at a dispatch level right before any backends run.
+  Conjugate,
+
+  // The Negative dispatch key is set for any tensors that need to perform
+  // negation.
+  // This is implemented at a dispatch level right before any backends run.
+  Negative,
+
+  ZeroTensor, // registered at build/aten/src/ATen/RegisterZeroTensor.cpp
+
+  // Note [ADInplaceOrView key]
+  // ADInplaceOrView key is used by inplace or view ops to register a kernel
+  // that does additional setup for future autograd computation.
+  //
+  // 1. For inplace ops this kernel does a version bump.
+  // 2. For view ops this kernel does `as_view` setup, where we properly set up
+  //    DifferentiableViewMeta on the view tensors.
+  //
+  // For other ops it's a fallthrough kernel, since there's no extra
+  // work to do.
+  //
+  // Note [Dream: skip VariableType kernel when requires_grad=false]
+  //
+  // In an ideal world where we can skip the VariableType kernel for inputs
+  // with requires_grad=false, instead of a fallthrough kernel, we'd
+  // register a kernel shown below to all functional ops as well:
+  //   torch::Tensor my_functional_op(...) {
+  //     {
+  //       // Note for every op in VariableType, you need to go through
+  //       // `AutoDispatchBelowADInplaceOrView` guard exactly once to add the
+  //       // key to the TLS excluded set. If you don't go through it at all,
+  //       // inplace/view ops called through `at::` inside your backend
+  //       // kernel will dispatch to ADInplaceOrView kernels and do a lot
+  //       // of extra work.
+  //       at::AutoDispatchBelowADInplaceOrView guard;
+  //       at::redispatch::my_functional_op(...);
+  //     }
+  //   }
+  // But this work is currently blocked since it adds an extra dispatch
+  // for all ops and it's non-trivial overhead at the model level (a few
+  // percent). Thus our current approach takes advantage of the fact that every
+  // kernel goes through the VariableType kernel first, and pulls the
+  // `at::AutoDispatchBelowADInplaceOrView` guard of functional ops
+  // up to the `VariableType` kernel. Thus we only add the extra dispatch
+  // to view/inplace ops to minimize its perf impact on real models.
+  ADInplaceOrView,
+  // Note [Alias Dispatch Key : Autograd]
+  // All backends are oblivious to autograd; autograd is handled as a
+  // layer which happens on top of all backends. It inspects the autograd
+  // metadata of all inputs, determines what autograd metadata should be
+  // constructed by the output, and otherwise defers to the backend to
+  // actually do the numeric computation. Autograd contains
+  // the bulk of this logic.
+
+  // Autograd is now an alias dispatch key which by default maps to all
+  // backend-specific autograd keys.
+  // Backend-specific keys allow backends to override the default kernel
+  // registered to the Autograd key as needed.
+  // For example, XLA wants to define autograd for einsum directly.
+  // Registering a custom autograd implementation at the XLA key won't work
+  // because we process Autograd before XLA. This key has higher priority and
+  // gets processed first. You generally should NOT redispatch after handling
+  // autograd here (since that would result in execution of the Autograd
+  // operator, which you're trying to skip). In AutogradXLA implementations,
+  // you are responsible for handling autograd yourself, or deferring to other
+  // operators which support autograd.
+
+  // Currently we only have backend-specific autograd keys for CPU/CUDA/XLA and
+  // reserved user-defined backends. All other in-tree backends share the
+  // AutogradOther key. We can add a specific autograd key for those backends
+  // upon request.
+  AutogradOther,
+
+  // See [Note: Per-Backend Functionality Dispatch Keys]
+  AutogradFunctionality,
+
+  // NestedTensor is an example of something that isn't a "real backend"
+  // (because it mostly consists of redispatching kernels)
+  // but it would like to override autograd functionality in C++.
+  // We can handle cases like this by adding an extra functionality key
+  // exclusively for handling autograd for NestedTensor.
+  // lives out of tree at
+  // https://github.com/pytorch/nestedtensor
+  AutogradNestedTensor,
+
+  Tracer,
+
+  // TODO: make Autocast a functionality key
+  // Autocasting precedes VariableTypeId, to ensure casts are autograd-exposed
+  // and inputs are saved for backward in the post-autocast type.
+  AutocastCPU,
+  AutocastXPU,
+  AutocastHPU,
+  // Naughtily, AutocastCUDA is also being used for XLA. In the terminal state,
+  // it probably should get its own Autocast key.
+  AutocastCUDA,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  // There are a number of alternative modes which you may want to run before
+  // autograd; for example, error checking, tracing, profiling or vmap. They
+  // go here.
+
+  FuncTorchBatched, // See Note [Out-of-tree vmap+grad prototype]
+  FuncTorchVmapMode, // See Note [Out-of-tree vmap+grad prototype]
+
+  // This is the dispatch key for BatchedTensorImpl, which is used to implement
+  // batching rules for vmap.
+  Batched,
+
+  // When we are inside a vmap, all tensors dispatch on this key.
+  // See Note: [DispatchKey::VmapMode usage] for more details.
+  VmapMode,
+
+  FuncTorchGradWrapper, // See Note [Out-of-tree vmap+grad prototype]
+
+  // Out-of-core key for Deferred Module Initialization in torchdistx.
+  // See https://pytorch.org/torchdistx/latest/deferred_init.html
+  DeferredInit,
+
+  // Used by Python key logic to know the set of tls on entry to the dispatcher.
+  // This kernel assumes it is the top-most non-functorch-related DispatchKey.
+  // If you add a key above, make sure to update the fallback implementation for
+  // this.
+  PythonTLSSnapshot,
+
+  // This key should be at the very top of the dispatcher
+  FuncTorchDynamicLayerFrontMode, // See Note [Out-of-tree vmap+grad prototype]
+
+  // TESTING: This is intended to be a generic testing tensor type id.
+  // Don't use it for anything real; its only acceptable use is within a single
+  // process test. Use it by creating a TensorImpl with this DispatchKey, and
+  // then registering operators to operate on this type id. See
+  // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example.
+  TESTING_ONLY_GenericWrapper,
+
+  // TESTING: This is intended to be a generic testing tensor type id.
+  // Don't use it for anything real; its only acceptable use is within a single
+  // process test. Use it by toggling the mode on and off via
+  // TESTING_ONLY_tls_generic_mode_set_enabled and then registering operators
+  // to operate on this type id. See
+  // aten/src/ATen/core/dispatch/backend_fallback_test.cpp
+  // for a usage example.
+  TESTING_ONLY_GenericMode,
+
+  // This is a bypass that allows you to skip running the C++ dispatcher
+  // entirely.
+  PythonDispatcher,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  EndOfFunctionalityKeys, // End of functionality keys.
+
+// ~~~~~~~~~~~~~~ "Dense" Per-Backend Dispatch keys ~~~~~~~~~~~~~~~~~~~~ //
+// Here are backends which you think of as traditionally specifying
+// how to implement operations on some device.
+
+#define DEFINE_PER_BACKEND_KEYS_FOR_BACKEND(n, prefix) prefix##n,
+
+#define DEFINE_PER_BACKEND_KEYS(fullname, prefix)      \
+  StartOf##fullname##Backends,                         \
+      C10_FORALL_BACKEND_COMPONENTS(                   \
+          DEFINE_PER_BACKEND_KEYS_FOR_BACKEND, prefix) \
+          EndOf##fullname##Backends = prefix##PrivateUse3,
+
+  C10_FORALL_FUNCTIONALITY_KEYS(DEFINE_PER_BACKEND_KEYS)
+
+#undef DEFINE_PER_BACKEND_KEYS
+#undef DEFINE_PER_BACKEND_KEYS_FOR_BACKEND
+
+  EndOfRuntimeBackendKeys = EndOfAutogradFunctionalityBackends,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~ Alias Dispatch Keys ~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  // Note [Alias Dispatch Keys]
+  // Alias dispatch keys are synthetic dispatch keys which map to multiple
+  // runtime dispatch keys. Alias keys have precedence, but they are always
+  // lower precedence than runtime keys. You can register a kernel to an
+  // alias key; the kernel might be populated to the mapped runtime keys
+  // during dispatch table computation.
+  // If a runtime dispatch key has multiple kernels from alias keys, which
+  // kernel wins is decided based on the precedence of alias keys (but runtime
+  // keys always have precedence over alias keys).
+  // Alias keys are never directly called at runtime.
+
+  // See Note [Alias Dispatch Key : Autograd]
+  Autograd,
+  CompositeImplicitAutograd, // registered at
+  // build/aten/src/ATen/RegisterCompositeImplicitAutograd.cpp
+
+  // Note: The alias keyset for FuncTorchBatchedDecomposition is disjoint from
+  // all other alias keysets, and so precedence order doesn't matter.
+  FuncTorchBatchedDecomposition, // registered at
+  // build/aten/src/ATen/RegisterFuncTorchBatchedDecomposition.cpp
+  // Note: The alias keyset for CompositeImplicitAutogradNestedTensor is
+  // disjoint from all other alias keysets.
+  CompositeImplicitAutogradNestedTensor, // registered at
+  // build/aten/src/ATen/RegisterCompositeImplicitAutogradNestedTensor.cpp
+  CompositeExplicitAutograd, // registered at
+  // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
+  // See Note [CompositeExplicitAutogradNonFunctional Key]
+  CompositeExplicitAutogradNonFunctional, // registered at
+  // build/aten/src/ATen/RegisterCompositeExplicitAutograd.cpp
+
+  // Define an alias key to represent end of alias dispatch keys.
+  // If you add new alias keys after Autograd, please also update it here.
+  StartOfAliasKeys = Autograd,
+  EndOfAliasKeys = CompositeExplicitAutogradNonFunctional,
+
+  // ~~~~~~~~~~~~~~~~~~~~~~~~~ BC ALIASES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //
+  // These aliases exist for backwards compatibility reasons; they shouldn't
+  // be used.
+  CPUTensorId = CPU,
+  CUDATensorId = CUDA,
+  DefaultBackend = CompositeExplicitAutograd,
+  PrivateUse1_PreAutograd = AutogradPrivateUse1,
+  PrivateUse2_PreAutograd = AutogradPrivateUse2,
+  PrivateUse3_PreAutograd = AutogradPrivateUse3,
+  Autocast = AutocastCUDA,
+};
+
+// Note [Private use DispatchKey]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Private use tensor IDs are preallocated tensor type IDs for use in user
+// applications. Similar to private use fields in HTTP, they can be used
+// by end users for experimental or private applications, without needing
+// to "standardize" the tensor ID (which would be done by submitting a PR
+// to PyTorch to add your type ID).
+//
+// Private use tensor IDs are appropriate to use if you want to experiment
+// with adding a new tensor type (without having to patch PyTorch first) or
+// have a private, non-distributed application that needs to make use of a
+// new tensor type. Private use tensor IDs are NOT appropriate to use for
+// libraries intended to be distributed to further users: please contact
+// the PyTorch developers to get a type ID registered in this case.
+//
+// We provide two classes of private use tensor IDs: regular DispatchKeys
+// and Autograd DispatchKeys. DispatchKeys serve the role of ordinary "backend"
+// DispatchKeys; if you were adding support for a new type of accelerator, you
+// would use a backend DispatchKey, and ideally automatically reuse
+// AutogradOther definitions already defined in PyTorch. AutogradPrivateUse
+// DispatchKeys serve as "wrapper" DispatchKeys: they are only necessary for
+// tensors that compose multiple internal tensors, and for cases when the
+// built-in autograd formulas for operators are not appropriate.
+
+static_assert(
+    (static_cast<int>(BackendComponent::EndOfBackendKeys) +
+     static_cast<int>(DispatchKey::EndOfFunctionalityKeys)) <= 64,
+    "The BackendComponent and DispatchKey enums (below EndOfFunctionalityKeys)"
+    " both map to backend and functionality bits"
+    " in a 64-bit bitmask; you must have no more than 64 total entries between them");
+
+// Check if a DispatchKey is an alias mapping to other runtime keys.
+constexpr bool isAliasDispatchKey(DispatchKey k) {
+  return k >= DispatchKey::StartOfAliasKeys && k <= DispatchKey::EndOfAliasKeys;
+}
+
+// [Note: Per-Backend Functionality Dispatch Keys]
+// Check if a DispatchKey is a per-backend functionality key.
+// Any functionalities that can be customized per-backend should be added here.
+// These keys correspond to functionalities that can be customized individually
+// per backend. While they only take up one bit in the `DispatchKeySet` bitset,
+// they map to (# backends) slots in the operator table.
+// Each of these keys also has a separate set of "runtime keys" in the dispatch
+// key enum, per backend, which *do* map to the individual operator table slots.
+// For example, the "Sparse" key maps to an individual bit in the
+// DispatchKeySet, while `SparseCPU`, `SparseCUDA`, etc. all map to individual
+// slots in the runtime operator table.
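+//
+// A sketch of the correspondence (illustrative, based on the note above):
+//
+//   isPerBackendFunctionalityKey(DispatchKey::Sparse)    -> true  (one bit)
+//   isPerBackendFunctionalityKey(DispatchKey::SparseCPU) -> false (a runtime
+//                                                           slot, not a bit)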
+
+constexpr bool isPerBackendFunctionalityKey(DispatchKey k) {
+  return k == DispatchKey::Dense || k == DispatchKey::Quantized ||
+      k == DispatchKey::Sparse || k == DispatchKey::AutogradFunctionality ||
+      k == DispatchKey::NestedTensor;
+}
+
+// Note that this includes Undefined in the total count.
+// BUT EndOfFunctionalityKeys is its own (placeholder) key.
+// e.g. Undefined=0, Dense=1, Sparse=2, EndOfFunctionalityKeys=3.
+// In the above example, there are 3 total functionality keys.
+constexpr uint8_t num_functionality_keys =
+    static_cast<uint8_t>(DispatchKey::EndOfFunctionalityKeys);
+
+constexpr uint8_t num_backends =
+    static_cast<uint8_t>(BackendComponent::EndOfBackendKeys);
+
+// Note [No More Than 16 Backends]
+// Search for this note to find places in the code where the "no more than 16
+// backends" invariant is baked in.
+static_assert(
+    static_cast<uint8_t>(BackendComponent::EndOfBackendKeys) <= 16,
+    "BackendComponent currently only supports <= 16 backends. If we really need to extend this, \
+there are a few places where this invariant is baked in");
+
+constexpr uint8_t numPerBackendFunctionalityKeys() {
+  uint8_t count = 0;
+  for (uint8_t k = 0; k <= num_functionality_keys; ++k) {
+    if (isPerBackendFunctionalityKey(static_cast<DispatchKey>(k)))
+      ++count;
+  }
+  return count;
+}
+
+#if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
+// See [Note: Trimmed Mobile Dispatch Keys]
+constexpr uint16_t num_runtime_entries = 8;
+#else
+constexpr uint16_t num_runtime_entries = num_functionality_keys +
+    (numPerBackendFunctionalityKeys() * (num_backends - 1));
+#endif
+
+// See Note [No More Than 16 Backends]
+constexpr uint16_t full_backend_mask =
+    (static_cast<uint16_t>(1) << num_backends) - 1;
+
+C10_API const char* toString(DispatchKey);
+C10_API const char* toString(BackendComponent);
+C10_API std::ostream& operator<<(std::ostream&, DispatchKey);
+C10_API std::ostream& operator<<(std::ostream&, BackendComponent);
+
+C10_API DispatchKey getAutogradKeyFromBackend(BackendComponent k);
+
+// Parses a string into a dispatch key.
+// If the string cannot be correctly parsed, throws an exception.
+C10_API c10::DispatchKey parseDispatchKey(const std::string& k);
+
+// These are some convenience identifiers for dispatch keys which are
+// shorter to type than their long counterparts. Note that some of these
+// dispatch keys directly correspond to DeviceType; and most APIs that
+// accept DispatchKey also accept DeviceType; e.g.,
+// torch::dispatch(torch::kCPU, ...) is also valid.
+constexpr DispatchKey kAutograd = DispatchKey::Autograd;
+
+// See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
+// This function relies on the invariant that the dispatch keys between
+// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
+// in the same order as `BackendComponent`.
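+//
+// For example (a sketch; these hold at compile time since the conversions
+// here are constexpr):
+//
+//   static_assert(toBackendComponent(DispatchKey::SparseCUDA) ==
+//                 BackendComponent::CUDABit, "");
+//   static_assert(toFunctionalityKey(DispatchKey::SparseCUDA) ==
+//                 DispatchKey::Sparse, "");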
+constexpr BackendComponent toBackendComponent(DispatchKey k) {
+  if (k >= DispatchKey::StartOfDenseBackends &&
+      k <= DispatchKey::EndOfDenseBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfDenseBackends));
+  } else if (
+      k >= DispatchKey::StartOfQuantizedBackends &&
+      k <= DispatchKey::EndOfQuantizedBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends));
+  } else if (
+      k >= DispatchKey::StartOfSparseBackends &&
+      k <= DispatchKey::EndOfSparseBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfSparseBackends));
+  } else if (
+      k >= DispatchKey::StartOfNestedTensorBackends &&
+      k <= DispatchKey::EndOfNestedTensorBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends));
+  } else if (
+      k >= DispatchKey::StartOfAutogradFunctionalityBackends &&
+      k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
+    return static_cast<BackendComponent>(
+        static_cast<uint8_t>(k) -
+        static_cast<uint8_t>(
+            DispatchKey::StartOfAutogradFunctionalityBackends));
+  } else {
+    return BackendComponent::InvalidBit;
+  }
+}
+
+constexpr DispatchKey toFunctionalityKey(DispatchKey k) {
+  if (k <= DispatchKey::EndOfFunctionalityKeys) {
+    return k;
+  } else if (k <= DispatchKey::EndOfDenseBackends) {
+    return DispatchKey::Dense;
+  } else if (k <= DispatchKey::EndOfQuantizedBackends) {
+    return DispatchKey::Quantized;
+  } else if (k <= DispatchKey::EndOfSparseBackends) {
+    return DispatchKey::Sparse;
+  } else if (k <= DispatchKey::EndOfNestedTensorBackends) {
+    return DispatchKey::NestedTensor;
+  } else if (k <= DispatchKey::EndOfAutogradFunctionalityBackends) {
+    return DispatchKey::AutogradFunctionality;
+  } else {
+    return DispatchKey::Undefined;
+  }
+}
+
+BackendComponent toBackendComponent(DeviceType device_type);
+
+// Given (DispatchKey::Dense, BackendComponent::CUDABit), returns
+// DispatchKey::CUDA.
+// See Note [The Ordering of Per-Backend Dispatch Keys Matters!]
+// This function relies on the invariant that the dispatch keys between
+// StartOfDenseBackends and EndOfRuntimeBackendKeys are ordered by backend
+// in the same order as `BackendComponent`.
+constexpr DispatchKey toRuntimePerBackendFunctionalityKey(
+    DispatchKey functionality_k,
+    BackendComponent backend_k) {
+  if (functionality_k == DispatchKey::Dense) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfDenseBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::Sparse) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfSparseBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::Quantized) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfQuantizedBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::NestedTensor) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(DispatchKey::StartOfNestedTensorBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  if (functionality_k == DispatchKey::AutogradFunctionality) {
+    return static_cast<DispatchKey>(
+        static_cast<uint8_t>(
+            DispatchKey::StartOfAutogradFunctionalityBackends) +
+        static_cast<uint8_t>(backend_k));
+  }
+  return DispatchKey::Undefined;
+}
+
+} // namespace c10
+
+namespace torch {
+// Expose the constant, but not the TYPE (DispatchKey is an implementation
+// detail!)
+using c10::kAutograd;
+} // namespace torch
+
+// NB: You really shouldn't use this instance; this enum is guaranteed
+// to be pretty small so a regular array should be acceptable.
+namespace std {
+template <>
+struct hash<c10::DispatchKey> {
+  typedef size_t result_type;
+  typedef c10::DispatchKey argument_type;
+
+  size_t operator()(c10::DispatchKey x) const {
+    return static_cast<size_t>(x);
+  }
+};
+} // namespace std
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h
new file mode 100644
index 0000000000000000000000000000000000000000..df9ac27919e10e72d918aa235b9a2e07dad07237
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/DispatchKeySet.h
@@ -0,0 +1,912 @@
+#pragma once
+#include <c10/core/DispatchKey.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/Metaprogramming.h>
+#include <c10/util/TypeList.h>
+#include <c10/util/llvmMathExtras.h>
+
+namespace c10 {
+
+struct FunctionalityOffsetAndMask {
+  // empty constructor shouldn't be used; only needed to initialize
+  // the array before populating it.
+  FunctionalityOffsetAndMask() = default;
+  FunctionalityOffsetAndMask(uint16_t offset, uint16_t mask)
+      : offset(offset), mask(mask) {}
+  // This needs to be big enough to cover the size of the operator table.
+  uint16_t offset{};
+  // See Note [No More Than 16 Backends]
+  // This mask needs to be big enough to mask all of the backend bits.
+  // We probably don't ever want to have more than 16 backend bits, so uint16_t
+  // should be enough.
+  uint16_t mask{};
+};
+static_assert(
+    c10::num_runtime_entries < 65536,
+    "The dispatcher currently only supports up to 2^16 runtime entries");
+
+C10_API std::array<FunctionalityOffsetAndMask, num_functionality_keys>
+initializeFunctionalityOffsetsAndMasks();
+
+C10_ALWAYS_INLINE static const std::
+    array<FunctionalityOffsetAndMask, num_functionality_keys>&
+    offsetsAndMasks() {
+  static auto offsets_and_masks_ = initializeFunctionalityOffsetsAndMasks();
+  return offsets_and_masks_;
+}
+
+// A representation of a set of DispatchKeys. A DispatchKeySet contains both
+// "functionality" bits and "backend bits", and every tensor holds its own
+// DispatchKeySet. The Dispatcher implements multiple dispatch by grabbing the
+// keyset on every input tensor, or'ing them together, and dispatching to a
+// specific piece of functionality. The functionality bits are *ordered*. When
+// multiple functionality bits are set, we use the highest priority
+// functionality. Similarly, multiple backend bits can theoretically be set if
+// you call an operator with multiple tensors from different devices (e.g. CPU
+// and CUDA), although support for mixed device dispatch is limited (the only
+// kernels that gracefully handle mixed device inputs for now are cuda kernels
+// that take in a scalar cpu tensor).
+
+// A representation of a set of DispatchKeys. A tensor may have multiple
+// tensor type ids, e.g., a Variable tensor can also be a CPU tensor; the
+// DispatchKeySet specifies what type ids apply. The internal representation is
+// as a 64-bit bit set (this means only 64 tensor type ids are supported).
+//
+// As mentioned above, DispatchKeys are ordered; thus, we can ask questions like
+// "what is the highest priority DispatchKey in the set"? (The set itself is
+// not ordered; two sets with the same ids will always have the ids ordered in
+// the same way.)
+//
+// Note [DispatchKeySet Internal Representation]
+// Internally, dispatch keys are packed into 64-bit DispatchKeySet objects
+// that get passed around at runtime.
+// However, there isn't necessarily a 1-to-1 mapping between bits in the keyset
+// and individual dispatch keys.
+//
+// First: why do we have this distinction, and why not map every dispatch key
+// directly to a bit? This is mostly because we have several types of
+// functionalities that different backends would like to customize. For example,
+// we have:
+// - "Dense":     CPU, CUDA, XLA, ... (~12 keys)
+// - "Sparse":    SparseCPU, SparseCUDA, ...
+// - "Quantized": QuantizedCPU, QuantizedCUDA, QuantizedXLA, ...
+// - "Autograd":  AutogradCPU, AutogradCUDA, AutogradXLA, ...
+// The problem is that the total number of keys grows quadratically with [#
+// backends] x [# functionalities], making it very difficult to map each key
+// directly to a bit in a bitset without dramatically increasing the size of the
+// bitset over time.
+//
+// The two enums (BackendComponent and DispatchKey) can be divided roughly into
+// 5 categories.
+//
+// (1) "Building block" keys
+//    (a) backends: everything in the BackendComponent enum (e.g. CPUBit,
+//        CUDABit)
+//    (b) functionalities: (per-backend) functionality-bit DispatchKeys
+//        (e.g. AutogradFunctionality, Sparse, Dense)
+// (2) "Runtime" keys
+//    (a) "non-customizable backends" (e.g. FPGA)
+//    (b) "non-customizable functionalities" (e.g. Functionalize)
+//    (c) "per-backend instances of customizable functionalities" (e.g. CPU,
+//        SparseCPU, AutogradCPU)
+// (3) "Alias" DispatchKeys (see Note [Alias Dispatch Keys])
+//
+// (1) Building block keys always correspond to individual bits in a
+// DispatchKeySet. They can also be combined in a DispatchKeySet to form actual
+// runtime keys. e.g.
+//    auto dense_cpu_ks = DispatchKeySet(DispatchKey::Dense) |
+//        DispatchKeySet(BackendComponent::CPUBit);
+//    // The keyset has the runtime dense-cpu key.
+//    dense_cpu_ks.has(DispatchKey::CPU);
+//    // And it contains the building block keys too.
+//    dense_cpu_ks.has_backend(BackendComponent::CPUBit);
+//    dense_cpu_ks.has(DispatchKey::Dense);
+//
+// Not every backend and not every functionality counts as a "building block
+// key". This is mostly to give us more levers to pull in the design space.
+// Backend keys and functionality keys that count as "building blocks" will
+// contribute to a full cross product of functionality that can be overridden.
+//
+// For example, right now we have at least 12 "backend" building blocks (CPU,
+// CUDA, XLA, ...) and at least 4 "functionality" building blocks (Dense,
+// Sparse, Quantized, AutogradFunctionality, ...). These keys together allow
+// every dispatcher operator to be customized in up to 12*4 different ways. Each
+// of those requires a slot in the operator table of every dispatcher operator.
+// Not every piece of functionality necessarily needs to be customizable
+// per-backend, and not every backend necessarily needs to be able to customize
+// every type of functionality.
+//
+//
+// (2) Every runtime key corresponds directly to a slot in an operator's runtime
+// dispatch table, and you can directly register kernels to a runtime dispatch
+// key.
+//
+// For per-backend functionalities like "Dense" or "AutogradFunctionality",
+// you can think of the corresponding runtime dispatch keys as "instances" of
+// that functionality, per backend. E.g. "CPU", "CUDA", "XLA", etc. are all
+// runtime instances of the "Dense" building block key.
+
+// (2a) and (2b) are represented identically in the DispatchKeySet logic:
+// - backend-agnostic functionalities (e.g. FuncTorchBatched) are NOT
+//   customizable per backend.
+//   In order to do so, we'd need to promote it to a per-backend functionality
+//   "building block" key.
+// - non-customizable backends (e.g. FPGA) can NOT customize existing
+//   functionality like Sparse, Autograd, etc.
+//   In order to do so, we'd need to promote it to a backend "building block"
+//   key.
+//
+// In both cases, these keys directly correspond to runtime slots in the
+// operator table.
+//
+//
+// (3) "Alias" keys
+// See Note [Alias Dispatch Keys]
+//
+// Final note: for anyone making future changes to the Dispatcher +
+// DispatchKeySet internals, there's a closed PR with a basic
+// python-implementation of the Dispatcher that might be useful in quickly
+// testing out and validating changes. See it at
+// https://github.com/pytorch/pytorch/pull/68743
+
+// An undefined tensor is one with an empty tensor type set.
+class DispatchKeySet final {
+ public:
+  enum Full { FULL };
+  enum FullAfter { FULL_AFTER };
+  enum Raw { RAW };
+
+  // NB: default constructor representation as zero is MANDATORY as
+  // use of DispatchKeySet in TLS requires this.
+  constexpr DispatchKeySet() = default;
+
+  constexpr DispatchKeySet(Full)
+      : repr_((1ULL << (num_backends + num_functionality_keys - 1)) - 1) {}
+
+  constexpr DispatchKeySet(FullAfter, DispatchKey t)
+      // LSB after t are OK, but not t itself.
+      // "functionalities" have a notion of ordering (e.g. Autograd > Sparse >
+      // Quantized > Dense). But backends don't really have an ordering.
+      // Therefore, we're enforcing that FullAfter can only be used on
+      // "functionality" keys.
+      : repr_(
+            (1ULL
+             << (num_backends + static_cast<uint8_t>(toFunctionalityKey(t)) -
+                 1)) -
+            1) {
+    *this = add(DispatchKey::PythonDispatcher);
+  }
+
+  // Public version of DispatchKeySet(uint64_t) API; external users
+  // must be explicit when they do this!
+  constexpr DispatchKeySet(Raw, uint64_t x) : repr_(x) {}
+
+  constexpr explicit DispatchKeySet(BackendComponent k) {
+    if (k == BackendComponent::InvalidBit) {
+      repr_ = 0;
+    } else {
+      repr_ = 1ULL << (static_cast<uint8_t>(k) - 1);
+    }
+  }
+
+  constexpr explicit DispatchKeySet(DispatchKey k) {
+    if (k == DispatchKey::Undefined) {
+      // Case 1: handle Undefined specifically
+      repr_ = 0;
+    } else if (k <= DispatchKey::EndOfFunctionalityKeys) {
+      // Case 2: handle "functionality-only" keys
+      // These keys have a functionality bit set, but no backend bits.
+      // These can technically be either:
+      // - valid runtime keys (e.g. DispatchKey::AutogradOther,
+      //   DispatchKey::FuncTorchBatched, etc)
+      // - "building block" keys that aren't actual runtime keys (e.g.
+      //   DispatchKey::Dense or Sparse)
+      uint64_t functionality_val = 1ULL
+          << (num_backends + static_cast<uint8_t>(k) - 1);
+      repr_ = functionality_val;
+    } else if (k <= DispatchKey::EndOfRuntimeBackendKeys) {
+      // Case 3: "runtime" keys that have a functionality bit AND a backend bit.
+      // First compute which bit to flip for the functionality.
+      auto functionality_k = toFunctionalityKey(k);
+      // The - 1 is because Undefined is technically a "functionality" that
+      // doesn't show up in the bitset. So e.g. Dense is technically the second
+      // functionality, but the lowest functionality bit.
+      uint64_t functionality_val = 1ULL
+          << (num_backends + static_cast<uint8_t>(functionality_k) - 1);
+
+      // Then compute which bit to flip for the backend.
+      // Case 4a: handle the runtime instances of "per-backend functionality"
+      // keys. For example, given DispatchKey::CPU, we should set:
+      // - the Dense functionality bit
+      // - the CPUBit backend bit
+      auto backend_k = toBackendComponent(k);
+      uint64_t backend_val = backend_k == BackendComponent::InvalidBit
+          ? 0
+          : 1ULL << (static_cast<uint8_t>(backend_k) - 1);
+      repr_ = functionality_val + backend_val;
+    } else {
+      // At this point, we should have covered every case except for alias keys.
+      // Technically it would be possible to add alias dispatch keys to a
+      // DispatchKeySet, but the semantics are a little confusing and this
+      // currently isn't needed anywhere.
+      repr_ = 0;
+    }
+  }
+
+  constexpr uint64_t keys_to_repr(std::initializer_list<DispatchKey> ks) {
+    uint64_t repr = 0;
+    for (auto k : ks) {
+      repr |= DispatchKeySet(k).repr_;
+    }
+    return repr;
+  }
+
+  constexpr uint64_t backend_bits_to_repr(
+      std::initializer_list<BackendComponent> ks) {
+    uint64_t repr = 0;
+    for (auto k : ks) {
+      repr |= DispatchKeySet(k).repr_;
+    }
+    return repr;
+  }
+
+  explicit constexpr DispatchKeySet(std::initializer_list<DispatchKey> ks)
+      : repr_(keys_to_repr(ks)) {}
+
+  explicit constexpr DispatchKeySet(std::initializer_list<BackendComponent> ks)
+      // Note: for some reason, putting this logic directly in the constructor
+      // appears to fail to compile on CUDA 10.1.
+      // See an example internal failure at
+      // https://www.internalfb.com/intern/skycastle/run/76561193669136035/artifact/actionlog.76561193742069401.stderr
+      : repr_(backend_bits_to_repr(ks)) {}
+
+  // Test if a DispatchKey is in the set
+  inline bool has(DispatchKey t) const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(t != DispatchKey::Undefined);
+    return has_all(DispatchKeySet(t));
+  }
+  constexpr bool has_backend(BackendComponent t) const {
+    return has_all(DispatchKeySet(t));
+  }
+
+  // Given a DispatchKeySet of functionality keys and (potentially) backend
+  // keys, tests if all of them are in the current set.
+  constexpr bool has_all(DispatchKeySet ks) const {
+    return static_cast<bool>((repr_ & ks.repr_) == ks.repr_);
+  }
+
+  // Given a DispatchKeySet of functionality keys and (potentially) backend
+  // keys, tests if any of them are in the current set. This could technically
+  // be pretty easily implemented using has(). It is strictly a perf
+  // optimization though. There are many places in the code base where we want
+  // to test for multiple functionality keys together. HOWEVER, runtime
+  // per-backend functionality keys aren't allowed to be used with this
+  // function, because you can end up with weird results. e.g.
+  // DispatchKeySet(DispatchKey::AutogradCPU).has_any(DispatchKeySet(DispatchKey::CPU))
+  // would return true.
+  inline bool has_any(DispatchKeySet ks) const {
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        // Either there are no backend bits in the input keyset
+        ((ks.repr_ & full_backend_mask) == 0) ||
+        // or there are no per-backend-functionality bits
+        // See [Note: Per-Backend Functionality Dispatch Keys]
+        ((ks & DispatchKeySet({
+                   DispatchKey::Dense,
+                   DispatchKey::Quantized,
+                   DispatchKey::Sparse,
+                   DispatchKey::AutogradFunctionality,
+               }))
+             .repr_ == 0));
+    return static_cast<bool>((repr_ & ks.repr_) != 0);
+  }
+  // Test if DispatchKeySet is a superset of ks.
+  bool isSupersetOf(DispatchKeySet ks) const {
+    return (repr_ & ks.repr_) == ks.repr_;
+  }
+  // Perform set union
+  constexpr DispatchKeySet operator|(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ | other.repr_);
+  }
+  // Perform set intersection
+  constexpr DispatchKeySet operator&(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ & other.repr_);
+  }
+  // Compute the set difference self - other,
+  // but ONLY for the functionality keys.
+  // Any backend bits set on self will remain unchanged.
+  // See Note [Removing keys from DispatchKeySet Only Affects Functionality
+  // Keys]
+  constexpr DispatchKeySet operator-(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ & (full_backend_mask | ~other.repr_));
+  }
+
+  // Compute self ^ other
+  constexpr DispatchKeySet operator^(DispatchKeySet other) const {
+    return DispatchKeySet(repr_ ^ other.repr_);
+  }
+  bool operator==(DispatchKeySet other) const {
+    return repr_ == other.repr_;
+  }
+  bool operator!=(DispatchKeySet other) const {
+    return repr_ != other.repr_;
+  }
+  // Add a DispatchKey to the DispatchKey set. Does NOT mutate;
+  // returns the extended DispatchKeySet!
+  C10_NODISCARD constexpr DispatchKeySet add(DispatchKey t) const {
+    return *this | DispatchKeySet(t);
+  }
+  C10_NODISCARD constexpr DispatchKeySet add(DispatchKeySet ks) const {
+    return *this | ks;
+  }
+
+  // Remove a DispatchKey from the DispatchKey set.
+  // This is generally not an operation you should be doing
+  // (it's used to implement the printing overload, operator<<)
+  //
+  // Note [Removing keys from DispatchKeySet Only Affects Functionality Keys]
+  // Only functionality bits are allowed to be removed from a keyset.
+  // For now, we're only allowing removal of "functionality bits" from the
+  // keyset, which is specifically needed by the fallthrough key calculation
+  // logic. Why is removing backend bits problematic? Consider this example:
+  //
+  //   DispatchKeySet([DispatchKey.CPU, DispatchKey.AutogradCUDA,
+  //                   DispatchKey.CUDA]).remove(DispatchKey.AutogradCUDA)
+  //   DispatchKeySet([DispatchKey.CPU,
+  //                   DispatchKey.AutogradCUDA]).remove(DispatchKey.AutogradCUDA)
+  //
+  // What do we want to happen?
+  // Technically, we'd like it to be true that after removal,
+  // the first keyset still has the CUDA dispatch key while the second doesn't.
+  // Unfortunately there's no way to represent that, because the two keysets are
+  // represented the same way internally:
+  //   functionality bits: Autograd, Dense
+  //   backend bits: CPU, CUDA
+  //
+  // Instead, remove(DispatchKey.AutogradCUDA) will only remove the "Autograd"
+  // bit from the bitset.
+  C10_NODISCARD constexpr DispatchKeySet remove(DispatchKey t) const {
+    return DispatchKeySet(
+        repr_ & ~(DispatchKeySet(t).repr_ & ~full_backend_mask));
+  }
+  // You're allowed to remove a backend bit from a DispatchKeySet,
+  // but you have to be explicit about it (remove_backend() instead of
+  // remove()).
+  constexpr DispatchKeySet remove_backend(BackendComponent b) const {
+    return DispatchKeySet(repr_ & ~(DispatchKeySet(b).repr_));
+  }
+  // Is the set empty? (AKA undefined tensor)
+  bool empty() const {
+    return repr_ == 0;
+  }
+  uint64_t raw_repr() {
+    return repr_;
+  }
+
+  DispatchKey highestFunctionalityKey() const {
+    auto functionality_idx = indexOfHighestBit();
+    // This means that none of the functionality bits were set.
+    if (functionality_idx < num_backends)
+      return DispatchKey::Undefined;
+    // The first num_backends bits in the keyset don't correspond to real
+    // dispatch keys.
+    return static_cast<DispatchKey>(functionality_idx - num_backends);
+  }
+
+  // This is similar to toBackendComponent(DispatchKey), but less restrictive.
+  // toBackendComponent() errors out if the key that it was passed has no
+  // backend bits, which is useful for error checking. We need a version of that
+  // here that can also handle "fake" backends like FPGA, because they need to
+  // map to the AutogradOther key. For those backends, we return
+  // BackendComponent::InvalidBit.
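+  //
+  // For example (an illustrative sketch): for a keyset whose backend bits are
+  // {CPUBit, CUDABit}, this returns BackendComponent::CUDABit, since CUDA
+  // occupies the higher backend bit; for a keyset with no backend bits set,
+  // it returns BackendComponent::InvalidBit.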
+  BackendComponent highestBackendKey() const {
+    // mask out the functionality bits
+    auto backend_idx =
+        DispatchKeySet(repr_ & full_backend_mask).indexOfHighestBit();
+    // all zeros across the backend bits means that no backend bits are set.
+    if (backend_idx == 0)
+      return BackendComponent::InvalidBit;
+    return static_cast<BackendComponent>(backend_idx);
+  }
+
+  // returns the DispatchKey of highest priority in the set.
+  DispatchKey highestPriorityTypeId() const {
+    auto functionality_k = highestFunctionalityKey();
+    if (isPerBackendFunctionalityKey(functionality_k)) {
+      return toRuntimePerBackendFunctionalityKey(
+          functionality_k, highestBackendKey());
+    }
+    return functionality_k;
+  }
+
+  // Returns the index of the most-significant bit in the keyset.
+  // This is used as part of the calculation into the operator table to get:
+  // - the highest "functionality" bit in the keyset.
+  // - the highest "backend" bit in the keyset.
+  uint8_t indexOfHighestBit() const {
+    return 64 - llvm::countLeadingZeros(repr_);
+  }
+
+#if defined(C10_MOBILE_TRIM_DISPATCH_KEYS)
+  // [Note: Trimmed Mobile Dispatch Keys]
+  /**
+   * The method below maps the dispatch key in the enum DispatchKey to an
+   * integer index in the dispatchTable_ array in OperatorEntry. The array
+   * is trimmed for mobile to reduce peak memory usage, since it's
+   * unnecessary to reserve additional space for dispatch keys that will
+   * never be used on mobile.
+   */
+  int getDispatchTableIndexForDispatchKeySet() const {
+    auto dk = highestPriorityTypeId();
+    switch (dk) {
+      case DispatchKey::Undefined:
+        return 0;
+      case DispatchKey::CPU:
+        return 1;
+      case DispatchKey::QuantizedCPU:
+        return 2;
+      case DispatchKey::SparseCPU:
+        return 3;
+      case DispatchKey::BackendSelect:
+        return 4;
+      case DispatchKey::ADInplaceOrView:
+        return 5;
+      case DispatchKey::AutogradOther:
+        return 6;
+      case DispatchKey::AutogradCPU:
+        return 7;
+      default:
+        return -1;
+    }
+  }
+#else
+  // returns the index in the operator table of the highest priority key in the
+  // keyset. Note that we could in theory implement this using
+  // highestPriorityTypeId(), but this code is very hotpath and we can do it
+  // faster without it.
+  int getDispatchTableIndexForDispatchKeySet() const {
+    auto functionality_idx =
+        DispatchKeySet(repr_ >> num_backends).indexOfHighestBit();
+    auto offset_and_mask = offsetsAndMasks()[functionality_idx];
+    // Mask the functionality bits out first, then right-shift by 1.
+    // right-shifting by 1 because everything is zero-indexed.
+    // E.g. 000001 (CPU) should give us an offset of 0, 000010 (CUDA) should
+    // give us an offset of 1, etc.
+    auto backend_idx =
+        DispatchKeySet((repr_ & offset_and_mask.mask) >> 1).indexOfHighestBit();
+    return offset_and_mask.offset + backend_idx;
+  }
+#endif
+
+  // returns the "index" of the highest priority backend in the keyset.
+  // This is pretty similar to highestBackendKey(), but:
+  // - It's hotpath code (part of the runtime bitset calculation)
+  // - It returns an integer index, not an enum value
+  // - Everything is shifted to the right by 1.
+  //   BackendComponent::InvalidBit is technically the lowest enum value,
+  //   but it isn't included in the runtime table. So CPUBit = 1, CUDABit = 2,
+  //   etc.
+  uint64_t getBackendIndex() const {
+    return DispatchKeySet((repr_ & full_backend_mask) >> 1).indexOfHighestBit();
+  }
+
+ private:
+  constexpr DispatchKeySet(uint64_t repr) : repr_(repr) {}
+  uint64_t repr_ = 0;
+
+ public:
+  // STL iterator for DispatchKeySet. Iterates through all runtime DispatchKeys
+  // in the set. The iterator is only invalidated by the destruction of the
+  // underlying DispatchKeySet, as the iterator stores a pointer to the raw
+  // representation of the DispatchKeySet. Note: When we encounter a per-backend
+  // functionality (e.g. Dense or Sparse), we will iterate through EVERY backend
+  // in the keyset, for that functionality. For example, if the next
+  // functionality key to iterate over is Autograd, and the backend bits in the
+  // keyset correspond to [BackendComponent::CPUBit, BackendComponent::CUDABit],
+  // then the next two keys we return will be DispatchKey::AutogradCPU,
+  // DispatchKey::AutogradCUDA (CPU first because it has lower precedence than
+  // CUDA in DispatchKey.h).
+  class iterator {
+   public:
+    using self_type = iterator;
+    using iterator_category = std::input_iterator_tag;
+    using value_type = DispatchKey;
+    using difference_type = ptrdiff_t;
+    using reference = value_type&;
+    using pointer = value_type*;
+    // final mask value should mask out the entire keyset
+    static const uint8_t end_iter_mask_val =
+        num_backends + num_functionality_keys;
+    // final key value should be the last DispatchKey
+    static const uint8_t end_iter_key_val = num_functionality_keys;
+
+    // current_dispatchkey_idx_ will iterate through all functionality bits.
+    // current_backendcomponent_idx_ will iterate through all backend bits.
+    explicit iterator(
+        const uint64_t* data_ptr,
+        uint8_t next_functionality = num_backends,
+        uint8_t next_backend = 0)
+        : data_ptr_(data_ptr),
+          next_functionality_(next_functionality),
+          next_backend_(next_backend),
+          // These are in an invalid state at construction time, and set by the
+          // first increment call
+          current_dispatchkey_idx_(end_iter_key_val),
+          current_backendcomponent_idx_(end_iter_key_val) {
+      // Go to the first key in the set
+      TORCH_INTERNAL_ASSERT(
+          next_functionality_ >= num_backends,
+          "num_backends=",
+          static_cast<uint32_t>(num_backends),
+          "next_functionality_=",
+          static_cast<uint32_t>(next_functionality_));
+      ++(*this);
+    }
+
+    C10_API self_type& operator++();
+
+    self_type operator++(int) {
+      self_type previous_iterator = *this;
+      ++(*this);
+      return previous_iterator;
+    }
+
+    bool operator==(const self_type& rhs) const {
+      return next_functionality_ == rhs.next_functionality_ &&
+          current_dispatchkey_idx_ == rhs.current_dispatchkey_idx_ &&
+          next_backend_ == rhs.next_backend_ &&
+          current_backendcomponent_idx_ == rhs.current_backendcomponent_idx_;
+    }
+    bool operator!=(const self_type& rhs) const {
+      return next_functionality_ != rhs.next_functionality_ ||
+          current_dispatchkey_idx_ != rhs.current_dispatchkey_idx_ ||
+          next_backend_ != rhs.next_backend_ ||
+          current_backendcomponent_idx_ != rhs.current_backendcomponent_idx_;
+    }
+    DispatchKey operator*() const {
+      auto functionality_key =
+          static_cast<DispatchKey>(current_dispatchkey_idx_);
+      if (isPerBackendFunctionalityKey(functionality_key)) {
+        auto next_key = toRuntimePerBackendFunctionalityKey(
+            functionality_key,
+            static_cast<BackendComponent>(current_backendcomponent_idx_));
+        // We expect all of the Dense, Sparse, Quantized, and Autograd keys to
+        // be ordered the same way with respect to their backends
+        TORCH_INTERNAL_ASSERT(
+            toBackendComponent(next_key) ==
+                static_cast<BackendComponent>(current_backendcomponent_idx_),
+            "Tried to map functionality key ",
+            toString(functionality_key),
+            " and backend bit ",
+            toString(
+                static_cast<BackendComponent>(current_backendcomponent_idx_)),
+            " to a runtime key, but ended up with ",
+            toString(next_key),
+            ". This can happen if the order of the backend dispatch keys in DispatchKey.h isn't consistent.",
+            " Please double check that enum for inconsistencies.");
+        return next_key;
+      } else {
+        return functionality_key;
+      }
+    }
+
+   private:
+    const uint64_t* data_ptr_;
+    uint8_t next_functionality_;
+    uint8_t next_backend_;
+    uint8_t current_dispatchkey_idx_;
+    uint8_t current_backendcomponent_idx_;
+  };
+
+ public:
+  // Returns iterator to the first key in the set. If no keys are in the
+  // set, then it will return the end iterator.
+  iterator begin() const {
+    return iterator(&repr_);
+  }
+
+  // We do not need to iterate beyond EndOfFunctionalityKeys, so we will treat
+  // this as the end iterator.
+  iterator end() const {
+    return iterator(&repr_, iterator::end_iter_mask_val);
+  }
+};
+
+C10_API std::string toString(DispatchKeySet);
+C10_API std::ostream& operator<<(std::ostream&, DispatchKeySet);
+
+C10_API inline int getDispatchTableIndexForDispatchKey(DispatchKey k) {
+  return DispatchKeySet(k).getDispatchTableIndexForDispatchKeySet();
+}
+
+// Alias key DispatchKey::Autograd maps to
+// (autograd_dispatch_keyset x full_backend_mask)
+// NB: keys in this set also get associated with CompositeImplicitAutograd
+//
+// Note [autograd_dispatch_keyset Does Not Include Backend Bits]
+// We don't want to include any backend bits (BackendComponent::CPUBit, etc)
+// directly in autograd_dispatch_keyset.
+// Why? keysets like autograd_dispatch_keyset are commonly used to remove
+// autograd keys from a DispatchKeySet throughout the code base. However, you
+// are only allowed to remove functionality bits from a keyset, not backend
+// bits. See Note [Removing keys from DispatchKeySet Only Affects Functionality
+// Keys] for details. To be consistent and avoid confusion, we're explicitly
+// setting up autograd_dispatch_keyset to not have any backend bits.
+constexpr DispatchKeySet autograd_dispatch_keyset = DispatchKeySet({
+    DispatchKey::AutogradFunctionality,
+    DispatchKey::AutogradOther,
+    DispatchKey::AutogradNestedTensor,
+});
+
+constexpr DispatchKeySet autocast_dispatch_keyset = DispatchKeySet({
+    DispatchKey::AutocastCPU,
+    DispatchKey::AutocastCUDA,
+    DispatchKey::AutocastXPU,
+    DispatchKey::AutocastHPU,
+});
+
+// See Note [TLS Initialization]
+constexpr DispatchKeySet default_included_set = DispatchKeySet({
+    DispatchKey::BackendSelect,
+    DispatchKey::ADInplaceOrView,
+});
+
+constexpr DispatchKeySet default_excluded_set = DispatchKeySet({
+    DispatchKey::AutocastCPU,
+    DispatchKey::AutocastCUDA,
+    DispatchKey::AutocastXPU,
+    DispatchKey::AutocastHPU,
+});
+
+constexpr DispatchKeySet autograd_dispatch_keyset_with_ADInplaceOrView =
+    autograd_dispatch_keyset | DispatchKeySet(DispatchKey::ADInplaceOrView);
+
+constexpr DispatchKeySet python_ks = DispatchKeySet({
+    DispatchKey::Python,
+    DispatchKey::PythonTLSSnapshot,
+});
+
+constexpr DispatchKeySet sparse_ks = DispatchKeySet(DispatchKey::Sparse);
+
+constexpr DispatchKeySet sparse_csr_ks =
+    DispatchKeySet({DispatchKey::SparseCsrCPU, DispatchKey::SparseCsrCUDA});
+
+constexpr DispatchKeySet mkldnn_ks = DispatchKeySet(DispatchKey::MkldnnCPU);
+
+// backend dispatch keys that map to DispatchKey::AutogradOther
+// NB: keys in this set also get associated with CompositeImplicitAutograd
+constexpr DispatchKeySet autogradother_backends =
+    DispatchKeySet(
+        // HIP and VE aren't in this list: they now have their own backend bits,
+        // which means that they can now have their own Autograd keys.
+        // Technically, HIP will now redispatch to its own custom AutogradHIP
+        // slot in the runtime table.
+        {DispatchKey::FPGA,
+         DispatchKey::ORT,
+         DispatchKey::Vulkan,
+         DispatchKey::Metal,
+         DispatchKey::SparseCsrCPU,
+         DispatchKey::SparseCsrCUDA,
+         DispatchKey::CustomRNGKeyId,
+         DispatchKey::MkldnnCPU,
+         // Sparse and Quantized backends also live here.
+         DispatchKey::Sparse,
+         DispatchKey::Quantized})
+    // Including the backend bits because this keyset is used during op
+    // registration, which requires looping over all runtime autogradother
+    // backend keys.
+    | DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
+
+// The set of dispatch keys that come after autograd
+// n.b. this relies on the fact that AutogradOther is currently the lowest
+// Autograd key
+constexpr DispatchKeySet after_autograd_keyset =
+    DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::AutogradOther);
+
+// The set of dispatch keys that come after ADInplaceOrView
+constexpr DispatchKeySet after_ADInplaceOrView_keyset = DispatchKeySet(
+    DispatchKeySet::FULL_AFTER,
+    c10::DispatchKey::ADInplaceOrView);
+
+// The set of dispatch keys that come after Functionalize
+constexpr DispatchKeySet after_func_keyset =
+    DispatchKeySet(DispatchKeySet::FULL_AFTER, c10::DispatchKey::Functionalize)
+        .remove(
+            // NOTE: we also need to remove ADInplaceOrView from the keyset when
+            // redispatching after the func kernels. This is because we're not
+            // calling the same op; we originally called an inplace op, and now
+            // we aren't. The original key calculation figured out which keys
+            // were Fallthrough based on the inplace op. That means that it did
+            // not include the ADInPlaceOrView kernel as a fallthrough key.
+            // However, we WANT the ADInPlaceOrView kernel to be ignored now
+            // that we're calling an out-of-place op. Re-invoking
+            // Dispatcher::call would re-run the Fallthrough key calculation and
+            // get us that, but at::redispatch is more performant. We can get
+            // away with it by explicitly removing the key here.
+            c10::DispatchKey::ADInplaceOrView);
+
+constexpr DispatchKeySet backend_bitset_mask =
+    DispatchKeySet(DispatchKeySet::RAW, (1ULL << num_backends) - 1);
+
+constexpr auto inplace_or_view_ks =
+    DispatchKeySet(DispatchKey::ADInplaceOrView);
+constexpr auto autograd_cpu_ks = DispatchKeySet(DispatchKey::AutogradCPU);
+constexpr auto autograd_ipu_ks = DispatchKeySet(DispatchKey::AutogradIPU);
+constexpr auto autograd_xpu_ks = DispatchKeySet(DispatchKey::AutogradXPU);
+constexpr auto autograd_cuda_ks = DispatchKeySet(DispatchKey::AutogradCUDA);
+constexpr auto autograd_xla_ks = DispatchKeySet(DispatchKey::AutogradXLA);
+constexpr auto autograd_lazy_ks = DispatchKeySet(DispatchKey::AutogradLazy);
+constexpr auto autograd_meta_ks = DispatchKeySet(DispatchKey::AutogradMeta);
+constexpr auto autograd_mps_ks = DispatchKeySet(DispatchKey::AutogradMPS);
+constexpr auto autograd_hpu_ks = DispatchKeySet(DispatchKey::AutogradHPU);
+constexpr auto autograd_privateuse1_ks =
+    DispatchKeySet(DispatchKey::AutogradPrivateUse1);
+constexpr auto autograd_privateuse2_ks =
+    DispatchKeySet(DispatchKey::AutogradPrivateUse2);
+constexpr auto autograd_privateuse3_ks =
+    DispatchKeySet(DispatchKey::AutogradPrivateUse3);
+constexpr auto autograd_other_ks = DispatchKeySet(DispatchKey::AutogradOther);
+constexpr auto autograd_nested =
+    DispatchKeySet(DispatchKey::AutogradNestedTensor);
+// keyset corresponding to functorch keys that have their own dedicated
+// TensorImpl subclass.
+constexpr auto functorch_transforms_ks = DispatchKeySet(
+    {DispatchKey::FuncTorchBatched,
+     DispatchKey::FuncTorchVmapMode,
+     DispatchKey::Batched,
+     DispatchKey::VmapMode,
+     DispatchKey::FuncTorchGradWrapper});
+
+constexpr auto functorch_batched_ks =
+    DispatchKeySet({DispatchKey::FuncTorchBatched});
+
+// This keyset has:
+// (1) the functionality bits corresponding to backends (dense, sparse,
+//     quantized)
+// (2) all of the backend bits set
+constexpr DispatchKeySet backend_functionality_keys =
+    DispatchKeySet({
+        DispatchKey::Dense,
+        DispatchKey::Quantized,
+        DispatchKey::Sparse,
+    }) |
+    DispatchKeySet(DispatchKeySet::RAW, full_backend_mask);
+
+struct OpTableOffsetAndMask {
+  uint16_t offset;
+  uint16_t backend_mask;
+};
+
+static_assert(
+    num_backends <= 16,
+    "Right now we expect the number of backends not to exceed 16. In the (unlikely) event"
+    " that this changes, the size of OpTableOffsetAndMask::backend_mask needs to be increased too.");
+
+// true if t is a backend dispatch key
+C10_API bool isBackendDispatchKey(DispatchKey t);
+
+// Resolve alias dispatch key to DispatchKeySet if applicable
+C10_API DispatchKeySet getRuntimeDispatchKeySet(DispatchKey t);
+
+// Resolve alias dispatch key to DispatchKeySet if applicable,
+// and check if k is a part of that set
+C10_API bool runtimeDispatchKeySetHas(DispatchKey t, DispatchKey k);
+
+// Returns a DispatchKeySet of all backend keys mapped to Autograd dispatch key
+// t; the DispatchKeySet is empty if t is not an alias of DispatchKey::Autograd.
+C10_API DispatchKeySet getBackendKeySetFromAutograd(DispatchKey t);
+
+// Returns a DispatchKeySet of autograd-related keys mapped to backend.
+// For a given backend key, use the associated autograd key.
+// For non-backend keys, use AutogradOther as a default.
+// Note: it's convenient and fast to return a default here rather than (say)
+// returning an optional, or throwing. But it makes callers
+// responsible for either a) enforcing the invariant that only backend keys
+// be passed as arguments, or b) interpreting our return value carefully.
+inline DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t) {
+  switch (t) {
+    case BackendComponent::CPUBit:
+      return inplace_or_view_ks | autograd_cpu_ks;
+    case BackendComponent::IPUBit:
+      return inplace_or_view_ks | autograd_ipu_ks;
+    case BackendComponent::XPUBit:
+      return inplace_or_view_ks | autograd_xpu_ks;
+    case BackendComponent::CUDABit:
+      return inplace_or_view_ks | autograd_cuda_ks;
+    case BackendComponent::XLABit:
+      return inplace_or_view_ks | autograd_xla_ks;
+    case BackendComponent::LazyBit:
+      return inplace_or_view_ks | autograd_lazy_ks;
+    case BackendComponent::MetaBit:
+      return inplace_or_view_ks | autograd_meta_ks;
+    case BackendComponent::MPSBit:
+      return inplace_or_view_ks | autograd_mps_ks;
+    case BackendComponent::HPUBit:
+      return inplace_or_view_ks | autograd_hpu_ks;
+    case BackendComponent::PrivateUse1Bit:
+      return inplace_or_view_ks | autograd_privateuse1_ks;
+    case BackendComponent::PrivateUse2Bit:
+      return inplace_or_view_ks | autograd_privateuse2_ks;
+    case BackendComponent::PrivateUse3Bit:
+      return inplace_or_view_ks | autograd_privateuse3_ks;
+    default:
+      return inplace_or_view_ks | autograd_other_ks;
+  }
+}
+
+// Returns a DispatchKeySet of autocast-related keys mapped to backend.
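+// For example (an illustrative sketch): CUDABit (and, for now, XLABit) map to
+// {AutocastCUDA}, while backends with no autocast support map to the empty
+// set:
+//
+//   getAutocastRelatedKeySetFromBackend(BackendComponent::VEBit).empty()
+//   // -> true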
+inline DispatchKeySet getAutocastRelatedKeySetFromBackend(BackendComponent t) { + constexpr auto autocast_cpu_ks = DispatchKeySet(DispatchKey::AutocastCPU); + constexpr auto autocast_xpu_ks = DispatchKeySet(DispatchKey::AutocastXPU); + constexpr auto autocast_hpu_ks = DispatchKeySet(DispatchKey::AutocastHPU); + constexpr auto autocast_cuda_ks = DispatchKeySet(DispatchKey::AutocastCUDA); + switch (t) { + case BackendComponent::CPUBit: + return autocast_cpu_ks; + case BackendComponent::XPUBit: + return autocast_xpu_ks; + case BackendComponent::HPUBit: + return autocast_hpu_ks; + case BackendComponent::CUDABit: + case BackendComponent::XLABit: + return autocast_cuda_ks; + default: + return DispatchKeySet(); + } +} + +// Returns the "backend" DispatchKey of highest priority in the set. +// This is basically like highestBackendKey(), except that we have some +// "functionality" bits that correspond to backends (Sparse, Quantized). +inline DispatchKey highestPriorityBackendTypeId(DispatchKeySet ks) { + return (ks & backend_functionality_keys).highestPriorityTypeId(); +} + +// This API exists because we have a use case for checking +// getRuntimeDispatchKeySet(alias).has(DispatchKey::Undefined) +// in OperatorEntry.cpp but we disallow it in the has() API. +C10_API bool isIncludedInAlias(DispatchKey k, DispatchKey alias); + +// Historically, every tensor only had a single DispatchKey, and it was always +// something like CPU, and there wasn't any of this business where TLS +// could cause the DispatchKey of a tensor to change. But we still have some +// legacy code that uses DispatchKey for things like instanceof +// checks; if at all possible, refactor the code to stop using DispatchKey in +// those cases. +static inline DispatchKey legacyExtractDispatchKey(DispatchKeySet s) { + // NB: If you add any extra keys that can be stored in TensorImpl on + // top of existing "backend" keys like CPU/CUDA, you need to add it + // here. At the moment, autograd keys and the ADInplaceOrView key need this + // treatment. + return (s - autograd_dispatch_keyset_with_ADInplaceOrView - + autocast_dispatch_keyset - + DispatchKeySet( + {DispatchKey::Functionalize, + DispatchKey::PythonTLSSnapshot, + DispatchKey::Python})) + .highestPriorityTypeId(); +} + +template <class T> +using is_not_DispatchKeySet = guts::negation<std::is_same<DispatchKeySet, T>>; + +// Given a function type, constructs a function_traits type that drops the first +// parameter type if the first parameter is of type DispatchKeySet. NB: +// DispatchKeySet is currently explicitly hidden from JIT (mainly to avoid +// pushing unnecessary arguments on the stack - see Note [ Plumbing Keys Through +// the Dispatcher] for details). If at any point in the future we need to expose +// this type to JIT, revisit the usage of this type alias.
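The alias defined next can be read off from a couple of static_asserts. This is an illustrative sketch only; the func_type member is an assumption about c10::guts::function_traits, not something this header guarantees:

#include <c10/core/DispatchKeySet.h>
#include <type_traits>

// Illustrative only: a leading DispatchKeySet parameter is dropped from the
// signature; any other signature passes through unchanged.
using WithKs = c10::remove_DispatchKeySet_arg_from_func<int(c10::DispatchKeySet, float)>;
using WithoutKs = c10::remove_DispatchKeySet_arg_from_func<int(float)>;
static_assert(std::is_same<WithKs::func_type, int(float)>::value, "ks dropped");
static_assert(std::is_same<WithoutKs::func_type, int(float)>::value, "unchanged");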
+template +using remove_DispatchKeySet_arg_from_func = guts::make_function_traits_t< + typename guts::infer_function_traits_t::return_type, + typename std::conditional_t< + std::is_same< + DispatchKeySet, + typename guts::typelist::head_with_default_t< + void, + typename guts::infer_function_traits_t< + FuncType>::parameter_types>>::value, + guts::typelist::drop_if_nonempty_t< + typename guts::infer_function_traits_t::parameter_types, + 1>, + typename guts::infer_function_traits_t::parameter_types>>; +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h new file mode 100644 index 0000000000000000000000000000000000000000..f4e3e930790674b46babbf6a77a8f7769954644c --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/MemoryFormat.h @@ -0,0 +1,287 @@ +#pragma once + +#include +#include +#include + +#include + +// Memory format is not the property of a Tensor. It is the way to tell an +// operator how the result should be organized in memory and nothing more. That +// means memory format should never be used as return value for any tensor state +// interrogation functions (internally and externally). +// +// Possible options are: +// Preserve: +// If any of the input tensors is in channels_last format, operator output +// should be in channels_last format +// +// Contiguous: +// Regardless of input tensors format, the output should be contiguous +// Tensor. +// +// ChannelsLast: +// Regardless of input tensors format, the output should be in channels_last +// format. + +namespace c10 { +enum class MemoryFormat : int8_t { + Contiguous, + Preserve, + ChannelsLast, + ChannelsLast3d, + NumOptions +}; + +// If you are seeing this, it means that this call site was not checked if +// the memory format could be preserved, and it was switched to old default +// behaviour of contiguous +#define LEGACY_CONTIGUOUS_MEMORY_FORMAT c10::get_contiguous_memory_format() + +inline MemoryFormat get_contiguous_memory_format() { + return MemoryFormat::Contiguous; +} + +inline std::ostream& operator<<( + std::ostream& stream, + at::MemoryFormat memory_format) { + switch (memory_format) { + case MemoryFormat::Preserve: + return stream << "Preserve"; + case MemoryFormat::Contiguous: + return stream << "Contiguous"; + case MemoryFormat::ChannelsLast: + return stream << "ChannelsLast"; + case MemoryFormat::ChannelsLast3d: + return stream << "ChannelsLast3d"; + default: + TORCH_CHECK(false, "Unknown memory format ", memory_format); + } +} + +// Note: Hardcoded the channel last stride indices here to get better +// performance +template +inline std::vector get_channels_last_strides_2d(ArrayRef sizes) { + std::vector strides(sizes.size()); + switch (sizes.size()) { + case 4: + strides[1] = 1; + strides[3] = sizes[1]; + strides[2] = strides[3] * sizes[3]; + strides[0] = strides[2] * sizes[2]; + return strides; + case 3: + strides[0] = 1; + strides[2] = sizes[0]; + strides[1] = strides[2] * sizes[2]; + return strides; + default: + TORCH_INTERNAL_ASSERT( + false, "ChannelsLast2d doesn't support size ", sizes.size()); + } +} + +inline std::vector get_channels_last_strides_2d(IntArrayRef sizes) { + return get_channels_last_strides_2d(sizes); +} + +template +std::vector get_channels_last_strides_3d(ArrayRef sizes) { + std::vector strides(sizes.size()); + switch (sizes.size()) { + case 5: + strides[1] = 1; + strides[4] = sizes[1]; + strides[3] = strides[4] * 
sizes[4]; + strides[2] = strides[3] * sizes[3]; + strides[0] = strides[2] * sizes[2]; + return strides; + case 4: + strides[0] = 1; + strides[3] = sizes[0]; + strides[2] = strides[3] * sizes[3]; + strides[1] = strides[2] * sizes[2]; + return strides; + default: + TORCH_INTERNAL_ASSERT( + false, "ChannelsLast3d doesn't support size ", sizes.size()); + } +} + +inline std::vector get_channels_last_strides_3d(IntArrayRef sizes) { + return get_channels_last_strides_3d(sizes); +} + +// NOTE: +// Below are Helper functions for is_channels_last_strides_xd. +// 1. Please do not combine these helper functions, each helper function handles +// exactly one case of sizes + memory_format, by doing this, the strides indices +// will be a constant array and we can access it using constant index number, +// the compiler will fully unroll the loop on strides indices to gain a better +// performance. +// 2. No error check in helper function, caller ensures the correctness of the +// input +// 3. All helper functions have similar comments, only 1st helper function is +// commented here. +template +inline bool is_channels_last_strides_2d_s4( + const ArrayRef sizes, + const ArrayRef strides) { + T min = 0; + // special case for trivial C dimension. default to NCHW + if (strides[1] == 0) { + return false; + } + // loop strides indices + for (auto& d : {1, 3, 2, 0}) { + if (sizes[d] == 0) { + return false; + } + if (strides[d] < min) { + return false; + } + // Fallback to NCHW as default layout for ambiguous cases + // This is the flaw of implicit memory_format from strides. + // N111 tensor with identical strides for size 1 dimension; + // Two cases could lead us here: + // a. N111 contiguous Tensor ([N,1,1,1]@[1,1,1,1]) + // b. N11W contiguous Tensor sliced on the W-dimension. + // ([N,1,1,1]@[W,W,W,W]) + if (d == 0 && min == strides[1]) { + return false; + } + // This is necessary to: + // 1. distinguish the memory_format of N1H1; + // [H, 1, 1, 1] channels_last stride + // [H, H, 1, 1] contiguous stride + // 2. permutation of 1C1W: + // [1, C, 1, H]@[HC, H, H, 1] transpose(1, 3) + // [1, H, 1, C]@[HC, 1, H, H] shouldn't be identified as channels_last + min = strides[d]; + if (sizes[d] > 1) { + min *= sizes[d]; + } + } + return true; +} + +template +inline bool is_channels_last_strides_3d_s5( + const ArrayRef sizes, + const ArrayRef strides) { + T min = 0; + if (strides[1] == 0) { + return false; + } + for (auto& d : {1, 4, 3, 2, 0}) { + if (sizes[d] == 0) { + return false; + } + if (strides[d] < min) { + return false; + } + if (d == 0 && min == strides[1]) { + return false; + } + min = strides[d]; + if (sizes[d] > 1) { + min *= sizes[d]; + } + } + return true; +} + +// Note [Ambiguous is_channels_last_strides_xd] +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +// The flaw of carrying memory_format implicitly through strides is very hard +// to WAR properly. issue #24090 +// Without the history of permutation, we can't infer the memory_format of a +// tensor from the snapshot of its size & stride +// e.g. +// +// 1. We can NOT specify the memory_format of N111 tensor through strides in a +// meaningful way; +// +// 2. Two path that ended up with identical size/stride +// N11W contiguous tensor sliced at w-dimension becomes [N,1,1,1]@[W,W,W,W] +// NC11 channels_last tensor sliced at c-dimension becomes [N,1,1,1]@[C,C,C,C] +// So if we see a tensor [N,1,1,1]@[X,X,X,X], there's no way for us to infer +// the memory_format of the original tensor. 
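The note resumes below; as a concrete anchor for it, here is a small sketch (hypothetical harness, not part of the header) of the 2d stride helper defined above:

#include <c10/core/MemoryFormat.h>
#include <c10/util/ArrayRef.h>
#include <vector>

// Illustrative only: for NCHW sizes {2, 3, 4, 5}, the channels-last strides
// come out as {60, 1, 15, 3} (C innermost). A [N,1,1,1] tensor, by contrast,
// admits several valid stride vectors, which is exactly the ambiguity the
// note describes.
inline std::vector<int64_t> channels_last_demo() {
  const int64_t sizes[] = {2, 3, 4, 5};
  return c10::get_channels_last_strides_2d(c10::IntArrayRef(sizes, 4));
}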
+// +// Due to the limitations, our temporary WAR `is_channels_last_strides` does the +// best effort to infer whether the original memory_format of a tensor is +// at::MemoryFormat::ChannelsLast. The two objectives of this function (ordered +// by their importance): +// 1. Ensure that normal shape manipulation does not accidentally change the +// MemoryFormat of an existing tensor. +// 2. Allows user to mark MemoryFormat::ChannelsLast to tensors; +// +// The function does so via checking strides of the tensor, including strides of +// size-1 dimensions. Although conventionally PyTorch implies no restriction on +// trivial stride (stride for size-1 dimension). +// +// Note that this approach is a compromise. We did not solve the problem +// completely. Many cases we will not be able to infer the correct memory +// format. +// The implementation of `is_channels_last_strides` is to serve the objectives: +// MemoryFormat::ChannelsLast has to be explicitly opted-in (no accidental +// conversion); Best effort to maintain the ChannelsLast flag. +// +// Due to the fact that this is not a bulletproof solution, through testing +// (aten/src/ATen/test/memory_format_test.cpp) +// a. we ensure that the common tasks are supported; +// a. we identify corner cases where the implementation compromises on. +// +// By the time accumulated permutation is enabled to replace implicit +// memory_format through strides, we should be updating our tests and fix the +// issues in our tests. +// +// We use Channels Last 2d as an example above. +// This is a general problem for all the is_channels_last_strides_xd +// implementation. Please check the helper functions +// (is_channels_last_strides_*d_s*) for more details. + +template +inline bool is_channels_last_strides_2d( + const ArrayRef sizes, + const ArrayRef strides) { + switch (sizes.size()) { + case 4: + return is_channels_last_strides_2d_s4(sizes, strides); + case 3: + // TODO dim == 3 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +template +inline bool is_channels_last_strides_3d( + const ArrayRef sizes, + const ArrayRef strides) { + switch (sizes.size()) { + case 5: + return is_channels_last_strides_3d_s5(sizes, strides); + case 4: + // TODO dim == 4 case will be enabled once it is fully tested + return false; + default: + return false; + } +} + +inline bool is_channels_last_strides_2d( + const IntArrayRef sizes, + const IntArrayRef strides) { + return is_channels_last_strides_2d(sizes, strides); +} + +inline bool is_channels_last_strides_3d( + const IntArrayRef sizes, + const IntArrayRef strides) { + return is_channels_last_strides_3d(sizes, strides); +} + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h new file mode 100644 index 0000000000000000000000000000000000000000..71eb4b34ac9e11938eb45b86dca83cbe1a27acfa --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/QEngine.h @@ -0,0 +1,46 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +/** + * QEngine is an enum that is used to select the engine to run quantized ops. 
+ * Keep this enum in sync with get_qengine_id() in + * torch/backends/quantized/__init__.py + */ +enum class QEngine : uint8_t { + NoQEngine = 0, + FBGEMM = 1, + QNNPACK = 2, + ONEDNN = 3, + X86 = 4, +}; + +constexpr auto kNoQEngine = QEngine::NoQEngine; +constexpr auto kFBGEMM = QEngine::FBGEMM; +constexpr auto kQNNPACK = QEngine::QNNPACK; +constexpr auto kONEDNN = QEngine::ONEDNN; +constexpr auto kX86 = QEngine::X86; + +inline std::string toString(QEngine qengine) { + switch (qengine) { + case kNoQEngine: + return "NoQEngine"; + case kFBGEMM: + return "FBGEMM"; + case kQNNPACK: + return "QNNPACK"; + case kONEDNN: + return "ONEDNN"; + case kX86: + return "X86"; + default: + TORCH_CHECK( + false, "Unrecognized Quantized Engine: ", static_cast(qengine)); + } +} + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h new file mode 100644 index 0000000000000000000000000000000000000000..6fd686b8993dd47739ec4af454b111c31e2d4cf3 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/Scalar.h @@ -0,0 +1,341 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +C10_CLANG_DIAGNOSTIC_PUSH() +#if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") +C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-int-float-conversion") +#endif + +namespace c10 { + +/** + * Scalar represents a 0-dimensional tensor which contains a single element. + * Unlike a tensor, numeric literals (in C++) are implicitly convertible to + * Scalar (which is why, for example, we provide both add(Tensor) and + * add(Scalar) overloads for many operations). It may also be used in + * circumstances where you statically know a tensor is 0-dim and single size, + * but don't know its type. + */ +class C10_API Scalar { + public: + Scalar() : Scalar(int64_t(0)) {} + + void destroy() { + if (Tag::HAS_si == tag || Tag::HAS_sd == tag) { + raw::intrusive_ptr::decref(v.p); + v.p = nullptr; + } + } + + ~Scalar() { + destroy(); + } + +#define DEFINE_IMPLICIT_CTOR(type, name) \ + Scalar(type vv) : Scalar(vv, true) {} + + AT_FORALL_SCALAR_TYPES_AND3(Half, BFloat16, ComplexHalf, DEFINE_IMPLICIT_CTOR) + AT_FORALL_COMPLEX_TYPES(DEFINE_IMPLICIT_CTOR) + +#undef DEFINE_IMPLICIT_CTOR + + // Value* is both implicitly convertible to SymbolicVariable and bool which + // causes ambiguity error. Specialized constructor for bool resolves this + // problem. 
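Before the bool specialization that follows, a brief usage sketch (hypothetical free function; the to##name accessors it calls are generated by DEFINE_ACCESSOR further down in this class):

#include <c10/core/Scalar.h>

// Illustrative only: numeric literals convert implicitly to Scalar, and the
// tagged union remembers which family (int, float, complex, bool) it holds.
inline double scalar_demo() {
  c10::Scalar i = 42;   // integral payload
  c10::Scalar d = 3.5;  // floating-point payload
  bool ok = i.isIntegral(/*includeBool=*/false) && d.isFloatingPoint();
  return ok ? i.toDouble() + d.toDouble() : 0.0;  // checked conversions
}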
+ template < + typename T, + typename std::enable_if::value, bool>::type* = + nullptr> + Scalar(T vv) : tag(Tag::HAS_b) { + v.i = convert(vv); + } + +#define DEFINE_ACCESSOR(type, name) \ + type to##name() const { \ + if (Tag::HAS_d == tag) { \ + return checked_convert(v.d, #type); \ + } else if (Tag::HAS_z == tag) { \ + return checked_convert>(v.z, #type); \ + } \ + if (Tag::HAS_b == tag) { \ + return checked_convert(v.i, #type); \ + } else if (Tag::HAS_i == tag) { \ + return checked_convert(v.i, #type); \ + } else if (Tag::HAS_si == tag) { \ + TORCH_CHECK(false, "tried to get " #name " out of SymInt") \ + } else if (Tag::HAS_sd == tag) { \ + TORCH_CHECK(false, "tried to get " #name " out of SymFloat") \ + } \ + TORCH_CHECK(false) \ + } + + // TODO: Support ComplexHalf accessor + AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_ACCESSOR) + +#undef DEFINE_ACCESSOR + + SymInt toSymInt() const { + if (Tag::HAS_si == tag) { + return c10::SymInt(intrusive_ptr::reclaim_copy( + static_cast(v.p))); + } else { + return toLong(); + } + } + + SymFloat toSymFloat() const { + if (Tag::HAS_sd == tag) { + return c10::SymFloat(intrusive_ptr::reclaim_copy( + static_cast(v.p))); + } else { + return toDouble(); + } + } + + // also support scalar.to(); + // Deleted for unsupported types, but specialized below for supported types + template + T to() const = delete; + + // audit uses of data_ptr + const void* data_ptr() const { + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return static_cast(&v); + } + + bool isFloatingPoint() const { + return Tag::HAS_d == tag || Tag::HAS_sd == tag; + } + + C10_DEPRECATED_MESSAGE( + "isIntegral is deprecated. Please use the overload with 'includeBool' parameter instead.") + bool isIntegral() const { + return Tag::HAS_i == tag || Tag::HAS_si == tag; + } + bool isIntegral(bool includeBool) const { + return Tag::HAS_i == tag || Tag::HAS_si == tag || + (includeBool && isBoolean()); + } + + bool isComplex() const { + return Tag::HAS_z == tag; + } + bool isBoolean() const { + return Tag::HAS_b == tag; + } + + // you probably don't actually want these; they're mostly for testing + bool isSymInt() const { + return Tag::HAS_si == tag; + } + bool isSymFloat() const { + return Tag::HAS_sd == tag; + } + + bool isSymbolic() const { + return Tag::HAS_si == tag || Tag::HAS_sd == tag; + } + + C10_ALWAYS_INLINE Scalar& operator=(Scalar&& other) noexcept { + if (&other == this) { + return *this; + } + + destroy(); + moveFrom(std::move(other)); + return *this; + } + + C10_ALWAYS_INLINE Scalar& operator=(const Scalar& other) { + if (&other == this) { + return *this; + } + + *this = Scalar(other); + return *this; + } + + Scalar operator-() const; + Scalar conj() const; + Scalar log() const; + + template < + typename T, + typename std::enable_if::value, int>::type = 0> + bool equal(T num) const { + if (isComplex()) { + TORCH_INTERNAL_ASSERT(!isSymbolic()); + auto val = v.z; + return (val.real() == num) && (val.imag() == T()); + } else if (isFloatingPoint()) { + TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality"); + return v.d == num; + } else if (isIntegral(/*includeBool=*/false)) { + TORCH_CHECK(!isSymbolic(), "NYI SymInt equality"); + return v.i == num; + } else if (isBoolean()) { + // boolean scalar does not equal to a non boolean value + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return false; + } else { + TORCH_INTERNAL_ASSERT(false); + } + } + + template < + typename T, + typename std::enable_if::value, int>::type = 0> + bool equal(T num) const { + if (isComplex()) { + 
TORCH_INTERNAL_ASSERT(!isSymbolic()); + return v.z == num; + } else if (isFloatingPoint()) { + TORCH_CHECK(!isSymbolic(), "NYI SymFloat equality"); + return (v.d == num.real()) && (num.imag() == T()); + } else if (isIntegral(/*includeBool=*/false)) { + TORCH_CHECK(!isSymbolic(), "NYI SymInt equality"); + return (v.i == num.real()) && (num.imag() == T()); + } else if (isBoolean()) { + // boolean scalar does not equal to a non boolean value + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return false; + } else { + TORCH_INTERNAL_ASSERT(false); + } + } + + bool equal(bool num) const { + if (isBoolean()) { + TORCH_INTERNAL_ASSERT(!isSymbolic()); + return static_cast(v.i) == num; + } else { + return false; + } + } + + ScalarType type() const { + if (isComplex()) { + return ScalarType::ComplexDouble; + } else if (isFloatingPoint()) { + return ScalarType::Double; + } else if (isIntegral(/*includeBool=*/false)) { + return ScalarType::Long; + } else if (isBoolean()) { + return ScalarType::Bool; + } else { + throw std::runtime_error("Unknown scalar type."); + } + } + + Scalar(Scalar&& rhs) noexcept : tag(rhs.tag) { + moveFrom(std::move(rhs)); + } + + Scalar(const Scalar& rhs) : tag(rhs.tag), v(rhs.v) { + if (isSymbolic()) { + c10::raw::intrusive_ptr::incref(v.p); + } + } + + Scalar(c10::SymInt si) { + if (si.is_symbolic()) { + tag = Tag::HAS_si; + v.p = std::move(si).release(); + } else { + tag = Tag::HAS_i; + v.i = si.as_int_unchecked(); + } + } + + Scalar(c10::SymFloat sd) { + if (sd.is_symbolic()) { + tag = Tag::HAS_sd; + v.p = std::move(sd).release(); + } else { + tag = Tag::HAS_d; + v.d = sd.as_float_unchecked(); + } + } + + // We can't set v in the initializer list using the + // syntax v{ .member = ... } because it doesn't work on MSVC + private: + enum class Tag { HAS_d, HAS_i, HAS_z, HAS_b, HAS_sd, HAS_si }; + + // NB: assumes that self has already been cleared + C10_ALWAYS_INLINE void moveFrom(Scalar&& rhs) noexcept { + v = rhs.v; + tag = rhs.tag; + if (rhs.tag == Tag::HAS_si || rhs.tag == Tag::HAS_sd) { + // Move out of scalar + rhs.tag = Tag::HAS_i; + rhs.v.i = 0; + } + } + + Tag tag; + + union v_t { + double d{}; + int64_t i; + c10::complex z; + c10::intrusive_ptr_target* p; + v_t() {} // default constructor + } v; + + template < + typename T, + typename std::enable_if< + std::is_integral::value && !std::is_same::value, + bool>::type* = nullptr> + Scalar(T vv, bool) : tag(Tag::HAS_i) { + v.i = convert(vv); + } + + template < + typename T, + typename std::enable_if< + !std::is_integral::value && !c10::is_complex::value, + bool>::type* = nullptr> + Scalar(T vv, bool) : tag(Tag::HAS_d) { + v.d = convert(vv); + } + + template < + typename T, + typename std::enable_if::value, bool>::type* = nullptr> + Scalar(T vv, bool) : tag(Tag::HAS_z) { + v.z = convert(vv); + } +}; + +using OptionalScalarRef = c10::OptionalRef; + +// define the scalar.to() specializations +#define DEFINE_TO(T, name) \ + template <> \ + inline T Scalar::to() const { \ + return to##name(); \ + } +AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_TO) +#undef DEFINE_TO + +} // namespace c10 + +C10_CLANG_DIAGNOSTIC_POP() diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..1d80daed871a25440b3cda2fdb9ed836443ac9dc --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/StorageImpl.h @@ -0,0 +1,226 @@ +#pragma once + +#include 
+#include +#include + +#include + +namespace c10 { + +// A storage represents the underlying backing data buffer for a +// tensor. This concept was inherited from the original Torch7 +// codebase; we'd kind of like to get rid of the concept +// (see https://github.com/pytorch/pytorch/issues/14797) but +// it's hard work and no one has gotten around to doing it. +// +// NB: storage is supposed to uniquely own a data pointer; e.g., +// two non-null data pointers alias if and only if they are from +// the same storage. Technically you can violate this invariant +// (e.g., you can create a non-owning StorageImpl with at::from_blob) +// but a lot of things won't work correctly, including: +// +// - An ordinary deleter on such a storage is wrong, because normal deleters +// assume unique ownership, but if you have two storages at the same data, +// that implies there is some sort of shared ownership. So your deleter would +// have to actually be internally doing some sort of refcount thing +// - Deepcopy in Python side relies on storage equality and not data pointer +// equality; so if there are two separate storages pointing to the same data, +// the data will actually get duplicated in that case (one data ptr before, +// two data ptrs after) +// - Version counts won't work correctly, because we do all VC tracking at the +// level of storages (unless you explicitly disconnect the VC with detach); +// mutation because data pointers are the same are totally untracked +struct C10_API StorageImpl : public c10::intrusive_ptr_target { + public: + struct use_byte_size_t {}; + + StorageImpl( + use_byte_size_t /*use_byte_size*/, + SymInt size_bytes, + at::DataPtr data_ptr, + at::Allocator* allocator, + bool resizable) + : data_ptr_(std::move(data_ptr)), + size_bytes_(std::move(size_bytes)), + size_bytes_is_symbolic_(size_bytes_.is_symbolic()), + resizable_(resizable), + received_cuda_(false), + allocator_(allocator) { + if (resizable) { + TORCH_INTERNAL_ASSERT( + allocator_, "For resizable storage, allocator must be provided"); + } + } + + StorageImpl( + use_byte_size_t /*use_byte_size*/, + SymInt size_bytes, + at::Allocator* allocator, + bool resizable) + : StorageImpl( + use_byte_size_t(), + size_bytes, + size_bytes.is_symbolic() + ? allocator->allocate(0) + : allocator->allocate(size_bytes.as_int_unchecked()), + allocator, + resizable) {} + + StorageImpl& operator=(StorageImpl&& other) = default; + StorageImpl& operator=(const StorageImpl&) = delete; + StorageImpl() = delete; + StorageImpl(StorageImpl&& other) = default; + StorageImpl(const StorageImpl&) = delete; + ~StorageImpl() override = default; + + void reset() { + data_ptr_.clear(); + size_bytes_ = 0; + size_bytes_is_symbolic_ = false; + } + + template + inline T* data() const { + return unsafe_data(); + } + + template + inline T* unsafe_data() const { + return static_cast(this->data_ptr_.get()); + } + + // Destructor doesn't call release_resources because it's + // unnecessary; don't forget to change that if needed! 
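Before the class body resumes with release_resources, a hedged construction sketch. The helper below is hypothetical, and c10::GetAllocator is assumed from c10/core/Allocator.h rather than defined here:

#include <c10/core/Allocator.h>
#include <c10/core/StorageImpl.h>

// Illustrative only: allocate a resizable 64-byte CPU storage; the
// allocator-taking constructor above derives the DataPtr itself.
inline c10::intrusive_ptr<c10::StorageImpl> make_cpu_storage() {
  at::Allocator* alloc = c10::GetAllocator(c10::DeviceType::CPU);
  return c10::make_intrusive<c10::StorageImpl>(
      c10::StorageImpl::use_byte_size_t(),
      c10::SymInt(64),
      alloc,
      /*resizable=*/true);
}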
+ void release_resources() override { + data_ptr_.clear(); + } + + size_t nbytes() const { + TORCH_CHECK(!size_bytes_is_symbolic_); + return size_bytes_.as_int_unchecked(); + } + + SymInt sym_nbytes() const { + return size_bytes_; + } + + // TODO: remove later + void set_nbytes(size_t size_bytes) { + size_bytes_ = size_bytes; + size_bytes_is_symbolic_ = false; + } + + void set_nbytes(c10::SymInt size_bytes) { + size_bytes_ = std::move(size_bytes); + } + + bool resizable() const { + return resizable_; + }; + + at::DataPtr& data_ptr() { + return data_ptr_; + }; + + const at::DataPtr& data_ptr() const { + return data_ptr_; + }; + + // Returns the previous data_ptr + at::DataPtr set_data_ptr(at::DataPtr&& data_ptr) { + at::DataPtr old_data_ptr(std::move(data_ptr_)); + data_ptr_ = std::move(data_ptr); + return old_data_ptr; + }; + + void set_data_ptr_noswap(at::DataPtr&& data_ptr) { + data_ptr_ = std::move(data_ptr); + } + + // TODO: Return const ptr eventually if possible + void* data() { + return data_ptr_.get(); + } + + void* data() const { + return data_ptr_.get(); + } + + at::DeviceType device_type() const { + return data_ptr_.device().type(); + } + + at::Allocator* allocator() { + return allocator_; + } + + const at::Allocator* allocator() const { + return allocator_; + }; + + // You generally shouldn't use this method, but it is occasionally + // useful if you want to override how a tensor will be reallocated, + // after it was already allocated (and its initial allocator was + // set) + void set_allocator(at::Allocator* allocator) { + allocator_ = allocator; + } + + Device device() const { + return data_ptr_.device(); + } + + void set_resizable(bool resizable) { + if (resizable) { + // We need an allocator to be resizable + AT_ASSERT(allocator_); + } + resizable_ = resizable; + } + + /** + * Can only be called when use_count is 1 + */ + void UniqueStorageShareExternalPointer( + void* src, + size_t size_bytes, + DeleterFnPtr d = nullptr) { + UniqueStorageShareExternalPointer( + at::DataPtr(src, src, d, data_ptr_.device()), size_bytes); + } + + /** + * Can only be called when use_count is 1 + */ + void UniqueStorageShareExternalPointer( + at::DataPtr&& data_ptr, + size_t size_bytes) { + data_ptr_ = std::move(data_ptr); + size_bytes_ = size_bytes; + size_bytes_is_symbolic_ = false; + allocator_ = nullptr; + resizable_ = false; + } + + // This method can be used only after storage construction and cannot be used + // to modify storage status + void set_received_cuda(bool received_cuda) { + received_cuda_ = received_cuda; + } + + bool received_cuda() { + return received_cuda_; + } + + private: + DataPtr data_ptr_; + SymInt size_bytes_; + bool size_bytes_is_symbolic_; + bool resizable_; + // Identifies that Storage was received from another process and doesn't have + // local to process cuda memory allocation + bool received_cuda_; + Allocator* allocator_; +}; +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h new file mode 100644 index 0000000000000000000000000000000000000000..3074aefe64c241da84143a001f8e6f59852f5404 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymBool.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include +#include +#include + +namespace c10 { + +class C10_API SymBool { + public: + /*implicit*/ SymBool(bool b) : data_(b){}; + SymBool(SymNode ptr) : data_(false), ptr_(std::move(ptr)) { + 
TORCH_CHECK(ptr_->is_bool()); + }; + SymBool() : data_(false) {} + + SymNodeImpl* toSymNodeImplUnowned() const { + return ptr_.get(); + } + + SymNodeImpl* release() && { + return std::move(ptr_).release(); + } + + SymNode toSymNodeImpl() const; + + bool expect_bool() const { + TORCH_CHECK(!is_symbolic()); + return data_; + } + + SymBool sym_and(const SymBool&) const; + SymBool sym_or(const SymBool&) const; + SymBool sym_not() const; + + SymBool operator&(const SymBool& other) const { + return sym_and(other); + } + SymBool operator|(const SymBool& other) const { + return sym_or(other); + } + SymBool operator~() const { + return sym_not(); + } + + // Insert a guard for the bool to be its concrete value, and then return + // that value. Note that C++ comparison operations default to returning + // bool, so it's not so common to have to call this + bool guard_bool(const char* file, int64_t line) const; + + C10_ALWAYS_INLINE bool is_symbolic() const { + return ptr_; + } + + bool as_bool_unchecked() const { + return data_; + } + + private: + // TODO: optimize to union + bool data_; + SymNode ptr_; +}; + +C10_API std::ostream& operator<<(std::ostream& os, const SymBool& s); +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h new file mode 100644 index 0000000000000000000000000000000000000000..50512dc6fb2061b567f50d6ba599748c8e720fed --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymFloat.h @@ -0,0 +1,74 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace c10 { + +// NB: this is actually double precision; we're using the Python naming here +class C10_API SymFloat { + public: + /*implicit*/ SymFloat(double d) : data_(d){}; + SymFloat(SymNode ptr) + : data_(std::numeric_limits::quiet_NaN()), ptr_(std::move(ptr)) { + TORCH_CHECK(ptr_->is_float()); + }; + SymFloat() : data_(0.0) {} + + SymNodeImpl* toSymNodeImplUnowned() const { + return ptr_.get(); + } + + SymNodeImpl* release() && { + return std::move(ptr_).release(); + } + + SymNode toSymNodeImpl() const; + + double expect_float() const { + TORCH_CHECK(!is_symbolic()); + return data_; + } + + SymFloat operator+(const SymFloat&) const; + SymFloat operator-(const SymFloat&) const; + SymFloat operator*(const SymFloat&) const; + SymFloat operator/(const SymFloat&) const; + + // Need guidance on where to put this code + SymFloat sqrt() const; + + // Insert a guard for the float to be its concrete value, and then return + // that value. This operation always works, even if the float is symbolic, + // so long as we know what the underlying value is. Don't blindly put this + // everywhere; you can cause overspecialization of PyTorch programs with + // this method. + // + // It should be called as guard_float(__FILE__, __LINE__). The file and line + // number can be used to diagnose overspecialization. + double guard_float(const char* file, int64_t line) const; + + // N.B. 
It's important to keep this definition in the header + // as we expect if checks to be folded for mobile builds + // where `is_symbolic` is always false + C10_ALWAYS_INLINE bool is_symbolic() const { + return ptr_; + } + + double as_float_unchecked() const { + return data_; + } + + private: + // TODO: optimize to union + double data_; + SymNode ptr_; +}; + +C10_API std::ostream& operator<<(std::ostream& os, const SymFloat& s); +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h new file mode 100644 index 0000000000000000000000000000000000000000..07e174275dda5f469a54ea95413495b1b8ca3b70 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymInt.h @@ -0,0 +1,310 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace c10 { + +class SymFloat; + +// SymInt represents either a regular int64_t, or a symbolic integer +// (represented in a type erased way as SymNode). The intention is for SymInt +// to represent symbolic sizes that arise when doing shape computation in +// operator kernels. This allows for tracing through programs without baking in +// concrete sizes into kernel calls. +// +// SymInt has an API equivalent to int64_t. In particular, it is a value type. +// Internally, SymInt is represented in a clever packed way, so that it only +// occupies one word of space; but morally, it is a union between an int64_t +// and an intrusive pointer to SymNodeImpl. +// +// Invariant: the referenced SymNodeImpl is guaranteed to be a SymNode where +// is_int() returns true + +class C10_API SymInt { + public: + enum Unchecked { + UNCHECKED, + }; + + /*implicit*/ SymInt(int64_t d) : data_(d) { + // NB: this relies on exception in constructor inhibiting + // destructor; otherwise we would attempt to deallocate + // the garbage data! 
+ TORCH_CHECK(!is_symbolic()); + }; + SymInt() : data_(0) {} + SymInt(SymNode n); + + // unchecked c-tor accepting raw `data_` + // One appropriate use for this is when you are constructing a symint + // in a situation where you know it is non-negative (or, if it is negative, + // the negative value is -1; i.e., not user controlled) + SymInt(Unchecked, int64_t d) : data_(d) {} + + // TODO: these implementations are not optimal because they allocate a + // temporary and then use the move constructor/assignment + SymInt(const SymInt& s) : data_(0) { + if (s.is_symbolic()) { + *this = SymInt(s.toSymNodeImpl()); + } else { + data_ = s.data_; + } + } + SymInt(SymInt&& s) noexcept : data_(s.data_) { + s.data_ = 0; + } + + SymInt& operator=(const SymInt& s) { + if (this != &s) { + if (s.is_symbolic()) { + *this = SymInt(s.toSymNodeImpl()); + } else { + data_ = s.data_; + } + } + return *this; + } + SymInt& operator=(SymInt&& s) noexcept { + if (this != &s) { + release_(); // release the current SymNode if any + data_ = s.data_; + if (s.is_symbolic()) + s.data_ = 0; + }; + return *this; + } + + SymInt clone() const { + if (is_symbolic()) { + return SymInt(toSymNodeImplUnowned()->clone()); + } + return *this; + } + + SymNodeImpl* toSymNodeImplUnowned() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(is_symbolic()); + uint64_t unextended_bits = static_cast(data_) & ~MASK; + uint64_t sign_bit_mask = 1ULL << (62 - 1); + // https://stackoverflow.com/questions/42534749/signed-extension-from-24-bit-to-32-bit-in-c + uint64_t extended_bits = (unextended_bits ^ sign_bit_mask) - sign_bit_mask; + return static_cast( + reinterpret_cast(static_cast(extended_bits))); + } + + void release_() { + if (is_symbolic()) { + SymNode::reclaim(toSymNodeImplUnowned()); // steal + } + } + + SymNodeImpl* release() && { +#ifndef C10_MOBILE + TORCH_INTERNAL_ASSERT(is_symbolic()); + auto* r = toSymNodeImplUnowned(); + data_ = 0; // transfer ownership + return r; +#else + TORCH_INTERNAL_ASSERT(false); +#endif + } + + SymNode toSymNodeImpl() const; + + ~SymInt() { + release_(); + } + + // Require the int to be non-symbolic, and if it is symbolic raise an + // error. This is safe to use for C++ code that doesn't work for symbolic + // shapes, and you don't have time to fix it immediately, as if we + // try to trigger the path in C++ you'll appropriately get an error + int64_t expect_int() const { + TORCH_CHECK(!is_symbolic()); + return data_; + } + + // Insert a guard for the int to be its concrete value, and then return + // that value. This operation always works, even if the int is symbolic, + // so long as we know what the underlying value is (e.g., this won't work + // if you call it on the size of nonzero output). Don't blindly put this + // everywhere; you can cause overspecialization of PyTorch programs with + // this method. + // + // It should be called as guard_int(__FILE__, __LINE__). The file and line + // number can be used to diagnose overspecialization. + int64_t guard_int(const char* file, int64_t line) const; + + // N.B. 
It's important to keep this definition in the header, + // as we expect `if` checks to be folded for mobile builds, + // where `is_symbolic` is always false, and dead code paths optimized away + C10_ALWAYS_INLINE bool is_symbolic() const { +#ifdef C10_MOBILE + return false; +#else + return !check_range(data_); +#endif + } + + SymInt operator+(const SymInt& sci) const; + SymInt operator-(const SymInt& sci) const; + SymInt operator*(const SymInt& sci) const; + SymInt operator/(const SymInt& sci) const; + SymInt operator%(const SymInt& sci) const; + void operator*=(const SymInt& sci); + void operator+=(const SymInt& sci); + void operator/=(const SymInt& sci); + + SymBool sym_eq(const SymInt&) const; + SymBool sym_ne(const SymInt&) const; + SymBool sym_lt(const SymInt&) const; + SymBool sym_le(const SymInt&) const; + SymBool sym_gt(const SymInt&) const; + SymBool sym_ge(const SymInt&) const; + + bool operator==(const SymInt& o) const { + return sym_eq(o).guard_bool(__FILE__, __LINE__); + } + bool operator!=(const SymInt& o) const { + return sym_ne(o).guard_bool(__FILE__, __LINE__); + } + bool operator<(const SymInt& o) const { + return sym_lt(o).guard_bool(__FILE__, __LINE__); + } + bool operator<=(const SymInt& o) const { + return sym_le(o).guard_bool(__FILE__, __LINE__); + } + bool operator>(const SymInt& o) const { + return sym_gt(o).guard_bool(__FILE__, __LINE__); + } + bool operator>=(const SymInt& o) const { + return sym_ge(o).guard_bool(__FILE__, __LINE__); + } + + SymInt min(const SymInt& sci) const; + SymInt max(const SymInt& sci) const; + + SymInt operator*(int64_t sci) const; + bool operator<(int64_t sci) const; + bool operator==(int64_t sci) const; + bool operator!=(int64_t sci) const; + bool operator<=(int64_t sci) const; + bool operator>(int64_t sci) const; + bool operator>=(int64_t sci) const; + + operator SymFloat() const; + + int64_t as_int_unchecked() const { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!is_symbolic()); + return data_; + } + + // Return whether the integer is representable as a SymInt. + static bool check_range(int64_t i) { + return i > MAX_UNREPRESENTABLE_INT; + } + + // Return the min representable integer as a SymInt + static constexpr int64_t min_representable_int() { + return MAX_UNREPRESENTABLE_INT + 1; + } + + private: + // Constraints on the internal representation: + // + // - Should represent positive and small negative ints + // - No conversion necessary for operations on ints + // - Must represent valid 64-bit pointers + // - The is-symbolic test should be FAST (two arithmetic instructions is too + // much). + // This code being a hotpath is based on Strobelight profiles of + // is_symbolic(). FB only: https://fburl.com/strobelight/5l50ncxd + // (you will need to change the time window). + // + // So, the scheme is to reserve large negative numbers (assuming + // two's complement): + // + // - 0b0.... means we are a positive int + // - 0b11... means we are a small negative int + // - 0b10... means we are a pointer. This means that + // [-2^63, -2^62-1] are not representable as ints.
+ // We don't actually need all of this space, since on x86_64 + // the top 16 bits aren't used for anything. + static constexpr uint64_t MASK = 1ULL << 63 | 1ULL << 62 | 1ULL << 61; + static constexpr uint64_t IS_SYM = 1ULL << 63 | 1ULL << 61; + // We must manually translate the bit pattern test into a greater + // than test because the compiler doesn't figure it out: + // https://godbolt.org/z/356aferaW + static constexpr int64_t MAX_UNREPRESENTABLE_INT = + -1LL & static_cast<int64_t>(~(1ULL << 62)); + int64_t data_; +}; + +/// Product of a list of SymInt; accumulates into the c10::SymInt expression +template < + typename C, + typename std::enable_if< + std::is_same<std::vector<c10::SymInt>, C>::value, + int>::type = 0> +inline c10::SymInt multiply_integers(const C& container) { + return std::accumulate( + container.begin(), + container.end(), + c10::SymInt(1), + [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; }); +} + +template < + typename Iter, + typename = std::enable_if_t<std::is_same< + typename std::iterator_traits<Iter>::value_type, + c10::SymInt>::value>> +inline c10::SymInt multiply_integers(Iter begin, Iter end) { + return std::accumulate( + begin, + end, + c10::SymInt(1), + [](const c10::SymInt& a, const c10::SymInt& b) { return a * b; }); +} + +inline SymInt operator+(int64_t a, const SymInt& b) { + return c10::SymInt(a) + b; +} +inline SymInt operator-(int64_t a, const SymInt& b) { + return c10::SymInt(a) - b; +} +inline SymInt operator*(int64_t a, const SymInt& b) { + return c10::SymInt(a) * b; +} +inline SymInt operator/(int64_t a, const SymInt& b) { + return c10::SymInt(a) / b; +} +inline SymInt operator%(int64_t a, const SymInt& b) { + return c10::SymInt(a) % b; +} +inline bool operator==(int64_t a, const SymInt& b) { + return c10::SymInt(a) == b; +} +inline bool operator!=(int64_t a, const SymInt& b) { + return c10::SymInt(a) != b; +} +inline bool operator<(int64_t a, const SymInt& b) { + return c10::SymInt(a) < b; +} +inline bool operator<=(int64_t a, const SymInt& b) { + return c10::SymInt(a) <= b; +} +inline bool operator>(int64_t a, const SymInt& b) { + return c10::SymInt(a) > b; +} +inline bool operator>=(int64_t a, const SymInt& b) { + return c10::SymInt(a) >= b; +} + +C10_API std::ostream& operator<<(std::ostream& os, const SymInt& s); +C10_API SymInt operator-(const SymInt& s); +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h new file mode 100644 index 0000000000000000000000000000000000000000..c86d5ebb74c75c9d00c6d11de592a1be1570838c --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymIntArrayRef.h @@ -0,0 +1,65 @@ +#pragma once + +#include <c10/core/SymInt.h> +#include <c10/util/ArrayRef.h> +#include <c10/util/Exception.h> +#include <c10/util/Optional.h> + +namespace c10 { +using SymIntArrayRef = ArrayRef<SymInt>; + +inline at::IntArrayRef asIntArrayRefUnchecked(c10::SymIntArrayRef ar) { + return IntArrayRef(reinterpret_cast<const int64_t*>(ar.data()), ar.size()); +} + +inline c10::optional<at::IntArrayRef> asIntArrayRefSlowOpt( + c10::SymIntArrayRef ar) { + for (const c10::SymInt& sci : ar) { + if (sci.is_symbolic()) { + return c10::nullopt; + } + } + + return {asIntArrayRefUnchecked(ar)}; +} + +inline at::IntArrayRef asIntArrayRefSlow( + c10::SymIntArrayRef ar, + const char* file, + int64_t line) { + for (const c10::SymInt& sci : ar) { + TORCH_CHECK( + !sci.is_symbolic(), + file, + ":", + line, + ": SymIntArrayRef expected to contain only concrete integers"); + } + return asIntArrayRefUnchecked(ar); +} + +#define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__)
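To tie these pieces together, a small usage sketch (hypothetical harness; multiply_integers, SymInt::check_range, and C10_AS_INTARRAYREF_SLOW are the names defined above):

#include <c10/core/SymInt.h>
#include <c10/core/SymIntArrayRef.h>
#include <vector>

// Illustrative only: with concrete (non-symbolic) values SymInt behaves like
// int64_t, and a SymIntArrayRef of concrete values can be viewed as a plain
// IntArrayRef via the slow, checked conversion macro.
inline void symint_demo() {
  std::vector<c10::SymInt> sizes = {c10::SymInt(2), c10::SymInt(3), c10::SymInt(4)};
  c10::SymInt numel = c10::multiply_integers(sizes);  // 2 * 3 * 4 = 24
  bool fits = c10::SymInt::check_range(24);           // true: above the cutoff
  c10::IntArrayRef plain = C10_AS_INTARRAYREF_SLOW(c10::SymIntArrayRef(sizes));
  (void)numel; (void)fits; (void)plain;
}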
+// Prefer using a more semantic constructor, like +// fromIntArrayRefKnownNonNegative +inline SymIntArrayRef fromIntArrayRefUnchecked(IntArrayRef array_ref) { + return SymIntArrayRef( + reinterpret_cast(array_ref.data()), array_ref.size()); +} + +inline SymIntArrayRef fromIntArrayRefKnownNonNegative(IntArrayRef array_ref) { + return fromIntArrayRefUnchecked(array_ref); +} + +inline SymIntArrayRef fromIntArrayRefSlow(IntArrayRef array_ref) { + for (long i : array_ref) { + TORCH_CHECK( + SymInt::check_range(i), + "IntArrayRef contains an int that cannot be represented as a SymInt: ", + i); + } + return SymIntArrayRef( + reinterpret_cast(array_ref.data()), array_ref.size()); +} + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..1e5a4ff8dbdb708a3908ea6853ca2b511b8f0611 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h @@ -0,0 +1,141 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace c10 { + +class SymNodeImpl; +using SymNode = c10::intrusive_ptr; + +class C10_API SymNodeImpl : public c10::intrusive_ptr_target { + public: + ~SymNodeImpl() override = default; + + template + c10::intrusive_ptr dyn_cast() const { + return c10::intrusive_ptr::reclaim_copy(dynamic_cast(this)); + } + + // these could be pure virtual when we implement LTC versions + virtual bool is_int() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_bool() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool is_float() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode add(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sub(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode mul(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode truediv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode pow(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode floordiv(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode mod(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode eq(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ne(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode gt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode lt(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode le(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ge(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode ceil() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode floor() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode neg() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_min(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_max(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_or(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_and(const SymNode& other) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_not() { + TORCH_CHECK(false, "NYI"); + }; + // NB: self is ignored here, only the arguments are used + virtual SymNode is_non_overlapping_and_dense( + ArrayRef sizes, + ArrayRef strides) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode 
clone() { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode sym_float() { + TORCH_CHECK(false, "NYI"); + } + virtual SymNode wrap_int(int64_t num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_float(double num) { + TORCH_CHECK(false, "NYI"); + }; + virtual SymNode wrap_bool(bool num) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t guard_int(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual bool guard_bool(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual double guard_float(const char* file, int64_t line) { + TORCH_CHECK(false, "NYI"); + }; + virtual int64_t int_() { + TORCH_CHECK(false, "NYI"); + }; + virtual bool bool_() { + TORCH_CHECK(false, "NYI"); + }; + virtual std::string str() { + TORCH_CHECK(false, "NYI"); + }; + std::ostream& operator<<(std::ostream& os) { + os << str(); + return os; + }; +}; + +} // namespace c10 diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h new file mode 100644 index 0000000000000000000000000000000000000000..ee21ca727a966aceb974481d3d5c273938d23a70 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/TensorOptions.h @@ -0,0 +1,775 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +namespace c10 { + +DispatchKey computeDispatchKey( + c10::optional dtype, + c10::optional layout, + c10::optional device); + +inline ScalarType dtype_or_default(c10::optional dtype) { + return value_or_else(dtype, [] { return get_default_dtype_as_scalartype(); }); +} + +inline caffe2::TypeMeta dtype_or_default( + c10::optional dtype) { + return value_or_else(dtype, [] { return get_default_dtype(); }); +} + +inline Layout layout_or_default(c10::optional layout) { + return layout.value_or(kStrided); +} + +inline Device device_or_default(c10::optional device) { + return value_or_else(device, [] { return Device(kCPU); }); +} + +inline bool pinned_memory_or_default(c10::optional pinned_memory) { + return pinned_memory.value_or(false); +} + +/// A class to encapsulate construction axes of an Tensor. TensorOptions was +/// designed to support the Python style API for specifying construction options +/// on factory functions, e.g., +/// +/// torch.zeros(2, 3, dtype=torch.int32) +/// +/// Because C++ doesn't natively support keyword arguments, there must be +/// another way of specifying keyword-like arguments. TensorOptions is a +/// builder class which can be used to construct this "dictionary" of keyword +/// arguments: functions which support TensorOptions conventionally take this +/// argument optionally as their last argument. +/// +/// WARNING: In PyTorch, there are `torch::` variants of factory functions, +/// e.g., torch::zeros for at::zeros. These return Variables (while the +/// stock ATen functions return plain Tensors). If you mix these functions +/// up, you WILL BE SAD. +/// +/// Rather than use the constructor of this class directly, you should prefer to +/// use the constructor functions, and then chain setter methods on top of them. +/// +/// at::device(at::kCUDA).dtype(kInt) +/// at::dtype(at::kInt) +/// +/// Additionally, anywhere a TensorOptions is expected, you can directly +/// pass at::kCUDA / at::kInt, and it will implicitly convert to a +/// TensorOptions. 
+/// +/// Here are some recommended ways to create a 2x2 tensor of zeros +/// with certain properties. These all *implicitly* make use of +/// TensorOptions, even if they don't mention the class explicitly: +/// +/// at::zeros({2,2}, at::kCUDA); +/// at::zeros({2,2}, at::kLong); +/// at::zeros({2,2}, at::device(at::kCUDA).dtype(at::kLong())); +/// at::zeros({2,2}, at::device({at::kCUDA, 1})); // place on device 1 +/// at::zeros({2,2}, at::requires_grad()); +/// + +/// NOTE [ TensorOptions Constructors ] +/// +/// TensorOptions is like a dictionary with entries from the set: +/// {requires_grad, device, dtype, layout}, where each entry may be +/// unspecified (i.e., is optional). It is used to specify the properties of +/// tensors in many places, both in the C++ internals and the API, e.g., tensor +/// factory methods like `at::empty({10}, options)`, tensor conversions like +/// `tensor.to(...)`, etc. +/// +/// To provide a simple API that is consistent with Python, where one can do +/// `torch.empty(sizes, X)` with `X` being a `torch.device`, `torch.dtype`, or a +/// `torch.layout`, we want TensorOptions to be implicitly convertible from +/// `ScalarType dtype`, `Layout layout` and `Device device`. Therefore, we have +/// three implicit constructors from each of these three types. +/// +/// This is sufficient for `ScalarType` and `Layout` as they are simple Enum +/// classes. However, `Device` is an ordinary class with implicit constructors +/// `Device(DeviceType, DeviceIndex = -1)` and `Device(std::string)` to be +/// consistent with the Python API, where strings are treated as equivalent to a +/// `torch.device` object (e.g., "cuda:1" can be passed everywhere a +/// `torch.device("cuda:1")` is accepted). To support the syntax +/// `at::empty({10}, {kCUDA, 1})` and `tensor.to(kCUDA)`, we need to make sure +/// that `TensorOptions` is implicitly constructible with any arguments that a +/// `Device` can be constructed from. So we have: +/// +/// /* implicit */ TensorOptions(T&& device) : TensorOptions() { +/// this->set_device(device); +/// } +/// +/// template <typename... Args, +/// typename = std::enable_if_t< +/// std::is_constructible<Device, Args&&...>::value>> +/// /* implicit */ TensorOptions(Args&&... args) +/// : TensorOptions(Device(std::forward<Args>(args)...)) {} +/// +/// +/// But this will be problematic. Consider this: `TensorOptions({kCUDA, 1})`. +/// The compiler will complain about ambiguity between the copy constructor and +/// the `Device` constructor because `{kCUDA, 1}` can be converted to both a +/// `TensorOptions` and a `Device`. +/// +/// To get around this, we templatize the `Device` constructor. Since overload +/// resolution is done before template resolution, our problem is solved. + +DispatchKey computeDispatchKey( + optional<ScalarType> dtype, + optional<Layout> layout, + optional<Device> device); + +struct C10_API TensorOptions { + TensorOptions() + : requires_grad_(false), + pinned_memory_(false), + has_device_(false), + has_dtype_(false), + has_layout_(false), + has_requires_grad_(false), + has_pinned_memory_(false), + has_memory_format_(false) {} + + /// Constructs a `TensorOptions` object with the given layout. + /* implicit */ TensorOptions(Layout layout) : TensorOptions() { + this->set_layout(layout); + } + + /// Constructs a `TensorOptions` object with the given device. + /// See NOTE [ TensorOptions Constructors ] on why this is templatized.
+ template < + typename T, + typename = std::enable_if_t, Device>::value>> + /* implicit */ TensorOptions(T&& device) : TensorOptions() { + this->set_device(std::forward(device)); + } + + /// Constructs a `TensorOptions` object from arguments allowed in `Device` + /// constructors. + /// + /// See NOTE [ TensorOptions Constructors ]. + /// + /// NB: Ideally we only allow implicit constructors here. But there is no easy + /// way to detect them. So we have this one that allows explicit + /// constructors too. + template < + typename... Args, + typename = + std::enable_if_t::value>> + /* implicit */ TensorOptions(Args&&... args) + : TensorOptions(Device(std::forward(args)...)) {} + + /// Constructs a `TensorOptions` object with the given dtype. + /* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() { + this->set_dtype(dtype); + } + + /// legacy constructor to support ScalarType + /* implicit */ TensorOptions(ScalarType dtype) : TensorOptions() { + this->set_dtype(dtype); + } + + /// Constructs a `TensorOptions` object with the given memory format. + /* implicit */ TensorOptions(MemoryFormat memory_format) : TensorOptions() { + set_memory_format(memory_format); + } + + /// Return a copy of `TensorOptions` with `device` set to the given one, or + /// cleared if `device` is `nullopt`. + C10_NODISCARD TensorOptions + device(c10::optional device) const noexcept { + TensorOptions r = *this; + r.set_device(device); + return r; + } + + /// Return a copy of `TensorOptions` with `device` set to the given one. + /// (This overload ensures that variadic template c10::optional constructor + /// for Device work correctly.) + template + C10_NODISCARD TensorOptions device(Args&&... args) const noexcept { + return device( + c10::optional(c10::in_place, std::forward(args)...)); + } + + /// Return a copy of `TensorOptions`, but with device set to CUDA, and the + /// device index set to the given one. + /// + /// TODO: This function encourages bad behavior (assuming CUDA is + /// the only device that matters). Get rid of it / rename it. + C10_NODISCARD TensorOptions + device_index(c10::DeviceIndex device_index) const noexcept { + return device(Device::Type::CUDA, device_index); + } + + /// Return a copy of `TensorOptions` with `dtype` set to the given one. + C10_NODISCARD TensorOptions + dtype(c10::optional dtype) const noexcept { + TensorOptions r = *this; + r.set_dtype(dtype); + return r; + } + + // legacy function to support ScalarType + C10_NODISCARD TensorOptions + dtype(c10::optional dtype) const noexcept { + TensorOptions r = *this; + r.set_dtype(dtype); + return r; + } + + // Since dtype is taken... + template + TensorOptions& dtype() { + dtype_ = caffe2::TypeMeta::Make(); + has_dtype_ = true; + return *this; + } + + /// Sets the layout of the `TensorOptions`. + C10_NODISCARD TensorOptions + layout(c10::optional layout) const noexcept { + TensorOptions r = *this; + r.set_layout(layout); + return r; + } + + /// Sets the `requires_grad` property of the `TensorOptions`. + C10_NODISCARD TensorOptions + requires_grad(c10::optional requires_grad) const noexcept { + TensorOptions r = *this; + r.set_requires_grad(requires_grad); + return r; + } + + /// Sets the `pinned_memory` property on the `TensorOptions`. + C10_NODISCARD TensorOptions + pinned_memory(c10::optional pinned_memory) const noexcept { + TensorOptions r = *this; + r.set_pinned_memory(pinned_memory); + return r; + } + + /// Sets the `memory_format` property on `TensorOptions`. 
+  C10_NODISCARD TensorOptions
+  memory_format(c10::optional<MemoryFormat> memory_format) const noexcept {
+    TensorOptions r = *this;
+    r.set_memory_format(memory_format);
+    return r;
+  }
+
+  /// Returns the device of the `TensorOptions`.
+  Device device() const noexcept {
+    return device_or_default(device_opt());
+  }
+
+  /// Returns whether the device is specified.
+  bool has_device() const noexcept {
+    return has_device_;
+  }
+
+  /// Returns the device of the `TensorOptions`, or `c10::nullopt` if
+  /// device is not specified.
+  c10::optional<Device> device_opt() const noexcept {
+    return has_device_ ? c10::make_optional(device_) : c10::nullopt;
+  }
+
+  /// Returns the device index of the `TensorOptions`.
+  int32_t device_index() const noexcept {
+    return device().index();
+  }
+
+  /// Returns the dtype of the `TensorOptions`.
+  caffe2::TypeMeta dtype() const noexcept {
+    return dtype_or_default(dtype_opt());
+  }
+
+  /// Returns whether the dtype is specified.
+  bool has_dtype() const noexcept {
+    return has_dtype_;
+  }
+
+  /// Returns the dtype of the `TensorOptions`, or `c10::nullopt` if
+  /// dtype is not specified.
+  c10::optional<caffe2::TypeMeta> dtype_opt() const noexcept {
+    return has_dtype_ ? c10::make_optional(dtype_) : c10::nullopt;
+  }
+
+  /// Returns the layout of the `TensorOptions`.
+  Layout layout() const noexcept {
+    return layout_or_default(layout_opt());
+  }
+
+  /// Returns whether the layout is specified.
+  bool has_layout() const noexcept {
+    return has_layout_;
+  }
+
+  /// Returns the layout of the `TensorOptions`, or `c10::nullopt` if
+  /// layout is not specified.
+  c10::optional<Layout> layout_opt() const noexcept {
+    return has_layout_ ? c10::make_optional(layout_) : c10::nullopt;
+  }
+
+  /// Returns the `requires_grad` property of the `TensorOptions`.
+  bool requires_grad() const noexcept {
+    return has_requires_grad_ ? requires_grad_ : false;
+  }
+
+  /// Returns whether the `requires_grad` is specified.
+  bool has_requires_grad() const noexcept {
+    return has_requires_grad_;
+  }
+
+  /// Returns the `requires_grad` property of the `TensorOptions`, or
+  /// `c10::nullopt` if `requires_grad` is not specified.
+  c10::optional<bool> requires_grad_opt() const noexcept {
+    return has_requires_grad_ ? c10::make_optional(requires_grad_)
+                              : c10::nullopt;
+  }
+
+  /// Returns the `pinned_memory` property of the `TensorOptions`.
+  bool pinned_memory() const noexcept {
+    return pinned_memory_or_default(pinned_memory_opt());
+  }
+
+  /// Returns whether the `pinned_memory` is specified.
+  bool has_pinned_memory() const noexcept {
+    return has_pinned_memory_;
+  }
+
+  /// Returns whether the layout is sparse.
+  bool is_sparse() const {
+    return layout_ == c10::Layout::Sparse;
+  }
+
+  bool is_sparse_csr() const {
+    return layout_ == c10::Layout::SparseCsr;
+  }
+
+  // For compatibility with legacy tensor.type() comparisons
+  bool type_equal(const TensorOptions& other) const {
+    return computeDispatchKey() == other.computeDispatchKey() &&
+        typeMetaToScalarType(dtype_) == typeMetaToScalarType(other.dtype());
+  }
+
+  /// Returns the `pinned_memory` property of the `TensorOptions`, or
+  /// `c10::nullopt` if `pinned_memory` is not specified.
+  c10::optional<bool> pinned_memory_opt() const noexcept {
+    return has_pinned_memory_ ? c10::make_optional(pinned_memory_)
+                              : c10::nullopt;
+  }
+
+  /// Returns whether the `memory_format` is specified.
+  bool has_memory_format() const noexcept {
+    return has_memory_format_;
+  }
+
+  // NB: memory_format() getter is PURPOSELY not defined, as the default
+  // behavior of memory_format varies from function to function.
+
+  /// Returns the `memory_format` property of `TensorOptions`, or
+  /// `c10::nullopt` if `memory_format` is not specified.
+  c10::optional<MemoryFormat> memory_format_opt() const noexcept {
+    return has_memory_format_ ? c10::make_optional(memory_format_)
+                              : c10::nullopt;
+  }
+
+  // Resolves the ATen backend specified by the current construction axes.
+  // TODO: Deprecate this
+  Backend backend() const {
+    return at::dispatchKeyToBackend(computeDispatchKey());
+  }
+
+  /// Return the right-biased merge of two TensorOptions. This has the
+  /// effect of overwriting the settings from `self` with the specified
+  /// options of `options`.
+  ///
+  /// NB: This merging operation does NOT respect device merges.
+  /// For example, if you call `device({kCUDA, 1}).merge_in(kCUDA)`,
+  /// you will get kCUDA in the end! Functions like Tensor.new_empty
+  /// ensure the right device is selected anyway by way of a
+  /// device guard.
+  ///
+  TensorOptions merge_in(TensorOptions options) const noexcept {
+    TensorOptions merged = *this;
+    if (options.has_device())
+      merged.set_device(options.device_opt());
+    if (options.has_dtype())
+      merged.set_dtype(options.dtype_opt());
+    if (options.has_layout())
+      merged.set_layout(options.layout_opt());
+    // NB: requires grad is right biased; not a logical AND/OR!
+    if (options.has_requires_grad())
+      merged.set_requires_grad(options.requires_grad_opt());
+    if (options.has_pinned_memory())
+      merged.set_pinned_memory(options.pinned_memory_opt());
+    if (options.has_memory_format())
+      merged.set_memory_format(options.memory_format_opt());
+    return merged;
+  }
+
+  // TODO remove after TensorOptions rationalization
+  TensorOptions merge_memory_format(
+      c10::optional<MemoryFormat> optional_memory_format) const noexcept {
+    TensorOptions merged = *this;
+    if (optional_memory_format.has_value()) {
+      merged.set_memory_format(*optional_memory_format);
+    }
+    return merged;
+  }
+
+  // INVARIANT: computeDispatchKey returns only the subset of dispatch keys
+  // for which dispatchKeyToBackend is injective, if it is defined at all (for
+  // the most part, this just means that this function never returns an
+  // Autograd key)
+  DispatchKey computeDispatchKey() const {
+    return c10::computeDispatchKey(
+        optTypeMetaToScalarType(dtype_opt()), layout_opt(), device_opt());
+  }
+
+ private:
+  // These methods are currently private because I'm not sure if it's wise
+  // to actually publish them. They are methods because I need them in
+  // the constructor and the functional API implementation.
+  //
+  // If you really, really need it, you can make these public, but check if you
+  // couldn't just do what you need with the functional API. Similarly, these
+  // methods are not chainable, because if you wanted chaining, you probably
+  // want to use the functional API instead. (It's probably OK to make
+  // these chainable, because these functions are all explicitly annotated
+  // with a ref-qualifier, the trailing &, that makes them illegal to call
+  // on temporaries.)
+
+  /// Mutably set the device of `TensorOptions`.
+  void set_device(c10::optional<Device> device) & noexcept {
+    if (device) {
+      device_ = *device;
+      has_device_ = true;
+    } else {
+      has_device_ = false;
+    }
+  }
+
+  /// Mutably set the dtype of `TensorOptions`.
+  void set_dtype(c10::optional<caffe2::TypeMeta> dtype) & noexcept {
+    if (dtype) {
+      dtype_ = *dtype;
+      has_dtype_ = true;
+    } else {
+      has_dtype_ = false;
+    }
+  }
+
+  // legacy function to support ScalarType
+  void set_dtype(c10::optional<ScalarType> dtype) & noexcept {
+    if (dtype) {
+      dtype_ = scalarTypeToTypeMeta(*dtype);
+      has_dtype_ = true;
+    } else {
+      has_dtype_ = false;
+    }
+  }
+
+  /// Mutably set the layout of `TensorOptions`.
+  void set_layout(c10::optional<Layout> layout) & noexcept {
+    if (layout) {
+      layout_ = *layout;
+      has_layout_ = true;
+    } else {
+      has_layout_ = false;
+    }
+  }
+
+  /// Mutably set the `requires_grad` property of `TensorOptions`.
+  void set_requires_grad(c10::optional<bool> requires_grad) & noexcept {
+    if (requires_grad) {
+      requires_grad_ = *requires_grad;
+      has_requires_grad_ = true;
+    } else {
+      has_requires_grad_ = false;
+    }
+  }
+
+  /// Mutably set the `pinned_memory` property of `TensorOptions`.
+  void set_pinned_memory(c10::optional<bool> pinned_memory) & noexcept {
+    if (pinned_memory) {
+      pinned_memory_ = *pinned_memory;
+      has_pinned_memory_ = true;
+    } else {
+      has_pinned_memory_ = false;
+    }
+  }
+
+  /// Mutably set the `memory_format` property of `TensorOptions`.
+  void set_memory_format(c10::optional<MemoryFormat> memory_format) & noexcept {
+    if (memory_format) {
+      memory_format_ = *memory_format;
+      has_memory_format_ = true;
+    } else {
+      has_memory_format_ = false;
+    }
+  }
+
+  // WARNING: If you edit TensorOptions to add more options, you
+  // may need to adjust the implementation of Tensor::options.
+  // The criterion for whether or not Tensor::options must be adjusted
+  // is whether or not the new option you added should be preserved
+  // by functions such as empty_like(); if it should be preserved,
+  // you must adjust options().
+  //
+  // TODO: MemoryFormat is not implemented in this way
+
+  // NB: We didn't use c10::optional here, because then we can't pack
+  // the has_***_ boolean fields.
+
+  Device device_ = at::kCPU; // 16-bit
+  caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 16-bit
+  Layout layout_ = at::kStrided; // 8-bit
+  MemoryFormat memory_format_ = MemoryFormat::Contiguous; // 8-bit
+
+  // Bitmask required here to get this to fit inside 32 bits (or even 64 bits,
+  // for that matter)
+
+  bool requires_grad_ : 1;
+  bool pinned_memory_ : 1;
+
+  bool has_device_ : 1;
+  bool has_dtype_ : 1;
+  bool has_layout_ : 1;
+  bool has_requires_grad_ : 1;
+  bool has_pinned_memory_ : 1;
+  bool has_memory_format_ : 1;
+};
+
+// We should aspire to fit in one machine-size word; but a size greater than
+// two words is too much. (We are doing terribly on 32-bit archs, where we
+// require three machine size words to store tensor options. Eek!)
+static_assert(
+    sizeof(TensorOptions) <= sizeof(int64_t) * 2,
+    "TensorOptions must fit in 128-bits");
+
+/// Convenience function that returns a `TensorOptions` object with the `dtype`
+/// set to the given one.
+inline TensorOptions dtype(caffe2::TypeMeta dtype) {
+  return TensorOptions().dtype(dtype);
+}
+
+// legacy function to support ScalarType
+inline TensorOptions dtype(ScalarType dtype) {
+  return TensorOptions().dtype(scalarTypeToTypeMeta(dtype));
+}
+
+/// Convenience function that returns a `TensorOptions` object with the `layout`
+/// set to the given one.
+inline TensorOptions layout(Layout layout) {
+  return TensorOptions().layout(layout);
+}
+
+/// Convenience function that returns a `TensorOptions` object with the `device`
+/// set to the given one.
+inline TensorOptions device(Device device) {
+  return TensorOptions().device(device);
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `device` set to CUDA and the `device_index` set to the given one.
+inline TensorOptions device_index(int16_t device_index) {
+  return TensorOptions().device_index(
+      static_cast<c10::DeviceIndex>(device_index));
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `requires_grad` set to the given one.
+inline TensorOptions requires_grad(bool requires_grad = true) {
+  return TensorOptions().requires_grad(requires_grad);
+}
+
+/// Convenience function that returns a `TensorOptions` object with the
+/// `memory_format` set to the given one.
+inline TensorOptions memory_format(MemoryFormat memory_format) {
+  return TensorOptions().memory_format(memory_format);
+}
+
+C10_API std::ostream& operator<<(
+    std::ostream& stream,
+    const TensorOptions& options);
+
+template <typename T>
+inline TensorOptions dtype() {
+  return dtype(caffe2::TypeMeta::Make<T>());
+}
+
+inline std::string toString(const TensorOptions options) {
+  std::ostringstream stream;
+  stream << options;
+  return stream.str();
+}
+
+// This is intended to be a centralized location by which we can determine
+// what an appropriate DispatchKey for a tensor is.
+inline DispatchKey computeDispatchKey(
+    c10::optional<ScalarType> dtype,
+    c10::optional<Layout> layout,
+    c10::optional<Device> device) {
+  const auto layout_ = layout_or_default(layout);
+  const auto device_ = device_or_default(device);
+  switch (layout_) {
+    case Layout::Strided: {
+      const auto dtype_ = dtype_or_default(dtype);
+      switch (device_.type()) {
+#define DO_CASE(device, _)                   \
+  case DeviceType::device: {                 \
+    if (isQIntType(dtype_)) {                \
+      return DispatchKey::Quantized##device; \
+    }                                        \
+    return DispatchKey::device;              \
+  }
+        C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
+#undef DO_CASE
+        case DeviceType::FPGA:
+          return DispatchKey::FPGA;
+        case DeviceType::ORT:
+          return DispatchKey::ORT;
+        case DeviceType::Vulkan:
+          return DispatchKey::Vulkan;
+        case DeviceType::Metal:
+          return DispatchKey::Metal;
+        case DeviceType::MKLDNN:
+        case DeviceType::OPENGL:
+        case DeviceType::OPENCL:
+        case DeviceType::IDEEP:
+          TORCH_INTERNAL_ASSERT(
+              0,
+              "This is a grandfathered Caffe2 device type ",
+              device_.type(),
+              ", it shouldn't ever convert to a DispatchKey. File a bug "
+              "describing what you were doing if you think this is in error.");
+        default:
+          TORCH_CHECK_NOT_IMPLEMENTED(
+              false,
+              "Unsupported device type for dense layout: ",
+              device_.type());
+      }
+    }
+    case Layout::Sparse:
+      switch (device_.type()) {
+#define DO_CASE(device, _)                \
+  case DeviceType::device: {              \
+    return DispatchKey::Sparse##device;   \
+  }
+        C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, unused)
+#undef DO_CASE
+        default:
+          TORCH_CHECK_NOT_IMPLEMENTED(
+              false,
+              "Unsupported device type for sparse layout: ",
+              device_.type());
+      }
+    case Layout::Mkldnn:
+      switch (device_.type()) {
+        case DeviceType::CPU:
+          return DispatchKey::MkldnnCPU;
+        default:
+          TORCH_CHECK_NOT_IMPLEMENTED(
+              false,
+              "Unsupported device type for mkldnn layout: ",
+              device_.type());
+      }
+    case Layout::SparseCsr:
+    case Layout::SparseCsc:
+    case Layout::SparseBsr:
+    case Layout::SparseBsc:
+      switch (device_.type()) {
+        case DeviceType::CPU:
+          return DispatchKey::SparseCsrCPU;
+        case DeviceType::CUDA:
+          return DispatchKey::SparseCsrCUDA;
+        default:
+          AT_ERROR(
+              "Unsupported device type for ",
+              layout_,
+              " layout: ",
+              device_.type());
+      }
+    default:
+      TORCH_CHECK(false, "Unsupported layout: ", layout_);
+  }
+}
+
+inline Layout dispatchKeyToLayout(DispatchKey dispatch_key) {
+  switch (dispatch_key) {
+#define DO_CASE(bc, _) case DispatchKey::Sparse##bc:
+    C10_FORALL_BACKEND_COMPONENTS(DO_CASE, unused)
+#undef DO_CASE
+    return Layout::Sparse;
+    case DispatchKey::SparseCsrCPU:
+    case DispatchKey::SparseCsrCUDA:
+      TORCH_CHECK(
+          false,
+          "Cannot map DispatchKey ",
+          dispatch_key,
+          " to a unique layout.");
+    case DispatchKey::MkldnnCPU:
+      return Layout::Mkldnn;
+    default:
+      return Layout::Strided;
+  }
+}
+
+inline DeviceType dispatchKeyToDeviceType(DispatchKey dispatch_key) {
+  switch (dispatch_key) {
+    // stuff that's real
+#define DO_CASE(suffix, prefix)     \
+  case DispatchKey::prefix##suffix: \
+    return DeviceType::suffix;
+#define DO_CASES(_, prefix) C10_FORALL_BACKEND_DEVICE_TYPES(DO_CASE, prefix)
+    C10_FORALL_FUNCTIONALITY_KEYS(DO_CASES)
+#undef DO_CASES
+#undef DO_CASE
+
+    case DispatchKey::MkldnnCPU:
+      return DeviceType::CPU;
+    case DispatchKey::Vulkan:
+      return DeviceType::Vulkan;
+
+    case DispatchKey::ORT:
+      return DeviceType::ORT;
+    default:
+      TORCH_CHECK(
+          false,
+          "DispatchKey ",
+          dispatch_key,
+          " doesn't correspond to a device");
+  }
+}
+
+inline TensorOptions dispatchKeyToTensorOptions(DispatchKey dispatch_key) {
+  return TensorOptions()
+      .layout(dispatchKeyToLayout(dispatch_key))
+      .device(dispatchKeyToDeviceType(dispatch_key));
+}
+
+namespace detail {
+inline bool backend_supports_empty_operator(const TensorOptions options) {
+  // Quantized backends don't support at::empty().
+  // They have separate operators like at::empty_quantized() that take in
+  // extra information about how to quantize the tensor.
+  return !isQIntType(typeMetaToScalarType(options.dtype()));
+}
+
+} // namespace detail
+
+} // namespace c10
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h
new file mode 100644
index 0000000000000000000000000000000000000000..c935775b2c3e0ed582958f082d9d3a35edc1c8d8
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/WrapDimMinimal.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <c10/core/SymInt.h>
+#include <c10/macros/Macros.h>
+
+namespace c10 {
+
+namespace detail {
+// This template can only be specialized at int64_t and c10::SymInt;
+// you'll get linker errors otherwise
+template <typename T>
+C10_API T maybe_wrap_dim_slow(T dim, T dim_post_expr, bool wrap_scalar);
+} // namespace detail
+
+template <typename T>
+T _maybe_wrap_dim(T dim, T dim_post_expr, bool wrap_scalar = true) {
+  // Inline the fast paths
+  if (C10_LIKELY(dim_post_expr * -1 <= dim && dim < dim_post_expr)) {
+    // For SymInts, we want an explicit control flow to trigger a guard, so we
+    // may as well branch too.
+    if (dim < 0) {
+      return dim + dim_post_expr;
+    }
+    return dim;
+  }
+  // Check edge-cases out-of-line (wrapping scalars and out-of-bounds errors)
+  return c10::detail::maybe_wrap_dim_slow(
+      std::move(dim), std::move(dim_post_expr), wrap_scalar);
+}
+
+inline int64_t maybe_wrap_dim(
+    int64_t dim,
+    int64_t dim_post_expr,
+    bool wrap_scalar = true) {
+  return _maybe_wrap_dim(dim, dim_post_expr, wrap_scalar);
+}
+
+inline c10::SymInt maybe_wrap_dim(
+    c10::SymInt dim,
+    c10::SymInt dim_post_expr,
+    bool wrap_scalar = true) {
+  return _maybe_wrap_dim(std::move(dim), std::move(dim_post_expr), wrap_scalar);
+}
+
+} // namespace c10
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/alignment.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/alignment.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a8c732ef42d0ca8c4fce37ec95ecb03e027ce0e
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/alignment.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include <cstddef>
+
+namespace c10 {
+
+#ifdef C10_MOBILE
+// Use 16-byte alignment on mobile
+// - ARM NEON AArch32 and AArch64
+// - x86[-64] < AVX
+constexpr size_t gAlignment = 16;
+#else
+// 64-byte alignment should be enough for computation up to AVX512.
+constexpr size_t gAlignment = 64;
+#endif
+
+} // namespace c10
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/thread_pool.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/thread_pool.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc35707ef5f9c5666bcade19ab611d6fb41772eb
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/core/thread_pool.h
@@ -0,0 +1,128 @@
+#pragma once
+
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <vector>
+
+#include <c10/util/Registry.h>
+#include <c10/util/numa.h>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wshorten-64-to-32")
+#endif
+
+namespace c10 {
+
+// TODO: move this to C10 and make it C10_API
+class C10_API TaskThreadPoolBase {
+ public:
+  virtual void run(std::function<void()> func) = 0;
+
+  virtual size_t size() const = 0;
+
+  /**
+   * The number of available (i.e. idle) threads in this thread pool.
+   */
+  virtual size_t numAvailable() const = 0;
+
+  /**
+   * Check if the current thread is from the thread pool.
+   */
+  virtual bool inThreadPool() const = 0;
+
+  virtual ~TaskThreadPoolBase() noexcept = default;
+
+  static size_t defaultNumThreads() {
+    auto num_threads = std::thread::hardware_concurrency();
+#if defined(_M_X64) || defined(__x86_64__)
+    num_threads /= 2;
+#endif
+    return num_threads;
+  }
+};
+
+class C10_API ThreadPool : public c10::TaskThreadPoolBase {
+ protected:
+  struct task_element_t {
+    bool run_with_id;
+    const std::function<void()> no_id;
+    const std::function<void(std::size_t)> with_id;
+
+    explicit task_element_t(std::function<void()> f)
+        : run_with_id(false), no_id(std::move(f)), with_id(nullptr) {}
+    explicit task_element_t(std::function<void(std::size_t)> f)
+        : run_with_id(true), no_id(nullptr), with_id(std::move(f)) {}
+  };
+
+  std::queue<task_element_t> tasks_;
+  std::vector<std::thread> threads_;
+  mutable std::mutex mutex_;
+  std::condition_variable condition_;
+  std::condition_variable completed_;
+  std::atomic_bool running_;
+  bool complete_;
+  std::size_t available_;
+  std::size_t total_;
+  int numa_node_id_;
+
+ public:
+  ThreadPool() = delete;
+
+  explicit ThreadPool(
+      int pool_size,
+      int numa_node_id = -1,
+      std::function<void()> init_thread = nullptr);
+
+  ~ThreadPool() override;
+
+  size_t size() const override;
+
+  size_t numAvailable() const override;
+
+  bool inThreadPool() const override;
+
+  void run(std::function<void()> func) override;
+
+  template <typename Task>
+  void runTaskWithID(Task task) {
+    std::unique_lock<std::mutex> lock(mutex_);
+
+    // Set task and signal condition variable so that a worker thread will
+    // wake up and use the task.
+    tasks_.emplace(static_cast<std::function<void(std::size_t)>>(task));
+    complete_ = false;
+    condition_.notify_one();
+  }
+
+  /// @brief Wait for queue to be empty
+  void waitWorkComplete();
+
+ private:
+  // @brief Entry point for pool threads.
+  void main_loop(std::size_t index);
+};
+
+class C10_API TaskThreadPool : public c10::ThreadPool {
+ public:
+  explicit TaskThreadPool(std::size_t pool_size, int numa_node_id = -1)
+      : ThreadPool(pool_size, numa_node_id, [numa_node_id]() {
+          setThreadName("CaffeTaskThread");
+          NUMABind(numa_node_id);
+        }) {}
+};
+
+C10_DECLARE_SHARED_REGISTRY(
+    ThreadPoolRegistry,
+    TaskThreadPoolBase,
+    int,
+    int,
+    bool);
+
+} // namespace c10
+
+C10_CLANG_DIAGNOSTIC_POP()
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/Export.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/Export.h
new file mode 100644
index 0000000000000000000000000000000000000000..b439e74b37e0569c0817f5a3f85b6f9512bba315
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/Export.h
@@ -0,0 +1,154 @@
+#ifndef C10_MACROS_EXPORT_H_
+#define C10_MACROS_EXPORT_H_
+
+/* Header file to define the common scaffolding for exported symbols.
+ *
+ * Export is by itself quite a tricky situation to deal with, and if you are
+ * hitting this file, make sure you start with the background here:
+ * - Linux: https://gcc.gnu.org/wiki/Visibility
+ * - Windows:
+ *   https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
+ *
+ * Do NOT include this file directly. Instead, use c10/macros/Macros.h
+ */
+
+// You do not need to edit this part of the file unless you are changing the
+// core pytorch export abstractions.
+//
+// This part defines the C10 core export and import macros. This is controlled
+// by whether we are building shared libraries or not, which is determined
+// during build time and codified in c10/macros/cmake_macros.h.
+// When the library is built as a shared lib, EXPORT and IMPORT will contain
+// visibility attributes. 
If it is being built as a static lib, then EXPORT +// and IMPORT basically have no effect. + +// As a rule of thumb, you should almost NEVER mix static and shared builds for +// libraries that depend on c10. AKA, if c10 is built as a static library, we +// recommend everything dependent on c10 to be built statically. If c10 is built +// as a shared library, everything dependent on it should be built as shared. In +// the PyTorch project, all native libraries shall use the macro +// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static +// libraries. + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. +// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. + +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#ifdef _WIN32 +#define C10_HIDDEN +#if defined(C10_BUILD_SHARED_LIBS) +#define C10_EXPORT __declspec(dllexport) +#define C10_IMPORT __declspec(dllimport) +#else +#define C10_EXPORT +#define C10_IMPORT +#endif +#else // _WIN32 +#if defined(__GNUC__) +#define C10_EXPORT __attribute__((__visibility__("default"))) +#define C10_HIDDEN __attribute__((__visibility__("hidden"))) +#else // defined(__GNUC__) +#define C10_EXPORT +#define C10_HIDDEN +#endif // defined(__GNUC__) +#define C10_IMPORT C10_EXPORT +#endif // _WIN32 + +#ifdef NO_EXPORT +#undef C10_EXPORT +#define C10_EXPORT +#endif + +// Definition of an adaptive XX_API macro, that depends on whether you are +// building the library itself or not, routes to XX_EXPORT and XX_IMPORT. +// Basically, you will need to do this for each shared library that you are +// building, and the instruction is as follows: assuming that you are building +// a library called libawesome.so. You should: +// (1) for your cmake target (usually done by "add_library(awesome, ...)"), +// define a macro called AWESOME_BUILD_MAIN_LIB using +// target_compile_options. +// (2) define the AWESOME_API macro similar to the one below. +// And in the source file of your awesome library, use AWESOME_API to +// annotate public symbols. + +// Here, for the C10 library, we will define the macro C10_API for both import +// and export. + +// This one is being used by libc10.so +#ifdef C10_BUILD_MAIN_LIB +#define C10_API C10_EXPORT +#else +#define C10_API C10_IMPORT +#endif + +// This one is being used by libtorch.so +#ifdef CAFFE2_BUILD_MAIN_LIB +#define TORCH_API C10_EXPORT +#else +#define TORCH_API C10_IMPORT +#endif + +// You may be wondering: Whose brilliant idea was it to split torch_cuda into +// two pieces with confusing names? +// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we +// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker +// issues when linking big binaries. +// (https://github.com/pytorch/pytorch/issues/39968) We had two choices: +// (1) Stop supporting so many GPU architectures +// (2) Do something else +// We chose #2 and decided to split the behemoth that was torch_cuda into two +// smaller libraries, one with most of the core kernel functions (torch_cuda_cu) +// and the other that had..well..everything else (torch_cuda_cpp). 
The idea was +// this: instead of linking our static libraries (like the hefty +// libcudnn_static.a) with another huge library, torch_cuda, and run into pesky +// relocation marker issues, we could link our static libraries to a smaller +// part of torch_cuda (torch_cuda_cpp) and avoid the issues. + +// libtorch_cuda_cu.so +#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB +#define TORCH_CUDA_CU_API C10_EXPORT +#elif defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CU_API C10_IMPORT +#endif + +// libtorch_cuda_cpp.so +#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB +#define TORCH_CUDA_CPP_API C10_EXPORT +#elif defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CPP_API C10_IMPORT +#endif + +// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the +// same api) +#ifdef TORCH_CUDA_BUILD_MAIN_LIB +#define TORCH_CUDA_CPP_API C10_EXPORT +#define TORCH_CUDA_CU_API C10_EXPORT +#elif !defined(BUILD_SPLIT_CUDA) +#define TORCH_CUDA_CPP_API C10_IMPORT +#define TORCH_CUDA_CU_API C10_IMPORT +#endif + +#if defined(TORCH_HIP_BUILD_MAIN_LIB) +#define TORCH_HIP_API C10_EXPORT +#else +#define TORCH_HIP_API C10_IMPORT +#endif + +// Enums only need to be exported on windows for non-CUDA files +#if defined(_WIN32) && defined(__CUDACC__) +#define C10_API_ENUM C10_API +#else +#define C10_API_ENUM +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h new file mode 100644 index 0000000000000000000000000000000000000000..966a7a27ff069bd0829464fb32ef4bc7e3ab208d --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/Macros.h @@ -0,0 +1,556 @@ +#ifndef C10_MACROS_MACROS_H_ +#define C10_MACROS_MACROS_H_ +#include + +/* Main entry for c10/macros. + * + * In your code, include c10/macros/Macros.h directly, instead of individual + * files in this folder. + */ + +// For build systems that do not directly depend on CMake and directly build +// from the source directory (such as Buck), one may not have a cmake_macros.h +// file at all. In this case, the build system is responsible for providing +// correct macro definitions corresponding to the cmake_macros.h.in file. +// +// In such scenarios, one should define the macro +// C10_USING_CUSTOM_GENERATED_MACROS +// to inform this header that it does not need to include the cmake_macros.h +// file. 
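+//
+// For illustration, a Buck-style build might predefine something equivalent
+// to the generated header on the command line (a sketch only; the
+// authoritative list of macros is dictated by cmake_macros.h.in):
+//
+//   -DC10_USING_CUSTOM_GENERATED_MACROS -DC10_BUILD_SHARED_LIBS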
+ +#ifndef C10_USING_CUSTOM_GENERATED_MACROS +#include +#endif // C10_USING_CUSTOM_GENERATED_MACROS + +#include + +#if defined(__clang__) +#define __ubsan_ignore_float_divide_by_zero__ \ + __attribute__((no_sanitize("float-divide-by-zero"))) +#define __ubsan_ignore_undefined__ __attribute__((no_sanitize("undefined"))) +#define __ubsan_ignore_signed_int_overflow__ \ + __attribute__((no_sanitize("signed-integer-overflow"))) +#define __ubsan_ignore_function__ __attribute__((no_sanitize("function"))) +#else +#define __ubsan_ignore_float_divide_by_zero__ +#define __ubsan_ignore_undefined__ +#define __ubsan_ignore_signed_int_overflow__ +#define __ubsan_ignore_function__ +#endif + +// Detect address sanitizer as some stuff doesn't work with it +#undef C10_ASAN_ENABLED + +// for clang +#if defined(__has_feature) +#if ((__has_feature(address_sanitizer))) +#define C10_ASAN_ENABLED 1 +#endif +#endif + +// for gcc +#if defined(__SANITIZE_ADDRESS__) +#if __SANITIZE_ADDRESS__ +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 1 +#endif +#endif +#endif + +#if !defined(C10_ASAN_ENABLED) +#define C10_ASAN_ENABLED 0 +#endif + +// Disable the copy and assignment operator for a class. Note that this will +// disable the usage of the class in std containers. +#define C10_DISABLE_COPY_AND_ASSIGN(classname) \ + classname(const classname&) = delete; \ + classname& operator=(const classname&) = delete + +#define C10_CONCATENATE_IMPL(s1, s2) s1##s2 +#define C10_CONCATENATE(s1, s2) C10_CONCATENATE_IMPL(s1, s2) + +#define C10_MACRO_EXPAND(args) args + +#define C10_STRINGIZE_IMPL(x) #x +#define C10_STRINGIZE(x) C10_STRINGIZE_IMPL(x) + +/** + * C10_ANONYMOUS_VARIABLE(str) introduces an identifier starting with + * str and ending with a number that varies with the line. + */ +#ifdef __COUNTER__ +#define C10_UID __COUNTER__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __COUNTER__) +#else +#define C10_UID __LINE__ +#define C10_ANONYMOUS_VARIABLE(str) C10_CONCATENATE(str, __LINE__) +#endif + +#ifdef __has_cpp_attribute +#define C10_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define C10_HAS_CPP_ATTRIBUTE(x) (0) +#endif + +/// C10_NODISCARD - Warn if a type or return value is discarded. + +// Technically, we should check if __cplusplus > 201402L here, because +// [[nodiscard]] is only defined in C++17. However, some compilers +// we care about don't advertise being C++17 (e.g., clang), but +// support the attribute anyway. In fact, this is not just a good idea, +// it's the law: clang::warn_unused_result doesn't work on nvcc + clang +// and the best workaround for this case is to use [[nodiscard]] +// instead; see https://github.com/pytorch/pytorch/issues/13118 +// +// Note to future editors: if you have noticed that a compiler is +// misbehaving (e.g., it advertises support, but the support doesn't +// actually work, or it is emitting warnings). Some compilers which +// are strict about the matter include MSVC, which will complain: +// +// error C2429: attribute 'nodiscard' requires compiler flag '/std:c++latest' +// +// Exhibits: +// - MSVC 19.14: https://godbolt.org/z/Dzd7gn (requires /std:c++latest) +// - Clang 8.0.0: https://godbolt.org/z/3PYL4Z (always advertises support) +// - gcc 8.3: https://godbolt.org/z/4tLMQS (always advertises support) +#if C10_HAS_CPP_ATTRIBUTE(nodiscard) +#define C10_NODISCARD [[nodiscard]] +// Workaround for llvm.org/PR23435, since clang 3.6 and below emit a spurious +// error when __has_cpp_attribute is given a scoped attribute in C mode. 
+#elif __cplusplus && C10_HAS_CPP_ATTRIBUTE(clang::warn_unused_result) +// TODO: It's possible this is still triggering +// https://github.com/pytorch/pytorch/issues/13118 on Windows; if it is, better +// fix it. +#define C10_NODISCARD [[clang::warn_unused_result]] +#else +#define C10_NODISCARD +#endif + +// suppress an unused variable. +#if defined(_MSC_VER) && !defined(__clang__) +#define C10_UNUSED __pragma(warning(suppress : 4100 4101)) +#else +#define C10_UNUSED __attribute__((__unused__)) +#endif //_MSC_VER + +// Direct port of LLVM_ATTRIBUTE_USED. +#if __has_attribute(used) +#define C10_USED __attribute__((__used__)) +#else +#define C10_USED +#endif + +#define C10_RESTRICT __restrict + +// Simply define the namespace, in case a dependent library want to refer to +// the c10 namespace but not any nontrivial files. +namespace c10 {} // namespace c10 +namespace c10 { +namespace cuda {} +} // namespace c10 +namespace c10 { +namespace hip {} +} // namespace c10 + +// Since C10 is the core library for caffe2 (and aten), we will simply reroute +// all abstractions defined in c10 to be available in caffe2 as well. +// This is only for backwards compatibility. Please use the symbols from the +// c10 namespace where possible. +namespace caffe2 { +using namespace c10; +} +namespace at { +using namespace c10; +} +namespace at { +namespace cuda { +using namespace c10::cuda; +} +} // namespace at + +// WARNING!!! THIS IS A GIANT HACK!!! +// This line means you cannot simultaneously include c10/hip +// and c10/cuda and then use them from the at::cuda namespace. +// This is true in practice, because HIPIFY works inplace on +// files in ATen/cuda, so it assumes that c10::hip is available +// from at::cuda. This namespace makes that happen. When +// HIPIFY is no longer out-of-place, we can switch the cuda +// here to hip and everyone is happy. +namespace at { +namespace cuda { +using namespace c10::hip; +} +} // namespace at + +// C10_LIKELY/C10_UNLIKELY +// +// These macros provide parentheses, so you can use these macros as: +// +// if C10_LIKELY(some_expr) { +// ... +// } +// +// NB: static_cast to boolean is mandatory in C++, because __builtin_expect +// takes a long argument, which means you may trigger the wrong conversion +// without it. +// +#if defined(__GNUC__) || defined(__ICL) || defined(__clang__) +#define C10_LIKELY(expr) (__builtin_expect(static_cast(expr), 1)) +#define C10_UNLIKELY(expr) (__builtin_expect(static_cast(expr), 0)) +#else +#define C10_LIKELY(expr) (expr) +#define C10_UNLIKELY(expr) (expr) +#endif + +/// C10_NOINLINE - Functions whose declaration is annotated with this will not +/// be inlined. +#ifdef __GNUC__ +#define C10_NOINLINE __attribute__((noinline)) +#elif _MSC_VER +#define C10_NOINLINE __declspec(noinline) +#else +#define C10_NOINLINE +#endif + +#if defined(_MSC_VER) +#define C10_ALWAYS_INLINE __forceinline +#elif __has_attribute(always_inline) || defined(__GNUC__) +#define C10_ALWAYS_INLINE __attribute__((__always_inline__)) inline +#else +#define C10_ALWAYS_INLINE inline +#endif + +#if defined(_MSC_VER) +#define C10_ATTR_VISIBILITY_HIDDEN +#elif defined(__GNUC__) +#define C10_ATTR_VISIBILITY_HIDDEN __attribute__((__visibility__("hidden"))) +#else +#define C10_ATTR_VISIBILITY_HIDDEN +#endif + +#define C10_ERASE C10_ALWAYS_INLINE C10_ATTR_VISIBILITY_HIDDEN + +// C10_FALLTHROUGH - Annotate fallthrough to the next case in a switch. 
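+//
+// A minimal usage sketch (for illustration; not from the original header):
+//
+//   switch (n) {
+//     case 0:
+//       step_a();
+//       C10_FALLTHROUGH;
+//     case 1:
+//       step_b();
+//       break;
+//   }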
+#if C10_HAS_CPP_ATTRIBUTE(fallthrough) +#define C10_FALLTHROUGH [[fallthrough]] +#else +#define C10_FALLTHROUGH +#endif + +#include + +#ifdef __HIPCC__ +// Unlike CUDA, HIP requires a HIP header to be included for __host__ to work. +// We do this #include here so that C10_HOST_DEVICE and friends will Just Work. +// See https://github.com/ROCm-Developer-Tools/HIP/issues/441 +#include +#endif + +#if defined(__CUDACC__) || defined(__HIPCC__) +// Designates functions callable from the host (CPU) and the device (GPU) +#define C10_HOST_DEVICE __host__ __device__ +#define C10_DEVICE __device__ +#define C10_HOST __host__ +// constants from +// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications) +// The maximum number of threads per multiprocessor is 1024 for Turing +// architecture (7.5), 1536 for Geforce Ampere (8.6)/Jetson Orin (8.7), and +// 2048 for all other architectures. You'll get warnings if you exceed these +// constants. Hence, the following macros adjust the input values from the user +// to resolve potential warnings. +#if __CUDA_ARCH__ == 750 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024; +#elif __CUDA_ARCH__ == 860 || __CUDA_ARCH__ == 870 || __CUDA_ARCH__ == 890 +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1536; +#else +constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048; +#endif +// CUDA_MAX_THREADS_PER_BLOCK is same for all architectures currently +constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024; +// CUDA_THREADS_PER_BLOCK_FALLBACK is the "canonical fallback" choice of block +// size. 256 is a good number for this fallback and should give good occupancy +// and versatility across all architectures. +constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256; +// NOTE: if you are thinking of constexpr-ify the inputs to launch bounds, it +// turns out that although __launch_bounds__ can take constexpr, it +// can't take a constexpr that has anything to do with templates. +// Currently we use launch_bounds that depend on template arguments in +// Loops.cuh, Reduce.cuh and LossCTC.cuh. Hence, C10_MAX_THREADS_PER_BLOCK +// and C10_MIN_BLOCKS_PER_SM are kept as macros. +// Suppose you were planning to write __launch_bounds__(a, b), based on your +// performance tuning on a modern GPU. Instead, you should write +// __launch_bounds__(C10_MAX_THREADS_PER_BLOCK(a), C10_MIN_BLOCKS_PER_SM(a, b)), +// which will also properly respect limits on old architectures. +#define C10_MAX_THREADS_PER_BLOCK(val) \ + (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \ + : CUDA_THREADS_PER_BLOCK_FALLBACK) +#define C10_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \ + ((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \ + ? (blocks_per_sm) \ + : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \ + (threads_per_block)))) +// C10_LAUNCH_BOUNDS is analogous to __launch_bounds__ +#define C10_LAUNCH_BOUNDS_0 \ + __launch_bounds__( \ + 256, 4) // default launch bounds that should give good occupancy and + // versatility across all architectures. 
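+// Worked example (a sketch for illustration): on an 8.6 device, where
+// CUDA_MAX_THREADS_PER_SM == 1536, writing __launch_bounds__(256, 8) directly
+// would request 256 * 8 = 2048 resident threads and overshoot the limit;
+// C10_MIN_BLOCKS_PER_SM(256, 8) instead clamps the second argument to
+// (1536 + 255) / 256 = 6, so the macros expand to __launch_bounds__(256, 6).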
+#define C10_LAUNCH_BOUNDS_1(max_threads_per_block) \ + __launch_bounds__((C10_MAX_THREADS_PER_BLOCK((max_threads_per_block)))) +#define C10_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ + __launch_bounds__( \ + (C10_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \ + (C10_MIN_BLOCKS_PER_SM((max_threads_per_block), (min_blocks_per_sm)))) +#else +#define C10_HOST_DEVICE +#define C10_HOST +#define C10_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_HIP_HOST_DEVICE __host__ __device__ +#else +#define C10_HIP_HOST_DEVICE +#endif + +#if defined(USE_ROCM) +#define C10_WARP_SIZE warpSize // = 64 or 32 (Defined in hip_runtime.h) +#else +#define C10_WARP_SIZE 32 +#endif + +#if defined(_MSC_VER) && _MSC_VER <= 1900 +#define __func__ __FUNCTION__ +#endif + +// CUDA_KERNEL_ASSERT checks the assertion +// even when NDEBUG is defined. This is useful for important assertions in CUDA +// code that would otherwise be suppressed when building Release. +#if defined(__ANDROID__) || defined(__APPLE__) || \ + (defined(USE_ROCM) && ROCM_VERSION < 40100) +// Those platforms do not support assert() +#define CUDA_KERNEL_ASSERT(cond) +#define SYCL_KERNEL_ASSERT(cond) +#elif defined(_MSC_VER) +#if defined(NDEBUG) +extern "C" { +C10_IMPORT +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void _wassert( + const wchar_t* wexpr, + const wchar_t* wfile, + unsigned line); +#else +#if defined(__CUDA_ARCH__) +__host__ __device__ +#endif // __CUDA_ARCH__ + void + _wassert(wchar_t const* _Message, wchar_t const* _File, unsigned _Line); +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert(_CRT_WIDE(#cond), _CRT_WIDE(__FILE__), static_cast(__LINE__)), 0); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + (void)(_wassert(_CRT_WIDE(#cond), _CRT_WIDE(__FILE__), static_cast(__LINE__)), 0); \ + } +#else // __APPLE__, _MSC_VER +#if defined(NDEBUG) +extern "C" { +#if defined(__SYCL_DEVICE_ONLY__) +extern SYCL_EXTERNAL void __assert_fail( + const char* expr, + const char* file, + unsigned int line, + const char* func); +#else // __SYCL_DEVICE_ONLY__ +#if ( \ + defined(__CUDA_ARCH__) && !(defined(__clang__) && defined(__CUDA__)) && \ + !defined(TORCH_DISABLE_GPU_ASSERTS)) +// CUDA supports __assert_fail function which are common for both device +// and host side code. +__host__ __device__ +#endif + + // This forward declaration matching the declaration of __assert_fail + // exactly how it is in glibc in case parts of the program are compiled with + // different NDEBUG settings. Otherwise we might get 'ambiguous declaration' + // error. Note: On ROCm - this declaration serves for host side compilation. + void + __assert_fail( + const char* assertion, + const char* file, + unsigned int line, + const char* function) noexcept __attribute__((__noreturn__)); + +#if (defined(__HIP_ARCH__) || defined(__HIP__)) && \ + !defined(TORCH_DISABLE_GPU_ASSERTS) +// ROCm supports __assert_fail only as a device side function. 
+__device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail( + const char* assertion, + const char* file, + unsigned int line, + const char* function); +#endif // defined(__HIP_ARCH__) || defined(__HIP__) +#endif // __SYCL_DEVICE_ONLY__ +} +#endif // NDEBUG +#define CUDA_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#define SYCL_KERNEL_ASSERT(cond) \ + if (C10_UNLIKELY(!(cond))) { \ + __assert_fail( \ + #cond, __FILE__, static_cast(__LINE__), __func__); \ + } +#endif // __APPLE__ + +#ifdef __APPLE__ +#include +#endif + +#if defined(__ANDROID__) +#define C10_ANDROID 1 +#define C10_MOBILE 1 +#elif ( \ + defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE)) +#define C10_IOS 1 +#define C10_MOBILE 1 +#endif // ANDROID / IOS + +#if defined(C10_MOBILE) && C10_MOBILE +#define C10_ALWAYS_INLINE_UNLESS_MOBILE inline +#else +#define C10_ALWAYS_INLINE_UNLESS_MOBILE C10_ALWAYS_INLINE +#endif + +// Portable determination of whether type T is trivially copyable. +// Warning: __has_trivial_copy for GCC may not always detect the non-POD +// correctly. For example, T = std::unique_ptr may evaluate to true and be +// treated as POD. This can cause unexpected behavior. +#if defined(__GNUG__) && __GNUC__ < 5 && !defined(__clang__) +#define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T) +#else +#define C10_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable::value +#endif + +#if defined(__CUDA_ARCH__) +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +// Note [static constexpr char* members for windows NVCC] +// The Windows NVCC compiler doesn't handle static constexpr class members, +// although it's fixed in a later version. +// (see +// https://developercommunity.visualstudio.com/t/intellisense-error-c11-static-constexpr-member-ini/245425) +// +// If we want to ensure that our field is static under all builds, then we need +// to work around it specifically for windows NVCC by making it (a) const, (b) +// defined outside of the class definition We need to define it outside of the +// class definition because of the C++ standard; char* is not an integral type +// (see +// https://stackoverflow.com/questions/24278473/intellisense-a-member-of-type-const-char-const-cannot-have-an-in-class-in) +// +// So instead of this: +// struct Foo { +// static constexpr const char* name = "foo"; +// } +// In Windows NVCC, we end up with this: +// struct Foo { +// static const char* name; +// } +// const char* Foo::name = "foo"; +// +// This gives us a small perf hit for any code that wants to access these field +// members, but right now it isn't used in any perf-critical code paths. 
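+//
+// For illustration, the struct Foo example above expressed with the macros
+// defined below (a sketch, not from the original header):
+//
+//   struct Foo {
+//     STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "foo")
+//   };
+//   STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(Foo, name, "foo")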
+#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA __host__ + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#else +#if defined(_MSC_VER) && defined(__CUDACC__) +#define CONSTEXPR_EXCEPT_WIN_CUDA const +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static const char* field; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) \ + const char* cls::field = val; +#else +#define CONSTEXPR_EXCEPT_WIN_CUDA constexpr +#define C10_HOST_CONSTEXPR_EXCEPT_WIN_CUDA constexpr + +#define STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(field, val) \ + static constexpr const char* field = val; +#define STATIC_CONST_STR_OUT_OF_LINE_FOR_WIN_CUDA(cls, field, val) +#endif +#endif + +#ifndef HAS_DEMANGLE +#if defined(__ANDROID__) || defined(_WIN32) || defined(__EMSCRIPTEN__) +#define HAS_DEMANGLE 0 +#elif defined(__APPLE__) && \ + (TARGET_IPHONE_SIMULATOR || TARGET_OS_SIMULATOR || TARGET_OS_IPHONE) +#define HAS_DEMANGLE 0 +#else +#define HAS_DEMANGLE 1 +#endif +#endif // HAS_DEMANGLE + +#define _C10_PRAGMA__(string) _Pragma(#string) +#define _C10_PRAGMA_(string) _C10_PRAGMA__(string) + +#ifdef __clang__ +#define C10_CLANG_DIAGNOSTIC_PUSH() _Pragma("clang diagnostic push") +#define C10_CLANG_DIAGNOSTIC_POP() _Pragma("clang diagnostic pop") +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) \ + _C10_PRAGMA_(clang diagnostic ignored flag) +#define C10_CLANG_HAS_WARNING(flag) __has_warning(flag) +#else +#define C10_CLANG_DIAGNOSTIC_PUSH() +#define C10_CLANG_DIAGNOSTIC_POP() +#define C10_CLANG_DIAGNOSTIC_IGNORE(flag) +#define C10_CLANG_HAS_WARNING(flag) 0 +#endif + +#ifdef __clang__ + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \ + _C10_PRAGMA_(clang diagnostic push) \ + _C10_PRAGMA_(clang diagnostic ignored "-Wunknown-warning-option") \ + _C10_PRAGMA_(clang diagnostic ignored warning) + +#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(clang diagnostic pop) + +#elif __GNUC__ + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) \ + _C10_PRAGMA_(GCC diagnostic push) \ + _C10_PRAGMA_(GCC diagnostic ignored "-Wpragmas") \ + _C10_PRAGMA_(GCC diagnostic ignored warning) + +#define C10_DIAGNOSTIC_POP() _C10_PRAGMA_(GCC diagnostic pop) + +#else + +#define C10_DIAGNOSTIC_PUSH_AND_IGNORED_IF_DEFINED(warning) +#define C10_DIAGNOSTIC_POP() + +#endif + +#endif // C10_MACROS_MACROS_H_ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..6c8b60b9eaecf5a0ce1c55a82cda6fbafd47c727 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h @@ -0,0 +1,13 @@ +#ifndef C10_MACROS_CMAKE_MACROS_H_ +#define C10_MACROS_CMAKE_MACROS_H_ + +// Automatically generated header file for the C10 library. +// Do not include this file directly. Instead, include c10/macros/Macros.h. 
+ +#define C10_BUILD_SHARED_LIBS +/* #undef C10_USE_GLOG */ +/* #undef C10_USE_GFLAGS */ +/* #undef C10_USE_NUMA */ +/* #undef C10_USE_MSVC_STATIC_RUNTIME */ + +#endif // C10_MACROS_CMAKE_MACROS_H_ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/clog.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/clog.h new file mode 100644 index 0000000000000000000000000000000000000000..414376116ddcb558f6a318ca295f55a396379f8c --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/clog.h @@ -0,0 +1,100 @@ +#pragma once + +#include +#include +#include + +#define CLOG_NONE 0 +#define CLOG_FATAL 1 +#define CLOG_ERROR 2 +#define CLOG_WARNING 3 +#define CLOG_INFO 4 +#define CLOG_DEBUG 5 + +#ifndef CLOG_VISIBILITY + #if defined(__ELF__) + #define CLOG_VISIBILITY __attribute__((__visibility__("internal"))) + #elif defined(__MACH__) + #define CLOG_VISIBILITY __attribute__((__visibility__("hidden"))) + #else + #define CLOG_VISIBILITY + #endif +#endif + +#ifndef CLOG_ARGUMENTS_FORMAT + #if defined(__GNUC__) + #define CLOG_ARGUMENTS_FORMAT __attribute__((__format__(__printf__, 1, 2))) + #else + #define CLOG_ARGUMENTS_FORMAT + #endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +CLOG_VISIBILITY void clog_vlog_debug(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_info(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_warning(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_error(const char* module, const char* format, va_list args); +CLOG_VISIBILITY void clog_vlog_fatal(const char* module, const char* format, va_list args); + +#define CLOG_DEFINE_LOG_DEBUG(log_debug_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_debug_function_name(const char* format, ...) { \ + if (level >= CLOG_DEBUG) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_debug(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_INFO(log_info_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_info_function_name(const char* format, ...) { \ + if (level >= CLOG_INFO) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_info(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_WARNING(log_warning_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_warning_function_name(const char* format, ...) { \ + if (level >= CLOG_WARNING) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_warning(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_ERROR(log_error_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_error_function_name(const char* format, ...) { \ + if (level >= CLOG_ERROR) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_error(module, format, args); \ + va_end(args); \ + } \ + } + +#define CLOG_DEFINE_LOG_FATAL(log_fatal_function_name, module, level) \ + CLOG_ARGUMENTS_FORMAT \ + inline static void log_fatal_function_name(const char* format, ...) 
{ \ + if (level >= CLOG_FATAL) { \ + va_list args; \ + va_start(args, format); \ + clog_vlog_fatal(module, format, args); \ + va_end(args); \ + } \ + abort(); \ + } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/dnnl.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/dnnl.h new file mode 100644 index 0000000000000000000000000000000000000000..bc74bf644f4b628018d7a9103ba63320abc466d5 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/dnnl.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*******************************************************************************/ + +#ifndef DNNL_H +#define DNNL_H + +#include "oneapi/dnnl/dnnl.h" + +#endif /* DNNL_H */ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/dnnl_debug.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/dnnl_debug.h new file mode 100644 index 0000000000000000000000000000000000000000..5044971832bbbe56127920a527508b207a803eea --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/dnnl_debug.h @@ -0,0 +1,22 @@ +/******************************************************************************* +* Copyright 2020 Intel Corporation +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*******************************************************************************/ + +#ifndef DNNL_DEBUG_H +#define DNNL_DEBUG_H + +#include "oneapi/dnnl/dnnl_debug.h" + +#endif /* DNNL_DEBUG_H */ diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/fxdiv.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/fxdiv.h new file mode 100644 index 0000000000000000000000000000000000000000..2c35038d97c55c524bb97caba2e3560cab9da504 --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/fxdiv.h @@ -0,0 +1,425 @@ +#pragma once +#ifndef FXDIV_H +#define FXDIV_H + +#if defined(__cplusplus) && (__cplusplus >= 201103L) + #include + #include + #include +#elif !defined(__OPENCL_VERSION__) + #include + #include + #include +#endif + +#if defined(_MSC_VER) + #include + #if defined(_M_IX86) || defined(_M_X64) + #include + #endif +#endif + +#ifndef FXDIV_USE_INLINE_ASSEMBLY + #define FXDIV_USE_INLINE_ASSEMBLY 0 +#endif + +static inline uint64_t fxdiv_mulext_uint32_t(uint32_t a, uint32_t b) { +#if defined(_MSC_VER) && defined(_M_IX86) + return (uint64_t) __emulu((unsigned int) a, (unsigned int) b); +#else + return (uint64_t) a * (uint64_t) b; +#endif +} + +static inline uint32_t fxdiv_mulhi_uint32_t(uint32_t a, uint32_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint32_t) __umulhi((unsigned int) a, (unsigned int) b); +#elif defined(_MSC_VER) && defined(_M_IX86) + return (uint32_t) (__emulu((unsigned int) a, (unsigned int) b) >> 32); +#elif defined(_MSC_VER) && defined(_M_ARM) + return (uint32_t) _MulUnsignedHigh((unsigned long) a, (unsigned long) b); +#else + return (uint32_t) (((uint64_t) a * (uint64_t) b) >> 32); +#endif +} + +static inline uint64_t fxdiv_mulhi_uint64_t(uint64_t a, uint64_t b) { +#if defined(__OPENCL_VERSION__) + return mul_hi(a, b); +#elif defined(__CUDA_ARCH__) + return (uint64_t) __umul64hi((unsigned long long) a, (unsigned long long) b); +#elif defined(_MSC_VER) && defined(_M_X64) + return (uint64_t) __umulh((unsigned __int64) a, (unsigned __int64) b); +#elif defined(__GNUC__) && defined(__SIZEOF_INT128__) + return (uint64_t) (((((unsigned __int128) a) * ((unsigned __int128) b))) >> 64); +#else + const uint32_t a_lo = (uint32_t) a; + const uint32_t a_hi = (uint32_t) (a >> 32); + const uint32_t b_lo = (uint32_t) b; + const uint32_t b_hi = (uint32_t) (b >> 32); + + const uint64_t t = fxdiv_mulext_uint32_t(a_hi, b_lo) + + (uint64_t) fxdiv_mulhi_uint32_t(a_lo, b_lo); + return fxdiv_mulext_uint32_t(a_hi, b_hi) + (t >> 32) + + ((fxdiv_mulext_uint32_t(a_lo, b_hi) + (uint64_t) (uint32_t) t) >> 32); +#endif +} + +static inline size_t fxdiv_mulhi_size_t(size_t a, size_t b) { +#if SIZE_MAX == UINT32_MAX + return (size_t) fxdiv_mulhi_uint32_t((uint32_t) a, (uint32_t) b); +#elif SIZE_MAX == UINT64_MAX + return (size_t) fxdiv_mulhi_uint64_t((uint64_t) a, (uint64_t) b); +#else + #error Unsupported platform +#endif +} + +struct fxdiv_divisor_uint32_t { + uint32_t value; + uint32_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint32_t { + uint32_t quotient; + uint32_t remainder; +}; + +struct fxdiv_divisor_uint64_t { + uint64_t value; + uint64_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_uint64_t { + uint64_t quotient; + uint64_t remainder; +}; + +struct fxdiv_divisor_size_t { + size_t value; + size_t m; + uint8_t s1; + uint8_t s2; +}; + +struct fxdiv_result_size_t { + size_t quotient; + size_t remainder; +}; + +static inline struct fxdiv_divisor_uint32_t 
fxdiv_init_uint32_t(uint32_t d) { + struct fxdiv_divisor_uint32_t result = { d }; + if (d == 1) { + result.m = UINT32_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t l_minus_1 = 31 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t l_minus_1 = 31 - __clz((int) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse(&l_minus_1, (unsigned long) (d - 1)); + #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t l_minus_1; + __asm__("BSRL %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 31 - __builtin_clz(d - 1); + #else + /* Based on Algorithm 2 from Hacker's delight */ + + uint32_t l_minus_1 = 0; + uint32_t x = d - 1; + uint32_t y = x >> 16; + if (y != 0) { + l_minus_1 += 16; + x = y; + } + y = x >> 8; + if (y != 0) { + l_minus_1 += 8; + x = y; + } + y = x >> 4; + if (y != 0) { + l_minus_1 += 4; + x = y; + } + y = x >> 2; + if (y != 0) { + l_minus_1 += 2; + x = y; + } + if ((x & 2) != 0) { + l_minus_1 += 1; + } + #endif + uint32_t u_hi = (UINT32_C(2) << (uint32_t) l_minus_1) - d; + + /* Division of 64-bit number u_hi:UINT32_C(0) by 32-bit number d, 32-bit quotient output q */ + #if defined(__GNUC__) && defined(__i386__) && FXDIV_USE_INLINE_ASSEMBLY + uint32_t q; + __asm__("DIVL %[d]" + : "=a" (q), "+d" (u_hi) + : [d] "r" (d), "a" (0) + : "cc"); + #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (defined(_M_IX86) || defined(_M_X64)) + unsigned int remainder; + const uint32_t q = (uint32_t) _udiv64((unsigned __int64) ((uint64_t) u_hi << 32), (unsigned int) d, &remainder); + #else + const uint32_t q = ((uint64_t) u_hi << 32) / d; + #endif + + result.m = q + UINT32_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_uint64_t fxdiv_init_uint64_t(uint64_t d) { + struct fxdiv_divisor_uint64_t result = { d }; + if (d == 1) { + result.m = UINT64_C(1); + result.s1 = 0; + result.s2 = 0; + } else { + #if defined(__OPENCL_VERSION__) + const uint32_t nlz_d = clz(d); + const uint32_t l_minus_1 = 63 - clz(d - 1); + #elif defined(__CUDA_ARCH__) + const uint32_t nlz_d = __clzll((long long) d); + const uint32_t l_minus_1 = 63 - __clzll((long long) (d - 1)); + #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) + unsigned long l_minus_1; + _BitScanReverse64(&l_minus_1, (unsigned __int64) (d - 1)); + unsigned long bsr_d; + _BitScanReverse64(&bsr_d, (unsigned __int64) d); + const uint32_t nlz_d = bsr_d ^ 0x3F; + #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_ARM)) + const uint64_t d_minus_1 = d - 1; + const uint8_t d_is_power_of_2 = (d & d_minus_1) == 0; + unsigned long l_minus_1; + if ((uint32_t) (d_minus_1 >> 32) == 0) { + _BitScanReverse(&l_minus_1, (unsigned long) d_minus_1); + } else { + _BitScanReverse(&l_minus_1, (unsigned long) (uint32_t) (d_minus_1 >> 32)); + l_minus_1 += 32; + } + const uint32_t nlz_d = ((uint8_t) l_minus_1 ^ UINT8_C(0x3F)) - d_is_power_of_2; + #elif defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY + uint64_t l_minus_1; + __asm__("BSRQ %[d_minus_1], %[l_minus_1]" + : [l_minus_1] "=r" (l_minus_1) + : [d_minus_1] "r" (d - 1) + : "cc"); + #elif defined(__GNUC__) + const uint32_t l_minus_1 = 63 - 
__builtin_clzll(d - 1);
+    const uint32_t nlz_d = __builtin_clzll(d);
+    #else
+    /* Based on Algorithm 2 from Hacker's delight */
+    const uint64_t d_minus_1 = d - 1;
+    const uint32_t d_is_power_of_2 = (d & d_minus_1) == 0;
+    uint32_t l_minus_1 = 0;
+    uint32_t x = (uint32_t) d_minus_1;
+    uint32_t y = d_minus_1 >> 32;
+    if (y != 0) {
+      l_minus_1 += 32;
+      x = y;
+    }
+    y = x >> 16;
+    if (y != 0) {
+      l_minus_1 += 16;
+      x = y;
+    }
+    y = x >> 8;
+    if (y != 0) {
+      l_minus_1 += 8;
+      x = y;
+    }
+    y = x >> 4;
+    if (y != 0) {
+      l_minus_1 += 4;
+      x = y;
+    }
+    y = x >> 2;
+    if (y != 0) {
+      l_minus_1 += 2;
+      x = y;
+    }
+    if ((x & 2) != 0) {
+      l_minus_1 += 1;
+    }
+    const uint32_t nlz_d = (l_minus_1 ^ UINT32_C(0x3F)) - d_is_power_of_2;
+    #endif
+    uint64_t u_hi = (UINT64_C(2) << (uint32_t) l_minus_1) - d;
+
+    /* Division of 128-bit number u_hi:UINT64_C(0) by 64-bit number d, 64-bit quotient output q */
+    #if defined(__GNUC__) && defined(__x86_64__) && FXDIV_USE_INLINE_ASSEMBLY
+    uint64_t q;
+    __asm__("DIVQ %[d]"
+      : "=a" (q), "+d" (u_hi)
+      : [d] "r" (d), "a" (UINT64_C(0))
+      : "cc");
+    #elif 0 && defined(__GNUC__) && defined(__SIZEOF_INT128__)
+    /* GCC, Clang, and Intel Compiler fail to inline optimized implementation and call into support library for 128-bit division */
+    const uint64_t q = (uint64_t) (((unsigned __int128) u_hi << 64) / ((unsigned __int128) d));
+    #elif (defined(_MSC_VER) && _MSC_VER >= 1920) && !defined(__clang__) && !defined(__INTEL_COMPILER) && defined(_M_X64)
+    unsigned __int64 remainder;
+    const uint64_t q = (uint64_t) _udiv128((unsigned __int64) u_hi, 0, (unsigned __int64) d, &remainder);
+    #else
+    /* Implementation based on code from Hacker's delight */
+
+    /* Normalize divisor and shift dividend left */
+    d <<= nlz_d;
+    u_hi <<= nlz_d;
+    /* Break divisor up into two 32-bit digits */
+    const uint64_t d_hi = (uint32_t) (d >> 32);
+    const uint32_t d_lo = (uint32_t) d;
+
+    /* Compute the first quotient digit, q1 */
+    uint64_t q1 = u_hi / d_hi;
+    uint64_t r1 = u_hi - q1 * d_hi;
+
+    while ((q1 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q1, d_lo) > (r1 << 32)) {
+      q1 -= 1;
+      r1 += d_hi;
+      if ((r1 >> 32) != 0) {
+        break;
+      }
+    }
+
+    /* Multiply and subtract.
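Shifting u_hi left by 32 bits brings in the next (zero) 32-bit digit of the dividend, and subtracting q1 * d removes the part already covered by the first quotient digit, leaving the partial remainder used to compute the second digit q0.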
*/ + u_hi = (u_hi << 32) - q1 * d; + + /* Compute the second quotient digit, q0 */ + uint64_t q0 = u_hi / d_hi; + uint64_t r0 = u_hi - q0 * d_hi; + + while ((q0 >> 32) != 0 || fxdiv_mulext_uint32_t((uint32_t) q0, d_lo) > (r0 << 32)) { + q0 -= 1; + r0 += d_hi; + if ((r0 >> 32) != 0) { + break; + } + } + const uint64_t q = (q1 << 32) | (uint32_t) q0; + #endif + result.m = q + UINT64_C(1); + result.s1 = 1; + result.s2 = (uint8_t) l_minus_1; + } + return result; +} + +static inline struct fxdiv_divisor_size_t fxdiv_init_size_t(size_t d) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint_result = fxdiv_init_uint32_t((uint32_t) d); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint_result = fxdiv_init_uint64_t((uint64_t) d); +#else + #error Unsupported platform +#endif + struct fxdiv_divisor_size_t size_result = { + (size_t) uint_result.value, + (size_t) uint_result.m, + uint_result.s1, + uint_result.s2 + }; + return size_result; +} + +static inline uint32_t fxdiv_quotient_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t t = fxdiv_mulhi_uint32_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline uint64_t fxdiv_quotient_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t t = fxdiv_mulhi_uint64_t(n, divisor.m); + return (t + ((n - t) >> divisor.s1)) >> divisor.s2; +} + +static inline size_t fxdiv_quotient_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { +#if SIZE_MAX == UINT32_MAX + const struct fxdiv_divisor_uint32_t uint32_divisor = { + (uint32_t) divisor.value, + (uint32_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint32_t((uint32_t) n, uint32_divisor); +#elif SIZE_MAX == UINT64_MAX + const struct fxdiv_divisor_uint64_t uint64_divisor = { + (uint64_t) divisor.value, + (uint64_t) divisor.m, + divisor.s1, + divisor.s2 + }; + return fxdiv_quotient_uint64_t((uint64_t) n, uint64_divisor); +#else + #error Unsupported platform +#endif +} + +static inline uint32_t fxdiv_remainder_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint64_t fxdiv_remainder_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline size_t fxdiv_remainder_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) { + const size_t quotient = fxdiv_quotient_size_t(n, divisor); + return n - quotient * divisor.value; +} + +static inline uint32_t fxdiv_round_down_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t granularity) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, granularity); + return quotient * granularity.value; +} + +static inline uint64_t fxdiv_round_down_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t granularity) { + const uint64_t quotient = fxdiv_quotient_uint64_t(n, granularity); + return quotient * granularity.value; +} + +static inline size_t fxdiv_round_down_size_t(size_t n, const struct fxdiv_divisor_size_t granularity) { + const size_t quotient = fxdiv_quotient_size_t(n, granularity); + return quotient * granularity.value; +} + +static inline struct fxdiv_result_uint32_t fxdiv_divide_uint32_t(uint32_t n, const struct fxdiv_divisor_uint32_t divisor) { + const uint32_t quotient = fxdiv_quotient_uint32_t(n, divisor); + const 
uint32_t remainder = n - quotient * divisor.value;
+  struct fxdiv_result_uint32_t result = { quotient, remainder };
+  return result;
+}
+
+static inline struct fxdiv_result_uint64_t fxdiv_divide_uint64_t(uint64_t n, const struct fxdiv_divisor_uint64_t divisor) {
+  const uint64_t quotient = fxdiv_quotient_uint64_t(n, divisor);
+  const uint64_t remainder = n - quotient * divisor.value;
+  struct fxdiv_result_uint64_t result = { quotient, remainder };
+  return result;
+}
+
+static inline struct fxdiv_result_size_t fxdiv_divide_size_t(size_t n, const struct fxdiv_divisor_size_t divisor) {
+  const size_t quotient = fxdiv_quotient_size_t(n, divisor);
+  const size_t remainder = n - quotient * divisor.value;
+  struct fxdiv_result_size_t result = { quotient, remainder };
+  return result;
+}
+
+#endif /* FXDIV_H */
diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/nnpack.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/nnpack.h
new file mode 100644
index 0000000000000000000000000000000000000000..97b5ff390076e9ab7ae91e67bfc0d78736aaeffd
--- /dev/null
+++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/nnpack.h
@@ -0,0 +1,659 @@
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <pthreadpool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @brief Status code for any NNPACK function call.
+ */
+enum nnp_status {
+  /** The call succeeded, and all output arguments now contain valid data. */
+  nnp_status_success = 0,
+  /** NNPACK function was called with batch_size == 0. */
+  nnp_status_invalid_batch_size = 2,
+  /** NNPACK function was called with channels == 0. */
+  nnp_status_invalid_channels = 3,
+  /** NNPACK function was called with input_channels == 0. */
+  nnp_status_invalid_input_channels = 4,
+  /** NNPACK function was called with output_channels == 0.
*/ + nnp_status_invalid_output_channels = 5, + /** NNPACK function was called with input_size.height == 0 or input_size.width == 0 */ + nnp_status_invalid_input_size = 10, + /** NNPACK function was called with input_stride.height == 0 or input_stride.width == 0 */ + nnp_status_invalid_input_stride = 11, + /** NNPACK function was called with input_padding not less than respective kernel (or pooling) size, i.e.: + * + * - input_padding.left >= kernel_size.width (>= pooling_size.width) + * - input_padding.right >= kernel_size.width (>= pooling_size.width) + * - input_padding.top >= kernel_size.height (>= pooling_size.height) + * - input_padding.bottom >= kernel_size.height (>= pooling_size.height) + */ + nnp_status_invalid_input_padding = 12, + /** NNPACK function was called with kernel_size.height == 0 or kernel_size.width == 0 */ + nnp_status_invalid_kernel_size = 13, + /** NNPACK function was called with pooling_size.height == 0 or pooling_size.width == 0 */ + nnp_status_invalid_pooling_size = 14, + /** NNPACK function was called with pooling_stride.height == 0 or pooling_stride.width == 0 */ + nnp_status_invalid_pooling_stride = 15, + /** NNPACK function was called with convolution algorithm not in nnp_convolution_algorithm enumeration */ + nnp_status_invalid_algorithm = 16, + /** NNPACK function was called with convolution transform strategy not in nnp_convolution_transform_strategy enum */ + nnp_status_invalid_transform_strategy = 17, + /** NNPACK function was called with output_subsampling.height == 0 or output_subsampling.width == 0 */ + nnp_status_invalid_output_subsampling = 13, + /** NNPACK function was called with activation not in nnp_activation enum */ + nnp_status_invalid_activation = 14, + /** NNPACK function was called with invalid activation parameters */ + nnp_status_invalid_activation_parameters = 15, + + /** NNPACK does not support the particular input size for the function */ + nnp_status_unsupported_input_size = 20, + /** NNPACK does not support the particular input stride for the function */ + nnp_status_unsupported_input_stride = 21, + /** NNPACK does not support the particular input padding for the function */ + nnp_status_unsupported_input_padding = 22, + /** NNPACK does not support the particular kernel size for the function */ + nnp_status_unsupported_kernel_size = 23, + /** NNPACK does not support the particular pooling size for the function */ + nnp_status_unsupported_pooling_size = 24, + /** NNPACK does not support the particular pooling stride for the function */ + nnp_status_unsupported_pooling_stride = 25, + /** NNPACK does not support the particular convolution algorithm for the function */ + nnp_status_unsupported_algorithm = 26, + /** NNPACK does not support the particular convolution transform strategy for the algorithm */ + nnp_status_unsupported_transform_strategy = 27, + /** NNPACK does not support the particular activation function for the function */ + nnp_status_unsupported_activation = 28, + /** NNPACK does not support the particular activation function parameters for the function */ + nnp_status_unsupported_activation_parameters = 29, + + /** NNPACK function was called before the library was initialized */ + nnp_status_uninitialized = 50, + /** NNPACK does not implement this function for the host CPU */ + nnp_status_unsupported_hardware = 51, + /** NNPACK failed to allocate memory for temporary buffers */ + nnp_status_out_of_memory = 52, + /** Scratch space buffer is too small */ + nnp_status_insufficient_buffer = 53, + /** Scratch space buffer 
is not properly aligned */
+  nnp_status_misaligned_buffer = 54
+};
+
+/**
+ * @brief Activation applied after a convolutional or fully-connected layer.
+ */
+enum nnp_activation {
+  /** Identity activation f(x) := x, i.e. no transformation */
+  nnp_activation_identity = 0,
+  /** ReLU activation f(x) := max(0, x) */
+  nnp_activation_relu = 1,
+};
+
+/**
+ * @brief Algorithm for computing convolutional layers.
+ */
+enum nnp_convolution_algorithm {
+  /** Let NNPACK choose the algorithm depending on layer parameters */
+  nnp_convolution_algorithm_auto = 0,
+  /** Tiled convolution based on 2D Fourier transform with 8x8 blocks. Supports kernels up to 8x8. */
+  nnp_convolution_algorithm_ft8x8 = 1,
+  /** Tiled convolution based on 2D Fourier transform with 16x16 blocks. Supports kernels up to 16x16. */
+  nnp_convolution_algorithm_ft16x16 = 2,
+  /** Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks. Supports only 3x3 kernels. */
+  nnp_convolution_algorithm_wt8x8 = 3,
+  /** Direct convolution via implicit GEMM. */
+  nnp_convolution_algorithm_implicit_gemm = 4,
+  /** Direct convolution implementation. */
+  nnp_convolution_algorithm_direct = 5,
+  /**
+   * Tiled convolution based on 2D Winograd transform F(3x3, 6x6) with 8x8 blocks in FP16.
+   * Supports only 3x3 kernels. Implemented only for new ARM processors (with NEON-HP),
+   * on non-supported processors falls back to nnp_convolution_algorithm_wt8x8.
+   */
+  nnp_convolution_algorithm_wt8x8_fp16 = 6,
+};
+
+enum nnp_convolution_transform_strategy {
+  nnp_convolution_transform_strategy_compute = 1,
+  nnp_convolution_transform_strategy_precompute = 2,
+  nnp_convolution_transform_strategy_reuse = 3
+};
+
+/* For backward compatibility */
+#define nnp_convolution_transform_strategy_block_based nnp_convolution_transform_strategy_compute
+#define nnp_convolution_transform_strategy_tuple_based nnp_convolution_transform_strategy_compute
+
+/**
+ * @brief Size of images, kernels, and pooling filters in NNPACK.
+ */
+struct nnp_size {
+  /** Width (horizontal size) of an image, kernel, or pooling filter. */
+  size_t width;
+  /** Height (vertical size) of an image, kernel, or pooling filter. */
+  size_t height;
+};
+
+/**
+ * @brief Padding of images in NNPACK.
+ */
+struct nnp_padding {
+  /** Padding above the image data */
+  size_t top;
+  /** Padding on the right of image data */
+  size_t right;
+  /** Padding below the image data */
+  size_t bottom;
+  /** Padding on the left of image data */
+  size_t left;
+};
+
+/**
+ * @brief Profiling information about time spent in different phases of a function call.
+ */
+struct nnp_profile {
+  /** Time spent inside the function call, in seconds. */
+  double total;
+  /** Time spent on transformation of the input or input gradient tensor, in seconds. */
+  double input_transform;
+  /** Time spent on transformation of the kernel or kernel gradient tensor, in seconds. */
+  double kernel_transform;
+  /** Time spent on transformation of the output or output gradient tensor, in seconds. */
+  double output_transform;
+  /** Time spent on multiplication-accumulation of transformed coefficients, in seconds. */
+  double block_multiplication;
+};
+
+enum nnp_status nnp_initialize(void);
+
+enum nnp_status nnp_deinitialize(void);
+
+/**
+ * @brief Computes output of a 2D convolutional layer from input and kernel tensors.
+ * @details This function targets training of convolutional neural networks and performs forward propagation.
+ * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_convolution_inference for optimal performance. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images. + * @param input_size Size of input images, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. + * @param[out] output A 4D tensor output[batch_size][output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ + +enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of input of a 2D convolutional layer from gradient of output and kernel tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. 
+ * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images (and gradients). + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[out] grad_input A 4D tensor grad_input[batch_size][input_channels][input_size.height][input_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* grad_output, + const float* kernel, + float* grad_input, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes gradient of kernel of a 2D convolutional layer from gradient of output and input tensors. + * @details This function targets training of convolutional neural networks and performs backward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * + * @param batch_size The number of images (and their gradients) on the input and output of the convolutional layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input images. + * @param output_channels The number of channels (AKA features, dimensions) in the output images (and gradients). + * @param input_size Size of input images and their gradients, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input images. + * @param kernel_size Kernel size. + * @param[in] input A 4D tensor input[batch_size][input_channels][input_size.height][input_size.width]. 
+ * @param[in] grad_output A 4D tensor grad_output[batch_size][output_channels][output_size.height][output_size.width] + * where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[out] grad_kernel A 4D tensor + * grad_kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float* input, + const float* grad_output, + float* grad_kernel, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a 2D convolutional layer for a single input image and a kernel tensor. + * @details This function targets prediction with convolutional neural networks and performs forward propagation. + * @param algorithm The type of algorithm to use for convolution. Possible values are: + * + * - nnp_convolution_algorithm_auto -- let the function choose the algorithm. + * - nnp_convolution_algorithm_ft8x8 -- tiled convolution based on 2D Fourier transform with 8x8 blocks. + * Supports kernels up to 8x8. + * - nnp_convolution_algorithm_ft16x16 -- tiled convolution based on 2D Fourier transform with 16x16 blocks. + * Supports kernels up to 16x16. + * - nnp_convolution_algorithm_wt8x8 -- tiled convolution based on 2D Winograd transform F(3x3, 6x6). + * Supports only 3x3 kernels. + * + * @param transform_strategy A strategy that guides computation of kernel transforms coefficients. + * Possible values are: + * + * - nnp_convolution_transform_strategy_block_based -- do multiplication-accumulations on blocks of transformed + * coefficients. + * - nnp_convolution_transform_strategy_tuple_based -- do multiplication-accumulations on tuples of transformed + * coefficients. + * + * @param input_channels The number of channels (AKA features, dimensions) in the input image. + * @param output_channels The number of channels (AKA features, dimensions) in the output image. + * @param input_size Size of input image, excluding implicit zero-padding. + * @param input_padding Implicit zero-padding of input image. + * @param kernel_size Kernel size. + * @param output_subsampling Subsample region for output, also known as convolution stride. + * @param[in] input A 3D tensor input[input_channels][input_size.height][input_size.width]. + * @param[in] kernel A 4D tensor kernel[output_channels][input_channels][kernel_size.height][kernel_size.width]. + * @param[in] bias A 1D array bias[output_channels]. 
+ * @param[out] output A 3D tensor output[output_channels][output_size.height][output_size.width] where + * output_size.height = (input_padding.top + input_size.height + input_padding.bottom) - + * (kernel_size.height - 1) + * output_size.width = (input_padding.left + input_size.width + input_padding.right) - + * (kernel_size.width - 1) + * @param[in] workspace_buffer Buffer for scratch memory used during computation. Buffer must be aligned on 64 bytes. + * If workspace_buffer is NULL and workspace_size is non-NULL, NNPACK would store the size + * of required workspace memory at the workspace_size location, and exit without + * computations. + * If workspace_buffer is NULL and workspace_size is NULL, NNPACK would allocate memory + * before and deallocate after this computation, potentially at significant runtime cost. + * @param[in,out] workspace_size Pointer to the size of workspace buffer. + * If workspace_buffer is NULL, NNPACK will write the size of required scratch memory to + * the location specified by this pointer. + * If workspace_buffer is non-NULL, NNPACK expects workspace_size to specify the size of + * the buffer, in bytes. + * If workspace_size is NULL, workspace_buffer must be NULL as well. In this case NNPACK + * would allocate memory before and deallocate after this computation, potentially at + * significant runtime cost. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. + * @param[out] profile An optional pointer to profiling structure. + * If provided, the structure would record time spent in different phases of the computation. + */ +enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float* input, + const float* kernel, + const float* bias, + float* output, + void* workspace_buffer, + size_t* workspace_size, + enum nnp_activation activation, + const void* activation_parameters, + pthreadpool_t threadpool, + struct nnp_profile* profile); + +/** + * @brief Computes output of a fully connected layer from input and kernel matrices. + * @details This function targets training of convolutional neural networks and performs forward propagation. + * It is optimized for moderate minibatch sizes (64-128) and can be inefficient on a small minibatch. + * For minibatch size 1, use nnp_fully_connected_inference for optimal performance. + * @param batch_size The number of vectors on the input and output of the fully connected layer. + * @param input_channels The number of channels (AKA features, dimensions) in the input matrix. + * @param output_channels The number of channels (AKA features, dimensions) in the output matrix. + * @param[in] input A 2D matrix input[batch_size][input_channels]. + * @param[in] kernel A 2D matrix kernel[output_channels][input_channels]. + * @param[out] output A 2D matrix output[batch_size][output_channels]. + * @param threadpool A thread pool for parallelization of the computation. + * If threadpool is NULL, the computation would run on the caller thread without parallelization. 
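+ *
+ * A minimal call sketch (an editorial illustration, not part of the original
+ * header; it assumes the caller has already allocated and filled the input,
+ * kernel, and output buffers with the shapes described above, and checked the
+ * status of nnp_initialize()):
+ *
+ *     enum nnp_status status = nnp_fully_connected_output(
+ *         batch_size, input_channels, output_channels,
+ *         input, kernel, output,
+ *         NULL,   // threadpool: NULL runs on the calling thread
+ *         NULL);  // profile: NULL skips profiling
+ *     // check status == nnp_status_success, then nnp_deinitialize() when done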
+ */
+enum nnp_status nnp_fully_connected_output(
+  size_t batch_size,
+  size_t input_channels,
+  size_t output_channels,
+  const float input[],
+  const float kernel[],
+  float output[],
+  pthreadpool_t threadpool,
+  struct nnp_profile* profile);
+
+/**
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP32 elements.
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
+ * @param threadpool A thread pool for parallelization of the computation.
+ *                   If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_fully_connected_inference(
+  size_t input_channels,
+  size_t output_channels,
+  const float* input,
+  const float* kernel,
+  float* output,
+  pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a fully connected layer for a single input vector and a kernel matrix.
+ * @details This function targets prediction with convolutional neural networks and performs forward propagation.
+ * @param input_channels The number of channels (AKA features, dimensions) in the input vector.
+ * @param output_channels The number of channels (AKA features, dimensions) in the output vector.
+ * @param[in] input A 1D array input[input_channels] of FP32 elements.
+ * @param[in] kernel A 2D matrix kernel[output_channels][input_channels] of FP16 (ARM alternative format) elements.
+ * @param[out] output A 1D array output[output_channels] of FP32 elements.
+ * @param threadpool A thread pool for parallelization of the computation.
+ *                   If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_fully_connected_inference_f16f32(
+  size_t input_channels,
+  size_t output_channels,
+  const float* input,
+  const void* kernel,
+  float* output,
+  pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a max-pooling layer for an input tensor.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ *          propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of images on the input and output of the max-pooling layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output images.
+ * @param input_size Size of input images, excluding implicit zero-padding.
+ * @param input_padding Implicit padding of input images. The padding pixels are ignored by the pooling filter, but
+ *                      affect the output size.
+ * @param pooling_size Size of the pooling filter. Only 2x2 filters are currently supported.
+ * @param pooling_stride Stride of the pooling filter. Only 2x2 strides are currently supported.
+ * @param[in] input A 4D tensor input[batch_size][channels][input_size.height][input_size.width].
+ * @param[out] output A 4D tensor output[batch_size][channels][output_size.height][output_size.width] where
+ *                    output_size.height = ceil(
+ *                      (input_padding.top + input_size.height + input_padding.bottom - pooling_size.height) /
+ *                        pooling_stride.height) + 1
+ *                    output_size.width = ceil(
+ *                      (input_padding.left + input_size.width + input_padding.right - pooling_size.width) /
+ *                        pooling_stride.width) + 1
+ * @param threadpool A thread pool for parallelization of the computation.
+ *                   If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_max_pooling_output(
+  size_t batch_size,
+  size_t channels,
+  struct nnp_size input_size,
+  struct nnp_padding input_padding,
+  struct nnp_size pooling_size,
+  struct nnp_size pooling_stride,
+  const float input[],
+  float output[],
+  pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a softmax layer for an input matrix.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ *          propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the softmax layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output vectors.
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] output A 2D matrix output[batch_size][channels].
+ * @param threadpool A thread pool for parallelization of the computation.
+ *                   If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_softmax_output(
+  size_t batch_size,
+  size_t channels,
+  const float input[],
+  float output[],
+  pthreadpool_t threadpool);
+
+/**
+ * @brief Computes output of a rectified linear unit (ReLU) layer for an input matrix.
+ * @details This function targets both prediction and training of convolutional neural networks and performs forward
+ *          propagation. It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
+ * @param[in] input A 2D matrix input[batch_size][channels].
+ * @param[out] output A 2D matrix output[batch_size][channels].
+ * @param negative_slope Slope of the activation for negative inputs (0.0f gives the plain ReLU max(0, x)).
+ * @param threadpool A thread pool for parallelization of the computation.
+ *                   If threadpool is NULL, the computation would run on the caller thread without parallelization.
+ */
+enum nnp_status nnp_relu_output(
+  size_t batch_size,
+  size_t channels,
+  const float input[],
+  float output[],
+  float negative_slope,
+  pthreadpool_t threadpool);
+
+/**
+ * @brief Computes gradient of input of a rectified linear unit (ReLU) layer from gradient of output and input matrices.
+ * @details This function targets training of convolutional neural networks and performs backward propagation.
+ *          It is optimized for both large and small minibatch sizes.
+ * @param batch_size The number of vectors on the input and output of the ReLU layer.
+ * @param channels The number of channels (AKA features, dimensions) in both input and output matrices.
+ * @param[in] grad_output A 2D matrix grad_output[batch_size][channels] with gradients of the layer outputs.
+ * @param[in] input A 2D matrix input[batch_size][channels] with the layer inputs from the forward pass.
+ * @param[out] grad_input A 2D matrix grad_input[batch_size][channels] receiving gradients of the layer inputs.
+ * @param negative_slope Slope of the activation for negative inputs, matching the forward pass.
+ * @param threadpool A thread pool for parallelization of the computation.
+ *                   If threadpool is NULL, the computation would run on the caller thread without parallelization.
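+ *
+ * A minimal call sketch (an editorial illustration, not part of the original
+ * header; it assumes grad_output, input, and grad_input point to
+ * caller-managed batch_size x channels buffers, after nnp_initialize()):
+ *
+ *     enum nnp_status status = nnp_relu_input_gradient(
+ *         batch_size, channels,
+ *         grad_output, input, grad_input,
+ *         0.0f,   // negative_slope: 0 for plain ReLU
+ *         NULL);  // threadpool: NULL runs on the calling thread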
+ */ +enum nnp_status nnp_relu_input_gradient( + size_t batch_size, + size_t channels, + const float grad_output[], + const float input[], + float grad_input[], + float negative_slope, + pthreadpool_t threadpool); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#ifdef __cplusplus +// Backward compatible implementations for nnp_convolution_*, if we are in C++ +// mode. +inline enum nnp_status nnp_convolution_output( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_output( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, kernel, bias, output, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_input_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float grad_output[], + const float kernel[], + float grad_input[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_input_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + grad_output, kernel, grad_input, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_kernel_gradient( + enum nnp_convolution_algorithm algorithm, + size_t batch_size, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + const float input[], + const float grad_output[], + float grad_kernel[], + pthreadpool_t threadpool, + struct nnp_profile* profile) +{ + return nnp_convolution_kernel_gradient( + algorithm, + batch_size, input_channels, output_channels, + input_size, input_padding, kernel_size, + input, grad_output, grad_kernel, + NULL, NULL, + nnp_activation_identity, NULL, threadpool, profile); +} + +inline enum nnp_status nnp_convolution_inference( + enum nnp_convolution_algorithm algorithm, + enum nnp_convolution_transform_strategy transform_strategy, + size_t input_channels, + size_t output_channels, + struct nnp_size input_size, + struct nnp_padding input_padding, + struct nnp_size kernel_size, + struct nnp_size output_subsampling, + const float input[], + const float kernel[], + const float bias[], + float output[], + pthreadpool_t threadpool, + struct nnp_profile* profile) { + return nnp_convolution_inference( + algorithm, transform_strategy, + input_channels, output_channels, + input_size, input_padding, kernel_size, output_subsampling, + input, kernel, bias, output, NULL, NULL, + nnp_activation_identity, NULL, + threadpool, profile); +} + +#endif // __cplusplus diff --git a/deepseekvl2/lib/python3.10/site-packages/torch/include/sleef.h b/deepseekvl2/lib/python3.10/site-packages/torch/include/sleef.h new file mode 100644 index 0000000000000000000000000000000000000000..de36514f991a5f9b4774b232a1a6350c47c2c74c --- /dev/null +++ b/deepseekvl2/lib/python3.10/site-packages/torch/include/sleef.h @@ -0,0 +1,4459 @@ +// Copyright Naoki Shibata and contributors 2010 - 2020. 
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+#ifndef __SLEEF_H__
+#define __SLEEF_H__
+
+#define SLEEF_VERSION_MAJOR 3
+#define SLEEF_VERSION_MINOR 6
+#define SLEEF_VERSION_PATCHLEVEL 0
+
+#include <stddef.h>
+#include <stdint.h>
+
+#if (defined(__GNUC__) || defined(__CLANG__)) && !defined(__INTEL_COMPILER)
+#define CONST const
+#else
+#define CONST
+#endif
+
+#if defined(__AVX2__) || defined(__aarch64__) || defined(__arm__) || defined(__powerpc64__) || defined(__zarch__)
+#ifndef FP_FAST_FMA
+#define FP_FAST_FMA
+#endif
+#ifndef FP_FAST_FMAF
+#define FP_FAST_FMAF
+#endif
+#endif
+
+#if defined(_MSC_VER) && !defined(__STDC__)
+#define __STDC__ 1
+#endif
+
+#if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
+#ifdef IMPORT_IS_EXPORT
+#define IMPORT __declspec(dllexport)
+#else // #ifdef IMPORT_IS_EXPORT
+#define IMPORT __declspec(dllimport)
+#if (defined(_MSC_VER))
+#pragma comment(lib,"sleef.lib")
+#endif // #if (defined(_MSC_VER))
+#endif // #ifdef IMPORT_IS_EXPORT
+#else // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
+#define IMPORT
+#endif // #if (defined(__MINGW32__) || defined(__MINGW64__) || defined(__CYGWIN__) || defined(_MSC_VER)) && !defined(SLEEF_STATIC_LIBS)
+
+#if (defined(__GNUC__) || defined(__CLANG__)) && (defined(__i386__) || defined(__x86_64__))
+#include <x86intrin.h>
+#endif
+
+#if (defined(_MSC_VER))
+#include <intrin.h>
+#endif
+
+#if defined(__ARM_NEON__) || defined(__ARM_NEON)
+#include <arm_neon.h>
+#endif
+
+#if defined(__ARM_FEATURE_SVE)
+#include <arm_sve.h>
+#endif
+
+#if defined(__VSX__) && defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
+#include <altivec.h>
+typedef __vector double SLEEF_VECTOR_DOUBLE;
+typedef __vector float SLEEF_VECTOR_FLOAT;
+typedef __vector int SLEEF_VECTOR_INT;
+typedef __vector unsigned int SLEEF_VECTOR_UINT;
+typedef __vector long long SLEEF_VECTOR_LONGLONG;
+typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG;
+#endif
+
+#if defined(__VX__) && defined(__VEC__)
+#ifndef SLEEF_VECINTRIN_H_INCLUDED
+#include <vecintrin.h>
+#define SLEEF_VECINTRIN_H_INCLUDED
+#endif
+typedef __vector double SLEEF_VECTOR_DOUBLE;
+typedef __vector float SLEEF_VECTOR_FLOAT;
+typedef __vector int SLEEF_VECTOR_INT;
+typedef __vector unsigned int SLEEF_VECTOR_UINT;
+typedef __vector long long SLEEF_VECTOR_LONGLONG;
+typedef __vector unsigned long long SLEEF_VECTOR_ULONGLONG;
+#endif
+
+//
+
+#ifndef SLEEF_FP_ILOGB0
+#define SLEEF_FP_ILOGB0 ((int)-2147483648)
+#endif
+
+#ifndef SLEEF_FP_ILOGBNAN
+#define SLEEF_FP_ILOGBNAN ((int)2147483647)
+#endif
+
+//
+
+IMPORT void *Sleef_malloc(size_t z);
+IMPORT void Sleef_free(void *ptr);
+IMPORT uint64_t Sleef_currentTimeMicros();
+
+#if defined(__i386__) || defined(__x86_64__) || defined(_MSC_VER)
+IMPORT void Sleef_x86CpuID(int32_t out[4], uint32_t eax, uint32_t ecx);
+#endif
+
+//
+
+#ifndef Sleef_double2_DEFINED
+#define Sleef_double2_DEFINED
+typedef struct {
+  double x, y;
+} Sleef_double2;
+#endif
+
+#ifndef Sleef_float2_DEFINED
+#define Sleef_float2_DEFINED
+typedef struct {
+  float x, y;
+} Sleef_float2;
+#endif
+
+#ifndef Sleef_longdouble2_DEFINED
+#define Sleef_longdouble2_DEFINED
+typedef struct {
+  long double x, y;
+} Sleef_longdouble2;
+#endif
+
+#if !defined(Sleef_quad_DEFINED)
+#define Sleef_quad_DEFINED
+#if defined(__SIZEOF_FLOAT128__) || (defined(__linux__) && defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
|| (defined(__PPC64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 8) +typedef __float128 Sleef_quad; +#define SLEEF_QUAD_C(x) (x ## Q) +//#elif defined(__SIZEOF_LONG_DOUBLE__) && defined(__aarch64__) +//typedef long double Sleef_quad; +//#define SLEEF_QUAD_C(x) (x ## L) +#else +typedef struct { uint64_t x, y; } Sleef_quad; +#endif +#endif + +#if !defined(Sleef_quad2_DEFINED) +#define Sleef_quad2_DEFINED +typedef union { + struct { + Sleef_quad x, y; + }; + Sleef_quad s[2]; +} Sleef_quad2; +#endif + +#ifdef __cplusplus +extern "C" +{ +#endif + +IMPORT CONST double Sleef_sin_u35(double); +IMPORT CONST double Sleef_cos_u35(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u35(double); +IMPORT CONST double Sleef_tan_u35(double); +IMPORT CONST double Sleef_asin_u35(double); +IMPORT CONST double Sleef_acos_u35(double); +IMPORT CONST double Sleef_atan_u35(double); +IMPORT CONST double Sleef_atan2_u35(double, double); +IMPORT CONST double Sleef_log_u35(double); +IMPORT CONST double Sleef_cbrt_u35(double); +IMPORT CONST double Sleef_sin_u10(double); +IMPORT CONST double Sleef_cos_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincos_u10(double); +IMPORT CONST double Sleef_tan_u10(double); +IMPORT CONST double Sleef_asin_u10(double); +IMPORT CONST double Sleef_acos_u10(double); +IMPORT CONST double Sleef_atan_u10(double); +IMPORT CONST double Sleef_atan2_u10(double, double); +IMPORT CONST double Sleef_log_u10(double); +IMPORT CONST double Sleef_cbrt_u10(double); +IMPORT CONST double Sleef_exp_u10(double); +IMPORT CONST double Sleef_pow_u10(double, double); +IMPORT CONST double Sleef_sinh_u10(double); +IMPORT CONST double Sleef_cosh_u10(double); +IMPORT CONST double Sleef_tanh_u10(double); +IMPORT CONST double Sleef_sinh_u35(double); +IMPORT CONST double Sleef_cosh_u35(double); +IMPORT CONST double Sleef_tanh_u35(double); +IMPORT CONST double Sleef_asinh_u10(double); +IMPORT CONST double Sleef_acosh_u10(double); +IMPORT CONST double Sleef_atanh_u10(double); +IMPORT CONST double Sleef_exp2_u10(double); +IMPORT CONST double Sleef_exp10_u10(double); +IMPORT CONST double Sleef_exp2_u35(double); +IMPORT CONST double Sleef_exp10_u35(double); +IMPORT CONST double Sleef_expm1_u10(double); +IMPORT CONST double Sleef_log10_u10(double); +IMPORT CONST double Sleef_log2_u10(double); +IMPORT CONST double Sleef_log2_u35(double); +IMPORT CONST double Sleef_log1p_u10(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u05(double); +IMPORT CONST Sleef_double2 Sleef_sincospi_u35(double); +IMPORT CONST double Sleef_sinpi_u05(double); +IMPORT CONST double Sleef_cospi_u05(double); +IMPORT CONST double Sleef_ldexp(double, int); +IMPORT CONST int Sleef_ilogb(double); +IMPORT CONST double Sleef_fma(double, double, double); +IMPORT CONST double Sleef_sqrt(double); +IMPORT CONST double Sleef_sqrt_u05(double); +IMPORT CONST double Sleef_sqrt_u35(double); + +IMPORT CONST double Sleef_hypot_u05(double, double); +IMPORT CONST double Sleef_hypot_u35(double, double); + +IMPORT CONST double Sleef_fabs(double); +IMPORT CONST double Sleef_copysign(double, double); +IMPORT CONST double Sleef_fmax(double, double); +IMPORT CONST double Sleef_fmin(double, double); +IMPORT CONST double Sleef_fdim(double, double); +IMPORT CONST double Sleef_trunc(double); +IMPORT CONST double Sleef_floor(double); +IMPORT CONST double Sleef_ceil(double); +IMPORT CONST double Sleef_round(double); +IMPORT CONST double Sleef_rint(double); +IMPORT CONST double Sleef_nextafter(double, double); +IMPORT CONST double Sleef_frfrexp(double); +IMPORT 
CONST int Sleef_expfrexp(double); +IMPORT CONST double Sleef_fmod(double, double); +IMPORT CONST double Sleef_remainder(double, double); +IMPORT CONST Sleef_double2 Sleef_modf(double); + +IMPORT CONST double Sleef_lgamma_u10(double); +IMPORT CONST double Sleef_tgamma_u10(double); +IMPORT CONST double Sleef_erf_u10(double); +IMPORT CONST double Sleef_erfc_u15(double); + +IMPORT CONST float Sleef_sinf_u35(float); +IMPORT CONST float Sleef_cosf_u35(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u35(float); +IMPORT CONST float Sleef_tanf_u35(float); +IMPORT CONST float Sleef_asinf_u35(float); +IMPORT CONST float Sleef_acosf_u35(float); +IMPORT CONST float Sleef_atanf_u35(float); +IMPORT CONST float Sleef_atan2f_u35(float, float); +IMPORT CONST float Sleef_logf_u35(float); +IMPORT CONST float Sleef_cbrtf_u35(float); +IMPORT CONST float Sleef_sinf_u10(float); +IMPORT CONST float Sleef_cosf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincosf_u10(float); +IMPORT CONST float Sleef_fastsinf_u3500(float); +IMPORT CONST float Sleef_fastcosf_u3500(float); +IMPORT CONST float Sleef_tanf_u10(float); +IMPORT CONST float Sleef_asinf_u10(float); +IMPORT CONST float Sleef_acosf_u10(float); +IMPORT CONST float Sleef_atanf_u10(float); +IMPORT CONST float Sleef_atan2f_u10(float, float); +IMPORT CONST float Sleef_logf_u10(float); +IMPORT CONST float Sleef_cbrtf_u10(float); +IMPORT CONST float Sleef_expf_u10(float); +IMPORT CONST float Sleef_powf_u10(float, float); +IMPORT CONST float Sleef_fastpowf_u3500(float, float); +IMPORT CONST float Sleef_sinhf_u10(float); +IMPORT CONST float Sleef_coshf_u10(float); +IMPORT CONST float Sleef_tanhf_u10(float); +IMPORT CONST float Sleef_sinhf_u35(float); +IMPORT CONST float Sleef_coshf_u35(float); +IMPORT CONST float Sleef_tanhf_u35(float); +IMPORT CONST float Sleef_asinhf_u10(float); +IMPORT CONST float Sleef_acoshf_u10(float); +IMPORT CONST float Sleef_atanhf_u10(float); +IMPORT CONST float Sleef_exp2f_u10(float); +IMPORT CONST float Sleef_exp10f_u10(float); +IMPORT CONST float Sleef_exp2f_u35(float); +IMPORT CONST float Sleef_exp10f_u35(float); +IMPORT CONST float Sleef_expm1f_u10(float); +IMPORT CONST float Sleef_log10f_u10(float); +IMPORT CONST float Sleef_log2f_u10(float); +IMPORT CONST float Sleef_log2f_u35(float); +IMPORT CONST float Sleef_log1pf_u10(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u05(float); +IMPORT CONST Sleef_float2 Sleef_sincospif_u35(float); +IMPORT CONST float Sleef_sinpif_u05(float d); +IMPORT CONST float Sleef_cospif_u05(float d); +IMPORT CONST float Sleef_ldexpf(float, int); +IMPORT CONST int Sleef_ilogbf(float); +IMPORT CONST float Sleef_fmaf(float, float, float); +IMPORT CONST float Sleef_sqrtf(float); +IMPORT CONST float Sleef_sqrtf_u05(float); +IMPORT CONST float Sleef_sqrtf_u35(float); + +IMPORT CONST float Sleef_hypotf_u05(float, float); +IMPORT CONST float Sleef_hypotf_u35(float, float); + +IMPORT CONST float Sleef_fabsf(float); +IMPORT CONST float Sleef_copysignf(float, float); +IMPORT CONST float Sleef_fmaxf(float, float); +IMPORT CONST float Sleef_fminf(float, float); +IMPORT CONST float Sleef_fdimf(float, float); +IMPORT CONST float Sleef_truncf(float); +IMPORT CONST float Sleef_floorf(float); +IMPORT CONST float Sleef_ceilf(float); +IMPORT CONST float Sleef_roundf(float); +IMPORT CONST float Sleef_rintf(float); +IMPORT CONST float Sleef_nextafterf(float, float); +IMPORT CONST float Sleef_frfrexpf(float); +IMPORT CONST int Sleef_expfrexpf(float); +IMPORT CONST float Sleef_fmodf(float, float); +IMPORT CONST float 
Sleef_remainderf(float, float); +IMPORT CONST Sleef_float2 Sleef_modff(float); + +IMPORT CONST float Sleef_lgammaf_u10(float); +IMPORT CONST float Sleef_tgammaf_u10(float); +IMPORT CONST float Sleef_erff_u10(float); +IMPORT CONST float Sleef_erfcf_u15(float); + +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u05(long double); +IMPORT CONST Sleef_longdouble2 Sleef_sincospil_u35(long double); + +#if defined(Sleef_quad2_DEFINED) +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u05(Sleef_quad); +IMPORT CONST Sleef_quad2 Sleef_sincospiq_u35(Sleef_quad); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35(__m128d); +IMPORT CONST __m128d 
Sleef_cinz_coshd2_u35(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2(__m128d); +IMPORT CONST __m128d Sleef_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2(__m128d, __m128d); +IMPORT CONST 
__m128d Sleef_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2(__m128d); +IMPORT CONST __m128d Sleef_floord2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2(__m128d); +IMPORT CONST __m128d Sleef_ceild2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2(__m128d); +IMPORT CONST __m128d Sleef_roundd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2(__m128d); +IMPORT CONST __m128d Sleef_rintd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15(__m128d); +IMPORT CONST int Sleef_getIntd2(int); +IMPORT CONST void *Sleef_getPtrd2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10(__m128); 
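+
+/* Editorial note, inferred from SLEEF's public documentation rather than from
+   this header: the _u05/_u10/_u15/_u35 suffixes encode the maximum error in
+   tenths of an ULP (e.g. Sleef_atanf4_u10 above is bounded by 1.0 ULP, the
+   _u35 variants by 3.5 ULP), the digit in names like sinf4/sind2 is the
+   vector width (4 floats or 2 doubles in a 128-bit register), and the
+   Sleef_cinz_* declarations appear to be the deterministic, non-FMA
+   counterparts of the corresponding Sleef_* functions. */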
+IMPORT CONST __m128 Sleef_cinz_atanf4_u10(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10(__m128); +IMPORT CONST __m128 Sleef_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10(__m128); +IMPORT CONST __m128 Sleef_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05(__m128); +IMPORT CONST __m128 Sleef_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35(__m128); +IMPORT CONST __m128 
Sleef_cinz_sqrtf4_u35(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4(__m128); +IMPORT CONST __m128 Sleef_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4(__m128); +IMPORT CONST __m128 Sleef_floorf4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4(__m128); +IMPORT CONST __m128 Sleef_ceilf4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4(__m128); +IMPORT CONST __m128 Sleef_roundf4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4(__m128); +IMPORT CONST __m128 Sleef_rintf4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4(__m128); +IMPORT CONST __m128 Sleef_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10(__m128); +IMPORT CONST __m128 Sleef_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15(__m128); +IMPORT CONST int Sleef_getIntf4(int); +IMPORT CONST int Sleef_cinz_getIntf4(int); +IMPORT CONST void *Sleef_getPtrf4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse2(__m128d); +IMPORT CONST __m128d 
Sleef_cinz_logd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse2(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d 
Sleef_cinz_log10d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse2(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse2(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse2(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse2(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse2(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse2(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST __m128d 
Sleef_cinz_remainderd2_sse2(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_sse2(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse2(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfd2_u10sse2(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse2(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse2(__m128d); +IMPORT CONST int Sleef_getIntd2_sse2(int); +IMPORT CONST void *Sleef_getPtrd2_sse2(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 
Sleef_cinz_tanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastsinf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse2(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse2(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse2(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse2(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse2(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse2(__m128, __m128); +IMPORT 
CONST __m128 Sleef_fminf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse2(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse2(__m128); +IMPORT CONST __m128 Sleef_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse2(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse2(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse2(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse2(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse2(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse2(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse2(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse2(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse2(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse2(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse2(__m128); +IMPORT CONST int Sleef_getIntf4_sse2(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse2(int); +IMPORT CONST void *Sleef_getPtrf4_sse2(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cosd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10sse4(__m128d); +IMPORT CONST 
Sleef___m128d_2 Sleef_cinz_sincosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asind2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acosd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atand2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_atan2d2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_logd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cbrtd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_powd2_u10sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_coshd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tanhd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastsind2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fastcosd2_u3500sse4(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fastpowd2_u3500sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_asinhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_acoshd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_atanhd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_exp10d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_expm1d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log10d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log2d2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_log1pd2_u10sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05sse4(__m128d); +IMPORT 
CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u05sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_sincospid2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sinpid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_cospid2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128d Sleef_cinz_ldexpd2_sse4(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_ilogbd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmad2_sse4(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u05sse4(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_sqrtd2_u35sse4(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u05sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_hypotd2_u35sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_fabsd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_copysignd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmaxd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmind2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fdimd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_truncd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_floord2_sse4(__m128d); +IMPORT CONST __m128d Sleef_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_ceild2_sse4(__m128d); +IMPORT CONST __m128d Sleef_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_roundd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_rintd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_nextafterd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_frfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_fmodd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST __m128d Sleef_cinz_remainderd2_sse4(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_sse4(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_cinz_modfd2_sse4(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_lgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_tgammad2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10sse4(__m128d); +IMPORT CONST 
__m128d Sleef_cinz_erfd2_u10sse4(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15sse4(__m128d); +IMPORT CONST __m128d Sleef_cinz_erfcd2_u15sse4(__m128d); +IMPORT CONST int Sleef_getIntd2_sse4(int); +IMPORT CONST void *Sleef_getPtrd2_sse4(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acosf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_atan2f4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_logf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cbrtf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_powf4_u10sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_coshf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tanhf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500sse4(__m128); +IMPORT CONST __m128 
Sleef_cinz_fastsinf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fastcosf4_u3500sse4(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fastpowf4_u3500sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_asinhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_acoshf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_atanhf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_exp10f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_expm1f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log10f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log2f4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_log1pf4_u10sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u05sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_sincospif4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sinpif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_cospif4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaf4_sse4(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u05sse4(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_sqrtf4_u35sse4(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u05sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_hypotf4_u35sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_fabsf4_sse4(__m128); +IMPORT CONST __m128 Sleef_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_copysignf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmaxf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fminf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fdimf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_truncf4_sse4(__m128); +IMPORT CONST __m128 Sleef_floorf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_floorf4_sse4(__m128); 
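Two more conventions worth calling out at this point: the `sse2`/`sse4` (and later `avx`) suffixed names bind directly to one ISA-specific kernel, bypassing any runtime dispatch, and the `Sleef_cinz_` aliases are, per SLEEF's documentation as I read it, the variants guaranteed to return bit-identical results across platforms (the non-FMA lowering). A hedged sketch of calling an SSE4 kernel directly, assuming the target CPU is known to support SSE4.1 (compile with -msse4.1):

#include <smmintrin.h>  /* SSE4.1 */
#include <sleef.h>

/* Direct call into the SSE4-specialized 0.5-ULP square root;
   no dispatcher involved, so the caller must guarantee SSE4.1. */
static inline __m128 sqrt4(__m128 v) {
    return Sleef_sqrtf4_u05sse4(v);
}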
+IMPORT CONST __m128 Sleef_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_ceilf4_sse4(__m128); +IMPORT CONST __m128 Sleef_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_roundf4_sse4(__m128); +IMPORT CONST __m128 Sleef_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_rintf4_sse4(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_nextafterf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_frfrexpf4_sse4(__m128); +IMPORT CONST __m128 Sleef_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_fmodf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_sse4(__m128, __m128); +IMPORT CONST __m128 Sleef_cinz_remainderf4_sse4(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_sse4(__m128); +IMPORT CONST Sleef___m128_2 Sleef_cinz_modff4_sse4(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_lgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_tgammaf4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erff4_u10sse4(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15sse4(__m128); +IMPORT CONST __m128 Sleef_cinz_erfcf4_u15sse4(__m128); +IMPORT CONST int Sleef_getIntf4_sse4(int); +IMPORT CONST int Sleef_cinz_getIntf4_sse4(int); +IMPORT CONST void *Sleef_getPtrf4_sse4(int); +IMPORT CONST void *Sleef_cinz_getPtrf4_sse4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10(__m256d, __m256d); +IMPORT CONST 
__m256d Sleef_cinz_atan2d4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4(__m256d); +IMPORT CONST __m256d Sleef_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4(__m256d); 
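The `Sleef___m256d_2` struct declared above exists so that paired results such as sincos come back from a single call; by SLEEF's convention the `.x` member carries the sines and `.y` the cosines. A minimal sketch (assumes AVX, compile with -mavx; `rotate4` is a hypothetical helper name):

#include <immintrin.h>
#include <sleef.h>

/* One pass computes both sin and cos for four angles. */
void rotate4(const double *theta, double *s, double *c) {
    __m256d t = _mm256_loadu_pd(theta);
    Sleef___m256d_2 sc = Sleef_sincosd4_u10(t);
    _mm256_storeu_pd(s, sc.x);  /* sines */
    _mm256_storeu_pd(c, sc.y);  /* cosines */
}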
+IMPORT CONST __m256d Sleef_cinz_sqrtd4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4(__m256d); +IMPORT CONST __m256d Sleef_floord4(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4(__m256d); +IMPORT CONST __m256d Sleef_ceild4(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4(__m256d); +IMPORT CONST __m256d Sleef_roundd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4(__m256d); +IMPORT CONST __m256d Sleef_rintd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15(__m256d); +IMPORT CONST int Sleef_getIntd4(int); +IMPORT CONST void *Sleef_getPtrd4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35(__m256); +IMPORT 
CONST __m256 Sleef_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10(__m256); +IMPORT CONST __m256 Sleef_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10(__m256); +IMPORT CONST __m256 Sleef_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35(__m256); +IMPORT CONST __m256 
Sleef_cinz_log2f8_u35(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05(__m256); +IMPORT CONST __m256 Sleef_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u35(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8(__m256); +IMPORT CONST __m256 Sleef_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8(__m256); +IMPORT CONST __m256 Sleef_floorf8(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8(__m256); +IMPORT CONST __m256 Sleef_ceilf8(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8(__m256); +IMPORT CONST __m256 Sleef_roundf8(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8(__m256); +IMPORT CONST __m256 Sleef_rintf8(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8(__m256); +IMPORT CONST __m256 Sleef_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8(__m256); +IMPORT CONST __m256 Sleef_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10(__m256); +IMPORT CONST __m256 Sleef_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15(__m256); +IMPORT CONST int Sleef_getIntf8(int); +IMPORT CONST int Sleef_cinz_getIntf8(int); +IMPORT CONST void *Sleef_getPtrf8(int); +IMPORT CONST void *Sleef_cinz_getPtrf8(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx(__m256d); +IMPORT CONST 
__m256d Sleef_cinz_sind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asind4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acosd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atand4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_atan2d4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_logd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cbrtd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_powd4_u10avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_coshd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tanhd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastsind4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fastcosd4_u3500avx(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fastpowd4_u3500avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_asinhd4_u10avx(__m256d); 
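The `fast*_u3500` entries declared above make the accuracy/speed trade-off explicit: where `_u10` is bounded by 1.0 ULP, the fast variants accept roughly a 350-ULP bound (and, per SLEEF's documentation, a restricted argument range) in exchange for throughput. A sketch contrasting the two, assuming AVX; the wrapper names are illustrative only:

#include <immintrin.h>
#include <sleef.h>

/* 1.0-ULP pow: use when accuracy matters. */
__m256d pow_accurate(__m256d x, __m256d y) { return Sleef_powd4_u10avx(x, y); }

/* ~350-ULP fast pow: only where inputs are known to be benign. */
__m256d pow_fast(__m256d x, __m256d y) { return Sleef_fastpowd4_u3500avx(x, y); }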
+IMPORT CONST __m256d Sleef_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_acoshd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_atanhd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_exp10d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_expm1d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log10d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log2d4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_log1pd4_u10avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u05avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_sincospid4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sinpid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_cospid4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m256d Sleef_cinz_ldexpd4_avx(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_ilogbd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmad4_avx(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u05avx(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_sqrtd4_u35avx(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u05avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_hypotd4_u35avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_fabsd4_avx(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_copysignd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmaxd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmind4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fdimd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_truncd4_avx(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_floord4_avx(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_ceild4_avx(__m256d); 
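
[The Sleef___m256d_2 / Sleef___m256_2 structs typedef'd in this hunk exist so that functions producing two vectors per call (sincos, sincospi, modf) can return both in a single value. Per my reading of the SLEEF documentation — treat the field assignment as an assumption — x carries the first result (the sine, or the fractional part for modf) and y the second (the cosine, or the integral part). A small sketch under the same build assumptions as above:]

```c
#include <stdio.h>
#include <immintrin.h>
#include <sleef.h>

int main(void) {
  __m256d v = _mm256_set1_pd(0.25);

  /* One call computes sine and cosine together;
   * sc.x = sin(v), sc.y = cos(v) (assumed per SLEEF docs). */
  Sleef___m256d_2 sc = Sleef_sincosd4_u10avx(v);

  double s[4], c[4];
  _mm256_storeu_pd(s, sc.x);
  _mm256_storeu_pd(c, sc.y);
  printf("sin(0.25) = %f, cos(0.25) = %f\n", s[0], c[0]);
  return 0;
}
```
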
+IMPORT CONST __m256d Sleef_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_roundd4_avx(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_rintd4_avx(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_nextafterd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_frfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx(__m256d); +IMPORT CONST __m128i Sleef_cinz_expfrexpd4_avx(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_fmodd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx(__m256d, __m256d); +IMPORT CONST __m256d Sleef_cinz_remainderd4_avx(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_cinz_modfd4_avx(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_lgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_tgammad4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfd4_u10avx(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx(__m256d); +IMPORT CONST __m256d Sleef_cinz_erfcd4_u15avx(__m256d); +IMPORT CONST int Sleef_getIntd4_avx(int); +IMPORT CONST void *Sleef_getPtrd4_avx(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acosf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_atan2f8_u10avx(__m256, __m256); +IMPORT CONST 
__m256 Sleef_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_logf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cbrtf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_powf8_u10avx(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_coshf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tanhf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastsinf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fastcosf8_u3500avx(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fastpowf8_u3500avx(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_asinhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_acoshf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_atanhf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_exp10f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_expm1f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log10f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u10avx(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log2f8_u35avx(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_log1pf8_u10avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u05avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_sincospif8_u35avx(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sinpif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_cospif8_u05avx(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaf8_avx(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_cinz_sqrtf8_u05avx(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx(__m256); +IMPORT CONST 
__m256 Sleef_cinz_sqrtf8_u35avx(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u05avx(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_hypotf8_u35avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_fabsf8_avx(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_copysignf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmaxf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fminf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fdimf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_truncf8_avx(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_floorf8_avx(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_ceilf8_avx(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_roundf8_avx(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_rintf8_avx(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_nextafterf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_cinz_frfrexpf8_avx(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_fmodf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx(__m256, __m256); +IMPORT CONST __m256 Sleef_cinz_remainderf8_avx(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx(__m256); +IMPORT CONST Sleef___m256_2 Sleef_cinz_modff8_avx(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_lgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_tgammaf8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erff8_u10avx(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx(__m256); +IMPORT CONST __m256 Sleef_cinz_erfcf8_u15avx(__m256); +IMPORT CONST int Sleef_getIntf8_avx(int); +IMPORT CONST int Sleef_cinz_getIntf8_avx(int); +IMPORT CONST void *Sleef_getPtrf8_avx(int); +IMPORT CONST void *Sleef_cinz_getPtrf8_avx(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35fma4(__m256d, 
__m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500fma4(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35fma4(__m256d); +IMPORT CONST __m256d 
Sleef_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_log1pd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_fma4(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_fma4(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05fma4(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35fma4(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_fma4(__m256d); +IMPORT CONST __m256d Sleef_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_fma4(__m256d); +IMPORT CONST __m256d Sleef_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_fma4(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_fma4(__m256d, __m256d); +IMPORT 
CONST __m256d Sleef_finz_fmodd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_fma4(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_fma4(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_fma4(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10fma4(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15fma4(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15fma4(__m256d); +IMPORT CONST int Sleef_getIntd4_fma4(int); +IMPORT CONST void *Sleef_getPtrd4_fma4(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10fma4(__m256); +IMPORT CONST __m256 
Sleef_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500fma4(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_fma4(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05fma4(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35fma4(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_fma4(__m256); +IMPORT CONST __m256 Sleef_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 
Sleef_finz_copysignf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_truncf8_fma4(__m256); +IMPORT CONST __m256 Sleef_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_fma4(__m256); +IMPORT CONST __m256 Sleef_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_fma4(__m256); +IMPORT CONST __m256 Sleef_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_fma4(__m256); +IMPORT CONST __m256 Sleef_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_fma4(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_fma4(__m256); +IMPORT CONST __m256 Sleef_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_fma4(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_fma4(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_fma4(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_fma4(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10fma4(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15fma4(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15fma4(__m256); +IMPORT CONST int Sleef_getIntf8_fma4(int); +IMPORT CONST int Sleef_finz_getIntf8_fma4(int); +IMPORT CONST void *Sleef_getPtrf8_fma4(int); +IMPORT CONST void *Sleef_finz_getPtrf8_fma4(int); +#endif +#ifdef __AVX__ + +#ifndef Sleef___m256d_2_DEFINED +typedef struct { + __m256d x, y; +} Sleef___m256d_2; +#define Sleef___m256d_2_DEFINED +#endif + +IMPORT CONST __m256d Sleef_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sind4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_cosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincosd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asind4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acosd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atand4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_atan2d4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_logd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cbrtd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_powd4_u10avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_coshd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tanhd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastsind4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fastcosd4_u3500avx2(__m256d); +IMPORT CONST __m256d Sleef_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fastpowd4_u3500avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_asinhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_acoshd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_atanhd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_exp10d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_expm1d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log10d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log2d4_u35avx2(__m256d); +IMPORT CONST __m256d 
Sleef_log1pd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_log1pd4_u10avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u05avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_sincospid4_u35avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_sincospid4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sinpid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_cospid4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m256d Sleef_finz_ldexpd4_avx2(__m256d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_ilogbd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmad4_avx2(__m256d, __m256d, __m256d); +IMPORT CONST __m256d Sleef_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u05avx2(__m256d); +IMPORT CONST __m256d Sleef_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_sqrtd4_u35avx2(__m256d); +IMPORT CONST __m256d Sleef_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u05avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_hypotd4_u35avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_fabsd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_copysignd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmaxd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmind4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fdimd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_truncd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_floord4_avx2(__m256d); +IMPORT CONST __m256d Sleef_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_ceild4_avx2(__m256d); +IMPORT CONST __m256d Sleef_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_roundd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_rintd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_nextafterd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_frfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_frfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_expfrexpd4_avx2(__m256d); +IMPORT CONST __m128i Sleef_finz_expfrexpd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_fmodd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST __m256d Sleef_finz_remainderd4_avx2(__m256d, __m256d); +IMPORT CONST Sleef___m256d_2 Sleef_modfd4_avx2(__m256d); +IMPORT CONST Sleef___m256d_2 Sleef_finz_modfd4_avx2(__m256d); +IMPORT CONST __m256d Sleef_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_lgammad4_u10avx2(__m256d); +IMPORT CONST __m256d 
Sleef_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_tgammad4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfd4_u10avx2(__m256d); +IMPORT CONST __m256d Sleef_erfcd4_u15avx2(__m256d); +IMPORT CONST __m256d Sleef_finz_erfcd4_u15avx2(__m256d); +IMPORT CONST int Sleef_getIntd4_avx2(int); +IMPORT CONST void *Sleef_getPtrd4_avx2(int); + +#ifndef Sleef___m256_2_DEFINED +typedef struct { + __m256 x, y; +} Sleef___m256_2; +#define Sleef___m256_2_DEFINED +#endif + +IMPORT CONST __m256 Sleef_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincosf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acosf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_atan2f8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_logf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cbrtf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_powf8_u10avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_coshf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_tanhf8_u35avx2(__m256); +IMPORT 
CONST __m256 Sleef_finz_tanhf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastsinf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fastcosf8_u3500avx2(__m256); +IMPORT CONST __m256 Sleef_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fastpowf8_u3500avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_asinhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_acoshf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_atanhf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_exp10f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_expm1f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log10f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log2f8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_log1pf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_log1pf8_u10avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u05avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_sincospif8_u35avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_sincospif8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sinpif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_cospif8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaf8_avx2(__m256, __m256, __m256); +IMPORT CONST __m256 Sleef_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u05avx2(__m256); +IMPORT CONST __m256 Sleef_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_finz_sqrtf8_u35avx2(__m256); +IMPORT CONST __m256 Sleef_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u05avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_hypotf8_u35avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_fabsf8_avx2(__m256); +IMPORT CONST __m256 Sleef_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_copysignf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmaxf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fminf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fdimf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_truncf8_avx2(__m256); +IMPORT CONST __m256 
Sleef_finz_truncf8_avx2(__m256); +IMPORT CONST __m256 Sleef_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_floorf8_avx2(__m256); +IMPORT CONST __m256 Sleef_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_ceilf8_avx2(__m256); +IMPORT CONST __m256 Sleef_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_roundf8_avx2(__m256); +IMPORT CONST __m256 Sleef_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_rintf8_avx2(__m256); +IMPORT CONST __m256 Sleef_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_nextafterf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_finz_frfrexpf8_avx2(__m256); +IMPORT CONST __m256 Sleef_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_fmodf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_remainderf8_avx2(__m256, __m256); +IMPORT CONST __m256 Sleef_finz_remainderf8_avx2(__m256, __m256); +IMPORT CONST Sleef___m256_2 Sleef_modff8_avx2(__m256); +IMPORT CONST Sleef___m256_2 Sleef_finz_modff8_avx2(__m256); +IMPORT CONST __m256 Sleef_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_lgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_tgammaf8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erff8_u10avx2(__m256); +IMPORT CONST __m256 Sleef_erfcf8_u15avx2(__m256); +IMPORT CONST __m256 Sleef_finz_erfcf8_u15avx2(__m256); +IMPORT CONST int Sleef_getIntf8_avx2(int); +IMPORT CONST int Sleef_finz_getIntf8_avx2(int); +IMPORT CONST void *Sleef_getPtrf8_avx2(int); +IMPORT CONST void *Sleef_finz_getPtrf8_avx2(int); +#endif +#ifdef __SSE2__ + +#ifndef Sleef___m128d_2_DEFINED +typedef struct { + __m128d x, y; +} Sleef___m128d_2; +#define Sleef___m128d_2_DEFINED +#endif + +IMPORT CONST __m128d Sleef_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincosd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tand2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asind2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acosd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atand2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_atan2d2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_logd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cbrtd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_powd2_u10avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_coshd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tanhd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastsind2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fastcosd2_u3500avx2128(__m128d); +IMPORT CONST __m128d Sleef_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fastpowd2_u3500avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_asinhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_acoshd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_atanhd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_exp10d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_expm1d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log10d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log2d2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_log1pd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_log1pd2_u10avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u05avx2128(__m128d); +IMPORT CONST 
Sleef___m128d_2 Sleef_finz_sincospid2_u05avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_sincospid2_u35avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_sincospid2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sinpid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_cospid2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128d Sleef_finz_ldexpd2_avx2128(__m128d, __m128i); +IMPORT CONST __m128i Sleef_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_ilogbd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmad2_avx2128(__m128d, __m128d, __m128d); +IMPORT CONST __m128d Sleef_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u05avx2128(__m128d); +IMPORT CONST __m128d Sleef_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_sqrtd2_u35avx2128(__m128d); +IMPORT CONST __m128d Sleef_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u05avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_hypotd2_u35avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_fabsd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_copysignd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmaxd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmind2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fdimd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_truncd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_floord2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_ceild2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_roundd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_rintd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_nextafterd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_frfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128i Sleef_finz_expfrexpd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_fmodd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST __m128d Sleef_finz_remainderd2_avx2128(__m128d, __m128d); +IMPORT CONST Sleef___m128d_2 Sleef_modfd2_avx2128(__m128d); +IMPORT CONST Sleef___m128d_2 Sleef_finz_modfd2_avx2128(__m128d); +IMPORT CONST __m128d Sleef_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_lgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d 
Sleef_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_tgammad2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfd2_u10avx2128(__m128d); +IMPORT CONST __m128d Sleef_erfcd2_u15avx2128(__m128d); +IMPORT CONST __m128d Sleef_finz_erfcd2_u15avx2128(__m128d); +IMPORT CONST int Sleef_getIntd2_avx2128(int); +IMPORT CONST void *Sleef_getPtrd2_avx2128(int); + +#ifndef Sleef___m128_2_DEFINED +typedef struct { + __m128 x, y; +} Sleef___m128_2; +#define Sleef___m128_2_DEFINED +#endif + +IMPORT CONST __m128 Sleef_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincosf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acosf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_atan2f4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_logf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cbrtf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_powf4_u10avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 
Sleef_finz_sinhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_coshf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tanhf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastsinf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fastcosf4_u3500avx2128(__m128); +IMPORT CONST __m128 Sleef_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fastpowf4_u3500avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_asinhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_acoshf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_atanhf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_exp10f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_expm1f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log10f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log2f4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_log1pf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_log1pf4_u10avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u05avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_sincospif4_u35avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_sincospif4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sinpif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_cospif4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmaf4_avx2128(__m128, __m128, __m128); +IMPORT CONST __m128 Sleef_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u05avx2128(__m128); +IMPORT CONST __m128 Sleef_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_sqrtf4_u35avx2128(__m128); +IMPORT CONST __m128 Sleef_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u05avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_hypotf4_u35avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_fabsf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_copysignf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fmaxf4_avx2128(__m128, __m128); 
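/* Editorial example — not part of the vendored header. Every entry point
   above follows one naming scheme: Sleef_<op><type/lanes>_<accuracy><isa>.
   E.g. Sleef_sinf4_u10avx2128 = sine of 4 floats, error bound 1.0 ULP
   ("u10"; "u35" = 3.5 ULP, "u05" = 0.5 ULP), 128-bit AVX2 implementation.
   The finz_ aliases mirror each plain name on the FMA-capable variants
   (the nofma and pure-C variants later in this header use cinz_ instead).
   A minimal sketch of a call, assuming sleef.h and libsleef are installed
   and the file is built with `cc -mavx2 -mfma example.c -lsleef`: */
#include <stdio.h>
#include <immintrin.h>
#include <sleef.h>

int main(void) {
    __m128 x = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.5f); /* four float inputs   */
    __m128 s = Sleef_sinf4_u10avx2128(x);          /* sine, <= 1.0 ULP    */
    float out[4];
    _mm_storeu_ps(out, s);
    for (int i = 0; i < 4; i++) printf("%f\n", out[i]);
    return 0;
}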
+IMPORT CONST __m128 Sleef_finz_fmaxf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fminf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fdimf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_truncf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_floorf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_ceilf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_roundf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_rintf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_nextafterf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_frfrexpf4_avx2128(__m128); +IMPORT CONST __m128 Sleef_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_fmodf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_remainderf4_avx2128(__m128, __m128); +IMPORT CONST __m128 Sleef_finz_remainderf4_avx2128(__m128, __m128); +IMPORT CONST Sleef___m128_2 Sleef_modff4_avx2128(__m128); +IMPORT CONST Sleef___m128_2 Sleef_finz_modff4_avx2128(__m128); +IMPORT CONST __m128 Sleef_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_lgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_tgammaf4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erff4_u10avx2128(__m128); +IMPORT CONST __m128 Sleef_erfcf4_u15avx2128(__m128); +IMPORT CONST __m128 Sleef_finz_erfcf4_u15avx2128(__m128); +IMPORT CONST int Sleef_getIntf4_avx2128(int); +IMPORT CONST int Sleef_finz_getIntf4_avx2128(int); +IMPORT CONST void *Sleef_getPtrf4_avx2128(int); +IMPORT CONST void *Sleef_finz_getPtrf4_avx2128(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10(__m512d); +IMPORT CONST __m512d 
Sleef_finz_cosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_finz_sincospid8_u35(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8(__m512d); +IMPORT CONST __m512d Sleef_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u35(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8(__m512d); +IMPORT CONST __m512d Sleef_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8(__m512d); +IMPORT CONST __m512d Sleef_floord8(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8(__m512d); +IMPORT CONST __m512d Sleef_ceild8(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8(__m512d); +IMPORT CONST __m512d Sleef_roundd8(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8(__m512d); +IMPORT CONST __m512d Sleef_rintd8(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8(__m512d); +IMPORT CONST __m512d Sleef_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15(__m512d); +IMPORT CONST int Sleef_getIntd8(int); +IMPORT CONST void *Sleef_getPtrd8(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35(__m512); +IMPORT CONST __m512 
Sleef_finz_sinf16_u35(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u35(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10(__m512); +IMPORT CONST __m512 Sleef_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10(__m512); +IMPORT CONST __m512 Sleef_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05(__m512); +IMPORT CONST __m512 Sleef_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16(__m512); +IMPORT CONST __m512 Sleef_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16(__m512); +IMPORT CONST __m512 Sleef_floorf16(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16(__m512); +IMPORT CONST __m512 Sleef_ceilf16(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16(__m512); +IMPORT CONST __m512 Sleef_roundf16(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16(__m512); +IMPORT CONST __m512 Sleef_rintf16(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16(__m512); +IMPORT CONST __m512 Sleef_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16(__m512, __m512); +IMPORT CONST __m512 Sleef_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16(__m512); +IMPORT CONST __m512 Sleef_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16(__m512); +IMPORT CONST __m512 
Sleef_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10(__m512); +IMPORT CONST __m512 Sleef_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15(__m512); +IMPORT CONST int Sleef_getIntf16(int); +IMPORT CONST int Sleef_finz_getIntf16(int); +IMPORT CONST void *Sleef_getPtrf16(int); +IMPORT CONST void *Sleef_finz_getPtrf16(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asind8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acosd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atand8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_atan2d8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_logd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cbrtd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_powd8_u10avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512f(__m512d); 
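/* Editorial example — not part of the vendored header. Functions that
   produce two results per lane (sincos, sincospi, modf) return the
   two-member struct declared above; for the AVX-512 double variants that
   is Sleef___m512d_2, with .x holding the first result (here the sines)
   and .y the second (the cosines). A sketch, assuming an AVX-512F host
   and `cc -mavx512f example.c -lsleef`: */
#include <stdio.h>
#include <immintrin.h>
#include <sleef.h>

int main(void) {
    __m512d t = _mm512_set1_pd(0.25);                  /* eight copies of 0.25  */
    Sleef___m512d_2 sc = Sleef_sincosd8_u35avx512f(t); /* sin and cos, one call */
    double s[8], c[8];
    _mm512_storeu_pd(s, sc.x);
    _mm512_storeu_pd(c, sc.y);
    printf("sin(0.25)=%f cos(0.25)=%f\n", s[0], c[0]);
    return 0;
}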
+IMPORT CONST __m512d Sleef_finz_coshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_coshd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tanhd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastsind8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fastcosd8_u3500avx512f(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fastpowd8_u3500avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_asinhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_acoshd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_atanhd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_exp10d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_expm1d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log10d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log2d8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_log1pd8_u10avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u05avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u35avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_sincospid8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sinpid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_cospid8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m512d Sleef_finz_ldexpd8_avx512f(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_ilogbd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmad8_avx512f(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_sqrtd8_u05avx512f(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512f(__m512d); +IMPORT CONST 
__m512d Sleef_finz_sqrtd8_u35avx512f(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u05avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_hypotd8_u35avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_fabsd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_copysignd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmaxd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmind8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fdimd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_truncd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_floord8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_ceild8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_roundd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_rintd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_nextafterd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_frfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m256i Sleef_finz_expfrexpd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_fmodd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST __m512d Sleef_finz_remainderd8_avx512f(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512f(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_finz_modfd8_avx512f(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_lgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_tgammad8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfd8_u10avx512f(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512f(__m512d); +IMPORT CONST __m512d Sleef_finz_erfcd8_u15avx512f(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512f(int); +IMPORT CONST void *Sleef_getPtrd8_avx512f(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 
Sleef_finz_asinf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acosf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_atan2f16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_logf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cbrtf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_powf16_u10avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_coshf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tanhf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastsinf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fastcosf16_u3500avx512f(__m512); +IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fastpowf16_u3500avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_asinhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_acoshf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_atanhf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u10avx512f(__m512); +IMPORT CONST 
__m512 Sleef_finz_exp2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_exp10f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_expm1f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log10f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log2f16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_log1pf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_log1pf16_u10avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u05avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_sincospif16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sinpif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_cospif16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaf16_avx512f(__m512, __m512, __m512); +IMPORT CONST __m512 Sleef_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u05avx512f(__m512); +IMPORT CONST __m512 Sleef_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_sqrtf16_u35avx512f(__m512); +IMPORT CONST __m512 Sleef_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u05avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_hypotf16_u35avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_fabsf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_copysignf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmaxf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fminf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fdimf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_truncf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_floorf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_ceilf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_roundf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_rintf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_nextafterf16_avx512f(__m512, __m512); +IMPORT CONST 
__m512 Sleef_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_frfrexpf16_avx512f(__m512); +IMPORT CONST __m512 Sleef_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_fmodf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_remainderf16_avx512f(__m512, __m512); +IMPORT CONST __m512 Sleef_finz_remainderf16_avx512f(__m512, __m512); +IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512f(__m512); +IMPORT CONST Sleef___m512_2 Sleef_finz_modff16_avx512f(__m512); +IMPORT CONST __m512 Sleef_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_lgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_tgammaf16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erff16_u10avx512f(__m512); +IMPORT CONST __m512 Sleef_erfcf16_u15avx512f(__m512); +IMPORT CONST __m512 Sleef_finz_erfcf16_u15avx512f(__m512); +IMPORT CONST int Sleef_getIntf16_avx512f(int); +IMPORT CONST int Sleef_finz_getIntf16_avx512f(int); +IMPORT CONST void *Sleef_getPtrf16_avx512f(int); +IMPORT CONST void *Sleef_finz_getPtrf16_avx512f(int); +#endif +#ifdef __AVX512F__ + +#ifndef Sleef___m512d_2_DEFINED +typedef struct { + __m512d x, y; +} Sleef___m512d_2; +#define Sleef___m512d_2_DEFINED +#endif + +IMPORT CONST __m512d Sleef_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asind8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acosd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atand8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d 
Sleef_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_atan2d8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_logd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cbrtd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_powd8_u10avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_coshd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tanhd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastsind8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fastcosd8_u3500avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fastpowd8_u3500avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_asinhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_acoshd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_atanhd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_exp10d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_expm1d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log10d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log2d8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_log1pd8_u10avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u05avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST Sleef___m512d_2 Sleef_cinz_sincospid8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sinpid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_cospid8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m512d Sleef_cinz_ldexpd8_avx512fnofma(__m512d, __m256i); +IMPORT CONST __m256i Sleef_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_ilogbd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmad8_avx512fnofma(__m512d, __m512d, __m512d); +IMPORT CONST __m512d Sleef_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u05avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_sqrtd8_u35avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u05avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_hypotd8_u35avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_fabsd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_copysignd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmaxd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmind8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fdimd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_truncd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_floord8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_ceild8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_roundd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_rintd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_nextafterd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_frfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m256i Sleef_cinz_expfrexpd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_fmodd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST __m512d Sleef_cinz_remainderd8_avx512fnofma(__m512d, __m512d); +IMPORT CONST Sleef___m512d_2 Sleef_modfd8_avx512fnofma(__m512d); +IMPORT CONST 
Sleef___m512d_2 Sleef_cinz_modfd8_avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_lgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_tgammad8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfd8_u10avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST __m512d Sleef_cinz_erfcd8_u15avx512fnofma(__m512d); +IMPORT CONST int Sleef_getIntd8_avx512fnofma(int); +IMPORT CONST void *Sleef_getPtrd8_avx512fnofma(int); + +#ifndef Sleef___m512_2_DEFINED +typedef struct { + __m512 x, y; +} Sleef___m512_2; +#define Sleef___m512_2_DEFINED +#endif + +IMPORT CONST __m512 Sleef_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u35avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u35avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_sinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST Sleef___m512_2 Sleef_cinz_sincosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_tanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_asinf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_acosf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_atanf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_cinz_atan2f16_u10avx512fnofma(__m512, __m512); +IMPORT CONST __m512 Sleef_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_logf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_cbrtf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_cinz_expf16_u10avx512fnofma(__m512); +IMPORT CONST __m512 Sleef_powf16_u10avx512fnofma(__m512, __m512); 
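/* Editorial example — not part of the vendored header. Note the mixed
   vector widths in the signatures above: for the d8 (8 x double) variants,
   ldexp takes its integer exponents as a __m256i (8 x 32-bit int) and
   ilogb returns one. A sketch under the same assumptions as above
   (AVX-512F host, linked against libsleef): */
#include <stdio.h>
#include <stdint.h>
#include <immintrin.h>
#include <sleef.h>

int main(void) {
    __m512d x = _mm512_set1_pd(6.0);
    __m256i e = _mm256_set1_epi32(3);
    __m512d y = Sleef_ldexpd8_avx512fnofma(x, e); /* 6.0 * 2^3 = 48.0 */
    __m256i k = Sleef_ilogbd8_avx512fnofma(y);    /* ilogb(48.0) = 5  */
    double out[8]; int32_t ks[8];
    _mm512_storeu_pd(out, y);
    _mm256_storeu_si256((__m256i *)ks, k);
    printf("ldexp -> %f, ilogb -> %d\n", out[0], (int)ks[0]);
    return 0;
}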
+IMPORT CONST __m512 Sleef_cinz_powf16_u10avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_sinhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_sinhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_coshf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_coshf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_tanhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_tanhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_sinhf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_sinhf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_coshf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_coshf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_tanhf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_tanhf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_fastsinf16_u3500avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_fastsinf16_u3500avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_fastcosf16_u3500avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_fastcosf16_u3500avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_fastpowf16_u3500avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_fastpowf16_u3500avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_asinhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_asinhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_acoshf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_acoshf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_atanhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_atanhf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_exp2f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_exp2f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_exp2f16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_exp2f16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_exp10f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_exp10f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_exp10f16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_exp10f16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_expm1f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_expm1f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_log10f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_log10f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_log2f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_log2f16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_log2f16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_log2f16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_log1pf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_log1pf16_u10avx512fnofma(__m512);
+IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u05avx512fnofma(__m512);
+IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u05avx512fnofma(__m512);
+IMPORT CONST Sleef___m512_2 Sleef_sincospif16_u35avx512fnofma(__m512);
+IMPORT CONST Sleef___m512_2 Sleef_cinz_sincospif16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_sinpif16_u05avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_sinpif16_u05avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cospif16_u05avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_cospif16_u05avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_fmaf16_avx512fnofma(__m512, __m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_fmaf16_avx512fnofma(__m512, __m512, __m512);
+IMPORT CONST __m512 Sleef_sqrtf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_sqrtf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_sqrtf16_u05avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_sqrtf16_u05avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_sqrtf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_sqrtf16_u35avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_hypotf16_u05avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_hypotf16_u05avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_hypotf16_u35avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_hypotf16_u35avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_fabsf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_fabsf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_copysignf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_copysignf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_fmaxf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_fmaxf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_fminf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_fminf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_fdimf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_fdimf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_truncf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_truncf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_floorf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_floorf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_ceilf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_ceilf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_roundf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_roundf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_rintf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_rintf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_nextafterf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_nextafterf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_frfrexpf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_frfrexpf16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_fmodf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_fmodf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_remainderf16_avx512fnofma(__m512, __m512);
+IMPORT CONST __m512 Sleef_cinz_remainderf16_avx512fnofma(__m512, __m512);
+IMPORT CONST Sleef___m512_2 Sleef_modff16_avx512fnofma(__m512);
+IMPORT CONST Sleef___m512_2 Sleef_cinz_modff16_avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_lgammaf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_lgammaf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_tgammaf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_tgammaf16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_erff16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_erff16_u10avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_erfcf16_u15avx512fnofma(__m512);
+IMPORT CONST __m512 Sleef_cinz_erfcf16_u15avx512fnofma(__m512);
+IMPORT CONST int Sleef_getIntf16_avx512fnofma(int);
+IMPORT CONST int Sleef_cinz_getIntf16_avx512fnofma(int);
+IMPORT CONST void *Sleef_getPtrf16_avx512fnofma(int);
+IMPORT CONST void *Sleef_cinz_getPtrf16_avx512fnofma(int);
+#endif
+#ifdef __STDC__
+
+#ifndef Sleef_double_2_DEFINED
+typedef struct {
+  double x, y;
+} Sleef_double_2;
+#define Sleef_double_2_DEFINED
+#endif
+
+IMPORT CONST double Sleef_sind1_u35purec(double);
+IMPORT CONST double Sleef_cinz_sind1_u35purec(double);
+IMPORT CONST double Sleef_cosd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_cosd1_u35purec(double);
+IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purec(double);
+IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u35purec(double);
+IMPORT CONST double Sleef_tand1_u35purec(double);
+IMPORT CONST double Sleef_cinz_tand1_u35purec(double);
+IMPORT CONST double Sleef_asind1_u35purec(double);
+IMPORT CONST double Sleef_cinz_asind1_u35purec(double);
+IMPORT CONST double Sleef_acosd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_acosd1_u35purec(double);
+IMPORT CONST double Sleef_atand1_u35purec(double);
+IMPORT CONST double Sleef_cinz_atand1_u35purec(double);
+IMPORT CONST double Sleef_atan2d1_u35purec(double, double);
+IMPORT CONST double Sleef_cinz_atan2d1_u35purec(double, double);
+IMPORT CONST double Sleef_logd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_logd1_u35purec(double);
+IMPORT CONST double Sleef_cbrtd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_cbrtd1_u35purec(double);
+IMPORT CONST double Sleef_sind1_u10purec(double);
+IMPORT CONST double Sleef_cinz_sind1_u10purec(double);
+IMPORT CONST double Sleef_cosd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_cosd1_u10purec(double);
+IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purec(double);
+IMPORT CONST Sleef_double_2 Sleef_cinz_sincosd1_u10purec(double);
+IMPORT CONST double Sleef_tand1_u10purec(double);
+IMPORT CONST double Sleef_cinz_tand1_u10purec(double);
+IMPORT CONST double Sleef_asind1_u10purec(double);
+IMPORT CONST double Sleef_cinz_asind1_u10purec(double);
+IMPORT CONST double Sleef_acosd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_acosd1_u10purec(double);
+IMPORT CONST double Sleef_atand1_u10purec(double);
+IMPORT CONST double Sleef_cinz_atand1_u10purec(double);
+IMPORT CONST double Sleef_atan2d1_u10purec(double, double);
+IMPORT CONST double Sleef_cinz_atan2d1_u10purec(double, double);
+IMPORT CONST double Sleef_logd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_logd1_u10purec(double);
+IMPORT CONST double Sleef_cbrtd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_cbrtd1_u10purec(double);
+IMPORT CONST double Sleef_expd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_expd1_u10purec(double);
+IMPORT CONST double Sleef_powd1_u10purec(double, double);
+IMPORT CONST double Sleef_cinz_powd1_u10purec(double, double);
+IMPORT CONST double Sleef_sinhd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_sinhd1_u10purec(double);
+IMPORT CONST double Sleef_coshd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_coshd1_u10purec(double);
+IMPORT CONST double Sleef_tanhd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_tanhd1_u10purec(double);
+IMPORT CONST double Sleef_sinhd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_sinhd1_u35purec(double);
+IMPORT CONST double Sleef_coshd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_coshd1_u35purec(double);
+IMPORT CONST double Sleef_tanhd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_tanhd1_u35purec(double);
+IMPORT CONST double Sleef_fastsind1_u3500purec(double);
+IMPORT CONST double Sleef_cinz_fastsind1_u3500purec(double);
+IMPORT CONST double Sleef_fastcosd1_u3500purec(double);
+IMPORT CONST double Sleef_cinz_fastcosd1_u3500purec(double);
+IMPORT CONST double Sleef_fastpowd1_u3500purec(double, double);
+IMPORT CONST double Sleef_cinz_fastpowd1_u3500purec(double, double);
+IMPORT CONST double Sleef_asinhd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_asinhd1_u10purec(double);
+IMPORT CONST double Sleef_acoshd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_acoshd1_u10purec(double);
+IMPORT CONST double Sleef_atanhd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_atanhd1_u10purec(double);
+IMPORT CONST double Sleef_exp2d1_u10purec(double);
+IMPORT CONST double Sleef_cinz_exp2d1_u10purec(double);
+IMPORT CONST double Sleef_exp2d1_u35purec(double);
+IMPORT CONST double Sleef_cinz_exp2d1_u35purec(double);
+IMPORT CONST double Sleef_exp10d1_u10purec(double);
+IMPORT CONST double Sleef_cinz_exp10d1_u10purec(double);
+IMPORT CONST double Sleef_exp10d1_u35purec(double);
+IMPORT CONST double Sleef_cinz_exp10d1_u35purec(double);
+IMPORT CONST double Sleef_expm1d1_u10purec(double);
+IMPORT CONST double Sleef_cinz_expm1d1_u10purec(double);
+IMPORT CONST double Sleef_log10d1_u10purec(double);
+IMPORT CONST double Sleef_cinz_log10d1_u10purec(double);
+IMPORT CONST double Sleef_log2d1_u10purec(double);
+IMPORT CONST double Sleef_cinz_log2d1_u10purec(double);
+IMPORT CONST double Sleef_log2d1_u35purec(double);
+IMPORT CONST double Sleef_cinz_log2d1_u35purec(double);
+IMPORT CONST double Sleef_log1pd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_log1pd1_u10purec(double);
+IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purec(double);
+IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u05purec(double);
+IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purec(double);
+IMPORT CONST Sleef_double_2 Sleef_cinz_sincospid1_u35purec(double);
+IMPORT CONST double Sleef_sinpid1_u05purec(double);
+IMPORT CONST double Sleef_cinz_sinpid1_u05purec(double);
+IMPORT CONST double Sleef_cospid1_u05purec(double);
+IMPORT CONST double Sleef_cinz_cospid1_u05purec(double);
+IMPORT CONST double Sleef_ldexpd1_purec(double, int32_t);
+IMPORT CONST double Sleef_cinz_ldexpd1_purec(double, int32_t);
+IMPORT CONST int32_t Sleef_ilogbd1_purec(double);
+IMPORT CONST int32_t Sleef_cinz_ilogbd1_purec(double);
+IMPORT CONST double Sleef_fmad1_purec(double, double, double);
+IMPORT CONST double Sleef_cinz_fmad1_purec(double, double, double);
+IMPORT CONST double Sleef_sqrtd1_purec(double);
+IMPORT CONST double Sleef_cinz_sqrtd1_purec(double);
+IMPORT CONST double Sleef_sqrtd1_u05purec(double);
+IMPORT CONST double Sleef_cinz_sqrtd1_u05purec(double);
+IMPORT CONST double Sleef_sqrtd1_u35purec(double);
+IMPORT CONST double Sleef_cinz_sqrtd1_u35purec(double);
+IMPORT CONST double Sleef_hypotd1_u05purec(double, double);
+IMPORT CONST double Sleef_cinz_hypotd1_u05purec(double, double);
+IMPORT CONST double Sleef_hypotd1_u35purec(double, double);
+IMPORT CONST double Sleef_cinz_hypotd1_u35purec(double, double);
+IMPORT CONST double Sleef_fabsd1_purec(double);
+IMPORT CONST double Sleef_cinz_fabsd1_purec(double);
+IMPORT CONST double Sleef_copysignd1_purec(double, double);
+IMPORT CONST double Sleef_cinz_copysignd1_purec(double, double);
+IMPORT CONST double Sleef_fmaxd1_purec(double, double);
+IMPORT CONST double Sleef_cinz_fmaxd1_purec(double, double);
+IMPORT CONST double Sleef_fmind1_purec(double, double);
+IMPORT CONST double Sleef_cinz_fmind1_purec(double, double);
+IMPORT CONST double Sleef_fdimd1_purec(double, double);
+IMPORT CONST double Sleef_cinz_fdimd1_purec(double, double);
+IMPORT CONST double Sleef_truncd1_purec(double);
+IMPORT CONST double Sleef_cinz_truncd1_purec(double);
+IMPORT CONST double Sleef_floord1_purec(double);
+IMPORT CONST double Sleef_cinz_floord1_purec(double);
+IMPORT CONST double Sleef_ceild1_purec(double);
+IMPORT CONST double Sleef_cinz_ceild1_purec(double);
+IMPORT CONST double Sleef_roundd1_purec(double);
+IMPORT CONST double Sleef_cinz_roundd1_purec(double);
+IMPORT CONST double Sleef_rintd1_purec(double);
+IMPORT CONST double Sleef_cinz_rintd1_purec(double);
+IMPORT CONST double Sleef_nextafterd1_purec(double, double);
+IMPORT CONST double Sleef_cinz_nextafterd1_purec(double, double);
+IMPORT CONST double Sleef_frfrexpd1_purec(double);
+IMPORT CONST double Sleef_cinz_frfrexpd1_purec(double);
+IMPORT CONST int32_t Sleef_expfrexpd1_purec(double);
+IMPORT CONST int32_t Sleef_cinz_expfrexpd1_purec(double);
+IMPORT CONST double Sleef_fmodd1_purec(double, double);
+IMPORT CONST double Sleef_cinz_fmodd1_purec(double, double);
+IMPORT CONST double Sleef_remainderd1_purec(double, double);
+IMPORT CONST double Sleef_cinz_remainderd1_purec(double, double);
+IMPORT CONST Sleef_double_2 Sleef_modfd1_purec(double);
+IMPORT CONST Sleef_double_2 Sleef_cinz_modfd1_purec(double);
+IMPORT CONST double Sleef_lgammad1_u10purec(double);
+IMPORT CONST double Sleef_cinz_lgammad1_u10purec(double);
+IMPORT CONST double Sleef_tgammad1_u10purec(double);
+IMPORT CONST double Sleef_cinz_tgammad1_u10purec(double);
+IMPORT CONST double Sleef_erfd1_u10purec(double);
+IMPORT CONST double Sleef_cinz_erfd1_u10purec(double);
+IMPORT CONST double Sleef_erfcd1_u15purec(double);
+IMPORT CONST double Sleef_cinz_erfcd1_u15purec(double);
+IMPORT CONST int Sleef_getIntd1_purec(int);
+IMPORT CONST void *Sleef_getPtrd1_purec(int);
+
+#ifndef Sleef_float_2_DEFINED
+typedef struct {
+  float x, y;
+} Sleef_float_2;
+#define Sleef_float_2_DEFINED
+#endif
+
+IMPORT CONST float Sleef_sinf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_sinf1_u35purec(float);
+IMPORT CONST float Sleef_cosf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_cosf1_u35purec(float);
+IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purec(float);
+IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u35purec(float);
+IMPORT CONST float Sleef_tanf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_tanf1_u35purec(float);
+IMPORT CONST float Sleef_asinf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_asinf1_u35purec(float);
+IMPORT CONST float Sleef_acosf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_acosf1_u35purec(float);
+IMPORT CONST float Sleef_atanf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_atanf1_u35purec(float);
+IMPORT CONST float Sleef_atan2f1_u35purec(float, float);
+IMPORT CONST float Sleef_cinz_atan2f1_u35purec(float, float);
+IMPORT CONST float Sleef_logf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_logf1_u35purec(float);
+IMPORT CONST float Sleef_cbrtf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_cbrtf1_u35purec(float);
+IMPORT CONST float Sleef_sinf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_sinf1_u10purec(float);
+IMPORT CONST float Sleef_cosf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_cosf1_u10purec(float);
+IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purec(float);
+IMPORT CONST Sleef_float_2 Sleef_cinz_sincosf1_u10purec(float);
+IMPORT CONST float Sleef_tanf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_tanf1_u10purec(float);
+IMPORT CONST float Sleef_asinf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_asinf1_u10purec(float);
+IMPORT CONST float Sleef_acosf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_acosf1_u10purec(float);
+IMPORT CONST float Sleef_atanf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_atanf1_u10purec(float);
+IMPORT CONST float Sleef_atan2f1_u10purec(float, float);
+IMPORT CONST float Sleef_cinz_atan2f1_u10purec(float, float);
+IMPORT CONST float Sleef_logf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_logf1_u10purec(float);
+IMPORT CONST float Sleef_cbrtf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_cbrtf1_u10purec(float);
+IMPORT CONST float Sleef_expf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_expf1_u10purec(float);
+IMPORT CONST float Sleef_powf1_u10purec(float, float);
+IMPORT CONST float Sleef_cinz_powf1_u10purec(float, float);
+IMPORT CONST float Sleef_sinhf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_sinhf1_u10purec(float);
+IMPORT CONST float Sleef_coshf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_coshf1_u10purec(float);
+IMPORT CONST float Sleef_tanhf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_tanhf1_u10purec(float);
+IMPORT CONST float Sleef_sinhf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_sinhf1_u35purec(float);
+IMPORT CONST float Sleef_coshf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_coshf1_u35purec(float);
+IMPORT CONST float Sleef_tanhf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_tanhf1_u35purec(float);
+IMPORT CONST float Sleef_fastsinf1_u3500purec(float);
+IMPORT CONST float Sleef_cinz_fastsinf1_u3500purec(float);
+IMPORT CONST float Sleef_fastcosf1_u3500purec(float);
+IMPORT CONST float Sleef_cinz_fastcosf1_u3500purec(float);
+IMPORT CONST float Sleef_fastpowf1_u3500purec(float, float);
+IMPORT CONST float Sleef_cinz_fastpowf1_u3500purec(float, float);
+IMPORT CONST float Sleef_asinhf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_asinhf1_u10purec(float);
+IMPORT CONST float Sleef_acoshf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_acoshf1_u10purec(float);
+IMPORT CONST float Sleef_atanhf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_atanhf1_u10purec(float);
+IMPORT CONST float Sleef_exp2f1_u10purec(float);
+IMPORT CONST float Sleef_cinz_exp2f1_u10purec(float);
+IMPORT CONST float Sleef_exp2f1_u35purec(float);
+IMPORT CONST float Sleef_cinz_exp2f1_u35purec(float);
+IMPORT CONST float Sleef_exp10f1_u10purec(float);
+IMPORT CONST float Sleef_cinz_exp10f1_u10purec(float);
+IMPORT CONST float Sleef_exp10f1_u35purec(float);
+IMPORT CONST float Sleef_cinz_exp10f1_u35purec(float);
+IMPORT CONST float Sleef_expm1f1_u10purec(float);
+IMPORT CONST float Sleef_cinz_expm1f1_u10purec(float);
+IMPORT CONST float Sleef_log10f1_u10purec(float);
+IMPORT CONST float Sleef_cinz_log10f1_u10purec(float);
+IMPORT CONST float Sleef_log2f1_u10purec(float);
+IMPORT CONST float Sleef_cinz_log2f1_u10purec(float);
+IMPORT CONST float Sleef_log2f1_u35purec(float);
+IMPORT CONST float Sleef_cinz_log2f1_u35purec(float);
+IMPORT CONST float Sleef_log1pf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_log1pf1_u10purec(float);
+IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purec(float);
+IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u05purec(float);
+IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purec(float);
+IMPORT CONST Sleef_float_2 Sleef_cinz_sincospif1_u35purec(float);
+IMPORT CONST float Sleef_sinpif1_u05purec(float);
+IMPORT CONST float Sleef_cinz_sinpif1_u05purec(float);
+IMPORT CONST float Sleef_cospif1_u05purec(float);
+IMPORT CONST float Sleef_cinz_cospif1_u05purec(float);
+IMPORT CONST float Sleef_fmaf1_purec(float, float, float);
+IMPORT CONST float Sleef_cinz_fmaf1_purec(float, float, float);
+IMPORT CONST float Sleef_sqrtf1_purec(float);
+IMPORT CONST float Sleef_cinz_sqrtf1_purec(float);
+IMPORT CONST float Sleef_sqrtf1_u05purec(float);
+IMPORT CONST float Sleef_cinz_sqrtf1_u05purec(float);
+IMPORT CONST float Sleef_sqrtf1_u35purec(float);
+IMPORT CONST float Sleef_cinz_sqrtf1_u35purec(float);
+IMPORT CONST float Sleef_hypotf1_u05purec(float, float);
+IMPORT CONST float Sleef_cinz_hypotf1_u05purec(float, float);
+IMPORT CONST float Sleef_hypotf1_u35purec(float, float);
+IMPORT CONST float Sleef_cinz_hypotf1_u35purec(float, float);
+IMPORT CONST float Sleef_fabsf1_purec(float);
+IMPORT CONST float Sleef_cinz_fabsf1_purec(float);
+IMPORT CONST float Sleef_copysignf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_copysignf1_purec(float, float);
+IMPORT CONST float Sleef_fmaxf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_fmaxf1_purec(float, float);
+IMPORT CONST float Sleef_fminf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_fminf1_purec(float, float);
+IMPORT CONST float Sleef_fdimf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_fdimf1_purec(float, float);
+IMPORT CONST float Sleef_truncf1_purec(float);
+IMPORT CONST float Sleef_cinz_truncf1_purec(float);
+IMPORT CONST float Sleef_floorf1_purec(float);
+IMPORT CONST float Sleef_cinz_floorf1_purec(float);
+IMPORT CONST float Sleef_ceilf1_purec(float);
+IMPORT CONST float Sleef_cinz_ceilf1_purec(float);
+IMPORT CONST float Sleef_roundf1_purec(float);
+IMPORT CONST float Sleef_cinz_roundf1_purec(float);
+IMPORT CONST float Sleef_rintf1_purec(float);
+IMPORT CONST float Sleef_cinz_rintf1_purec(float);
+IMPORT CONST float Sleef_nextafterf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_nextafterf1_purec(float, float);
+IMPORT CONST float Sleef_frfrexpf1_purec(float);
+IMPORT CONST float Sleef_cinz_frfrexpf1_purec(float);
+IMPORT CONST float Sleef_fmodf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_fmodf1_purec(float, float);
+IMPORT CONST float Sleef_remainderf1_purec(float, float);
+IMPORT CONST float Sleef_cinz_remainderf1_purec(float, float);
+IMPORT CONST Sleef_float_2 Sleef_modff1_purec(float);
+IMPORT CONST Sleef_float_2 Sleef_cinz_modff1_purec(float);
+IMPORT CONST float Sleef_lgammaf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_lgammaf1_u10purec(float);
+IMPORT CONST float Sleef_tgammaf1_u10purec(float);
+IMPORT CONST float Sleef_cinz_tgammaf1_u10purec(float);
+IMPORT CONST float Sleef_erff1_u10purec(float);
+IMPORT CONST float Sleef_cinz_erff1_u10purec(float);
+IMPORT CONST float Sleef_erfcf1_u15purec(float);
+IMPORT CONST float Sleef_cinz_erfcf1_u15purec(float);
+IMPORT CONST int Sleef_getIntf1_purec(int);
+IMPORT CONST int Sleef_cinz_getIntf1_purec(int);
+IMPORT CONST void *Sleef_getPtrf1_purec(int);
+IMPORT CONST void *Sleef_cinz_getPtrf1_purec(int);
+#endif
+#ifdef FP_FAST_FMA
+
+#ifndef Sleef_double_2_DEFINED
+typedef struct {
+  double x, y;
+} Sleef_double_2;
+#define Sleef_double_2_DEFINED
+#endif
+
+IMPORT CONST double Sleef_sind1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_sind1_u35purecfma(double);
+IMPORT CONST double Sleef_cosd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_cosd1_u35purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_sincosd1_u35purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u35purecfma(double);
+IMPORT CONST double Sleef_tand1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_tand1_u35purecfma(double);
+IMPORT CONST double Sleef_asind1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_asind1_u35purecfma(double);
+IMPORT CONST double Sleef_acosd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_acosd1_u35purecfma(double);
+IMPORT CONST double Sleef_atand1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_atand1_u35purecfma(double);
+IMPORT CONST double Sleef_atan2d1_u35purecfma(double, double);
+IMPORT CONST double Sleef_finz_atan2d1_u35purecfma(double, double);
+IMPORT CONST double Sleef_logd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_logd1_u35purecfma(double);
+IMPORT CONST double Sleef_cbrtd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_cbrtd1_u35purecfma(double);
+IMPORT CONST double Sleef_sind1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_sind1_u10purecfma(double);
+IMPORT CONST double Sleef_cosd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_cosd1_u10purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_sincosd1_u10purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_finz_sincosd1_u10purecfma(double);
+IMPORT CONST double Sleef_tand1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_tand1_u10purecfma(double);
+IMPORT CONST double Sleef_asind1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_asind1_u10purecfma(double);
+IMPORT CONST double Sleef_acosd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_acosd1_u10purecfma(double);
+IMPORT CONST double Sleef_atand1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_atand1_u10purecfma(double);
+IMPORT CONST double Sleef_atan2d1_u10purecfma(double, double);
+IMPORT CONST double Sleef_finz_atan2d1_u10purecfma(double, double);
+IMPORT CONST double Sleef_logd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_logd1_u10purecfma(double);
+IMPORT CONST double Sleef_cbrtd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_cbrtd1_u10purecfma(double);
+IMPORT CONST double Sleef_expd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_expd1_u10purecfma(double);
+IMPORT CONST double Sleef_powd1_u10purecfma(double, double);
+IMPORT CONST double Sleef_finz_powd1_u10purecfma(double, double);
+IMPORT CONST double Sleef_sinhd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_sinhd1_u10purecfma(double);
+IMPORT CONST double Sleef_coshd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_coshd1_u10purecfma(double);
+IMPORT CONST double Sleef_tanhd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_tanhd1_u10purecfma(double);
+IMPORT CONST double Sleef_sinhd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_sinhd1_u35purecfma(double);
+IMPORT CONST double Sleef_coshd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_coshd1_u35purecfma(double);
+IMPORT CONST double Sleef_tanhd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_tanhd1_u35purecfma(double);
+IMPORT CONST double Sleef_fastsind1_u3500purecfma(double);
+IMPORT CONST double Sleef_finz_fastsind1_u3500purecfma(double);
+IMPORT CONST double Sleef_fastcosd1_u3500purecfma(double);
+IMPORT CONST double Sleef_finz_fastcosd1_u3500purecfma(double);
+IMPORT CONST double Sleef_fastpowd1_u3500purecfma(double, double);
+IMPORT CONST double Sleef_finz_fastpowd1_u3500purecfma(double, double);
+IMPORT CONST double Sleef_asinhd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_asinhd1_u10purecfma(double);
+IMPORT CONST double Sleef_acoshd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_acoshd1_u10purecfma(double);
+IMPORT CONST double Sleef_atanhd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_atanhd1_u10purecfma(double);
+IMPORT CONST double Sleef_exp2d1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_exp2d1_u10purecfma(double);
+IMPORT CONST double Sleef_exp2d1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_exp2d1_u35purecfma(double);
+IMPORT CONST double Sleef_exp10d1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_exp10d1_u10purecfma(double);
+IMPORT CONST double Sleef_exp10d1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_exp10d1_u35purecfma(double);
+IMPORT CONST double Sleef_expm1d1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_expm1d1_u10purecfma(double);
+IMPORT CONST double Sleef_log10d1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_log10d1_u10purecfma(double);
+IMPORT CONST double Sleef_log2d1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_log2d1_u10purecfma(double);
+IMPORT CONST double Sleef_log2d1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_log2d1_u35purecfma(double);
+IMPORT CONST double Sleef_log1pd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_log1pd1_u10purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_sincospid1_u05purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u05purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_sincospid1_u35purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_finz_sincospid1_u35purecfma(double);
+IMPORT CONST double Sleef_sinpid1_u05purecfma(double);
+IMPORT CONST double Sleef_finz_sinpid1_u05purecfma(double);
+IMPORT CONST double Sleef_cospid1_u05purecfma(double);
+IMPORT CONST double Sleef_finz_cospid1_u05purecfma(double);
+IMPORT CONST double Sleef_ldexpd1_purecfma(double, int32_t);
+IMPORT CONST double Sleef_finz_ldexpd1_purecfma(double, int32_t);
+IMPORT CONST int32_t Sleef_ilogbd1_purecfma(double);
+IMPORT CONST int32_t Sleef_finz_ilogbd1_purecfma(double);
+IMPORT CONST double Sleef_fmad1_purecfma(double, double, double);
+IMPORT CONST double Sleef_finz_fmad1_purecfma(double, double, double);
+IMPORT CONST double Sleef_sqrtd1_purecfma(double);
+IMPORT CONST double Sleef_finz_sqrtd1_purecfma(double);
+IMPORT CONST double Sleef_sqrtd1_u05purecfma(double);
+IMPORT CONST double Sleef_finz_sqrtd1_u05purecfma(double);
+IMPORT CONST double Sleef_sqrtd1_u35purecfma(double);
+IMPORT CONST double Sleef_finz_sqrtd1_u35purecfma(double);
+IMPORT CONST double Sleef_hypotd1_u05purecfma(double, double);
+IMPORT CONST double Sleef_finz_hypotd1_u05purecfma(double, double);
+IMPORT CONST double Sleef_hypotd1_u35purecfma(double, double);
+IMPORT CONST double Sleef_finz_hypotd1_u35purecfma(double, double);
+IMPORT CONST double Sleef_fabsd1_purecfma(double);
+IMPORT CONST double Sleef_finz_fabsd1_purecfma(double);
+IMPORT CONST double Sleef_copysignd1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_copysignd1_purecfma(double, double);
+IMPORT CONST double Sleef_fmaxd1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_fmaxd1_purecfma(double, double);
+IMPORT CONST double Sleef_fmind1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_fmind1_purecfma(double, double);
+IMPORT CONST double Sleef_fdimd1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_fdimd1_purecfma(double, double);
+IMPORT CONST double Sleef_truncd1_purecfma(double);
+IMPORT CONST double Sleef_finz_truncd1_purecfma(double);
+IMPORT CONST double Sleef_floord1_purecfma(double);
+IMPORT CONST double Sleef_finz_floord1_purecfma(double);
+IMPORT CONST double Sleef_ceild1_purecfma(double);
+IMPORT CONST double Sleef_finz_ceild1_purecfma(double);
+IMPORT CONST double Sleef_roundd1_purecfma(double);
+IMPORT CONST double Sleef_finz_roundd1_purecfma(double);
+IMPORT CONST double Sleef_rintd1_purecfma(double);
+IMPORT CONST double Sleef_finz_rintd1_purecfma(double);
+IMPORT CONST double Sleef_nextafterd1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_nextafterd1_purecfma(double, double);
+IMPORT CONST double Sleef_frfrexpd1_purecfma(double);
+IMPORT CONST double Sleef_finz_frfrexpd1_purecfma(double);
+IMPORT CONST int32_t Sleef_expfrexpd1_purecfma(double);
+IMPORT CONST int32_t Sleef_finz_expfrexpd1_purecfma(double);
+IMPORT CONST double Sleef_fmodd1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_fmodd1_purecfma(double, double);
+IMPORT CONST double Sleef_remainderd1_purecfma(double, double);
+IMPORT CONST double Sleef_finz_remainderd1_purecfma(double, double);
+IMPORT CONST Sleef_double_2 Sleef_modfd1_purecfma(double);
+IMPORT CONST Sleef_double_2 Sleef_finz_modfd1_purecfma(double);
+IMPORT CONST double Sleef_lgammad1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_lgammad1_u10purecfma(double);
+IMPORT CONST double Sleef_tgammad1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_tgammad1_u10purecfma(double);
+IMPORT CONST double Sleef_erfd1_u10purecfma(double);
+IMPORT CONST double Sleef_finz_erfd1_u10purecfma(double);
+IMPORT CONST double Sleef_erfcd1_u15purecfma(double);
+IMPORT CONST double Sleef_finz_erfcd1_u15purecfma(double);
+IMPORT CONST int Sleef_getIntd1_purecfma(int);
+IMPORT CONST void *Sleef_getPtrd1_purecfma(int);
+
+#ifndef Sleef_float_2_DEFINED
+typedef struct {
+  float x, y;
+} Sleef_float_2;
+#define Sleef_float_2_DEFINED
+#endif
+
+IMPORT CONST float Sleef_sinf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_sinf1_u35purecfma(float);
+IMPORT CONST float Sleef_cosf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_cosf1_u35purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_sincosf1_u35purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u35purecfma(float);
+IMPORT CONST float Sleef_tanf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_tanf1_u35purecfma(float);
+IMPORT CONST float Sleef_asinf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_asinf1_u35purecfma(float);
+IMPORT CONST float Sleef_acosf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_acosf1_u35purecfma(float);
+IMPORT CONST float Sleef_atanf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_atanf1_u35purecfma(float);
+IMPORT CONST float Sleef_atan2f1_u35purecfma(float, float);
+IMPORT CONST float Sleef_finz_atan2f1_u35purecfma(float, float);
+IMPORT CONST float Sleef_logf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_logf1_u35purecfma(float);
+IMPORT CONST float Sleef_cbrtf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_cbrtf1_u35purecfma(float);
+IMPORT CONST float Sleef_sinf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_sinf1_u10purecfma(float);
+IMPORT CONST float Sleef_cosf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_cosf1_u10purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_sincosf1_u10purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_finz_sincosf1_u10purecfma(float);
+IMPORT CONST float Sleef_tanf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_tanf1_u10purecfma(float);
+IMPORT CONST float Sleef_asinf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_asinf1_u10purecfma(float);
+IMPORT CONST float Sleef_acosf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_acosf1_u10purecfma(float);
+IMPORT CONST float Sleef_atanf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_atanf1_u10purecfma(float);
+IMPORT CONST float Sleef_atan2f1_u10purecfma(float, float);
+IMPORT CONST float Sleef_finz_atan2f1_u10purecfma(float, float);
+IMPORT CONST float Sleef_logf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_logf1_u10purecfma(float);
+IMPORT CONST float Sleef_cbrtf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_cbrtf1_u10purecfma(float);
+IMPORT CONST float Sleef_expf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_expf1_u10purecfma(float);
+IMPORT CONST float Sleef_powf1_u10purecfma(float, float);
+IMPORT CONST float Sleef_finz_powf1_u10purecfma(float, float);
+IMPORT CONST float Sleef_sinhf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_sinhf1_u10purecfma(float);
+IMPORT CONST float Sleef_coshf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_coshf1_u10purecfma(float);
+IMPORT CONST float Sleef_tanhf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_tanhf1_u10purecfma(float);
+IMPORT CONST float Sleef_sinhf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_sinhf1_u35purecfma(float);
+IMPORT CONST float Sleef_coshf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_coshf1_u35purecfma(float);
+IMPORT CONST float Sleef_tanhf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_tanhf1_u35purecfma(float);
+IMPORT CONST float Sleef_fastsinf1_u3500purecfma(float);
+IMPORT CONST float Sleef_finz_fastsinf1_u3500purecfma(float);
+IMPORT CONST float Sleef_fastcosf1_u3500purecfma(float);
+IMPORT CONST float Sleef_finz_fastcosf1_u3500purecfma(float);
+IMPORT CONST float Sleef_fastpowf1_u3500purecfma(float, float);
+IMPORT CONST float Sleef_finz_fastpowf1_u3500purecfma(float, float);
+IMPORT CONST float Sleef_asinhf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_asinhf1_u10purecfma(float);
+IMPORT CONST float Sleef_acoshf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_acoshf1_u10purecfma(float);
+IMPORT CONST float Sleef_atanhf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_atanhf1_u10purecfma(float);
+IMPORT CONST float Sleef_exp2f1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_exp2f1_u10purecfma(float);
+IMPORT CONST float Sleef_exp2f1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_exp2f1_u35purecfma(float);
+IMPORT CONST float Sleef_exp10f1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_exp10f1_u10purecfma(float);
+IMPORT CONST float Sleef_exp10f1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_exp10f1_u35purecfma(float);
+IMPORT CONST float Sleef_expm1f1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_expm1f1_u10purecfma(float);
+IMPORT CONST float Sleef_log10f1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_log10f1_u10purecfma(float);
+IMPORT CONST float Sleef_log2f1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_log2f1_u10purecfma(float);
+IMPORT CONST float Sleef_log2f1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_log2f1_u35purecfma(float);
+IMPORT CONST float Sleef_log1pf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_log1pf1_u10purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_sincospif1_u05purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u05purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_sincospif1_u35purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_finz_sincospif1_u35purecfma(float);
+IMPORT CONST float Sleef_sinpif1_u05purecfma(float);
+IMPORT CONST float Sleef_finz_sinpif1_u05purecfma(float);
+IMPORT CONST float Sleef_cospif1_u05purecfma(float);
+IMPORT CONST float Sleef_finz_cospif1_u05purecfma(float);
+IMPORT CONST float Sleef_fmaf1_purecfma(float, float, float);
+IMPORT CONST float Sleef_finz_fmaf1_purecfma(float, float, float);
+IMPORT CONST float Sleef_sqrtf1_purecfma(float);
+IMPORT CONST float Sleef_finz_sqrtf1_purecfma(float);
+IMPORT CONST float Sleef_sqrtf1_u05purecfma(float);
+IMPORT CONST float Sleef_finz_sqrtf1_u05purecfma(float);
+IMPORT CONST float Sleef_sqrtf1_u35purecfma(float);
+IMPORT CONST float Sleef_finz_sqrtf1_u35purecfma(float);
+IMPORT CONST float Sleef_hypotf1_u05purecfma(float, float);
+IMPORT CONST float Sleef_finz_hypotf1_u05purecfma(float, float);
+IMPORT CONST float Sleef_hypotf1_u35purecfma(float, float);
+IMPORT CONST float Sleef_finz_hypotf1_u35purecfma(float, float);
+IMPORT CONST float Sleef_fabsf1_purecfma(float);
+IMPORT CONST float Sleef_finz_fabsf1_purecfma(float);
+IMPORT CONST float Sleef_copysignf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_copysignf1_purecfma(float, float);
+IMPORT CONST float Sleef_fmaxf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_fmaxf1_purecfma(float, float);
+IMPORT CONST float Sleef_fminf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_fminf1_purecfma(float, float);
+IMPORT CONST float Sleef_fdimf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_fdimf1_purecfma(float, float);
+IMPORT CONST float Sleef_truncf1_purecfma(float);
+IMPORT CONST float Sleef_finz_truncf1_purecfma(float);
+IMPORT CONST float Sleef_floorf1_purecfma(float);
+IMPORT CONST float Sleef_finz_floorf1_purecfma(float);
+IMPORT CONST float Sleef_ceilf1_purecfma(float);
+IMPORT CONST float Sleef_finz_ceilf1_purecfma(float);
+IMPORT CONST float Sleef_roundf1_purecfma(float);
+IMPORT CONST float Sleef_finz_roundf1_purecfma(float);
+IMPORT CONST float Sleef_rintf1_purecfma(float);
+IMPORT CONST float Sleef_finz_rintf1_purecfma(float);
+IMPORT CONST float Sleef_nextafterf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_nextafterf1_purecfma(float, float);
+IMPORT CONST float Sleef_frfrexpf1_purecfma(float);
+IMPORT CONST float Sleef_finz_frfrexpf1_purecfma(float);
+IMPORT CONST float Sleef_fmodf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_fmodf1_purecfma(float, float);
+IMPORT CONST float Sleef_remainderf1_purecfma(float, float);
+IMPORT CONST float Sleef_finz_remainderf1_purecfma(float, float);
+IMPORT CONST Sleef_float_2 Sleef_modff1_purecfma(float);
+IMPORT CONST Sleef_float_2 Sleef_finz_modff1_purecfma(float);
+IMPORT CONST float Sleef_lgammaf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_lgammaf1_u10purecfma(float);
+IMPORT CONST float Sleef_tgammaf1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_tgammaf1_u10purecfma(float);
+IMPORT CONST float Sleef_erff1_u10purecfma(float);
+IMPORT CONST float Sleef_finz_erff1_u10purecfma(float);
+IMPORT CONST float Sleef_erfcf1_u15purecfma(float);
+IMPORT CONST float Sleef_finz_erfcf1_u15purecfma(float);
+IMPORT CONST int Sleef_getIntf1_purecfma(int);
+IMPORT CONST int Sleef_finz_getIntf1_purecfma(int);
+IMPORT CONST void *Sleef_getPtrf1_purecfma(int);
+IMPORT CONST void *Sleef_finz_getPtrf1_purecfma(int);
+#endif
+#ifdef __cplusplus
+}
+#endif
+
+#undef IMPORT
+#endif // #ifndef __SLEEF_H__
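For orientation, the declarations above follow SLEEF's naming scheme: the uNN infix gives the guaranteed maximum error in tenths of an ULP (u05 = 0.5 ULP, u10 = 1.0 ULP, u35 = 3.5 ULP), the trailing token names the dispatch target (avx512fnofma, purec, purecfma), and the cinz_/finz_ prefixes mark SLEEF's deterministic ("consistent") non-FMA/FMA variants. The following standalone C sketch is illustrative only and is not part of the diff; it assumes sleef.h is on the include path and the program is linked against libsleef (e.g. cc example.c -lsleef), and it calls only functions declared in the header above.

/* Hypothetical example: exercising the scalar purec entry points. */
#include <stdio.h>
#include <sleef.h>

int main(void) {
  double x = 1.0;

  /* u10 variant: result within 1.0 ULP of the correctly rounded value. */
  double s = Sleef_sind1_u10purec(x);

  /* sincos returns both results in one call via the Sleef_double_2 pair. */
  Sleef_double_2 sc = Sleef_sincosd1_u35purec(x);

  /* u05 variant: result within 0.5 ULP. */
  double h = Sleef_hypotd1_u05purec(3.0, 4.0);

  printf("sin(1)     = %.17g\n", s);
  printf("sincos(1)  = %.17g, %.17g\n", sc.x, sc.y);
  printf("hypot(3,4) = %.17g\n", h);
  return 0;
}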