diff --git a/.gitattributes b/.gitattributes index f66af179c1fb2419a1fc3b86babebe69c08b9a5c..5338db1c4012f74b248622e582464670ab52446a 100644 --- a/.gitattributes +++ b/.gitattributes @@ -279,3 +279,7 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5p.cp my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5i.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/_conv.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zstandard/_cffi.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/_proxy.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_torchaudio_ffmpeg.so filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5z.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_qhull.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_cparser.pxd b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_cparser.pxd new file mode 100644 index 0000000000000000000000000000000000000000..165dd61d8a98ca1294887bb3985e3ba3ec7c0b98 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_cparser.pxd @@ -0,0 +1,190 @@ +from libc.stdint cimport ( + int8_t, + int16_t, + int32_t, + int64_t, + uint8_t, + uint16_t, + uint32_t, + uint64_t, +) + + +cdef extern from "../vendor/llhttp/build/llhttp.h": + + struct llhttp__internal_s: + int32_t _index + void* _span_pos0 + void* _span_cb0 + int32_t error + const char* reason + const char* error_pos + void* data + void* _current + uint64_t content_length + uint8_t type + uint8_t method + uint8_t http_major + uint8_t http_minor + uint8_t header_state + uint8_t lenient_flags + uint8_t upgrade + uint8_t finish + uint16_t flags + uint16_t status_code + void* settings + + ctypedef llhttp__internal_s llhttp__internal_t + ctypedef llhttp__internal_t llhttp_t + + ctypedef int (*llhttp_data_cb)(llhttp_t*, const char *at, size_t length) except -1 + ctypedef int (*llhttp_cb)(llhttp_t*) except -1 + + struct llhttp_settings_s: + llhttp_cb on_message_begin + llhttp_data_cb on_url + llhttp_data_cb on_status + llhttp_data_cb on_header_field + llhttp_data_cb on_header_value + llhttp_cb on_headers_complete + llhttp_data_cb on_body + llhttp_cb on_message_complete + llhttp_cb on_chunk_header + llhttp_cb on_chunk_complete + + llhttp_cb on_url_complete + llhttp_cb on_status_complete + llhttp_cb on_header_field_complete + llhttp_cb on_header_value_complete + + ctypedef llhttp_settings_s llhttp_settings_t + + enum llhttp_errno: + HPE_OK, + HPE_INTERNAL, + HPE_STRICT, + HPE_LF_EXPECTED, + HPE_UNEXPECTED_CONTENT_LENGTH, + HPE_CLOSED_CONNECTION, + HPE_INVALID_METHOD, + HPE_INVALID_URL, + HPE_INVALID_CONSTANT, + HPE_INVALID_VERSION, + HPE_INVALID_HEADER_TOKEN, + HPE_INVALID_CONTENT_LENGTH, + HPE_INVALID_CHUNK_SIZE, + HPE_INVALID_STATUS, + HPE_INVALID_EOF_STATE, + HPE_INVALID_TRANSFER_ENCODING, + HPE_CB_MESSAGE_BEGIN, + HPE_CB_HEADERS_COMPLETE, + HPE_CB_MESSAGE_COMPLETE, + 
HPE_CB_CHUNK_HEADER, + HPE_CB_CHUNK_COMPLETE, + HPE_PAUSED, + HPE_PAUSED_UPGRADE, + HPE_USER + + ctypedef llhttp_errno llhttp_errno_t + + enum llhttp_flags: + F_CONNECTION_KEEP_ALIVE, + F_CONNECTION_CLOSE, + F_CONNECTION_UPGRADE, + F_CHUNKED, + F_UPGRADE, + F_CONTENT_LENGTH, + F_SKIPBODY, + F_TRAILING, + F_TRANSFER_ENCODING + + enum llhttp_lenient_flags: + LENIENT_HEADERS, + LENIENT_CHUNKED_LENGTH + + enum llhttp_type: + HTTP_REQUEST, + HTTP_RESPONSE, + HTTP_BOTH + + enum llhttp_finish_t: + HTTP_FINISH_SAFE, + HTTP_FINISH_SAFE_WITH_CB, + HTTP_FINISH_UNSAFE + + enum llhttp_method: + HTTP_DELETE, + HTTP_GET, + HTTP_HEAD, + HTTP_POST, + HTTP_PUT, + HTTP_CONNECT, + HTTP_OPTIONS, + HTTP_TRACE, + HTTP_COPY, + HTTP_LOCK, + HTTP_MKCOL, + HTTP_MOVE, + HTTP_PROPFIND, + HTTP_PROPPATCH, + HTTP_SEARCH, + HTTP_UNLOCK, + HTTP_BIND, + HTTP_REBIND, + HTTP_UNBIND, + HTTP_ACL, + HTTP_REPORT, + HTTP_MKACTIVITY, + HTTP_CHECKOUT, + HTTP_MERGE, + HTTP_MSEARCH, + HTTP_NOTIFY, + HTTP_SUBSCRIBE, + HTTP_UNSUBSCRIBE, + HTTP_PATCH, + HTTP_PURGE, + HTTP_MKCALENDAR, + HTTP_LINK, + HTTP_UNLINK, + HTTP_SOURCE, + HTTP_PRI, + HTTP_DESCRIBE, + HTTP_ANNOUNCE, + HTTP_SETUP, + HTTP_PLAY, + HTTP_PAUSE, + HTTP_TEARDOWN, + HTTP_GET_PARAMETER, + HTTP_SET_PARAMETER, + HTTP_REDIRECT, + HTTP_RECORD, + HTTP_FLUSH + + ctypedef llhttp_method llhttp_method_t; + + void llhttp_settings_init(llhttp_settings_t* settings) + void llhttp_init(llhttp_t* parser, llhttp_type type, + const llhttp_settings_t* settings) + + llhttp_errno_t llhttp_execute(llhttp_t* parser, const char* data, size_t len) + llhttp_errno_t llhttp_finish(llhttp_t* parser) + + int llhttp_message_needs_eof(const llhttp_t* parser) + + int llhttp_should_keep_alive(const llhttp_t* parser) + + void llhttp_pause(llhttp_t* parser) + void llhttp_resume(llhttp_t* parser) + + void llhttp_resume_after_upgrade(llhttp_t* parser) + + llhttp_errno_t llhttp_get_errno(const llhttp_t* parser) + const char* llhttp_get_error_reason(const llhttp_t* parser) + void llhttp_set_error_reason(llhttp_t* parser, const char* reason) + const char* llhttp_get_error_pos(const llhttp_t* parser) + const char* llhttp_errno_name(llhttp_errno_t err) + + const char* llhttp_method_name(llhttp_method_t method) + + void llhttp_set_lenient_headers(llhttp_t* parser, int enabled) + void llhttp_set_lenient_chunked_length(llhttp_t* parser, int enabled) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_helpers.pyi b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_helpers.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1e358937024ac1b9d19cf714fc12087688c6fb95 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_helpers.pyi @@ -0,0 +1,6 @@ +from typing import Any + +class reify: + def __init__(self, wrapped: Any) -> None: ... + def __get__(self, inst: Any, owner: Any) -> Any: ... + def __set__(self, inst: Any, value: Any) -> None: ... 
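The `_helpers.pyi` stub above only declares the descriptor protocol of aiohttp's `reify` (a cached-property descriptor whose fast path ships as a C-accelerated `_helpers` extension). For orientation, here is a minimal pure-Python sketch of a descriptor matching that interface; it illustrates the pattern and is not aiohttp's exact implementation (caching directly in `inst.__dict__` is a simplification chosen for this sketch):

from typing import Any

class reify:
    """Compute the wrapped method once, then serve the cached value."""

    def __init__(self, wrapped: Any) -> None:
        self.wrapped = wrapped
        self.name = wrapped.__name__

    def __get__(self, inst: Any, owner: Any) -> Any:
        if inst is None:
            return self  # accessed on the class, not an instance
        try:
            return inst.__dict__[self.name]
        except KeyError:
            val = self.wrapped(inst)
            inst.__dict__[self.name] = val  # cache on the instance
            return val

    def __set__(self, inst: Any, value: Any) -> None:
        raise AttributeError("reified property is read-only")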
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_websocket.c b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_websocket.c new file mode 100644 index 0000000000000000000000000000000000000000..e798ab6851bf2e91a2f50c687c890e2d0796218c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/_websocket.c @@ -0,0 +1,3622 @@ +/* Generated by Cython 0.29.24 */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_29_24" +#define CYTHON_HEX_VERSION 0x001D18F0 +#define CYTHON_FUTURE_DIVISION 1 +#include <stddef.h> +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef 
CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define 
CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef 
Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #if defined(PyUnicode_IS_READY) + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #else + #define __Pyx_PyUnicode_READY(op) (0) + #endif + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define 
PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__aiohttp___websocket +#define __PYX_HAVE_API__aiohttp___websocket +/* Early includes */ +#include <string.h> +#include <stdio.h> +#include "pythread.h" +#include <stdint.h> +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN 
||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define 
__Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if 
(!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "aiohttp/_websocket.pyx", + "type.pxd", + "bool.pxd", + "complex.pxd", +}; + +/*--- Type declarations ---*/ + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define 
__Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + __Pyx_ImportType_CheckSize_Error = 0, + 
__Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define 
__Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* GCCDiagnostics.proto */ +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'cpython.version' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.exc' */ + +/* Module declarations from 'cpython.module' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'cpython.tuple' */ + +/* Module declarations from 'cpython.list' */ + +/* Module declarations from 'cpython.sequence' */ + +/* Module declarations from 'cpython.mapping' */ + +/* Module declarations from 'cpython.iterator' */ + +/* Module declarations from 'cpython.number' */ + +/* Module declarations from 'cpython.int' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.bool' */ +static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; + +/* Module declarations from 'cpython.long' */ + +/* Module declarations from 'cpython.float' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.complex' */ +static PyTypeObject 
*__pyx_ptype_7cpython_7complex_complex = 0; + +/* Module declarations from 'cpython.string' */ + +/* Module declarations from 'cpython.unicode' */ + +/* Module declarations from 'cpython.dict' */ + +/* Module declarations from 'cpython.instance' */ + +/* Module declarations from 'cpython.function' */ + +/* Module declarations from 'cpython.method' */ + +/* Module declarations from 'cpython.weakref' */ + +/* Module declarations from 'cpython.getargs' */ + +/* Module declarations from 'cpython.pythread' */ + +/* Module declarations from 'cpython.pystate' */ + +/* Module declarations from 'cpython.cobject' */ + +/* Module declarations from 'cpython.oldbuffer' */ + +/* Module declarations from 'cpython.set' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'cpython.bytes' */ + +/* Module declarations from 'cpython.pycapsule' */ + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'libc.stdint' */ + +/* Module declarations from 'aiohttp._websocket' */ +#define __Pyx_MODULE_NAME "aiohttp._websocket" +extern int __pyx_module_is_main_aiohttp___websocket; +int __pyx_module_is_main_aiohttp___websocket = 0; + +/* Implementation of 'aiohttp._websocket' */ +static PyObject *__pyx_builtin_range; +static const char __pyx_k_i[] = "i"; +static const char __pyx_k_data[] = "data"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mask[] = "mask"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_in_buf[] = "in_buf"; +static const char __pyx_k_data_len[] = "data_len"; +static const char __pyx_k_mask_buf[] = "mask_buf"; +static const char __pyx_k_uint32_msk[] = "uint32_msk"; +static const char __pyx_k_uint64_msk[] = "uint64_msk"; +static const char __pyx_k_aiohttp__websocket[] = "aiohttp._websocket"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_websocket_mask_cython[] = "_websocket_mask_cython"; +static const char __pyx_k_aiohttp__websocket_pyx[] = "aiohttp/_websocket.pyx"; +static PyObject *__pyx_n_s_aiohttp__websocket; +static PyObject *__pyx_kp_s_aiohttp__websocket_pyx; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_data; +static PyObject *__pyx_n_s_data_len; +static PyObject *__pyx_n_s_i; +static PyObject *__pyx_n_s_in_buf; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_mask; +static PyObject *__pyx_n_s_mask_buf; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_uint32_msk; +static PyObject *__pyx_n_s_uint64_msk; +static PyObject *__pyx_n_s_websocket_mask_cython; +static PyObject *__pyx_pf_7aiohttp_10_websocket__websocket_mask_cython(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask, PyObject *__pyx_v_data); /* proto */ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_codeobj__2; +/* Late includes */ + +/* "aiohttp/_websocket.pyx":11 + * + * + * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<< + * """Note, this function mutates its `data` argument + * """ + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7aiohttp_10_websocket_1_websocket_mask_cython(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_7aiohttp_10_websocket__websocket_mask_cython[] = "Note, this function mutates its `data` argument\n "; +static PyMethodDef 
__pyx_mdef_7aiohttp_10_websocket_1_websocket_mask_cython = {"_websocket_mask_cython", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_10_websocket_1_websocket_mask_cython, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7aiohttp_10_websocket__websocket_mask_cython}; +static PyObject *__pyx_pw_7aiohttp_10_websocket_1_websocket_mask_cython(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_mask = 0; + PyObject *__pyx_v_data = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("_websocket_mask_cython (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_mask,&__pyx_n_s_data,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("_websocket_mask_cython", 1, 2, 2, 1); __PYX_ERR(0, 11, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_websocket_mask_cython") < 0)) __PYX_ERR(0, 11, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_mask = values[0]; + __pyx_v_data = values[1]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("_websocket_mask_cython", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 11, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("aiohttp._websocket._websocket_mask_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7aiohttp_10_websocket__websocket_mask_cython(__pyx_self, __pyx_v_mask, __pyx_v_data); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7aiohttp_10_websocket__websocket_mask_cython(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask, PyObject *__pyx_v_data) { + Py_ssize_t __pyx_v_data_len; + Py_ssize_t __pyx_v_i; + unsigned char *__pyx_v_in_buf; + unsigned char const *__pyx_v_mask_buf; + uint32_t __pyx_v_uint32_msk; + uint64_t __pyx_v_uint64_msk; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + char *__pyx_t_5; + uint64_t *__pyx_t_6; + long __pyx_t_7; + uint32_t *__pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + Py_ssize_t __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_websocket_mask_cython", 0); + __Pyx_INCREF(__pyx_v_mask); + __Pyx_INCREF(__pyx_v_data); + + /* "aiohttp/_websocket.pyx":22 + * uint64_t 
uint64_msk + * + * assert len(mask) == 4 # <<<<<<<<<<<<<< + * + * if not isinstance(mask, bytes): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + __pyx_t_1 = PyObject_Length(__pyx_v_mask); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 22, __pyx_L1_error) + if (unlikely(!((__pyx_t_1 == 4) != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(0, 22, __pyx_L1_error) + } + } + #endif + + /* "aiohttp/_websocket.pyx":24 + * assert len(mask) == 4 + * + * if not isinstance(mask, bytes): # <<<<<<<<<<<<<< + * mask = bytes(mask) + * + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_mask); + __pyx_t_3 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_3) { + + /* "aiohttp/_websocket.pyx":25 + * + * if not isinstance(mask, bytes): + * mask = bytes(mask) # <<<<<<<<<<<<<< + * + * if isinstance(data, bytearray): + */ + __pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_mask, __pyx_t_4); + __pyx_t_4 = 0; + + /* "aiohttp/_websocket.pyx":24 + * assert len(mask) == 4 + * + * if not isinstance(mask, bytes): # <<<<<<<<<<<<<< + * mask = bytes(mask) + * + */ + } + + /* "aiohttp/_websocket.pyx":27 + * mask = bytes(mask) + * + * if isinstance(data, bytearray): # <<<<<<<<<<<<<< + * data = data + * else: + */ + __pyx_t_3 = PyByteArray_Check(__pyx_v_data); + __pyx_t_2 = (__pyx_t_3 != 0); + if (__pyx_t_2) { + + /* "aiohttp/_websocket.pyx":28 + * + * if isinstance(data, bytearray): + * data = data # <<<<<<<<<<<<<< + * else: + * data = bytearray(data) + */ + __pyx_t_4 = __pyx_v_data; + __Pyx_INCREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_4); + __pyx_t_4 = 0; + + /* "aiohttp/_websocket.pyx":27 + * mask = bytes(mask) + * + * if isinstance(data, bytearray): # <<<<<<<<<<<<<< + * data = data + * else: + */ + goto __pyx_L4; + } + + /* "aiohttp/_websocket.pyx":30 + * data = data + * else: + * data = bytearray(data) # <<<<<<<<<<<<<< + * + * data_len = len(data) + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyByteArray_Type)), __pyx_v_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_4); + __pyx_t_4 = 0; + } + __pyx_L4:; + + /* "aiohttp/_websocket.pyx":32 + * data = bytearray(data) + * + * data_len = len(data) # <<<<<<<<<<<<<< + * in_buf = PyByteArray_AsString(data) + * mask_buf = PyBytes_AsString(mask) + */ + __pyx_t_1 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 32, __pyx_L1_error) + __pyx_v_data_len = __pyx_t_1; + + /* "aiohttp/_websocket.pyx":33 + * + * data_len = len(data) + * in_buf = PyByteArray_AsString(data) # <<<<<<<<<<<<<< + * mask_buf = PyBytes_AsString(mask) + * uint32_msk = (mask_buf)[0] + */ + if (!(likely(PyByteArray_CheckExact(__pyx_v_data))||((__pyx_v_data) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_v_data)->tp_name), 0))) __PYX_ERR(0, 33, __pyx_L1_error) + __pyx_t_5 = PyByteArray_AsString(((PyObject*)__pyx_v_data)); if (unlikely(__pyx_t_5 == ((char *)NULL))) __PYX_ERR(0, 33, __pyx_L1_error) + __pyx_v_in_buf = ((unsigned char *)__pyx_t_5); + + /* "aiohttp/_websocket.pyx":34 + * data_len = len(data) + * in_buf = PyByteArray_AsString(data) + * mask_buf = PyBytes_AsString(mask) # <<<<<<<<<<<<<< + * uint32_msk = (mask_buf)[0] + * + */ + __pyx_t_5 = PyBytes_AsString(__pyx_v_mask); if (unlikely(__pyx_t_5 == ((char 
*)NULL))) __PYX_ERR(0, 34, __pyx_L1_error) + __pyx_v_mask_buf = ((unsigned char const *)__pyx_t_5); + + /* "aiohttp/_websocket.pyx":35 + * in_buf = PyByteArray_AsString(data) + * mask_buf = PyBytes_AsString(mask) + * uint32_msk = (mask_buf)[0] # <<<<<<<<<<<<<< + * + * # TODO: align in_data ptr to achieve even faster speeds + */ + __pyx_v_uint32_msk = (((uint32_t *)__pyx_v_mask_buf)[0]); + + /* "aiohttp/_websocket.pyx":40 + * # does it need in python ?! malloc() always aligns to sizeof(long) bytes + * + * if sizeof(size_t) >= 8: # <<<<<<<<<<<<<< + * uint64_msk = uint32_msk + * uint64_msk = (uint64_msk << 32) | uint32_msk + */ + __pyx_t_2 = (((sizeof(size_t)) >= 8) != 0); + if (__pyx_t_2) { + + /* "aiohttp/_websocket.pyx":41 + * + * if sizeof(size_t) >= 8: + * uint64_msk = uint32_msk # <<<<<<<<<<<<<< + * uint64_msk = (uint64_msk << 32) | uint32_msk + * + */ + __pyx_v_uint64_msk = __pyx_v_uint32_msk; + + /* "aiohttp/_websocket.pyx":42 + * if sizeof(size_t) >= 8: + * uint64_msk = uint32_msk + * uint64_msk = (uint64_msk << 32) | uint32_msk # <<<<<<<<<<<<<< + * + * while data_len >= 8: + */ + __pyx_v_uint64_msk = ((__pyx_v_uint64_msk << 32) | __pyx_v_uint32_msk); + + /* "aiohttp/_websocket.pyx":44 + * uint64_msk = (uint64_msk << 32) | uint32_msk + * + * while data_len >= 8: # <<<<<<<<<<<<<< + * (in_buf)[0] ^= uint64_msk + * in_buf += 8 + */ + while (1) { + __pyx_t_2 = ((__pyx_v_data_len >= 8) != 0); + if (!__pyx_t_2) break; + + /* "aiohttp/_websocket.pyx":45 + * + * while data_len >= 8: + * (in_buf)[0] ^= uint64_msk # <<<<<<<<<<<<<< + * in_buf += 8 + * data_len -= 8 + */ + __pyx_t_6 = ((uint64_t *)__pyx_v_in_buf); + __pyx_t_7 = 0; + (__pyx_t_6[__pyx_t_7]) = ((__pyx_t_6[__pyx_t_7]) ^ __pyx_v_uint64_msk); + + /* "aiohttp/_websocket.pyx":46 + * while data_len >= 8: + * (in_buf)[0] ^= uint64_msk + * in_buf += 8 # <<<<<<<<<<<<<< + * data_len -= 8 + * + */ + __pyx_v_in_buf = (__pyx_v_in_buf + 8); + + /* "aiohttp/_websocket.pyx":47 + * (in_buf)[0] ^= uint64_msk + * in_buf += 8 + * data_len -= 8 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data_len = (__pyx_v_data_len - 8); + } + + /* "aiohttp/_websocket.pyx":40 + * # does it need in python ?! 
malloc() always aligns to sizeof(long) bytes + * + * if sizeof(size_t) >= 8: # <<<<<<<<<<<<<< + * uint64_msk = uint32_msk + * uint64_msk = (uint64_msk << 32) | uint32_msk + */ + } + + /* "aiohttp/_websocket.pyx":50 + * + * + * while data_len >= 4: # <<<<<<<<<<<<<< + * (in_buf)[0] ^= uint32_msk + * in_buf += 4 + */ + while (1) { + __pyx_t_2 = ((__pyx_v_data_len >= 4) != 0); + if (!__pyx_t_2) break; + + /* "aiohttp/_websocket.pyx":51 + * + * while data_len >= 4: + * (in_buf)[0] ^= uint32_msk # <<<<<<<<<<<<<< + * in_buf += 4 + * data_len -= 4 + */ + __pyx_t_8 = ((uint32_t *)__pyx_v_in_buf); + __pyx_t_7 = 0; + (__pyx_t_8[__pyx_t_7]) = ((__pyx_t_8[__pyx_t_7]) ^ __pyx_v_uint32_msk); + + /* "aiohttp/_websocket.pyx":52 + * while data_len >= 4: + * (in_buf)[0] ^= uint32_msk + * in_buf += 4 # <<<<<<<<<<<<<< + * data_len -= 4 + * + */ + __pyx_v_in_buf = (__pyx_v_in_buf + 4); + + /* "aiohttp/_websocket.pyx":53 + * (in_buf)[0] ^= uint32_msk + * in_buf += 4 + * data_len -= 4 # <<<<<<<<<<<<<< + * + * for i in range(0, data_len): + */ + __pyx_v_data_len = (__pyx_v_data_len - 4); + } + + /* "aiohttp/_websocket.pyx":55 + * data_len -= 4 + * + * for i in range(0, data_len): # <<<<<<<<<<<<<< + * in_buf[i] ^= mask_buf[i] + */ + __pyx_t_1 = __pyx_v_data_len; + __pyx_t_9 = __pyx_t_1; + for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { + __pyx_v_i = __pyx_t_10; + + /* "aiohttp/_websocket.pyx":56 + * + * for i in range(0, data_len): + * in_buf[i] ^= mask_buf[i] # <<<<<<<<<<<<<< + */ + __pyx_t_11 = __pyx_v_i; + (__pyx_v_in_buf[__pyx_t_11]) = ((__pyx_v_in_buf[__pyx_t_11]) ^ (__pyx_v_mask_buf[__pyx_v_i])); + } + + /* "aiohttp/_websocket.pyx":11 + * + * + * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<< + * """Note, this function mutates its `data` argument + * """ + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("aiohttp._websocket._websocket_mask_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_mask); + __Pyx_XDECREF(__pyx_v_data); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec__websocket(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec__websocket}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "_websocket", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_aiohttp__websocket, __pyx_k_aiohttp__websocket, sizeof(__pyx_k_aiohttp__websocket), 0, 0, 1, 1}, + 
{&__pyx_kp_s_aiohttp__websocket_pyx, __pyx_k_aiohttp__websocket_pyx, sizeof(__pyx_k_aiohttp__websocket_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, + {&__pyx_n_s_data_len, __pyx_k_data_len, sizeof(__pyx_k_data_len), 0, 0, 1, 1}, + {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, + {&__pyx_n_s_in_buf, __pyx_k_in_buf, sizeof(__pyx_k_in_buf), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1}, + {&__pyx_n_s_mask_buf, __pyx_k_mask_buf, sizeof(__pyx_k_mask_buf), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_uint32_msk, __pyx_k_uint32_msk, sizeof(__pyx_k_uint32_msk), 0, 0, 1, 1}, + {&__pyx_n_s_uint64_msk, __pyx_k_uint64_msk, sizeof(__pyx_k_uint64_msk), 0, 0, 1, 1}, + {&__pyx_n_s_websocket_mask_cython, __pyx_k_websocket_mask_cython, sizeof(__pyx_k_websocket_mask_cython), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 55, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "aiohttp/_websocket.pyx":11 + * + * + * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<< + * """Note, this function mutates its `data` argument + * """ + */ + __pyx_tuple_ = PyTuple_Pack(8, __pyx_n_s_mask, __pyx_n_s_data, __pyx_n_s_data_len, __pyx_n_s_i, __pyx_n_s_in_buf, __pyx_n_s_mask_buf, __pyx_n_s_uint32_msk, __pyx_n_s_uint64_msk); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + __pyx_codeobj__2 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple_, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_aiohttp__websocket_pyx, __pyx_n_s_websocket_mask_cython, 11, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__2)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + 
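+/* --------------------------------------------------------------------------
+ * Editorial note (not Cython output): for orientation while reading this
+ * generated module, the block below is a minimal standalone C sketch of the
+ * masking technique implemented by _websocket_mask_cython above -- widen the
+ * 4-byte WebSocket mask to a 64-bit word, XOR the payload 8 bytes at a time,
+ * then 4 bytes at a time, then finish the 0-3 trailing bytes individually.
+ * It is fenced with #if 0 so it never reaches the compiler; the helper name
+ * ws_mask_inplace is hypothetical, and memcpy() is used for the word loads
+ * and stores where the generated code above casts the buffer pointer
+ * directly (memcpy is the strictly portable form of the same access).
+ * ------------------------------------------------------------------------ */
+#if 0
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+static void ws_mask_inplace(unsigned char *buf, size_t len,
+                            const unsigned char mask[4])
+{
+    uint32_t m32;
+    memcpy(&m32, mask, 4);              /* native-endian load of the mask;
+                                           byte i still meets mask[i % 4] */
+    if (sizeof(size_t) >= 8) {          /* same 64-bit gate as above */
+        uint64_t m64 = ((uint64_t)m32 << 32) | m32;
+        while (len >= 8) {              /* 8 payload bytes per iteration */
+            uint64_t w;
+            memcpy(&w, buf, 8);
+            w ^= m64;
+            memcpy(buf, &w, 8);
+            buf += 8; len -= 8;
+        }
+    }
+    while (len >= 4) {                  /* 4 payload bytes per iteration */
+        uint32_t w;
+        memcpy(&w, buf, 4);
+        w ^= m32;
+        memcpy(buf, &w, 4);
+        buf += 4; len -= 4;
+    }
+    {   /* trailing 0-3 bytes; the mask phase is still 0 here because only
+           multiples of 4 bytes were consumed by the loops above */
+        size_t i;
+        for (i = 0; i < len; i++)
+            buf[i] ^= mask[i];
+    }
+}
+#endif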
__Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(1, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(2, 8, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(3, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC init_websocket(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC 
init_websocket(void) +#else +__Pyx_PyMODINIT_FUNC PyInit__websocket(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit__websocket(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec__websocket(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module '_websocket' has already been imported. 
Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__websocket(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + PyEval_InitThreads(); + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("_websocket", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_aiohttp___websocket) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "aiohttp._websocket")) { + if (unlikely(PyDict_SetItemString(modules, "aiohttp._websocket", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + (void)__Pyx_modinit_type_init_code(); + if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "aiohttp/_websocket.pyx":11 + * + * + * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<< + * """Note, this function mutates its `data` argument + * """ + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_10_websocket_1_websocket_mask_cython, NULL, __pyx_n_s_aiohttp__websocket); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_websocket_mask_cython, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "aiohttp/_websocket.pyx":1 + * from cpython cimport PyBytes_AsString # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init aiohttp._websocket", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init aiohttp._websocket"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, 
&arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (__Pyx_PyFastCFunction_Check(func)) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* TypeImport */ +#ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero 
= 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned 
long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char 
*)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const 
digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = 
__Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+    if (!res) {
+        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+    return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        PyObject *t = PyTuple_GET_ITEM(tuple, i);
+        #if PY_MAJOR_VERSION < 3
+        if (likely(exc_type == t)) return 1;
+        #endif
+        if (likely(PyExceptionClass_Check(t))) {
+            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+        } else {
+        }
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        } else {
+        }
+    }
+    return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+    char ctversion[4], rtversion[4];
+    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+        char message[200];
+        PyOS_snprintf(message, sizeof(message),
+                      "compiletime version %s of module '%.100s' "
+                      "does not match runtime version %s",
+                      ctversion, __Pyx_MODULE_NAME, rtversion);
+        return PyErr_WarnEx(NULL, message, 1);
+    }
+    return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+    while (t->p) {
+    #if PY_MAJOR_VERSION < 3
+        if (t->is_unicode) {
+            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+        } else if (t->intern) {
+            *t->p = PyString_InternFromString(t->s);
+        } else {
+            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        }
+    #else
+        if (t->is_unicode | t->is_str) {
+            if (t->intern) {
+                *t->p = PyUnicode_InternFromString(t->s);
+            } else if (t->encoding) {
+                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+            } else {
+                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+            }
+        } else {
+            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+        }
+    #endif
+        if (!*t->p)
+            return -1;
+        if (PyObject_Hash(*t->p) == -1)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+    Py_ssize_t ignore;
+    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    char* defenc_c;
+    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+    if (!defenc) return NULL;
+    defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    {
+        char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+        char* c;
+        for (c = defenc_c; c < end; c++) {
+            if ((unsigned char) (*c) >= 128) {
+                PyUnicode_AsASCIIString(o);
+                return NULL;
+            }
+        }
+    }
+#endif
+    *length = PyBytes_GET_SIZE(defenc);
+    return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    if (likely(PyUnicode_IS_ASCII(o))) {
+        *length = PyUnicode_GET_LENGTH(o);
+        return PyUnicode_AsUTF8(o);
+    } else {
+        PyUnicode_AsASCIIString(o);
+        return NULL;
+    }
+#else
+    return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+    if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+            __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+            PyUnicode_Check(o)) {
+        return __Pyx_PyUnicode_AsStringAndSize(o, length);
+    } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+    if (PyByteArray_Check(o)) {
+        *length = PyByteArray_GET_SIZE(o);
+        return PyByteArray_AS_STRING(o);
+    } else
+#endif
+    {
+        char* result;
+        int r = PyBytes_AsStringAndSize(o, &result, length);
+        if (unlikely(r < 0)) {
+            return NULL;
+        } else {
+            return result;
+        }
+    }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+    int is_true = x == Py_True;
+    if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+    else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int
__Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/base_protocol.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/base_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..fff4610a1ea408f37528eb8dc2f92d27a0d8c9f6 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/base_protocol.py @@ -0,0 +1,87 @@ +import asyncio +from typing import Optional, cast + +from .tcp_helpers import tcp_nodelay + + +class BaseProtocol(asyncio.Protocol): + __slots__ = ( + "_loop", + "_paused", + "_drain_waiter", + "_connection_lost", + "_reading_paused", + "transport", + ) + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop = loop # type: asyncio.AbstractEventLoop + self._paused = False + self._drain_waiter = None # type: Optional[asyncio.Future[None]] + self._connection_lost = False + self._reading_paused = False + + self.transport = None # type: Optional[asyncio.Transport] + + def pause_writing(self) -> None: + assert not self._paused + self._paused = True + + def resume_writing(self) -> None: + assert self._paused + self._paused = False + + waiter = self._drain_waiter + if waiter is not None: + self._drain_waiter = None + if not waiter.done(): + waiter.set_result(None) + + def pause_reading(self) -> None: + if not self._reading_paused and self.transport is not None: + try: + self.transport.pause_reading() + except (AttributeError, NotImplementedError, RuntimeError): + pass + self._reading_paused = True + + def resume_reading(self) -> None: + if self._reading_paused and self.transport is not None: + try: + self.transport.resume_reading() + except (AttributeError, NotImplementedError, RuntimeError): + pass + self._reading_paused = False + + def 
connection_made(self, transport: asyncio.BaseTransport) -> None: + tr = cast(asyncio.Transport, transport) + tcp_nodelay(tr, True) + self.transport = tr + + def connection_lost(self, exc: Optional[BaseException]) -> None: + self._connection_lost = True + # Wake up the writer if currently paused. + self.transport = None + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + return + self._drain_waiter = None + if waiter.done(): + return + if exc is None: + waiter.set_result(None) + else: + waiter.set_exception(exc) + + async def _drain_helper(self) -> None: + if self._connection_lost: + raise ConnectionResetError("Connection lost") + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + waiter = self._loop.create_future() + self._drain_waiter = waiter + await asyncio.shield(waiter) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client.py new file mode 100644 index 0000000000000000000000000000000000000000..c6e4a766204f162e5f14f44ded465e280506125f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client.py @@ -0,0 +1,1302 @@ +"""HTTP Client for asyncio.""" + +import asyncio +import base64 +import hashlib +import json +import os +import sys +import traceback +import warnings +from contextlib import suppress +from types import SimpleNamespace, TracebackType +from typing import ( + Any, + Awaitable, + Callable, + Coroutine, + FrozenSet, + Generator, + Generic, + Iterable, + List, + Mapping, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +import attr +from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr +from yarl import URL + +from . 
import hdrs, http, payload +from .abc import AbstractCookieJar +from .client_exceptions import ( + ClientConnectionError as ClientConnectionError, + ClientConnectorCertificateError as ClientConnectorCertificateError, + ClientConnectorError as ClientConnectorError, + ClientConnectorSSLError as ClientConnectorSSLError, + ClientError as ClientError, + ClientHttpProxyError as ClientHttpProxyError, + ClientOSError as ClientOSError, + ClientPayloadError as ClientPayloadError, + ClientProxyConnectionError as ClientProxyConnectionError, + ClientResponseError as ClientResponseError, + ClientSSLError as ClientSSLError, + ContentTypeError as ContentTypeError, + InvalidURL as InvalidURL, + ServerConnectionError as ServerConnectionError, + ServerDisconnectedError as ServerDisconnectedError, + ServerFingerprintMismatch as ServerFingerprintMismatch, + ServerTimeoutError as ServerTimeoutError, + TooManyRedirects as TooManyRedirects, + WSServerHandshakeError as WSServerHandshakeError, +) +from .client_reqrep import ( + ClientRequest as ClientRequest, + ClientResponse as ClientResponse, + Fingerprint as Fingerprint, + RequestInfo as RequestInfo, + _merge_ssl_params, +) +from .client_ws import ClientWebSocketResponse as ClientWebSocketResponse +from .connector import ( + BaseConnector as BaseConnector, + NamedPipeConnector as NamedPipeConnector, + TCPConnector as TCPConnector, + UnixConnector as UnixConnector, +) +from .cookiejar import CookieJar +from .helpers import ( + DEBUG, + PY_36, + BasicAuth, + TimeoutHandle, + ceil_timeout, + get_env_proxy_for_url, + get_running_loop, + sentinel, + strip_auth_from_url, +) +from .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter +from .http_websocket import WSHandshakeError, WSMessage, ws_ext_gen, ws_ext_parse +from .streams import FlowControlDataQueue +from .tracing import Trace, TraceConfig +from .typedefs import Final, JSONEncoder, LooseCookies, LooseHeaders, StrOrURL + +__all__ = ( + # client_exceptions + "ClientConnectionError", + "ClientConnectorCertificateError", + "ClientConnectorError", + "ClientConnectorSSLError", + "ClientError", + "ClientHttpProxyError", + "ClientOSError", + "ClientPayloadError", + "ClientProxyConnectionError", + "ClientResponseError", + "ClientSSLError", + "ContentTypeError", + "InvalidURL", + "ServerConnectionError", + "ServerDisconnectedError", + "ServerFingerprintMismatch", + "ServerTimeoutError", + "TooManyRedirects", + "WSServerHandshakeError", + # client_reqrep + "ClientRequest", + "ClientResponse", + "Fingerprint", + "RequestInfo", + # connector + "BaseConnector", + "TCPConnector", + "UnixConnector", + "NamedPipeConnector", + # client_ws + "ClientWebSocketResponse", + # client + "ClientSession", + "ClientTimeout", + "request", +) + + +try: + from ssl import SSLContext +except ImportError: # pragma: no cover + SSLContext = object # type: ignore[misc,assignment] + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ClientTimeout: + total: Optional[float] = None + connect: Optional[float] = None + sock_read: Optional[float] = None + sock_connect: Optional[float] = None + + # pool_queue_timeout: Optional[float] = None + # dns_resolution_timeout: Optional[float] = None + # socket_connect_timeout: Optional[float] = None + # connection_acquiring_timeout: Optional[float] = None + # new_connection_timeout: Optional[float] = None + # http_header_timeout: Optional[float] = None + # response_body_timeout: Optional[float] = None + + # to create a timeout specific for a single request, either + # - create a completely 
new one to overwrite the default + # - or use http://www.attrs.org/en/stable/api.html#attr.evolve + # to overwrite the defaults + + +# 5 Minute default read timeout +DEFAULT_TIMEOUT: Final[ClientTimeout] = ClientTimeout(total=5 * 60) + +_RetType = TypeVar("_RetType") + + +class ClientSession: + """First-class interface for making HTTP requests.""" + + ATTRS = frozenset( + [ + "_base_url", + "_source_traceback", + "_connector", + "requote_redirect_url", + "_loop", + "_cookie_jar", + "_connector_owner", + "_default_auth", + "_version", + "_json_serialize", + "_requote_redirect_url", + "_timeout", + "_raise_for_status", + "_auto_decompress", + "_trust_env", + "_default_headers", + "_skip_auto_headers", + "_request_class", + "_response_class", + "_ws_response_class", + "_trace_configs", + "_read_bufsize", + ] + ) + + _source_traceback = None + + def __init__( + self, + base_url: Optional[StrOrURL] = None, + *, + connector: Optional[BaseConnector] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + cookies: Optional[LooseCookies] = None, + headers: Optional[LooseHeaders] = None, + skip_auto_headers: Optional[Iterable[str]] = None, + auth: Optional[BasicAuth] = None, + json_serialize: JSONEncoder = json.dumps, + request_class: Type[ClientRequest] = ClientRequest, + response_class: Type[ClientResponse] = ClientResponse, + ws_response_class: Type[ClientWebSocketResponse] = ClientWebSocketResponse, + version: HttpVersion = http.HttpVersion11, + cookie_jar: Optional[AbstractCookieJar] = None, + connector_owner: bool = True, + raise_for_status: bool = False, + read_timeout: Union[float, object] = sentinel, + conn_timeout: Optional[float] = None, + timeout: Union[object, ClientTimeout] = sentinel, + auto_decompress: bool = True, + trust_env: bool = False, + requote_redirect_url: bool = True, + trace_configs: Optional[List[TraceConfig]] = None, + read_bufsize: int = 2 ** 16, + ) -> None: + if loop is None: + if connector is not None: + loop = connector._loop + + loop = get_running_loop(loop) + + if base_url is None or isinstance(base_url, URL): + self._base_url: Optional[URL] = base_url + else: + self._base_url = URL(base_url) + assert ( + self._base_url.origin() == self._base_url + ), "Only absolute URLs without path part are supported" + + if connector is None: + connector = TCPConnector(loop=loop) + + if connector._loop is not loop: + raise RuntimeError("Session and connector has to use same event loop") + + self._loop = loop + + if loop.get_debug(): + self._source_traceback = traceback.extract_stack(sys._getframe(1)) + + if cookie_jar is None: + cookie_jar = CookieJar(loop=loop) + self._cookie_jar = cookie_jar + + if cookies is not None: + self._cookie_jar.update_cookies(cookies) + + self._connector = connector # type: Optional[BaseConnector] + self._connector_owner = connector_owner + self._default_auth = auth + self._version = version + self._json_serialize = json_serialize + if timeout is sentinel: + self._timeout = DEFAULT_TIMEOUT + if read_timeout is not sentinel: + warnings.warn( + "read_timeout is deprecated, " "use timeout argument instead", + DeprecationWarning, + stacklevel=2, + ) + self._timeout = attr.evolve(self._timeout, total=read_timeout) + if conn_timeout is not None: + self._timeout = attr.evolve(self._timeout, connect=conn_timeout) + warnings.warn( + "conn_timeout is deprecated, " "use timeout argument instead", + DeprecationWarning, + stacklevel=2, + ) + else: + self._timeout = timeout # type: ignore[assignment] + if read_timeout is not sentinel: + raise ValueError( + 
"read_timeout and timeout parameters " + "conflict, please setup " + "timeout.read" + ) + if conn_timeout is not None: + raise ValueError( + "conn_timeout and timeout parameters " + "conflict, please setup " + "timeout.connect" + ) + self._raise_for_status = raise_for_status + self._auto_decompress = auto_decompress + self._trust_env = trust_env + self._requote_redirect_url = requote_redirect_url + self._read_bufsize = read_bufsize + + # Convert to list of tuples + if headers: + real_headers = CIMultiDict(headers) # type: CIMultiDict[str] + else: + real_headers = CIMultiDict() + self._default_headers = real_headers # type: CIMultiDict[str] + if skip_auto_headers is not None: + self._skip_auto_headers = frozenset(istr(i) for i in skip_auto_headers) + else: + self._skip_auto_headers = frozenset() + + self._request_class = request_class + self._response_class = response_class + self._ws_response_class = ws_response_class + + self._trace_configs = trace_configs or [] + for trace_config in self._trace_configs: + trace_config.freeze() + + def __init_subclass__(cls: Type["ClientSession"]) -> None: + warnings.warn( + "Inheritance class {} from ClientSession " + "is discouraged".format(cls.__name__), + DeprecationWarning, + stacklevel=2, + ) + + if DEBUG: + + def __setattr__(self, name: str, val: Any) -> None: + if name not in self.ATTRS: + warnings.warn( + "Setting custom ClientSession.{} attribute " + "is discouraged".format(name), + DeprecationWarning, + stacklevel=2, + ) + super().__setattr__(name, val) + + def __del__(self, _warnings: Any = warnings) -> None: + if not self.closed: + if PY_36: + kwargs = {"source": self} + else: + kwargs = {} + _warnings.warn( + f"Unclosed client session {self!r}", ResourceWarning, **kwargs + ) + context = {"client_session": self, "message": "Unclosed client session"} + if self._source_traceback is not None: + context["source_traceback"] = self._source_traceback + self._loop.call_exception_handler(context) + + def request( + self, method: str, url: StrOrURL, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP request.""" + return _RequestContextManager(self._request(method, url, **kwargs)) + + def _build_url(self, str_or_url: StrOrURL) -> URL: + url = URL(str_or_url) + if self._base_url is None: + return url + else: + assert not url.is_absolute() and url.path.startswith("/") + return self._base_url.join(url) + + async def _request( + self, + method: str, + str_or_url: StrOrURL, + *, + params: Optional[Mapping[str, str]] = None, + data: Any = None, + json: Any = None, + cookies: Optional[LooseCookies] = None, + headers: Optional[LooseHeaders] = None, + skip_auto_headers: Optional[Iterable[str]] = None, + auth: Optional[BasicAuth] = None, + allow_redirects: bool = True, + max_redirects: int = 10, + compress: Optional[str] = None, + chunked: Optional[bool] = None, + expect100: bool = False, + raise_for_status: Optional[bool] = None, + read_until_eof: bool = True, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + timeout: Union[ClientTimeout, object] = sentinel, + verify_ssl: Optional[bool] = None, + fingerprint: Optional[bytes] = None, + ssl_context: Optional[SSLContext] = None, + ssl: Optional[Union[SSLContext, bool, Fingerprint]] = None, + proxy_headers: Optional[LooseHeaders] = None, + trace_request_ctx: Optional[SimpleNamespace] = None, + read_bufsize: Optional[int] = None, + ) -> ClientResponse: + + # NOTE: timeout clamps existing connect and read timeouts. 
We cannot + # set the default to None because we need to detect if the user wants + # to use the existing timeouts by setting timeout to None. + + if self.closed: + raise RuntimeError("Session is closed") + + ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) + + if data is not None and json is not None: + raise ValueError( + "data and json parameters can not be used at the same time" + ) + elif json is not None: + data = payload.JsonPayload(json, dumps=self._json_serialize) + + if not isinstance(chunked, bool) and chunked is not None: + warnings.warn("Chunk size is deprecated #1615", DeprecationWarning) + + redirects = 0 + history = [] + version = self._version + + # Merge with default headers and transform to CIMultiDict + headers = self._prepare_headers(headers) + proxy_headers = self._prepare_headers(proxy_headers) + + try: + url = self._build_url(str_or_url) + except ValueError as e: + raise InvalidURL(str_or_url) from e + + skip_headers = set(self._skip_auto_headers) + if skip_auto_headers is not None: + for i in skip_auto_headers: + skip_headers.add(istr(i)) + + if proxy is not None: + try: + proxy = URL(proxy) + except ValueError as e: + raise InvalidURL(proxy) from e + + if timeout is sentinel: + real_timeout = self._timeout # type: ClientTimeout + else: + if not isinstance(timeout, ClientTimeout): + real_timeout = ClientTimeout(total=timeout) # type: ignore[arg-type] + else: + real_timeout = timeout + # timeout is cumulative for all request operations + # (request, redirects, responses, data consuming) + tm = TimeoutHandle(self._loop, real_timeout.total) + handle = tm.start() + + if read_bufsize is None: + read_bufsize = self._read_bufsize + + traces = [ + Trace( + self, + trace_config, + trace_config.trace_config_ctx(trace_request_ctx=trace_request_ctx), + ) + for trace_config in self._trace_configs + ] + + for trace in traces: + await trace.send_request_start(method, url.update_query(params), headers) + + timer = tm.timer() + try: + with timer: + while True: + url, auth_from_url = strip_auth_from_url(url) + if auth and auth_from_url: + raise ValueError( + "Cannot combine AUTH argument with " + "credentials encoded in URL" + ) + + if auth is None: + auth = auth_from_url + if auth is None: + auth = self._default_auth + # It would be confusing if we support explicit + # Authorization header with auth argument + if ( + headers is not None + and auth is not None + and hdrs.AUTHORIZATION in headers + ): + raise ValueError( + "Cannot combine AUTHORIZATION header " + "with AUTH argument or credentials " + "encoded in URL" + ) + + all_cookies = self._cookie_jar.filter_cookies(url) + + if cookies is not None: + tmp_cookie_jar = CookieJar() + tmp_cookie_jar.update_cookies(cookies) + req_cookies = tmp_cookie_jar.filter_cookies(url) + if req_cookies: + all_cookies.load(req_cookies) + + if proxy is not None: + proxy = URL(proxy) + elif self._trust_env: + with suppress(LookupError): + proxy, proxy_auth = get_env_proxy_for_url(url) + + req = self._request_class( + method, + url, + params=params, + headers=headers, + skip_auto_headers=skip_headers, + data=data, + cookies=all_cookies, + auth=auth, + version=version, + compress=compress, + chunked=chunked, + expect100=expect100, + loop=self._loop, + response_class=self._response_class, + proxy=proxy, + proxy_auth=proxy_auth, + timer=timer, + session=self, + ssl=ssl, + proxy_headers=proxy_headers, + traces=traces, + ) + + # connection timeout + try: + async with ceil_timeout(real_timeout.connect): + assert self._connector is not 
None + conn = await self._connector.connect( + req, traces=traces, timeout=real_timeout + ) + except asyncio.TimeoutError as exc: + raise ServerTimeoutError( + "Connection timeout " "to host {}".format(url) + ) from exc + + assert conn.transport is not None + + assert conn.protocol is not None + conn.protocol.set_response_params( + timer=timer, + skip_payload=method.upper() == "HEAD", + read_until_eof=read_until_eof, + auto_decompress=self._auto_decompress, + read_timeout=real_timeout.sock_read, + read_bufsize=read_bufsize, + ) + + try: + try: + resp = await req.send(conn) + try: + await resp.start(conn) + except BaseException: + resp.close() + raise + except BaseException: + conn.close() + raise + except ClientError: + raise + except OSError as exc: + raise ClientOSError(*exc.args) from exc + + self._cookie_jar.update_cookies(resp.cookies, resp.url) + + # redirects + if resp.status in (301, 302, 303, 307, 308) and allow_redirects: + + for trace in traces: + await trace.send_request_redirect( + method, url.update_query(params), headers, resp + ) + + redirects += 1 + history.append(resp) + if max_redirects and redirects >= max_redirects: + resp.close() + raise TooManyRedirects( + history[0].request_info, tuple(history) + ) + + # For 301 and 302, mimic IE, now changed in RFC + # https://github.com/kennethreitz/requests/pull/269 + if (resp.status == 303 and resp.method != hdrs.METH_HEAD) or ( + resp.status in (301, 302) and resp.method == hdrs.METH_POST + ): + method = hdrs.METH_GET + data = None + if headers.get(hdrs.CONTENT_LENGTH): + headers.pop(hdrs.CONTENT_LENGTH) + + r_url = resp.headers.get(hdrs.LOCATION) or resp.headers.get( + hdrs.URI + ) + if r_url is None: + # see github.com/aio-libs/aiohttp/issues/2022 + break + else: + # reading from correct redirection + # response is forbidden + resp.release() + + try: + parsed_url = URL( + r_url, encoded=not self._requote_redirect_url + ) + + except ValueError as e: + raise InvalidURL(r_url) from e + + scheme = parsed_url.scheme + if scheme not in ("http", "https", ""): + resp.close() + raise ValueError("Can redirect only to http or https") + elif not scheme: + parsed_url = url.join(parsed_url) + + if url.origin() != parsed_url.origin(): + auth = None + headers.pop(hdrs.AUTHORIZATION, None) + + url = parsed_url + params = None + resp.release() + continue + + break + + # check response status + if raise_for_status is None: + raise_for_status = self._raise_for_status + if raise_for_status: + resp.raise_for_status() + + # register connection + if handle is not None: + if resp.connection is not None: + resp.connection.add_callback(handle.cancel) + else: + handle.cancel() + + resp._history = tuple(history) + + for trace in traces: + await trace.send_request_end( + method, url.update_query(params), headers, resp + ) + return resp + + except BaseException as e: + # cleanup timer + tm.close() + if handle: + handle.cancel() + handle = None + + for trace in traces: + await trace.send_request_exception( + method, url.update_query(params), headers, e + ) + raise + + def ws_connect( + self, + url: StrOrURL, + *, + method: str = hdrs.METH_GET, + protocols: Iterable[str] = (), + timeout: float = 10.0, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = True, + heartbeat: Optional[float] = None, + auth: Optional[BasicAuth] = None, + origin: Optional[str] = None, + params: Optional[Mapping[str, str]] = None, + headers: Optional[LooseHeaders] = None, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = 
None, + ssl: Union[SSLContext, bool, None, Fingerprint] = None, + verify_ssl: Optional[bool] = None, + fingerprint: Optional[bytes] = None, + ssl_context: Optional[SSLContext] = None, + proxy_headers: Optional[LooseHeaders] = None, + compress: int = 0, + max_msg_size: int = 4 * 1024 * 1024, + ) -> "_WSRequestContextManager": + """Initiate websocket connection.""" + return _WSRequestContextManager( + self._ws_connect( + url, + method=method, + protocols=protocols, + timeout=timeout, + receive_timeout=receive_timeout, + autoclose=autoclose, + autoping=autoping, + heartbeat=heartbeat, + auth=auth, + origin=origin, + params=params, + headers=headers, + proxy=proxy, + proxy_auth=proxy_auth, + ssl=ssl, + verify_ssl=verify_ssl, + fingerprint=fingerprint, + ssl_context=ssl_context, + proxy_headers=proxy_headers, + compress=compress, + max_msg_size=max_msg_size, + ) + ) + + async def _ws_connect( + self, + url: StrOrURL, + *, + method: str = hdrs.METH_GET, + protocols: Iterable[str] = (), + timeout: float = 10.0, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = True, + heartbeat: Optional[float] = None, + auth: Optional[BasicAuth] = None, + origin: Optional[str] = None, + params: Optional[Mapping[str, str]] = None, + headers: Optional[LooseHeaders] = None, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + ssl: Union[SSLContext, bool, None, Fingerprint] = None, + verify_ssl: Optional[bool] = None, + fingerprint: Optional[bytes] = None, + ssl_context: Optional[SSLContext] = None, + proxy_headers: Optional[LooseHeaders] = None, + compress: int = 0, + max_msg_size: int = 4 * 1024 * 1024, + ) -> ClientWebSocketResponse: + + if headers is None: + real_headers = CIMultiDict() # type: CIMultiDict[str] + else: + real_headers = CIMultiDict(headers) + + default_headers = { + hdrs.UPGRADE: "websocket", + hdrs.CONNECTION: "upgrade", + hdrs.SEC_WEBSOCKET_VERSION: "13", + } + + for key, value in default_headers.items(): + real_headers.setdefault(key, value) + + sec_key = base64.b64encode(os.urandom(16)) + real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode() + + if protocols: + real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ",".join(protocols) + if origin is not None: + real_headers[hdrs.ORIGIN] = origin + if compress: + extstr = ws_ext_gen(compress=compress) + real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr + + ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) + + # send request + resp = await self.request( + method, + url, + params=params, + headers=real_headers, + read_until_eof=False, + auth=auth, + proxy=proxy, + proxy_auth=proxy_auth, + ssl=ssl, + proxy_headers=proxy_headers, + ) + + try: + # check handshake + if resp.status != 101: + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid response status", + status=resp.status, + headers=resp.headers, + ) + + if resp.headers.get(hdrs.UPGRADE, "").lower() != "websocket": + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid upgrade header", + status=resp.status, + headers=resp.headers, + ) + + if resp.headers.get(hdrs.CONNECTION, "").lower() != "upgrade": + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid connection header", + status=resp.status, + headers=resp.headers, + ) + + # key calculation + r_key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, "") + match = base64.b64encode(hashlib.sha1(sec_key + WS_KEY).digest()).decode() + if r_key != match: + raise 
WSServerHandshakeError( + resp.request_info, + resp.history, + message="Invalid challenge response", + status=resp.status, + headers=resp.headers, + ) + + # websocket protocol + protocol = None + if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers: + resp_protocols = [ + proto.strip() + for proto in resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") + ] + + for proto in resp_protocols: + if proto in protocols: + protocol = proto + break + + # websocket compress + notakeover = False + if compress: + compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS) + if compress_hdrs: + try: + compress, notakeover = ws_ext_parse(compress_hdrs) + except WSHandshakeError as exc: + raise WSServerHandshakeError( + resp.request_info, + resp.history, + message=exc.args[0], + status=resp.status, + headers=resp.headers, + ) from exc + else: + compress = 0 + notakeover = False + + conn = resp.connection + assert conn is not None + conn_proto = conn.protocol + assert conn_proto is not None + transport = conn.transport + assert transport is not None + reader = FlowControlDataQueue( + conn_proto, 2 ** 16, loop=self._loop + ) # type: FlowControlDataQueue[WSMessage] + conn_proto.set_parser(WebSocketReader(reader, max_msg_size), reader) + writer = WebSocketWriter( + conn_proto, + transport, + use_mask=True, + compress=compress, + notakeover=notakeover, + ) + except BaseException: + resp.close() + raise + else: + return self._ws_response_class( + reader, + writer, + protocol, + resp, + timeout, + autoclose, + autoping, + self._loop, + receive_timeout=receive_timeout, + heartbeat=heartbeat, + compress=compress, + client_notakeover=notakeover, + ) + + def _prepare_headers(self, headers: Optional[LooseHeaders]) -> "CIMultiDict[str]": + """Add default headers and transform it to CIMultiDict""" + # Convert headers to MultiDict + result = CIMultiDict(self._default_headers) + if headers: + if not isinstance(headers, (MultiDictProxy, MultiDict)): + headers = CIMultiDict(headers) + added_names = set() # type: Set[str] + for key, value in headers.items(): + if key in added_names: + result.add(key, value) + else: + result[key] = value + added_names.add(key) + return result + + def get( + self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP GET request.""" + return _RequestContextManager( + self._request(hdrs.METH_GET, url, allow_redirects=allow_redirects, **kwargs) + ) + + def options( + self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP OPTIONS request.""" + return _RequestContextManager( + self._request( + hdrs.METH_OPTIONS, url, allow_redirects=allow_redirects, **kwargs + ) + ) + + def head( + self, url: StrOrURL, *, allow_redirects: bool = False, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP HEAD request.""" + return _RequestContextManager( + self._request( + hdrs.METH_HEAD, url, allow_redirects=allow_redirects, **kwargs + ) + ) + + def post( + self, url: StrOrURL, *, data: Any = None, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP POST request.""" + return _RequestContextManager( + self._request(hdrs.METH_POST, url, data=data, **kwargs) + ) + + def put( + self, url: StrOrURL, *, data: Any = None, **kwargs: Any + ) -> "_RequestContextManager": + """Perform HTTP PUT request.""" + return _RequestContextManager( + self._request(hdrs.METH_PUT, url, data=data, **kwargs) + ) + + def patch( + self, url: StrOrURL, *, data: Any = None, **kwargs: 
Any + ) -> "_RequestContextManager": + """Perform HTTP PATCH request.""" + return _RequestContextManager( + self._request(hdrs.METH_PATCH, url, data=data, **kwargs) + ) + + def delete(self, url: StrOrURL, **kwargs: Any) -> "_RequestContextManager": + """Perform HTTP DELETE request.""" + return _RequestContextManager(self._request(hdrs.METH_DELETE, url, **kwargs)) + + async def close(self) -> None: + """Close underlying connector. + + Release all acquired resources. + """ + if not self.closed: + if self._connector is not None and self._connector_owner: + await self._connector.close() + self._connector = None + + @property + def closed(self) -> bool: + """Is client session closed. + + A readonly property. + """ + return self._connector is None or self._connector.closed + + @property + def connector(self) -> Optional[BaseConnector]: + """Connector instance used for the session.""" + return self._connector + + @property + def cookie_jar(self) -> AbstractCookieJar: + """The session cookies.""" + return self._cookie_jar + + @property + def version(self) -> Tuple[int, int]: + """The session HTTP protocol version.""" + return self._version + + @property + def requote_redirect_url(self) -> bool: + """Do URL requoting on redirection handling.""" + return self._requote_redirect_url + + @requote_redirect_url.setter + def requote_redirect_url(self, val: bool) -> None: + """Do URL requoting on redirection handling.""" + warnings.warn( + "session.requote_redirect_url modification " "is deprecated #2778", + DeprecationWarning, + stacklevel=2, + ) + self._requote_redirect_url = val + + @property + def loop(self) -> asyncio.AbstractEventLoop: + """Session's loop.""" + warnings.warn( + "client.loop property is deprecated", DeprecationWarning, stacklevel=2 + ) + return self._loop + + @property + def timeout(self) -> Union[object, ClientTimeout]: + """Timeout for the session.""" + return self._timeout + + @property + def headers(self) -> "CIMultiDict[str]": + """The default headers of the client session.""" + return self._default_headers + + @property + def skip_auto_headers(self) -> FrozenSet[istr]: + """Headers for which autogeneration should be skipped""" + return self._skip_auto_headers + + @property + def auth(self) -> Optional[BasicAuth]: + """An object that represents HTTP Basic Authorization""" + return self._default_auth + + @property + def json_serialize(self) -> JSONEncoder: + """Json serializer callable""" + return self._json_serialize + + @property + def connector_owner(self) -> bool: + """Should connector be closed on session closing""" + return self._connector_owner + + @property + def raise_for_status( + self, + ) -> Union[bool, Callable[[ClientResponse], Awaitable[None]]]: + """Should `ClientResponse.raise_for_status()` be called for each response.""" + return self._raise_for_status + + @property + def auto_decompress(self) -> bool: + """Should the body response be automatically decompressed.""" + return self._auto_decompress + + @property + def trust_env(self) -> bool: + """ + Should proxies information from environment or netrc be trusted. + + Information is from HTTP_PROXY / HTTPS_PROXY environment variables + or ~/.netrc file if present. + """ + return self._trust_env + + @property + def trace_configs(self) -> List[TraceConfig]: + """A list of TraceConfig instances used for client tracing""" + return self._trace_configs + + def detach(self) -> None: + """Detach connector from session without closing the former. + + Session is switched to closed state anyway. 
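+
+        A minimal usage sketch (illustrative only; assumes an existing
+        ``session`` whose connector should outlive it)::
+
+            connector = session.connector
+            session.detach()         # session.closed becomes True
+            await connector.close()  # the detached connector is closed separately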
+ """ + self._connector = None + + def __enter__(self) -> None: + raise TypeError("Use async with instead") + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + # __exit__ should exist in pair with __enter__ but never executed + pass # pragma: no cover + + async def __aenter__(self) -> "ClientSession": + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + await self.close() + + +class _BaseRequestContextManager(Coroutine[Any, Any, _RetType], Generic[_RetType]): + + __slots__ = ("_coro", "_resp") + + def __init__(self, coro: Coroutine["asyncio.Future[Any]", None, _RetType]) -> None: + self._coro = coro + + def send(self, arg: None) -> "asyncio.Future[Any]": + return self._coro.send(arg) + + def throw(self, arg: BaseException) -> None: # type: ignore[arg-type,override] + self._coro.throw(arg) + + def close(self) -> None: + return self._coro.close() + + def __await__(self) -> Generator[Any, None, _RetType]: + ret = self._coro.__await__() + return ret + + def __iter__(self) -> Generator[Any, None, _RetType]: + return self.__await__() + + async def __aenter__(self) -> _RetType: + self._resp = await self._coro + return self._resp + + +class _RequestContextManager(_BaseRequestContextManager[ClientResponse]): + __slots__ = () + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + # We're basing behavior on the exception as it can be caused by + # user code unrelated to the status of the connection. If you + # would like to close a connection you must do that + # explicitly. Otherwise connection error handling should kick in + # and close/recycle the connection as required. 
+ self._resp.release() + + +class _WSRequestContextManager(_BaseRequestContextManager[ClientWebSocketResponse]): + __slots__ = () + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + await self._resp.close() + + +class _SessionRequestContextManager: + + __slots__ = ("_coro", "_resp", "_session") + + def __init__( + self, + coro: Coroutine["asyncio.Future[Any]", None, ClientResponse], + session: ClientSession, + ) -> None: + self._coro = coro + self._resp = None # type: Optional[ClientResponse] + self._session = session + + async def __aenter__(self) -> ClientResponse: + try: + self._resp = await self._coro + except BaseException: + await self._session.close() + raise + else: + return self._resp + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + assert self._resp is not None + self._resp.close() + await self._session.close() + + +def request( + method: str, + url: StrOrURL, + *, + params: Optional[Mapping[str, str]] = None, + data: Any = None, + json: Any = None, + headers: Optional[LooseHeaders] = None, + skip_auto_headers: Optional[Iterable[str]] = None, + auth: Optional[BasicAuth] = None, + allow_redirects: bool = True, + max_redirects: int = 10, + compress: Optional[str] = None, + chunked: Optional[bool] = None, + expect100: bool = False, + raise_for_status: Optional[bool] = None, + read_until_eof: bool = True, + proxy: Optional[StrOrURL] = None, + proxy_auth: Optional[BasicAuth] = None, + timeout: Union[ClientTimeout, object] = sentinel, + cookies: Optional[LooseCookies] = None, + version: HttpVersion = http.HttpVersion11, + connector: Optional[BaseConnector] = None, + read_bufsize: Optional[int] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> _SessionRequestContextManager: + """Constructs and sends a request. + + Returns response object. + method - HTTP method + url - request url + params - (optional) Dictionary or bytes to be sent in the query + string of the new request + data - (optional) Dictionary, bytes, or file-like object to + send in the body of the request + json - (optional) Any json compatible python object + headers - (optional) Dictionary of HTTP Headers to send with + the request + cookies - (optional) Dict object to send with the request + auth - (optional) BasicAuth named tuple represent HTTP Basic Auth + auth - aiohttp.helpers.BasicAuth + allow_redirects - (optional) If set to False, do not follow + redirects + version - Request HTTP version. + compress - Set to True if request has to be compressed + with deflate encoding. + chunked - Set to chunk size for chunked transfer encoding. + expect100 - Expect 100-continue response from server. + connector - BaseConnector sub-class instance to support + connection pooling. + read_until_eof - Read response until eof if response + does not have Content-Length header. + loop - Optional event loop. + timeout - Optional ClientTimeout settings structure, 5min + total timeout by default. 
+    Usage::
+      >>> import aiohttp
+      >>> resp = await aiohttp.request('GET', 'http://python.org/')
+      >>> resp
+      <ClientResponse(python.org/) [200]>
+      >>> data = await resp.read()
+    """
+    connector_owner = False
+    if connector is None:
+        connector_owner = True
+        connector = TCPConnector(loop=loop, force_close=True)
+
+    session = ClientSession(
+        loop=loop,
+        cookies=cookies,
+        version=version,
+        timeout=timeout,
+        connector=connector,
+        connector_owner=connector_owner,
+    )
+
+    return _SessionRequestContextManager(
+        session._request(
+            method,
+            url,
+            params=params,
+            data=data,
+            json=json,
+            headers=headers,
+            skip_auto_headers=skip_auto_headers,
+            auth=auth,
+            allow_redirects=allow_redirects,
+            max_redirects=max_redirects,
+            compress=compress,
+            chunked=chunked,
+            expect100=expect100,
+            raise_for_status=raise_for_status,
+            read_until_eof=read_until_eof,
+            proxy=proxy,
+            proxy_auth=proxy_auth,
+            read_bufsize=read_bufsize,
+        ),
+        session,
+    )
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client_exceptions.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd55321054c6ce17b4dcee52a3288eca25ef22af
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client_exceptions.py
@@ -0,0 +1,342 @@
+"""HTTP related errors."""
+
+import asyncio
+import warnings
+from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
+
+from .http_parser import RawResponseMessage
+from .typedefs import LooseHeaders
+
+try:
+    import ssl
+
+    SSLContext = ssl.SSLContext
+except ImportError:  # pragma: no cover
+    ssl = SSLContext = None  # type: ignore[assignment]
+
+
+if TYPE_CHECKING:  # pragma: no cover
+    from .client_reqrep import ClientResponse, ConnectionKey, Fingerprint, RequestInfo
+else:
+    RequestInfo = ClientResponse = ConnectionKey = None
+
+__all__ = (
+    "ClientError",
+    "ClientConnectionError",
+    "ClientOSError",
+    "ClientConnectorError",
+    "ClientProxyConnectionError",
+    "ClientSSLError",
+    "ClientConnectorSSLError",
+    "ClientConnectorCertificateError",
+    "ServerConnectionError",
+    "ServerTimeoutError",
+    "ServerDisconnectedError",
+    "ServerFingerprintMismatch",
+    "ClientResponseError",
+    "ClientHttpProxyError",
+    "WSServerHandshakeError",
+    "ContentTypeError",
+    "ClientPayloadError",
+    "InvalidURL",
+)
+
+
+class ClientError(Exception):
+    """Base class for client connection errors."""
+
+
+class ClientResponseError(ClientError):
+    """Connection error during reading response.
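+
+    Serves as the base for the status-carrying errors defined below
+    (ContentTypeError, WSServerHandshakeError, ClientHttpProxyError,
+    TooManyRedirects).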
+ + request_info: instance of RequestInfo + """ + + def __init__( + self, + request_info: RequestInfo, + history: Tuple[ClientResponse, ...], + *, + code: Optional[int] = None, + status: Optional[int] = None, + message: str = "", + headers: Optional[LooseHeaders] = None, + ) -> None: + self.request_info = request_info + if code is not None: + if status is not None: + raise ValueError( + "Both code and status arguments are provided; " + "code is deprecated, use status instead" + ) + warnings.warn( + "code argument is deprecated, use status instead", + DeprecationWarning, + stacklevel=2, + ) + if status is not None: + self.status = status + elif code is not None: + self.status = code + else: + self.status = 0 + self.message = message + self.headers = headers + self.history = history + self.args = (request_info, history) + + def __str__(self) -> str: + return "{}, message={!r}, url={!r}".format( + self.status, + self.message, + self.request_info.real_url, + ) + + def __repr__(self) -> str: + args = f"{self.request_info!r}, {self.history!r}" + if self.status != 0: + args += f", status={self.status!r}" + if self.message != "": + args += f", message={self.message!r}" + if self.headers is not None: + args += f", headers={self.headers!r}" + return f"{type(self).__name__}({args})" + + @property + def code(self) -> int: + warnings.warn( + "code property is deprecated, use status instead", + DeprecationWarning, + stacklevel=2, + ) + return self.status + + @code.setter + def code(self, value: int) -> None: + warnings.warn( + "code property is deprecated, use status instead", + DeprecationWarning, + stacklevel=2, + ) + self.status = value + + +class ContentTypeError(ClientResponseError): + """ContentType found is not valid.""" + + +class WSServerHandshakeError(ClientResponseError): + """websocket server handshake error.""" + + +class ClientHttpProxyError(ClientResponseError): + """HTTP proxy error. + + Raised in :class:`aiohttp.connector.TCPConnector` if + proxy responds with status other than ``200 OK`` + on ``CONNECT`` request. + """ + + +class TooManyRedirects(ClientResponseError): + """Client was redirected too many times.""" + + +class ClientConnectionError(ClientError): + """Base class for client socket errors.""" + + +class ClientOSError(ClientConnectionError, OSError): + """OSError error.""" + + +class ClientConnectorError(ClientOSError): + """Client connector error. + + Raised in :class:`aiohttp.connector.TCPConnector` if + connection to proxy can not be established. + """ + + def __init__(self, connection_key: ConnectionKey, os_error: OSError) -> None: + self._conn_key = connection_key + self._os_error = os_error + super().__init__(os_error.errno, os_error.strerror) + self.args = (connection_key, os_error) + + @property + def os_error(self) -> OSError: + return self._os_error + + @property + def host(self) -> str: + return self._conn_key.host + + @property + def port(self) -> Optional[int]: + return self._conn_key.port + + @property + def ssl(self) -> Union[SSLContext, None, bool, "Fingerprint"]: + return self._conn_key.ssl + + def __str__(self) -> str: + return "Cannot connect to host {0.host}:{0.port} ssl:{1} [{2}]".format( + self, self.ssl if self.ssl is not None else "default", self.strerror + ) + + # OSError.__reduce__ does too much black magick + __reduce__ = BaseException.__reduce__ + + +class ClientProxyConnectionError(ClientConnectorError): + """Proxy connection error. + + Raised in :class:`aiohttp.connector.TCPConnector` if + connection to proxy can not be established. 
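+
+    (Unlike ClientHttpProxyError above, which is raised when the proxy is
+    reachable but answers the ``CONNECT`` request with a non-200 status.)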
+ """ + + +class UnixClientConnectorError(ClientConnectorError): + """Unix connector error. + + Raised in :py:class:`aiohttp.connector.UnixConnector` + if connection to unix socket can not be established. + """ + + def __init__( + self, path: str, connection_key: ConnectionKey, os_error: OSError + ) -> None: + self._path = path + super().__init__(connection_key, os_error) + + @property + def path(self) -> str: + return self._path + + def __str__(self) -> str: + return "Cannot connect to unix socket {0.path} ssl:{1} [{2}]".format( + self, self.ssl if self.ssl is not None else "default", self.strerror + ) + + +class ServerConnectionError(ClientConnectionError): + """Server connection errors.""" + + +class ServerDisconnectedError(ServerConnectionError): + """Server disconnected.""" + + def __init__(self, message: Union[RawResponseMessage, str, None] = None) -> None: + if message is None: + message = "Server disconnected" + + self.args = (message,) + self.message = message + + +class ServerTimeoutError(ServerConnectionError, asyncio.TimeoutError): + """Server timeout error.""" + + +class ServerFingerprintMismatch(ServerConnectionError): + """SSL certificate does not match expected fingerprint.""" + + def __init__(self, expected: bytes, got: bytes, host: str, port: int) -> None: + self.expected = expected + self.got = got + self.host = host + self.port = port + self.args = (expected, got, host, port) + + def __repr__(self) -> str: + return "<{} expected={!r} got={!r} host={!r} port={!r}>".format( + self.__class__.__name__, self.expected, self.got, self.host, self.port + ) + + +class ClientPayloadError(ClientError): + """Response payload error.""" + + +class InvalidURL(ClientError, ValueError): + """Invalid URL. + + URL used for fetching is malformed, e.g. it doesn't contains host + part. 
+ """ + + # Derive from ValueError for backward compatibility + + def __init__(self, url: Any) -> None: + # The type of url is not yarl.URL because the exception can be raised + # on URL(url) call + super().__init__(url) + + @property + def url(self) -> Any: + return self.args[0] + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.url}>" + + +class ClientSSLError(ClientConnectorError): + """Base error for ssl.*Errors.""" + + +if ssl is not None: + cert_errors = (ssl.CertificateError,) + cert_errors_bases = ( + ClientSSLError, + ssl.CertificateError, + ) + + ssl_errors = (ssl.SSLError,) + ssl_error_bases = (ClientSSLError, ssl.SSLError) +else: # pragma: no cover + cert_errors = tuple() + cert_errors_bases = ( + ClientSSLError, + ValueError, + ) + + ssl_errors = tuple() + ssl_error_bases = (ClientSSLError,) + + +class ClientConnectorSSLError(*ssl_error_bases): # type: ignore[misc] + """Response ssl error.""" + + +class ClientConnectorCertificateError(*cert_errors_bases): # type: ignore[misc] + """Response certificate error.""" + + def __init__( + self, connection_key: ConnectionKey, certificate_error: Exception + ) -> None: + self._conn_key = connection_key + self._certificate_error = certificate_error + self.args = (connection_key, certificate_error) + + @property + def certificate_error(self) -> Exception: + return self._certificate_error + + @property + def host(self) -> str: + return self._conn_key.host + + @property + def port(self) -> Optional[int]: + return self._conn_key.port + + @property + def ssl(self) -> bool: + return self._conn_key.is_ssl + + def __str__(self) -> str: + return ( + "Cannot connect to host {0.host}:{0.port} ssl:{0.ssl} " + "[{0.certificate_error.__class__.__name__}: " + "{0.certificate_error.args}]".format(self) + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client_ws.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..7c8121f659f06d2bf5f3685c556dcd97a4b922b5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/client_ws.py @@ -0,0 +1,300 @@ +"""WebSocket client for asyncio.""" + +import asyncio +from typing import Any, Optional, cast + +import async_timeout + +from .client_exceptions import ClientError +from .client_reqrep import ClientResponse +from .helpers import call_later, set_result +from .http import ( + WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE, + WebSocketError, + WSCloseCode, + WSMessage, + WSMsgType, +) +from .http_websocket import WebSocketWriter # WSMessage +from .streams import EofStream, FlowControlDataQueue +from .typedefs import ( + DEFAULT_JSON_DECODER, + DEFAULT_JSON_ENCODER, + JSONDecoder, + JSONEncoder, +) + + +class ClientWebSocketResponse: + def __init__( + self, + reader: "FlowControlDataQueue[WSMessage]", + writer: WebSocketWriter, + protocol: Optional[str], + response: ClientResponse, + timeout: float, + autoclose: bool, + autoping: bool, + loop: asyncio.AbstractEventLoop, + *, + receive_timeout: Optional[float] = None, + heartbeat: Optional[float] = None, + compress: int = 0, + client_notakeover: bool = False, + ) -> None: + self._response = response + self._conn = response.connection + + self._writer = writer + self._reader = reader + self._protocol = protocol + self._closed = False + self._closing = False + self._close_code = None # type: Optional[int] + self._timeout = timeout + self._receive_timeout = 
receive_timeout + self._autoclose = autoclose + self._autoping = autoping + self._heartbeat = heartbeat + self._heartbeat_cb: Optional[asyncio.TimerHandle] = None + if heartbeat is not None: + self._pong_heartbeat = heartbeat / 2.0 + self._pong_response_cb: Optional[asyncio.TimerHandle] = None + self._loop = loop + self._waiting = None # type: Optional[asyncio.Future[bool]] + self._exception = None # type: Optional[BaseException] + self._compress = compress + self._client_notakeover = client_notakeover + + self._reset_heartbeat() + + def _cancel_heartbeat(self) -> None: + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = None + + if self._heartbeat_cb is not None: + self._heartbeat_cb.cancel() + self._heartbeat_cb = None + + def _reset_heartbeat(self) -> None: + self._cancel_heartbeat() + + if self._heartbeat is not None: + self._heartbeat_cb = call_later( + self._send_heartbeat, self._heartbeat, self._loop + ) + + def _send_heartbeat(self) -> None: + if self._heartbeat is not None and not self._closed: + # fire-and-forget a task is not perfect but maybe ok for + # sending ping. Otherwise we need a long-living heartbeat + # task in the class. + self._loop.create_task(self._writer.ping()) + + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = call_later( + self._pong_not_received, self._pong_heartbeat, self._loop + ) + + def _pong_not_received(self) -> None: + if not self._closed: + self._closed = True + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = asyncio.TimeoutError() + self._response.close() + + @property + def closed(self) -> bool: + return self._closed + + @property + def close_code(self) -> Optional[int]: + return self._close_code + + @property + def protocol(self) -> Optional[str]: + return self._protocol + + @property + def compress(self) -> int: + return self._compress + + @property + def client_notakeover(self) -> bool: + return self._client_notakeover + + def get_extra_info(self, name: str, default: Any = None) -> Any: + """extra info from connection transport""" + conn = self._response.connection + if conn is None: + return default + transport = conn.transport + if transport is None: + return default + return transport.get_extra_info(name, default) + + def exception(self) -> Optional[BaseException]: + return self._exception + + async def ping(self, message: bytes = b"") -> None: + await self._writer.ping(message) + + async def pong(self, message: bytes = b"") -> None: + await self._writer.pong(message) + + async def send_str(self, data: str, compress: Optional[int] = None) -> None: + if not isinstance(data, str): + raise TypeError("data argument must be str (%r)" % type(data)) + await self._writer.send(data, binary=False, compress=compress) + + async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None: + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError("data argument must be byte-ish (%r)" % type(data)) + await self._writer.send(data, binary=True, compress=compress) + + async def send_json( + self, + data: Any, + compress: Optional[int] = None, + *, + dumps: JSONEncoder = DEFAULT_JSON_ENCODER, + ) -> None: + await self.send_str(dumps(data), compress=compress) + + async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool: + # we need to break `receive()` cycle first, + # `close()` may be called from different task + if self._waiting is not None and not self._closed: + 
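# Feeding a synthetic CLOSING message wakes the pending receive()
+            # call so that self._waiting is released before the close
+            # handshake below proceeds.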
self._reader.feed_data(WS_CLOSING_MESSAGE, 0) + await self._waiting + + if not self._closed: + self._cancel_heartbeat() + self._closed = True + try: + await self._writer.close(code, message) + except asyncio.CancelledError: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._response.close() + raise + except Exception as exc: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + self._response.close() + return True + + if self._closing: + self._response.close() + return True + + while True: + try: + async with async_timeout.timeout(self._timeout): + msg = await self._reader.read() + except asyncio.CancelledError: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._response.close() + raise + except Exception as exc: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + self._response.close() + return True + + if msg.type == WSMsgType.CLOSE: + self._close_code = msg.data + self._response.close() + return True + else: + return False + + async def receive(self, timeout: Optional[float] = None) -> WSMessage: + while True: + if self._waiting is not None: + raise RuntimeError("Concurrent call to receive() is not allowed") + + if self._closed: + return WS_CLOSED_MESSAGE + elif self._closing: + await self.close() + return WS_CLOSED_MESSAGE + + try: + self._waiting = self._loop.create_future() + try: + async with async_timeout.timeout(timeout or self._receive_timeout): + msg = await self._reader.read() + self._reset_heartbeat() + finally: + waiter = self._waiting + self._waiting = None + set_result(waiter, True) + except (asyncio.CancelledError, asyncio.TimeoutError): + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + raise + except EofStream: + self._close_code = WSCloseCode.OK + await self.close() + return WSMessage(WSMsgType.CLOSED, None, None) + except ClientError: + self._closed = True + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + return WS_CLOSED_MESSAGE + except WebSocketError as exc: + self._close_code = exc.code + await self.close(code=exc.code) + return WSMessage(WSMsgType.ERROR, exc, None) + except Exception as exc: + self._exception = exc + self._closing = True + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + await self.close() + return WSMessage(WSMsgType.ERROR, exc, None) + + if msg.type == WSMsgType.CLOSE: + self._closing = True + self._close_code = msg.data + if not self._closed and self._autoclose: + await self.close() + elif msg.type == WSMsgType.CLOSING: + self._closing = True + elif msg.type == WSMsgType.PING and self._autoping: + await self.pong(msg.data) + continue + elif msg.type == WSMsgType.PONG and self._autoping: + continue + + return msg + + async def receive_str(self, *, timeout: Optional[float] = None) -> str: + msg = await self.receive(timeout) + if msg.type != WSMsgType.TEXT: + raise TypeError(f"Received message {msg.type}:{msg.data!r} is not str") + return cast(str, msg.data) + + async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: + msg = await self.receive(timeout) + if msg.type != WSMsgType.BINARY: + raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes") + return cast(bytes, msg.data) + + async def receive_json( + self, + *, + loads: JSONDecoder = DEFAULT_JSON_DECODER, + timeout: Optional[float] = None, + ) -> Any: + data = await self.receive_str(timeout=timeout) + return loads(data) + + def __aiter__(self) -> "ClientWebSocketResponse": + return self + + async def __anext__(self) -> WSMessage: + msg = await self.receive() + if msg.type in (WSMsgType.CLOSE, 
WSMsgType.CLOSING, WSMsgType.CLOSED): + raise StopAsyncIteration + return msg diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/cookiejar.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/cookiejar.py new file mode 100644 index 0000000000000000000000000000000000000000..0a2656634d1b8160b968387f76caa651eab4cac0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/cookiejar.py @@ -0,0 +1,413 @@ +import asyncio +import contextlib +import datetime +import os # noqa +import pathlib +import pickle +import re +from collections import defaultdict +from http.cookies import BaseCookie, Morsel, SimpleCookie +from typing import ( # noqa + DefaultDict, + Dict, + Iterable, + Iterator, + List, + Mapping, + Optional, + Set, + Tuple, + Union, + cast, +) + +from yarl import URL + +from .abc import AbstractCookieJar, ClearCookiePredicate +from .helpers import is_ip_address, next_whole_second +from .typedefs import LooseCookies, PathLike, StrOrURL + +__all__ = ("CookieJar", "DummyCookieJar") + + +CookieItem = Union[str, "Morsel[str]"] + + +class CookieJar(AbstractCookieJar): + """Implements cookie storage adhering to RFC 6265.""" + + DATE_TOKENS_RE = re.compile( + r"[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*" + r"(?P[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)" + ) + + DATE_HMS_TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})") + + DATE_DAY_OF_MONTH_RE = re.compile(r"(\d{1,2})") + + DATE_MONTH_RE = re.compile( + "(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|" "(aug)|(sep)|(oct)|(nov)|(dec)", + re.I, + ) + + DATE_YEAR_RE = re.compile(r"(\d{2,4})") + + MAX_TIME = datetime.datetime.max.replace(tzinfo=datetime.timezone.utc) + + MAX_32BIT_TIME = datetime.datetime.utcfromtimestamp(2 ** 31 - 1) + + def __init__( + self, + *, + unsafe: bool = False, + quote_cookie: bool = True, + treat_as_secure_origin: Union[StrOrURL, List[StrOrURL], None] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + super().__init__(loop=loop) + self._cookies = defaultdict( + SimpleCookie + ) # type: DefaultDict[str, SimpleCookie[str]] + self._host_only_cookies = set() # type: Set[Tuple[str, str]] + self._unsafe = unsafe + self._quote_cookie = quote_cookie + if treat_as_secure_origin is None: + treat_as_secure_origin = [] + elif isinstance(treat_as_secure_origin, URL): + treat_as_secure_origin = [treat_as_secure_origin.origin()] + elif isinstance(treat_as_secure_origin, str): + treat_as_secure_origin = [URL(treat_as_secure_origin).origin()] + else: + treat_as_secure_origin = [ + URL(url).origin() if isinstance(url, str) else url.origin() + for url in treat_as_secure_origin + ] + self._treat_as_secure_origin = treat_as_secure_origin + self._next_expiration = next_whole_second() + self._expirations = {} # type: Dict[Tuple[str, str], datetime.datetime] + # #4515: datetime.max may not be representable on 32-bit platforms + self._max_time = self.MAX_TIME + try: + self._max_time.timestamp() + except OverflowError: + self._max_time = self.MAX_32BIT_TIME + + def save(self, file_path: PathLike) -> None: + file_path = pathlib.Path(file_path) + with file_path.open(mode="wb") as f: + pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL) + + def load(self, file_path: PathLike) -> None: + file_path = pathlib.Path(file_path) + with file_path.open(mode="rb") as f: + self._cookies = pickle.load(f) + + def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: + if predicate is None: + self._next_expiration = 
next_whole_second() + self._cookies.clear() + self._host_only_cookies.clear() + self._expirations.clear() + return + + to_del = [] + now = datetime.datetime.now(datetime.timezone.utc) + for domain, cookie in self._cookies.items(): + for name, morsel in cookie.items(): + key = (domain, name) + if ( + key in self._expirations and self._expirations[key] <= now + ) or predicate(morsel): + to_del.append(key) + + for domain, name in to_del: + key = (domain, name) + self._host_only_cookies.discard(key) + if key in self._expirations: + del self._expirations[(domain, name)] + self._cookies[domain].pop(name, None) + + next_expiration = min(self._expirations.values(), default=self._max_time) + try: + self._next_expiration = next_expiration.replace( + microsecond=0 + ) + datetime.timedelta(seconds=1) + except OverflowError: + self._next_expiration = self._max_time + + def clear_domain(self, domain: str) -> None: + self.clear(lambda x: self._is_domain_match(domain, x["domain"])) + + def __iter__(self) -> "Iterator[Morsel[str]]": + self._do_expiration() + for val in self._cookies.values(): + yield from val.values() + + def __len__(self) -> int: + return sum(1 for i in self) + + def _do_expiration(self) -> None: + self.clear(lambda x: False) + + def _expire_cookie(self, when: datetime.datetime, domain: str, name: str) -> None: + self._next_expiration = min(self._next_expiration, when) + self._expirations[(domain, name)] = when + + def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: + """Update cookies.""" + hostname = response_url.raw_host + + if not self._unsafe and is_ip_address(hostname): + # Don't accept cookies from IPs + return + + if isinstance(cookies, Mapping): + cookies = cookies.items() + + for name, cookie in cookies: + if not isinstance(cookie, Morsel): + tmp = SimpleCookie() # type: SimpleCookie[str] + tmp[name] = cookie # type: ignore[assignment] + cookie = tmp[name] + + domain = cookie["domain"] + + # ignore domains with trailing dots + if domain.endswith("."): + domain = "" + del cookie["domain"] + + if not domain and hostname is not None: + # Set the cookie's domain to the response hostname + # and set its host-only-flag + self._host_only_cookies.add((hostname, name)) + domain = cookie["domain"] = hostname + + if domain.startswith("."): + # Remove leading dot + domain = domain[1:] + cookie["domain"] = domain + + if hostname and not self._is_domain_match(domain, hostname): + # Setting cookies for different domains is not allowed + continue + + path = cookie["path"] + if not path or not path.startswith("/"): + # Set the cookie's path to the response path + path = response_url.path + if not path.startswith("/"): + path = "/" + else: + # Cut everything from the last slash to the end + path = "/" + path[1 : path.rfind("/")] + cookie["path"] = path + + max_age = cookie["max-age"] + if max_age: + try: + delta_seconds = int(max_age) + try: + max_age_expiration = datetime.datetime.now( + datetime.timezone.utc + ) + datetime.timedelta(seconds=delta_seconds) + except OverflowError: + max_age_expiration = self._max_time + self._expire_cookie(max_age_expiration, domain, name) + except ValueError: + cookie["max-age"] = "" + + else: + expires = cookie["expires"] + if expires: + expire_time = self._parse_date(expires) + if expire_time: + self._expire_cookie(expire_time, domain, name) + else: + cookie["expires"] = "" + + self._cookies[domain][name] = cookie + + self._do_expiration() + + def filter_cookies( + self, request_url: URL = URL() + ) -> Union["BaseCookie[str]", 
"SimpleCookie[str]"]: + """Returns this jar's cookies filtered by their attributes.""" + self._do_expiration() + request_url = URL(request_url) + filtered: Union["SimpleCookie[str]", "BaseCookie[str]"] = ( + SimpleCookie() if self._quote_cookie else BaseCookie() + ) + hostname = request_url.raw_host or "" + request_origin = URL() + with contextlib.suppress(ValueError): + request_origin = request_url.origin() + + is_not_secure = ( + request_url.scheme not in ("https", "wss") + and request_origin not in self._treat_as_secure_origin + ) + + for cookie in self: + name = cookie.key + domain = cookie["domain"] + + # Send shared cookies + if not domain: + filtered[name] = cookie.value + continue + + if not self._unsafe and is_ip_address(hostname): + continue + + if (domain, name) in self._host_only_cookies: + if domain != hostname: + continue + elif not self._is_domain_match(domain, hostname): + continue + + if not self._is_path_match(request_url.path, cookie["path"]): + continue + + if is_not_secure and cookie["secure"]: + continue + + # It's critical we use the Morsel so the coded_value + # (based on cookie version) is preserved + mrsl_val = cast("Morsel[str]", cookie.get(cookie.key, Morsel())) + mrsl_val.set(cookie.key, cookie.value, cookie.coded_value) + filtered[name] = mrsl_val + + return filtered + + @staticmethod + def _is_domain_match(domain: str, hostname: str) -> bool: + """Implements domain matching adhering to RFC 6265.""" + if hostname == domain: + return True + + if not hostname.endswith(domain): + return False + + non_matching = hostname[: -len(domain)] + + if not non_matching.endswith("."): + return False + + return not is_ip_address(hostname) + + @staticmethod + def _is_path_match(req_path: str, cookie_path: str) -> bool: + """Implements path matching adhering to RFC 6265.""" + if not req_path.startswith("/"): + req_path = "/" + + if req_path == cookie_path: + return True + + if not req_path.startswith(cookie_path): + return False + + if cookie_path.endswith("/"): + return True + + non_matching = req_path[len(cookie_path) :] + + return non_matching.startswith("/") + + @classmethod + def _parse_date(cls, date_str: str) -> Optional[datetime.datetime]: + """Implements date string parsing adhering to RFC 6265.""" + if not date_str: + return None + + found_time = False + found_day = False + found_month = False + found_year = False + + hour = minute = second = 0 + day = 0 + month = 0 + year = 0 + + for token_match in cls.DATE_TOKENS_RE.finditer(date_str): + + token = token_match.group("token") + + if not found_time: + time_match = cls.DATE_HMS_TIME_RE.match(token) + if time_match: + found_time = True + hour, minute, second = (int(s) for s in time_match.groups()) + continue + + if not found_day: + day_match = cls.DATE_DAY_OF_MONTH_RE.match(token) + if day_match: + found_day = True + day = int(day_match.group()) + continue + + if not found_month: + month_match = cls.DATE_MONTH_RE.match(token) + if month_match: + found_month = True + assert month_match.lastindex is not None + month = month_match.lastindex + continue + + if not found_year: + year_match = cls.DATE_YEAR_RE.match(token) + if year_match: + found_year = True + year = int(year_match.group()) + + if 70 <= year <= 99: + year += 1900 + elif 0 <= year <= 69: + year += 2000 + + if False in (found_day, found_month, found_year, found_time): + return None + + if not 1 <= day <= 31: + return None + + if year < 1601 or hour > 23 or minute > 59 or second > 59: + return None + + return datetime.datetime( + year, month, day, hour, minute, 
second, tzinfo=datetime.timezone.utc + ) + + +class DummyCookieJar(AbstractCookieJar): + """Implements a dummy cookie storage. + + It can be used with the ClientSession when no cookie processing is needed. + + """ + + def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: + super().__init__(loop=loop) + + def __iter__(self) -> "Iterator[Morsel[str]]": + while False: + yield None + + def __len__(self) -> int: + return 0 + + def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: + pass + + def clear_domain(self, domain: str) -> None: + pass + + def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: + pass + + def filter_cookies(self, request_url: URL) -> "BaseCookie[str]": + return SimpleCookie() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/helpers.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..536d2190126494f452001e4ea2196e18631d854d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/helpers.py @@ -0,0 +1,875 @@ +"""Various helper functions""" + +import asyncio +import base64 +import binascii +import cgi +import datetime +import functools +import inspect +import netrc +import os +import platform +import re +import sys +import time +import warnings +import weakref +from collections import namedtuple +from contextlib import suppress +from email.utils import parsedate +from math import ceil +from pathlib import Path +from types import TracebackType +from typing import ( + Any, + Callable, + ContextManager, + Dict, + Generator, + Generic, + Iterable, + Iterator, + List, + Mapping, + Optional, + Pattern, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, +) +from urllib.parse import quote +from urllib.request import getproxies, proxy_bypass + +import async_timeout +import attr +from multidict import MultiDict, MultiDictProxy +from yarl import URL + +from . import hdrs +from .log import client_logger, internal_logger +from .typedefs import PathLike, Protocol # noqa + +__all__ = ("BasicAuth", "ChainMapProxy", "ETag") + +IS_MACOS = platform.system() == "Darwin" +IS_WINDOWS = platform.system() == "Windows" + +PY_36 = sys.version_info >= (3, 6) +PY_37 = sys.version_info >= (3, 7) +PY_38 = sys.version_info >= (3, 8) +PY_310 = sys.version_info >= (3, 10) + +if sys.version_info < (3, 7): + import idna_ssl + + idna_ssl.patch_match_hostname() + + def all_tasks( + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> Set["asyncio.Task[Any]"]: + tasks = list(asyncio.Task.all_tasks(loop)) + return {t for t in tasks if not t.done()} + + +else: + all_tasks = asyncio.all_tasks + + +_T = TypeVar("_T") +_S = TypeVar("_S") + + +sentinel = object() # type: Any +NO_EXTENSIONS = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS")) # type: bool + +# N.B. 
sys.flags.dev_mode is available on Python 3.7+, use getattr +# for compatibility with older versions +DEBUG = getattr(sys.flags, "dev_mode", False) or ( + not sys.flags.ignore_environment and bool(os.environ.get("PYTHONASYNCIODEBUG")) +) # type: bool + + +CHAR = {chr(i) for i in range(0, 128)} +CTL = {chr(i) for i in range(0, 32)} | { + chr(127), +} +SEPARATORS = { + "(", + ")", + "<", + ">", + "@", + ",", + ";", + ":", + "\\", + '"', + "/", + "[", + "]", + "?", + "=", + "{", + "}", + " ", + chr(9), +} +TOKEN = CHAR ^ CTL ^ SEPARATORS + + +class noop: + def __await__(self) -> Generator[None, None, None]: + yield + + +class BasicAuth(namedtuple("BasicAuth", ["login", "password", "encoding"])): + """Http basic authentication helper.""" + + def __new__( + cls, login: str, password: str = "", encoding: str = "latin1" + ) -> "BasicAuth": + if login is None: + raise ValueError("None is not allowed as login value") + + if password is None: + raise ValueError("None is not allowed as password value") + + if ":" in login: + raise ValueError('A ":" is not allowed in login (RFC 1945#section-11.1)') + + return super().__new__(cls, login, password, encoding) + + @classmethod + def decode(cls, auth_header: str, encoding: str = "latin1") -> "BasicAuth": + """Create a BasicAuth object from an Authorization HTTP header.""" + try: + auth_type, encoded_credentials = auth_header.split(" ", 1) + except ValueError: + raise ValueError("Could not parse authorization header.") + + if auth_type.lower() != "basic": + raise ValueError("Unknown authorization method %s" % auth_type) + + try: + decoded = base64.b64decode( + encoded_credentials.encode("ascii"), validate=True + ).decode(encoding) + except binascii.Error: + raise ValueError("Invalid base64 encoding.") + + try: + # RFC 2617 HTTP Authentication + # https://www.ietf.org/rfc/rfc2617.txt + # the colon must be present, but the username and password may be + # otherwise blank. + username, password = decoded.split(":", 1) + except ValueError: + raise ValueError("Invalid credentials.") + + return cls(username, password, encoding=encoding) + + @classmethod + def from_url(cls, url: URL, *, encoding: str = "latin1") -> Optional["BasicAuth"]: + """Create BasicAuth from url.""" + if not isinstance(url, URL): + raise TypeError("url should be yarl.URL instance") + if url.user is None: + return None + return cls(url.user, url.password or "", encoding=encoding) + + def encode(self) -> str: + """Encode credentials.""" + creds = (f"{self.login}:{self.password}").encode(self.encoding) + return "Basic %s" % base64.b64encode(creds).decode(self.encoding) + + +def strip_auth_from_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]: + auth = BasicAuth.from_url(url) + if auth is None: + return url, None + else: + return url.with_user(None), auth + + +def netrc_from_env() -> Optional[netrc.netrc]: + """Load netrc from file. + + Attempt to load it from the path specified by the env-var + NETRC or in the default location in the user's home directory. + + Returns None if it couldn't be found or fails to parse. 
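+
+    A usage sketch (the path is an illustrative placeholder)::
+
+        os.environ["NETRC"] = "/tmp/my-netrc"
+        netrc_obj = netrc_from_env()  # parses /tmp/my-netrc, or returns None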
+ """ + netrc_env = os.environ.get("NETRC") + + if netrc_env is not None: + netrc_path = Path(netrc_env) + else: + try: + home_dir = Path.home() + except RuntimeError as e: # pragma: no cover + # if pathlib can't resolve home, it may raise a RuntimeError + client_logger.debug( + "Could not resolve home directory when " + "trying to look for .netrc file: %s", + e, + ) + return None + + netrc_path = home_dir / ("_netrc" if IS_WINDOWS else ".netrc") + + try: + return netrc.netrc(str(netrc_path)) + except netrc.NetrcParseError as e: + client_logger.warning("Could not parse .netrc file: %s", e) + except OSError as e: + # we couldn't read the file (doesn't exist, permissions, etc.) + if netrc_env or netrc_path.is_file(): + # only warn if the environment wanted us to load it, + # or it appears like the default file does actually exist + client_logger.warning("Could not read .netrc file: %s", e) + + return None + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ProxyInfo: + proxy: URL + proxy_auth: Optional[BasicAuth] + + +def proxies_from_env() -> Dict[str, ProxyInfo]: + proxy_urls = { + k: URL(v) + for k, v in getproxies().items() + if k in ("http", "https", "ws", "wss") + } + netrc_obj = netrc_from_env() + stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()} + ret = {} + for proto, val in stripped.items(): + proxy, auth = val + if proxy.scheme in ("https", "wss"): + client_logger.warning( + "%s proxies %s are not supported, ignoring", proxy.scheme.upper(), proxy + ) + continue + if netrc_obj and auth is None: + auth_from_netrc = None + if proxy.host is not None: + auth_from_netrc = netrc_obj.authenticators(proxy.host) + if auth_from_netrc is not None: + # auth_from_netrc is a (`user`, `account`, `password`) tuple, + # `user` and `account` both can be username, + # if `user` is None, use `account` + *logins, password = auth_from_netrc + login = logins[0] if logins[0] else logins[-1] + auth = BasicAuth(cast(str, login), cast(str, password)) + ret[proto] = ProxyInfo(proxy, auth) + return ret + + +def current_task( + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> "Optional[asyncio.Task[Any]]": + if sys.version_info >= (3, 7): + return asyncio.current_task(loop=loop) + else: + return asyncio.Task.current_task(loop=loop) + + +def get_running_loop( + loop: Optional[asyncio.AbstractEventLoop] = None, +) -> asyncio.AbstractEventLoop: + if loop is None: + loop = asyncio.get_event_loop() + if not loop.is_running(): + warnings.warn( + "The object should be created within an async function", + DeprecationWarning, + stacklevel=3, + ) + if loop.get_debug(): + internal_logger.warning( + "The object should be created within an async function", stack_info=True + ) + return loop + + +def isasyncgenfunction(obj: Any) -> bool: + func = getattr(inspect, "isasyncgenfunction", None) + if func is not None: + return func(obj) # type: ignore[no-any-return] + else: + return False + + +def get_env_proxy_for_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]: + """Get a permitted proxy for the given URL from the env.""" + if url.host is not None and proxy_bypass(url.host): + raise LookupError(f"Proxying is disallowed for `{url.host!r}`") + + proxies_in_env = proxies_from_env() + try: + proxy_info = proxies_in_env[url.scheme] + except KeyError: + raise LookupError(f"No proxies found for `{url!s}` in the env") + else: + return proxy_info.proxy, proxy_info.proxy_auth + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class MimeType: + type: str + subtype: str + suffix: str + 
parameters: "MultiDictProxy[str]" + + +@functools.lru_cache(maxsize=56) +def parse_mimetype(mimetype: str) -> MimeType: + """Parses a MIME type into its components. + + mimetype is a MIME type string. + + Returns a MimeType object. + + Example: + + >>> parse_mimetype('text/html; charset=utf-8') + MimeType(type='text', subtype='html', suffix='', + parameters={'charset': 'utf-8'}) + + """ + if not mimetype: + return MimeType( + type="", subtype="", suffix="", parameters=MultiDictProxy(MultiDict()) + ) + + parts = mimetype.split(";") + params = MultiDict() # type: MultiDict[str] + for item in parts[1:]: + if not item: + continue + key, value = cast( + Tuple[str, str], item.split("=", 1) if "=" in item else (item, "") + ) + params.add(key.lower().strip(), value.strip(' "')) + + fulltype = parts[0].strip().lower() + if fulltype == "*": + fulltype = "*/*" + + mtype, stype = ( + cast(Tuple[str, str], fulltype.split("/", 1)) + if "/" in fulltype + else (fulltype, "") + ) + stype, suffix = ( + cast(Tuple[str, str], stype.split("+", 1)) if "+" in stype else (stype, "") + ) + + return MimeType( + type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params) + ) + + +def guess_filename(obj: Any, default: Optional[str] = None) -> Optional[str]: + name = getattr(obj, "name", None) + if name and isinstance(name, str) and name[0] != "<" and name[-1] != ">": + return Path(name).name + return default + + +not_qtext_re = re.compile(r"[^\041\043-\133\135-\176]") +QCONTENT = {chr(i) for i in range(0x20, 0x7F)} | {"\t"} + + +def quoted_string(content: str) -> str: + """Return 7-bit content as quoted-string. + + Format content into a quoted-string as defined in RFC5322 for + Internet Message Format. Notice that this is not the 8-bit HTTP + format, but the 7-bit email format. Content must be in usascii or + a ValueError is raised. + """ + if not (QCONTENT > set(content)): + raise ValueError(f"bad content for quoted-string {content!r}") + return not_qtext_re.sub(lambda x: "\\" + x.group(0), content) + + +def content_disposition_header( + disptype: str, quote_fields: bool = True, _charset: str = "utf-8", **params: str +) -> str: + """Sets ``Content-Disposition`` header for MIME. + + This is the MIME payload Content-Disposition header from RFC 2183 + and RFC 7579 section 4.2, not the HTTP Content-Disposition from + RFC 6266. + + disptype is a disposition type: inline, attachment, form-data. + Should be valid extension token (see RFC 2183) + + quote_fields performs value quoting to 7-bit MIME headers + according to RFC 7578. Set to quote_fields to False if recipient + can take 8-bit file names and field values. + + _charset specifies the charset to use when quote_fields is True. + + params is a dict with disposition params. 
+ """ + if not disptype or not (TOKEN > set(disptype)): + raise ValueError("bad content disposition type {!r}" "".format(disptype)) + + value = disptype + if params: + lparams = [] + for key, val in params.items(): + if not key or not (TOKEN > set(key)): + raise ValueError( + "bad content disposition parameter" " {!r}={!r}".format(key, val) + ) + if quote_fields: + if key.lower() == "filename": + qval = quote(val, "", encoding=_charset) + lparams.append((key, '"%s"' % qval)) + else: + try: + qval = quoted_string(val) + except ValueError: + qval = "".join( + (_charset, "''", quote(val, "", encoding=_charset)) + ) + lparams.append((key + "*", qval)) + else: + lparams.append((key, '"%s"' % qval)) + else: + qval = val.replace("\\", "\\\\").replace('"', '\\"') + lparams.append((key, '"%s"' % qval)) + sparams = "; ".join("=".join(pair) for pair in lparams) + value = "; ".join((value, sparams)) + return value + + +class _TSelf(Protocol, Generic[_T]): + _cache: Dict[str, _T] + + +class reify(Generic[_T]): + """Use as a class method decorator. + + It operates almost exactly like + the Python `@property` decorator, but it puts the result of the + method it decorates into the instance dict after the first call, + effectively replacing the function it decorates with an instance + variable. It is, in Python parlance, a data descriptor. + """ + + def __init__(self, wrapped: Callable[..., _T]) -> None: + self.wrapped = wrapped + self.__doc__ = wrapped.__doc__ + self.name = wrapped.__name__ + + def __get__(self, inst: _TSelf[_T], owner: Optional[Type[Any]] = None) -> _T: + try: + try: + return inst._cache[self.name] + except KeyError: + val = self.wrapped(inst) + inst._cache[self.name] = val + return val + except AttributeError: + if inst is None: + return self + raise + + def __set__(self, inst: _TSelf[_T], value: _T) -> None: + raise AttributeError("reified property is read-only") + + +reify_py = reify + +try: + from ._helpers import reify as reify_c + + if not NO_EXTENSIONS: + reify = reify_c # type: ignore[misc,assignment] +except ImportError: + pass + +_ipv4_pattern = ( + r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}" + r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" +) +_ipv6_pattern = ( + r"^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}" + r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)" + r"((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})" + r"(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}" + r"(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}" + r"[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)" + r"(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}" + r":|:(:[A-F0-9]{1,4}){7})$" +) +_ipv4_regex = re.compile(_ipv4_pattern) +_ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE) +_ipv4_regexb = re.compile(_ipv4_pattern.encode("ascii")) +_ipv6_regexb = re.compile(_ipv6_pattern.encode("ascii"), flags=re.IGNORECASE) + + +def _is_ip_address( + regex: Pattern[str], regexb: Pattern[bytes], host: Optional[Union[str, bytes]] +) -> bool: + if host is None: + return False + if isinstance(host, str): + return bool(regex.match(host)) + elif isinstance(host, (bytes, bytearray, memoryview)): + return bool(regexb.match(host)) + else: + raise TypeError(f"{host} [{type(host)}] is not a str or bytes") + + +is_ipv4_address = functools.partial(_is_ip_address, _ipv4_regex, _ipv4_regexb) +is_ipv6_address = functools.partial(_is_ip_address, _ipv6_regex, _ipv6_regexb) + + +def is_ip_address(host: Optional[Union[str, bytes, 
bytearray, memoryview]]) -> bool:
+    return is_ipv4_address(host) or is_ipv6_address(host)
+
+
+def next_whole_second() -> datetime.datetime:
+    """Return current time rounded up to the next whole second."""
+    return datetime.datetime.now(datetime.timezone.utc).replace(
+        microsecond=0
+    ) + datetime.timedelta(seconds=1)  # truncate, then advance to the next second
+
+
+_cached_current_datetime = None  # type: Optional[int]
+_cached_formatted_datetime = ""
+
+
+def rfc822_formatted_time() -> str:
+    global _cached_current_datetime
+    global _cached_formatted_datetime
+
+    now = int(time.time())
+    if now != _cached_current_datetime:
+        # Weekday and month names for HTTP date/time formatting;
+        # always English!
+        # Tuples are constants stored in codeobject!
+        _weekdayname = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
+        _monthname = (
+            "",  # Dummy so we can use 1-based month numbers
+            "Jan",
+            "Feb",
+            "Mar",
+            "Apr",
+            "May",
+            "Jun",
+            "Jul",
+            "Aug",
+            "Sep",
+            "Oct",
+            "Nov",
+            "Dec",
+        )
+
+        year, month, day, hh, mm, ss, wd, *tail = time.gmtime(now)
+        _cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+            _weekdayname[wd],
+            day,
+            _monthname[month],
+            year,
+            hh,
+            mm,
+            ss,
+        )
+        _cached_current_datetime = now
+    return _cached_formatted_datetime
+
+
+def _weakref_handle(info: "Tuple[weakref.ref[object], str]") -> None:
+    ref, name = info
+    ob = ref()
+    if ob is not None:
+        with suppress(Exception):
+            getattr(ob, name)()
+
+
+def weakref_handle(
+    ob: object, name: str, timeout: float, loop: asyncio.AbstractEventLoop
+) -> Optional[asyncio.TimerHandle]:
+    if timeout is not None and timeout > 0:
+        when = loop.time() + timeout
+        if timeout >= 5:
+            when = ceil(when)
+
+        return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name))
+    return None
+
+
+def call_later(
+    cb: Callable[[], Any], timeout: float, loop: asyncio.AbstractEventLoop
+) -> Optional[asyncio.TimerHandle]:
+    if timeout is not None and timeout > 0:
+        when = loop.time() + timeout
+        if timeout > 5:
+            when = ceil(when)
+        return loop.call_at(when, cb)
+    return None
+
+
+class TimeoutHandle:
+    """Timeout handle"""
+
+    def __init__(
+        self, loop: asyncio.AbstractEventLoop, timeout: Optional[float]
+    ) -> None:
+        self._timeout = timeout
+        self._loop = loop
+        self._callbacks = (
+            []
+        )  # type: List[Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]]
+
+    def register(
+        self, callback: Callable[..., None], *args: Any, **kwargs: Any
+    ) -> None:
+        self._callbacks.append((callback, args, kwargs))
+
+    def close(self) -> None:
+        self._callbacks.clear()
+
+    def start(self) -> Optional[asyncio.Handle]:
+        timeout = self._timeout
+        if timeout is not None and timeout > 0:
+            when = self._loop.time() + timeout
+            if timeout >= 5:
+                when = ceil(when)
+            return self._loop.call_at(when, self.__call__)
+        else:
+            return None
+
+    def timer(self) -> "BaseTimerContext":
+        if self._timeout is not None and self._timeout > 0:
+            timer = TimerContext(self._loop)
+            self.register(timer.timeout)
+            return timer
+        else:
+            return TimerNoop()
+
+    def __call__(self) -> None:
+        for cb, args, kwargs in self._callbacks:
+            with suppress(Exception):
+                cb(*args, **kwargs)
+
+        self._callbacks.clear()
+
+
+class BaseTimerContext(ContextManager["BaseTimerContext"]):
+    pass
+
+
+class TimerNoop(BaseTimerContext):
+    def __enter__(self) -> BaseTimerContext:
+        return self
+
+    def __exit__(
+        self,
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
+        exc_tb: Optional[TracebackType],
+    ) -> None:
+        return
+
+
+class TimerContext(BaseTimerContext):
+    """Low
resolution timeout context manager""" + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop = loop + self._tasks = [] # type: List[asyncio.Task[Any]] + self._cancelled = False + + def __enter__(self) -> BaseTimerContext: + task = current_task(loop=self._loop) + + if task is None: + raise RuntimeError( + "Timeout context manager should be used " "inside a task" + ) + + if self._cancelled: + raise asyncio.TimeoutError from None + + self._tasks.append(task) + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + if self._tasks: + self._tasks.pop() + + if exc_type is asyncio.CancelledError and self._cancelled: + raise asyncio.TimeoutError from None + return None + + def timeout(self) -> None: + if not self._cancelled: + for task in set(self._tasks): + task.cancel() + + self._cancelled = True + + +def ceil_timeout(delay: Optional[float]) -> async_timeout.Timeout: + if delay is None or delay <= 0: + return async_timeout.timeout(None) + + loop = get_running_loop() + now = loop.time() + when = now + delay + if delay > 5: + when = ceil(when) + return async_timeout.timeout_at(when) + + +class HeadersMixin: + + ATTRS = frozenset(["_content_type", "_content_dict", "_stored_content_type"]) + + _content_type = None # type: Optional[str] + _content_dict = None # type: Optional[Dict[str, str]] + _stored_content_type = sentinel + + def _parse_content_type(self, raw: str) -> None: + self._stored_content_type = raw + if raw is None: + # default value according to RFC 2616 + self._content_type = "application/octet-stream" + self._content_dict = {} + else: + self._content_type, self._content_dict = cgi.parse_header(raw) + + @property + def content_type(self) -> str: + """The value of content part for Content-Type HTTP header.""" + raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined] + if self._stored_content_type != raw: + self._parse_content_type(raw) + return self._content_type # type: ignore[return-value] + + @property + def charset(self) -> Optional[str]: + """The value of charset part for Content-Type HTTP header.""" + raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined] + if self._stored_content_type != raw: + self._parse_content_type(raw) + return self._content_dict.get("charset") # type: ignore[union-attr] + + @property + def content_length(self) -> Optional[int]: + """The value of Content-Length HTTP header.""" + content_length = self._headers.get( # type: ignore[attr-defined] + hdrs.CONTENT_LENGTH + ) + + if content_length is not None: + return int(content_length) + else: + return None + + +def set_result(fut: "asyncio.Future[_T]", result: _T) -> None: + if not fut.done(): + fut.set_result(result) + + +def set_exception(fut: "asyncio.Future[_T]", exc: BaseException) -> None: + if not fut.done(): + fut.set_exception(exc) + + +class ChainMapProxy(Mapping[str, Any]): + __slots__ = ("_maps",) + + def __init__(self, maps: Iterable[Mapping[str, Any]]) -> None: + self._maps = tuple(maps) + + def __init_subclass__(cls) -> None: + raise TypeError( + "Inheritance class {} from ChainMapProxy " + "is forbidden".format(cls.__name__) + ) + + def __getitem__(self, key: str) -> Any: + for mapping in self._maps: + try: + return mapping[key] + except KeyError: + pass + raise KeyError(key) + + def get(self, key: str, default: Any = None) -> Any: + return self[key] if key in self else default + + def __len__(self) -> int: + # reuses 
stored hash values if possible + return len(set().union(*self._maps)) # type: ignore[arg-type] + + def __iter__(self) -> Iterator[str]: + d = {} # type: Dict[str, Any] + for mapping in reversed(self._maps): + # reuses stored hash values if possible + d.update(mapping) + return iter(d) + + def __contains__(self, key: object) -> bool: + return any(key in m for m in self._maps) + + def __bool__(self) -> bool: + return any(self._maps) + + def __repr__(self) -> str: + content = ", ".join(map(repr, self._maps)) + return f"ChainMapProxy({content})" + + +# https://tools.ietf.org/html/rfc7232#section-2.3 +_ETAGC = r"[!#-}\x80-\xff]+" +_ETAGC_RE = re.compile(_ETAGC) +_QUOTED_ETAG = fr'(W/)?"({_ETAGC})"' +QUOTED_ETAG_RE = re.compile(_QUOTED_ETAG) +LIST_QUOTED_ETAG_RE = re.compile(fr"({_QUOTED_ETAG})(?:\s*,\s*|$)|(.)") + +ETAG_ANY = "*" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class ETag: + value: str + is_weak: bool = False + + +def validate_etag_value(value: str) -> None: + if value != ETAG_ANY and not _ETAGC_RE.fullmatch(value): + raise ValueError( + f"Value {value!r} is not a valid etag. Maybe it contains '\"'?" + ) + + +def parse_http_date(date_str: Optional[str]) -> Optional[datetime.datetime]: + """Process a date string, return a datetime object""" + if date_str is not None: + timetuple = parsedate(date_str) + if timetuple is not None: + with suppress(ValueError): + return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc) + return None diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/http.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/http.py new file mode 100644 index 0000000000000000000000000000000000000000..415ffbf563bc2b2c5c13736938af4218d2343824 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/http.py @@ -0,0 +1,72 @@ +import http.server +import sys +from typing import Mapping, Tuple + +from . 
import __version__ +from .http_exceptions import HttpProcessingError as HttpProcessingError +from .http_parser import ( + HeadersParser as HeadersParser, + HttpParser as HttpParser, + HttpRequestParser as HttpRequestParser, + HttpResponseParser as HttpResponseParser, + RawRequestMessage as RawRequestMessage, + RawResponseMessage as RawResponseMessage, +) +from .http_websocket import ( + WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE, + WS_KEY as WS_KEY, + WebSocketError as WebSocketError, + WebSocketReader as WebSocketReader, + WebSocketWriter as WebSocketWriter, + WSCloseCode as WSCloseCode, + WSMessage as WSMessage, + WSMsgType as WSMsgType, + ws_ext_gen as ws_ext_gen, + ws_ext_parse as ws_ext_parse, +) +from .http_writer import ( + HttpVersion as HttpVersion, + HttpVersion10 as HttpVersion10, + HttpVersion11 as HttpVersion11, + StreamWriter as StreamWriter, +) + +__all__ = ( + "HttpProcessingError", + "RESPONSES", + "SERVER_SOFTWARE", + # .http_writer + "StreamWriter", + "HttpVersion", + "HttpVersion10", + "HttpVersion11", + # .http_parser + "HeadersParser", + "HttpParser", + "HttpRequestParser", + "HttpResponseParser", + "RawRequestMessage", + "RawResponseMessage", + # .http_websocket + "WS_CLOSED_MESSAGE", + "WS_CLOSING_MESSAGE", + "WS_KEY", + "WebSocketReader", + "WebSocketWriter", + "ws_ext_gen", + "ws_ext_parse", + "WSMessage", + "WebSocketError", + "WSMsgType", + "WSCloseCode", +) + + +SERVER_SOFTWARE = "Python/{0[0]}.{0[1]} aiohttp/{1}".format( + sys.version_info, __version__ +) # type: str + +RESPONSES = ( + http.server.BaseHTTPRequestHandler.responses +) # type: Mapping[int, Tuple[str, str]] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/http_parser.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/http_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc9482f4fae178d6341ba535d0f7ee18e8525a0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/http_parser.py @@ -0,0 +1,956 @@ +import abc +import asyncio +import collections +import re +import string +import zlib +from contextlib import suppress +from enum import IntEnum +from typing import ( + Any, + Generic, + List, + NamedTuple, + Optional, + Pattern, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from multidict import CIMultiDict, CIMultiDictProxy, istr +from yarl import URL + +from . import hdrs +from .base_protocol import BaseProtocol +from .helpers import NO_EXTENSIONS, BaseTimerContext +from .http_exceptions import ( + BadHttpMessage, + BadStatusLine, + ContentEncodingError, + ContentLengthError, + InvalidHeader, + LineTooLong, + TransferEncodingError, +) +from .http_writer import HttpVersion, HttpVersion10 +from .log import internal_logger +from .streams import EMPTY_PAYLOAD, StreamReader +from .typedefs import Final, RawHeaders + +try: + import brotli + + HAS_BROTLI = True +except ImportError: # pragma: no cover + HAS_BROTLI = False + + +__all__ = ( + "HeadersParser", + "HttpParser", + "HttpRequestParser", + "HttpResponseParser", + "RawRequestMessage", + "RawResponseMessage", +) + +ASCIISET: Final[Set[str]] = set(string.printable) + +# See https://tools.ietf.org/html/rfc7230#section-3.1.1 +# and https://tools.ietf.org/html/rfc7230#appendix-B +# +# method = token +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." 
/ +# "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +# token = 1*tchar +METHRE: Final[Pattern[str]] = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]+") +VERSRE: Final[Pattern[str]] = re.compile(r"HTTP/(\d+).(\d+)") +HDRRE: Final[Pattern[bytes]] = re.compile(rb"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]") + + +class RawRequestMessage(NamedTuple): + method: str + path: str + version: HttpVersion + headers: "CIMultiDictProxy[str]" + raw_headers: RawHeaders + should_close: bool + compression: Optional[str] + upgrade: bool + chunked: bool + url: URL + + +RawResponseMessage = collections.namedtuple( + "RawResponseMessage", + [ + "version", + "code", + "reason", + "headers", + "raw_headers", + "should_close", + "compression", + "upgrade", + "chunked", + ], +) + + +_MsgT = TypeVar("_MsgT", RawRequestMessage, RawResponseMessage) + + +class ParseState(IntEnum): + + PARSE_NONE = 0 + PARSE_LENGTH = 1 + PARSE_CHUNKED = 2 + PARSE_UNTIL_EOF = 3 + + +class ChunkState(IntEnum): + PARSE_CHUNKED_SIZE = 0 + PARSE_CHUNKED_CHUNK = 1 + PARSE_CHUNKED_CHUNK_EOF = 2 + PARSE_MAYBE_TRAILERS = 3 + PARSE_TRAILERS = 4 + + +class HeadersParser: + def __init__( + self, + max_line_size: int = 8190, + max_headers: int = 32768, + max_field_size: int = 8190, + ) -> None: + self.max_line_size = max_line_size + self.max_headers = max_headers + self.max_field_size = max_field_size + + def parse_headers( + self, lines: List[bytes] + ) -> Tuple["CIMultiDictProxy[str]", RawHeaders]: + headers = CIMultiDict() # type: CIMultiDict[str] + raw_headers = [] + + lines_idx = 1 + line = lines[1] + line_count = len(lines) + + while line: + # Parse initial header name : value pair. + try: + bname, bvalue = line.split(b":", 1) + except ValueError: + raise InvalidHeader(line) from None + + bname = bname.strip(b" \t") + bvalue = bvalue.lstrip() + if HDRRE.search(bname): + raise InvalidHeader(bname) + if len(bname) > self.max_field_size: + raise LineTooLong( + "request header name {}".format( + bname.decode("utf8", "xmlcharrefreplace") + ), + str(self.max_field_size), + str(len(bname)), + ) + + header_length = len(bvalue) + + # next line + lines_idx += 1 + line = lines[lines_idx] + + # consume continuation lines + continuation = line and line[0] in (32, 9) # (' ', '\t') + + if continuation: + bvalue_lst = [bvalue] + while continuation: + header_length += len(line) + if header_length > self.max_field_size: + raise LineTooLong( + "request header field {}".format( + bname.decode("utf8", "xmlcharrefreplace") + ), + str(self.max_field_size), + str(header_length), + ) + bvalue_lst.append(line) + + # next line + lines_idx += 1 + if lines_idx < line_count: + line = lines[lines_idx] + if line: + continuation = line[0] in (32, 9) # (' ', '\t') + else: + line = b"" + break + bvalue = b"".join(bvalue_lst) + else: + if header_length > self.max_field_size: + raise LineTooLong( + "request header field {}".format( + bname.decode("utf8", "xmlcharrefreplace") + ), + str(self.max_field_size), + str(header_length), + ) + + bvalue = bvalue.strip() + name = bname.decode("utf-8", "surrogateescape") + value = bvalue.decode("utf-8", "surrogateescape") + + headers.add(name, value) + raw_headers.append((bname, bvalue)) + + return (CIMultiDictProxy(headers), tuple(raw_headers)) + + +class HttpParser(abc.ABC, Generic[_MsgT]): + def __init__( + self, + protocol: Optional[BaseProtocol] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + limit: int = 2 ** 16, + max_line_size: int = 8190, + max_headers: int = 32768, + max_field_size: int = 8190, + timer: Optional[BaseTimerContext] 
= None, + code: Optional[int] = None, + method: Optional[str] = None, + readall: bool = False, + payload_exception: Optional[Type[BaseException]] = None, + response_with_body: bool = True, + read_until_eof: bool = False, + auto_decompress: bool = True, + ) -> None: + self.protocol = protocol + self.loop = loop + self.max_line_size = max_line_size + self.max_headers = max_headers + self.max_field_size = max_field_size + self.timer = timer + self.code = code + self.method = method + self.readall = readall + self.payload_exception = payload_exception + self.response_with_body = response_with_body + self.read_until_eof = read_until_eof + + self._lines = [] # type: List[bytes] + self._tail = b"" + self._upgraded = False + self._payload = None + self._payload_parser = None # type: Optional[HttpPayloadParser] + self._auto_decompress = auto_decompress + self._limit = limit + self._headers_parser = HeadersParser(max_line_size, max_headers, max_field_size) + + @abc.abstractmethod + def parse_message(self, lines: List[bytes]) -> _MsgT: + pass + + def feed_eof(self) -> Optional[_MsgT]: + if self._payload_parser is not None: + self._payload_parser.feed_eof() + self._payload_parser = None + else: + # try to extract partial message + if self._tail: + self._lines.append(self._tail) + + if self._lines: + if self._lines[-1] != "\r\n": + self._lines.append(b"") + with suppress(Exception): + return self.parse_message(self._lines) + return None + + def feed_data( + self, + data: bytes, + SEP: bytes = b"\r\n", + EMPTY: bytes = b"", + CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH, + METH_CONNECT: str = hdrs.METH_CONNECT, + SEC_WEBSOCKET_KEY1: istr = hdrs.SEC_WEBSOCKET_KEY1, + ) -> Tuple[List[Tuple[_MsgT, StreamReader]], bool, bytes]: + + messages = [] + + if self._tail: + data, self._tail = self._tail + data, b"" + + data_len = len(data) + start_pos = 0 + loop = self.loop + + while start_pos < data_len: + + # read HTTP message (request/response line + headers), \r\n\r\n + # and split by lines + if self._payload_parser is None and not self._upgraded: + pos = data.find(SEP, start_pos) + # consume \r\n + if pos == start_pos and not self._lines: + start_pos = pos + 2 + continue + + if pos >= start_pos: + # line found + self._lines.append(data[start_pos:pos]) + start_pos = pos + 2 + + # \r\n\r\n found + if self._lines[-1] == EMPTY: + try: + msg: _MsgT = self.parse_message(self._lines) + finally: + self._lines.clear() + + def get_content_length() -> Optional[int]: + # payload length + length_hdr = msg.headers.get(CONTENT_LENGTH) + if length_hdr is None: + return None + + try: + length = int(length_hdr) + except ValueError: + raise InvalidHeader(CONTENT_LENGTH) + + if length < 0: + raise InvalidHeader(CONTENT_LENGTH) + + return length + + length = get_content_length() + # do not support old websocket spec + if SEC_WEBSOCKET_KEY1 in msg.headers: + raise InvalidHeader(SEC_WEBSOCKET_KEY1) + + self._upgraded = msg.upgrade + + method = getattr(msg, "method", self.method) + + assert self.protocol is not None + # calculate payload + if ( + (length is not None and length > 0) + or msg.chunked + and not msg.upgrade + ): + payload = StreamReader( + self.protocol, + timer=self.timer, + loop=loop, + limit=self._limit, + ) + payload_parser = HttpPayloadParser( + payload, + length=length, + chunked=msg.chunked, + method=method, + compression=msg.compression, + code=self.code, + readall=self.readall, + response_with_body=self.response_with_body, + auto_decompress=self._auto_decompress, + ) + if not payload_parser.done: + 
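# keep the parser only while the payload is
+                            # incomplete; a parser that is already done
+                            # needs no further feed_data() calls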
self._payload_parser = payload_parser + elif method == METH_CONNECT: + assert isinstance(msg, RawRequestMessage) + payload = StreamReader( + self.protocol, + timer=self.timer, + loop=loop, + limit=self._limit, + ) + self._upgraded = True + self._payload_parser = HttpPayloadParser( + payload, + method=msg.method, + compression=msg.compression, + readall=True, + auto_decompress=self._auto_decompress, + ) + else: + if ( + getattr(msg, "code", 100) >= 199 + and length is None + and self.read_until_eof + ): + payload = StreamReader( + self.protocol, + timer=self.timer, + loop=loop, + limit=self._limit, + ) + payload_parser = HttpPayloadParser( + payload, + length=length, + chunked=msg.chunked, + method=method, + compression=msg.compression, + code=self.code, + readall=True, + response_with_body=self.response_with_body, + auto_decompress=self._auto_decompress, + ) + if not payload_parser.done: + self._payload_parser = payload_parser + else: + payload = EMPTY_PAYLOAD + + messages.append((msg, payload)) + else: + self._tail = data[start_pos:] + data = EMPTY + break + + # no parser, just store + elif self._payload_parser is None and self._upgraded: + assert not self._lines + break + + # feed payload + elif data and start_pos < data_len: + assert not self._lines + assert self._payload_parser is not None + try: + eof, data = self._payload_parser.feed_data(data[start_pos:]) + except BaseException as exc: + if self.payload_exception is not None: + self._payload_parser.payload.set_exception( + self.payload_exception(str(exc)) + ) + else: + self._payload_parser.payload.set_exception(exc) + + eof = True + data = b"" + + if eof: + start_pos = 0 + data_len = len(data) + self._payload_parser = None + continue + else: + break + + if data and start_pos < data_len: + data = data[start_pos:] + else: + data = EMPTY + + return messages, self._upgraded, data + + def parse_headers( + self, lines: List[bytes] + ) -> Tuple[ + "CIMultiDictProxy[str]", RawHeaders, Optional[bool], Optional[str], bool, bool + ]: + """Parses RFC 5322 headers from a stream. + + Line continuations are supported. Returns list of header name + and value pairs. Header name is in upper case. + """ + headers, raw_headers = self._headers_parser.parse_headers(lines) + close_conn = None + encoding = None + upgrade = False + chunked = False + + # keep-alive + conn = headers.get(hdrs.CONNECTION) + if conn: + v = conn.lower() + if v == "close": + close_conn = True + elif v == "keep-alive": + close_conn = False + elif v == "upgrade": + upgrade = True + + # encoding + enc = headers.get(hdrs.CONTENT_ENCODING) + if enc: + enc = enc.lower() + if enc in ("gzip", "deflate", "br"): + encoding = enc + + # chunking + te = headers.get(hdrs.TRANSFER_ENCODING) + if te is not None: + if "chunked" == te.lower(): + chunked = True + else: + raise BadHttpMessage("Request has invalid `Transfer-Encoding`") + + if hdrs.CONTENT_LENGTH in headers: + raise BadHttpMessage( + "Content-Length can't be present with Transfer-Encoding", + ) + + return (headers, raw_headers, close_conn, encoding, upgrade, chunked) + + def set_upgraded(self, val: bool) -> None: + """Set connection upgraded (to websocket) mode. + + :param bool val: new state. + """ + self._upgraded = val + + +class HttpRequestParser(HttpParser[RawRequestMessage]): + """Read request status line. + + Exception .http_exceptions.BadStatusLine + could be raised in case of any errors in status line. + Returns RawRequestMessage. 
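+
+    For example, feeding the bytes of ``GET /path?q=1 HTTP/1.1`` (plus
+    headers and a blank line) yields a message with ``method='GET'``,
+    ``path='/path?q=1'`` and ``version=HttpVersion(1, 1)``.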
+ """ + + def parse_message(self, lines: List[bytes]) -> RawRequestMessage: + # request line + line = lines[0].decode("utf-8", "surrogateescape") + try: + method, path, version = line.split(None, 2) + except ValueError: + raise BadStatusLine(line) from None + + if len(path) > self.max_line_size: + raise LineTooLong( + "Status line is too long", str(self.max_line_size), str(len(path)) + ) + + path_part, _hash_separator, url_fragment = path.partition("#") + path_part, _question_mark_separator, qs_part = path_part.partition("?") + + # method + if not METHRE.match(method): + raise BadStatusLine(method) + + # version + try: + if version.startswith("HTTP/"): + n1, n2 = version[5:].split(".", 1) + version_o = HttpVersion(int(n1), int(n2)) + else: + raise BadStatusLine(version) + except Exception: + raise BadStatusLine(version) + + # read headers + ( + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + ) = self.parse_headers(lines) + + if close is None: # then the headers weren't set in the request + if version_o <= HttpVersion10: # HTTP 1.0 must asks to not close + close = True + else: # HTTP 1.1 must ask to close. + close = False + + return RawRequestMessage( + method, + path, + version_o, + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + # NOTE: `yarl.URL.build()` is used to mimic what the Cython-based + # NOTE: parser does, otherwise it results into the same + # NOTE: HTTP Request-Line input producing different + # NOTE: `yarl.URL()` objects + URL.build( + path=path_part, + query_string=qs_part, + fragment=url_fragment, + encoded=True, + ), + ) + + +class HttpResponseParser(HttpParser[RawResponseMessage]): + """Read response status line and headers. + + BadStatusLine could be raised in case of any errors in status line. + Returns RawResponseMessage. 
+ """ + + def parse_message(self, lines: List[bytes]) -> RawResponseMessage: + line = lines[0].decode("utf-8", "surrogateescape") + try: + version, status = line.split(None, 1) + except ValueError: + raise BadStatusLine(line) from None + + try: + status, reason = status.split(None, 1) + except ValueError: + reason = "" + + if len(reason) > self.max_line_size: + raise LineTooLong( + "Status line is too long", str(self.max_line_size), str(len(reason)) + ) + + # version + match = VERSRE.match(version) + if match is None: + raise BadStatusLine(line) + version_o = HttpVersion(int(match.group(1)), int(match.group(2))) + + # The status code is a three-digit number + try: + status_i = int(status) + except ValueError: + raise BadStatusLine(line) from None + + if status_i > 999: + raise BadStatusLine(line) + + # read headers + ( + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + ) = self.parse_headers(lines) + + if close is None: + close = version_o <= HttpVersion10 + + return RawResponseMessage( + version_o, + status_i, + reason.strip(), + headers, + raw_headers, + close, + compression, + upgrade, + chunked, + ) + + +class HttpPayloadParser: + def __init__( + self, + payload: StreamReader, + length: Optional[int] = None, + chunked: bool = False, + compression: Optional[str] = None, + code: Optional[int] = None, + method: Optional[str] = None, + readall: bool = False, + response_with_body: bool = True, + auto_decompress: bool = True, + ) -> None: + self._length = 0 + self._type = ParseState.PARSE_NONE + self._chunk = ChunkState.PARSE_CHUNKED_SIZE + self._chunk_size = 0 + self._chunk_tail = b"" + self._auto_decompress = auto_decompress + self.done = False + + # payload decompression wrapper + if response_with_body and compression and self._auto_decompress: + real_payload = DeflateBuffer( + payload, compression + ) # type: Union[StreamReader, DeflateBuffer] + else: + real_payload = payload + + # payload parser + if not response_with_body: + # don't parse payload if it's not expected to be received + self._type = ParseState.PARSE_NONE + real_payload.feed_eof() + self.done = True + + elif chunked: + self._type = ParseState.PARSE_CHUNKED + elif length is not None: + self._type = ParseState.PARSE_LENGTH + self._length = length + if self._length == 0: + real_payload.feed_eof() + self.done = True + else: + if readall and code != 204: + self._type = ParseState.PARSE_UNTIL_EOF + elif method in ("PUT", "POST"): + internal_logger.warning( # pragma: no cover + "Content-Length or Transfer-Encoding header is required" + ) + self._type = ParseState.PARSE_NONE + real_payload.feed_eof() + self.done = True + + self.payload = real_payload + + def feed_eof(self) -> None: + if self._type == ParseState.PARSE_UNTIL_EOF: + self.payload.feed_eof() + elif self._type == ParseState.PARSE_LENGTH: + raise ContentLengthError( + "Not enough data for satisfy content length header." + ) + elif self._type == ParseState.PARSE_CHUNKED: + raise TransferEncodingError( + "Not enough data for satisfy transfer length header." 
+            )
+
+    def feed_data(
+        self, chunk: bytes, SEP: bytes = b"\r\n", CHUNK_EXT: bytes = b";"
+    ) -> Tuple[bool, bytes]:
+        # Read specified amount of bytes
+        if self._type == ParseState.PARSE_LENGTH:
+            required = self._length
+            chunk_len = len(chunk)
+
+            if required >= chunk_len:
+                self._length = required - chunk_len
+                self.payload.feed_data(chunk, chunk_len)
+                if self._length == 0:
+                    self.payload.feed_eof()
+                    return True, b""
+            else:
+                self._length = 0
+                self.payload.feed_data(chunk[:required], required)
+                self.payload.feed_eof()
+                return True, chunk[required:]
+
+        # Chunked transfer encoding parser
+        elif self._type == ParseState.PARSE_CHUNKED:
+            if self._chunk_tail:
+                chunk = self._chunk_tail + chunk
+                self._chunk_tail = b""
+
+            while chunk:
+
+                # read next chunk size
+                if self._chunk == ChunkState.PARSE_CHUNKED_SIZE:
+                    pos = chunk.find(SEP)
+                    if pos >= 0:
+                        i = chunk.find(CHUNK_EXT, 0, pos)
+                        if i >= 0:
+                            size_b = chunk[:i]  # strip chunk-extensions
+                        else:
+                            size_b = chunk[:pos]
+
+                        try:
+                            size = int(bytes(size_b), 16)
+                        except ValueError:
+                            exc = TransferEncodingError(
+                                chunk[:pos].decode("ascii", "surrogateescape")
+                            )
+                            self.payload.set_exception(exc)
+                            raise exc from None
+
+                        chunk = chunk[pos + 2 :]
+                        if size == 0:  # eof marker
+                            self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
+                        else:
+                            self._chunk = ChunkState.PARSE_CHUNKED_CHUNK
+                            self._chunk_size = size
+                            self.payload.begin_http_chunk_receiving()
+                    else:
+                        self._chunk_tail = chunk
+                        return False, b""
+
+                # read chunk and feed buffer
+                if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK:
+                    required = self._chunk_size
+                    chunk_len = len(chunk)
+
+                    if required > chunk_len:
+                        self._chunk_size = required - chunk_len
+                        self.payload.feed_data(chunk, chunk_len)
+                        return False, b""
+                    else:
+                        self._chunk_size = 0
+                        self.payload.feed_data(chunk[:required], required)
+                        chunk = chunk[required:]
+                        self._chunk = ChunkState.PARSE_CHUNKED_CHUNK_EOF
+                        self.payload.end_http_chunk_receiving()
+
+                # toss the CRLF at the end of the chunk
+                if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK_EOF:
+                    if chunk[:2] == SEP:
+                        chunk = chunk[2:]
+                        self._chunk = ChunkState.PARSE_CHUNKED_SIZE
+                    else:
+                        self._chunk_tail = chunk
+                        return False, b""
+
+                # if the stream does not contain a trailer, after 0\r\n
+                # we should get another \r\n; otherwise
+                # trailers need to be skipped until \r\n\r\n
+                if self._chunk == ChunkState.PARSE_MAYBE_TRAILERS:
+                    head = chunk[:2]
+                    if head == SEP:
+                        # end of stream
+                        self.payload.feed_eof()
+                        return True, chunk[2:]
+                    # Both CR and LF, or only LF may not be received yet. It is
+                    # expected that CRLF or LF will be shown at the very first
+                    # byte next time, otherwise trailers should come. The last
+                    # CRLF which marks the end of response might not be
+                    # contained in the same TCP segment which delivered the
+                    # size indicator.
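+                    # Worked example (illustrative only): for the body
+                    #   b"4\r\nWiki\r\n0\r\n\r\n"
+                    # the parser reads size 0x4, feeds b"Wiki", tosses the
+                    # CRLF after the data, sees the 0-size eof marker, and
+                    # the final b"\r\n" takes the branch above, so
+                    # feed_eof() completes the payload.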
+                    if not head:
+                        return False, b""
+                    if head == SEP[:1]:
+                        self._chunk_tail = head
+                        return False, b""
+                    self._chunk = ChunkState.PARSE_TRAILERS
+
+                # read and discard trailer up to the CRLF terminator
+                if self._chunk == ChunkState.PARSE_TRAILERS:
+                    pos = chunk.find(SEP)
+                    if pos >= 0:
+                        chunk = chunk[pos + 2 :]
+                        self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
+                    else:
+                        self._chunk_tail = chunk
+                        return False, b""
+
+        # Read all bytes until eof
+        elif self._type == ParseState.PARSE_UNTIL_EOF:
+            self.payload.feed_data(chunk, len(chunk))
+
+        return False, b""
+
+
+class DeflateBuffer:
+    """Decompress an incoming stream and feed the data into the specified stream."""
+
+    decompressor: Any
+
+    def __init__(self, out: StreamReader, encoding: Optional[str]) -> None:
+        self.out = out
+        self.size = 0
+        self.encoding = encoding
+        self._started_decoding = False
+
+        if encoding == "br":
+            if not HAS_BROTLI:  # pragma: no cover
+                raise ContentEncodingError(
+                    "Can not decode content-encoding: brotli (br). "
+                    "Please install `Brotli`"
+                )
+
+            class BrotliDecoder:
+                # Supports both 'brotlipy' and 'Brotli' packages
+                # since they share an import name. The top branches
+                # are for 'brotlipy' and bottom branches for 'Brotli'
+                def __init__(self) -> None:
+                    self._obj = brotli.Decompressor()
+
+                def decompress(self, data: bytes) -> bytes:
+                    if hasattr(self._obj, "decompress"):
+                        return cast(bytes, self._obj.decompress(data))
+                    return cast(bytes, self._obj.process(data))
+
+                def flush(self) -> bytes:
+                    if hasattr(self._obj, "flush"):
+                        return cast(bytes, self._obj.flush())
+                    return b""
+
+            self.decompressor = BrotliDecoder()
+        else:
+            zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
+            self.decompressor = zlib.decompressobj(wbits=zlib_mode)
+
+    def set_exception(self, exc: BaseException) -> None:
+        self.out.set_exception(exc)
+
+    def feed_data(self, chunk: bytes, size: int) -> None:
+        if not size:
+            return
+
+        self.size += size
+
+        # RFC 1950
+        # bits 0..3 = CM = 0b1000 = 8 = "deflate"
+        # bits 4..7 = CINFO = 1..7 = window size
+        if (
+            not self._started_decoding
+            and self.encoding == "deflate"
+            and chunk[0] & 0xF != 8
+        ):
+            # Change the decoder to decompress incorrectly compressed data
+            # Actually we should issue a warning about non-RFC-compliant data.
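+            # For example (not from the original code): a spec-compliant zlib
+            # stream usually begins with 0x78, and 0x78 & 0xF == 8 passes the
+            # check above, while raw deflate data without the RFC 1950
+            # wrapper typically fails it and is handled here.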
+            self.decompressor = zlib.decompressobj(wbits=-zlib.MAX_WBITS)
+
+        try:
+            chunk = self.decompressor.decompress(chunk)
+        except Exception:
+            raise ContentEncodingError(
+                "Can not decode content-encoding: %s" % self.encoding
+            )
+
+        self._started_decoding = True
+
+        if chunk:
+            self.out.feed_data(chunk, len(chunk))
+
+    def feed_eof(self) -> None:
+        chunk = self.decompressor.flush()
+
+        if chunk or self.size > 0:
+            self.out.feed_data(chunk, len(chunk))
+            if self.encoding == "deflate" and not self.decompressor.eof:
+                raise ContentEncodingError("deflate")
+
+        self.out.feed_eof()
+
+    def begin_http_chunk_receiving(self) -> None:
+        self.out.begin_http_chunk_receiving()
+
+    def end_http_chunk_receiving(self) -> None:
+        self.out.end_http_chunk_receiving()
+
+
+HttpRequestParserPy = HttpRequestParser
+HttpResponseParserPy = HttpResponseParser
+RawRequestMessagePy = RawRequestMessage
+RawResponseMessagePy = RawResponseMessage
+
+try:
+    if not NO_EXTENSIONS:
+        from ._http_parser import (  # type: ignore[import,no-redef]
+            HttpRequestParser,
+            HttpResponseParser,
+            RawRequestMessage,
+            RawResponseMessage,
+        )
+
+        HttpRequestParserC = HttpRequestParser
+        HttpResponseParserC = HttpResponseParser
+        RawRequestMessageC = RawRequestMessage
+        RawResponseMessageC = RawResponseMessage
+except ImportError:  # pragma: no cover
+    pass
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/locks.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/locks.py
new file mode 100644
index 0000000000000000000000000000000000000000..df65e3e47d8860b6c3604ad48ba8fe1a289fa71f
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/locks.py
@@ -0,0 +1,41 @@
+import asyncio
+import collections
+from typing import Any, Deque, Optional
+
+
+class EventResultOrError:
+    """Event asyncio lock helper class.
+
+    Wraps asyncio.Event, allowing the waiting Tasks to be woken either
+    normally (without an error) or with an exception raised in them.
+
+    thanks to @vorpalsmith for the simple design.
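+
+    Usage sketch (illustrative only, assumes a running event loop)::
+
+        ev = EventResultOrError(loop)
+        # some task awaits ev.wait()
+        ev.set()                          # wakes waiters normally
+        ev.set(exc=RuntimeError("boom"))  # would wake them with an error
+        ev.cancel()                       # or cancel any remaining waiters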
+ """ + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop = loop + self._exc = None # type: Optional[BaseException] + self._event = asyncio.Event() + self._waiters = collections.deque() # type: Deque[asyncio.Future[Any]] + + def set(self, exc: Optional[BaseException] = None) -> None: + self._exc = exc + self._event.set() + + async def wait(self) -> Any: + waiter = self._loop.create_task(self._event.wait()) + self._waiters.append(waiter) + try: + val = await waiter + finally: + self._waiters.remove(waiter) + + if self._exc is not None: + raise self._exc + + return val + + def cancel(self) -> None: + """Cancel all waiters""" + for waiter in self._waiters: + waiter.cancel() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/log.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/log.py new file mode 100644 index 0000000000000000000000000000000000000000..3cecea2bac185df741bccd0a32a5fef9cfe23299 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/log.py @@ -0,0 +1,8 @@ +import logging + +access_logger = logging.getLogger("aiohttp.access") +client_logger = logging.getLogger("aiohttp.client") +internal_logger = logging.getLogger("aiohttp.internal") +server_logger = logging.getLogger("aiohttp.server") +web_logger = logging.getLogger("aiohttp.web") +ws_logger = logging.getLogger("aiohttp.websocket") diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/payload.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/payload.py new file mode 100644 index 0000000000000000000000000000000000000000..2ee90beea8a2977ed0c02543faa71bd2f2c68831 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/payload.py @@ -0,0 +1,465 @@ +import asyncio +import enum +import io +import json +import mimetypes +import os +import warnings +from abc import ABC, abstractmethod +from itertools import chain +from typing import ( + IO, + TYPE_CHECKING, + Any, + ByteString, + Dict, + Iterable, + Optional, + TextIO, + Tuple, + Type, + Union, +) + +from multidict import CIMultiDict + +from . 
import hdrs +from .abc import AbstractStreamWriter +from .helpers import ( + PY_36, + content_disposition_header, + guess_filename, + parse_mimetype, + sentinel, +) +from .streams import StreamReader +from .typedefs import Final, JSONEncoder, _CIMultiDict + +__all__ = ( + "PAYLOAD_REGISTRY", + "get_payload", + "payload_type", + "Payload", + "BytesPayload", + "StringPayload", + "IOBasePayload", + "BytesIOPayload", + "BufferedReaderPayload", + "TextIOPayload", + "StringIOPayload", + "JsonPayload", + "AsyncIterablePayload", +) + +TOO_LARGE_BYTES_BODY: Final[int] = 2 ** 20 # 1 MB + +if TYPE_CHECKING: # pragma: no cover + from typing import List + + +class LookupError(Exception): + pass + + +class Order(str, enum.Enum): + normal = "normal" + try_first = "try_first" + try_last = "try_last" + + +def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload": + return PAYLOAD_REGISTRY.get(data, *args, **kwargs) + + +def register_payload( + factory: Type["Payload"], type: Any, *, order: Order = Order.normal +) -> None: + PAYLOAD_REGISTRY.register(factory, type, order=order) + + +class payload_type: + def __init__(self, type: Any, *, order: Order = Order.normal) -> None: + self.type = type + self.order = order + + def __call__(self, factory: Type["Payload"]) -> Type["Payload"]: + register_payload(factory, self.type, order=self.order) + return factory + + +PayloadType = Type["Payload"] +_PayloadRegistryItem = Tuple[PayloadType, Any] + + +class PayloadRegistry: + """Payload registry. + + note: we need zope.interface for more efficient adapter search + """ + + def __init__(self) -> None: + self._first = [] # type: List[_PayloadRegistryItem] + self._normal = [] # type: List[_PayloadRegistryItem] + self._last = [] # type: List[_PayloadRegistryItem] + + def get( + self, + data: Any, + *args: Any, + _CHAIN: "Type[chain[_PayloadRegistryItem]]" = chain, + **kwargs: Any, + ) -> "Payload": + if isinstance(data, Payload): + return data + for factory, type in _CHAIN(self._first, self._normal, self._last): + if isinstance(data, type): + return factory(data, *args, **kwargs) + + raise LookupError() + + def register( + self, factory: PayloadType, type: Any, *, order: Order = Order.normal + ) -> None: + if order is Order.try_first: + self._first.append((factory, type)) + elif order is Order.normal: + self._normal.append((factory, type)) + elif order is Order.try_last: + self._last.append((factory, type)) + else: + raise ValueError(f"Unsupported order {order!r}") + + +class Payload(ABC): + + _default_content_type = "application/octet-stream" # type: str + _size = None # type: Optional[int] + + def __init__( + self, + value: Any, + headers: Optional[ + Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]] + ] = None, + content_type: Optional[str] = sentinel, + filename: Optional[str] = None, + encoding: Optional[str] = None, + **kwargs: Any, + ) -> None: + self._encoding = encoding + self._filename = filename + self._headers = CIMultiDict() # type: _CIMultiDict + self._value = value + if content_type is not sentinel and content_type is not None: + self._headers[hdrs.CONTENT_TYPE] = content_type + elif self._filename is not None: + content_type = mimetypes.guess_type(self._filename)[0] + if content_type is None: + content_type = self._default_content_type + self._headers[hdrs.CONTENT_TYPE] = content_type + else: + self._headers[hdrs.CONTENT_TYPE] = self._default_content_type + self._headers.update(headers or {}) + + @property + def size(self) -> Optional[int]: + """Size of the payload.""" + return 
self._size + + @property + def filename(self) -> Optional[str]: + """Filename of the payload.""" + return self._filename + + @property + def headers(self) -> _CIMultiDict: + """Custom item headers""" + return self._headers + + @property + def _binary_headers(self) -> bytes: + return ( + "".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode( + "utf-8" + ) + + b"\r\n" + ) + + @property + def encoding(self) -> Optional[str]: + """Payload encoding""" + return self._encoding + + @property + def content_type(self) -> str: + """Content type""" + return self._headers[hdrs.CONTENT_TYPE] + + def set_content_disposition( + self, + disptype: str, + quote_fields: bool = True, + _charset: str = "utf-8", + **params: Any, + ) -> None: + """Sets ``Content-Disposition`` header.""" + self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header( + disptype, quote_fields=quote_fields, _charset=_charset, **params + ) + + @abstractmethod + async def write(self, writer: AbstractStreamWriter) -> None: + """Write payload. + + writer is an AbstractStreamWriter instance: + """ + + +class BytesPayload(Payload): + def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None: + if not isinstance(value, (bytes, bytearray, memoryview)): + raise TypeError(f"value argument must be byte-ish, not {type(value)!r}") + + if "content_type" not in kwargs: + kwargs["content_type"] = "application/octet-stream" + + super().__init__(value, *args, **kwargs) + + if isinstance(value, memoryview): + self._size = value.nbytes + else: + self._size = len(value) + + if self._size > TOO_LARGE_BYTES_BODY: + if PY_36: + kwargs = {"source": self} + else: + kwargs = {} + warnings.warn( + "Sending a large body directly with raw bytes might" + " lock the event loop. You should probably pass an " + "io.BytesIO object instead", + ResourceWarning, + **kwargs, + ) + + async def write(self, writer: AbstractStreamWriter) -> None: + await writer.write(self._value) + + +class StringPayload(BytesPayload): + def __init__( + self, + value: str, + *args: Any, + encoding: Optional[str] = None, + content_type: Optional[str] = None, + **kwargs: Any, + ) -> None: + + if encoding is None: + if content_type is None: + real_encoding = "utf-8" + content_type = "text/plain; charset=utf-8" + else: + mimetype = parse_mimetype(content_type) + real_encoding = mimetype.parameters.get("charset", "utf-8") + else: + if content_type is None: + content_type = "text/plain; charset=%s" % encoding + real_encoding = encoding + + super().__init__( + value.encode(real_encoding), + encoding=real_encoding, + content_type=content_type, + *args, + **kwargs, + ) + + +class StringIOPayload(StringPayload): + def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None: + super().__init__(value.read(), *args, **kwargs) + + +class IOBasePayload(Payload): + _value: IO[Any] + + def __init__( + self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any + ) -> None: + if "filename" not in kwargs: + kwargs["filename"] = guess_filename(value) + + super().__init__(value, *args, **kwargs) + + if self._filename is not None and disposition is not None: + if hdrs.CONTENT_DISPOSITION not in self.headers: + self.set_content_disposition(disposition, filename=self._filename) + + async def write(self, writer: AbstractStreamWriter) -> None: + loop = asyncio.get_event_loop() + try: + chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16) + while chunk: + await writer.write(chunk) + chunk = await loop.run_in_executor(None, 
self._value.read, 2 ** 16)
+        finally:
+            await loop.run_in_executor(None, self._value.close)
+
+
+class TextIOPayload(IOBasePayload):
+    _value: TextIO
+
+    def __init__(
+        self,
+        value: TextIO,
+        *args: Any,
+        encoding: Optional[str] = None,
+        content_type: Optional[str] = None,
+        **kwargs: Any,
+    ) -> None:
+
+        if encoding is None:
+            if content_type is None:
+                encoding = "utf-8"
+                content_type = "text/plain; charset=utf-8"
+            else:
+                mimetype = parse_mimetype(content_type)
+                encoding = mimetype.parameters.get("charset", "utf-8")
+        else:
+            if content_type is None:
+                content_type = "text/plain; charset=%s" % encoding
+
+        super().__init__(
+            value,
+            content_type=content_type,
+            encoding=encoding,
+            *args,
+            **kwargs,
+        )
+
+    @property
+    def size(self) -> Optional[int]:
+        try:
+            return os.fstat(self._value.fileno()).st_size - self._value.tell()
+        except OSError:
+            return None
+
+    async def write(self, writer: AbstractStreamWriter) -> None:
+        loop = asyncio.get_event_loop()
+        try:
+            chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
+            while chunk:
+                data = (
+                    chunk.encode(encoding=self._encoding)
+                    if self._encoding
+                    else chunk.encode()
+                )
+                await writer.write(data)
+                chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
+        finally:
+            await loop.run_in_executor(None, self._value.close)
+
+
+class BytesIOPayload(IOBasePayload):
+    @property
+    def size(self) -> int:
+        position = self._value.tell()
+        end = self._value.seek(0, os.SEEK_END)
+        self._value.seek(position)
+        return end - position
+
+
+class BufferedReaderPayload(IOBasePayload):
+    @property
+    def size(self) -> Optional[int]:
+        try:
+            return os.fstat(self._value.fileno()).st_size - self._value.tell()
+        except OSError:
+            # data.fileno() is not supported, e.g.
+            # io.BufferedReader(io.BytesIO(b'data'))
+            return None
+
+
+class JsonPayload(BytesPayload):
+    def __init__(
+        self,
+        value: Any,
+        encoding: str = "utf-8",
+        content_type: str = "application/json",
+        dumps: JSONEncoder = json.dumps,
+        *args: Any,
+        **kwargs: Any,
+    ) -> None:
+
+        super().__init__(
+            dumps(value).encode(encoding),
+            content_type=content_type,
+            encoding=encoding,
+            *args,
+            **kwargs,
+        )
+
+
+if TYPE_CHECKING:  # pragma: no cover
+    from typing import AsyncIterable, AsyncIterator
+
+    _AsyncIterator = AsyncIterator[bytes]
+    _AsyncIterable = AsyncIterable[bytes]
+else:
+    from collections.abc import AsyncIterable, AsyncIterator
+
+    _AsyncIterator = AsyncIterator
+    _AsyncIterable = AsyncIterable
+
+
+class AsyncIterablePayload(Payload):
+
+    _iter = None  # type: Optional[_AsyncIterator]
+
+    def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None:
+        if not isinstance(value, AsyncIterable):
+            raise TypeError(
+                "value argument must support "
+                "collections.abc.AsyncIterable interface, "
+                "got {!r}".format(type(value))
+            )
+
+        if "content_type" not in kwargs:
+            kwargs["content_type"] = "application/octet-stream"
+
+        super().__init__(value, *args, **kwargs)
+
+        self._iter = value.__aiter__()
+
+    async def write(self, writer: AbstractStreamWriter) -> None:
+        if self._iter:
+            try:
+                # the `_iter is not None` check above prevents the rare case
+                # when the same iterable is used twice
+                while True:
+                    chunk = await self._iter.__anext__()
+                    await writer.write(chunk)
+            except StopAsyncIteration:
+                self._iter = None
+
+
+class StreamReaderPayload(AsyncIterablePayload):
+    def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
+        super().__init__(value.iter_any(), *args, **kwargs)
+
+
+PAYLOAD_REGISTRY = PayloadRegistry()
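+# Lookup is a linear isinstance() scan over the registered (factory, type)
+# pairs below, so e.g. (illustrative) get_payload(b"data") yields a
+# BytesPayload and get_payload(io.StringIO("hi")) yields a StringIOPayload.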
+PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview)) +PAYLOAD_REGISTRY.register(StringPayload, str) +PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO) +PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase) +PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO) +PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom)) +PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase) +PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader) +# try_last for giving a chance to more specialized async interables like +# multidict.BodyPartReaderPayload override the default +PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/py.typed b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/py.typed @@ -0,0 +1 @@ +Marker diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/resolver.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..531ce93fccc2d3be442556de644cdc78d31d9c6e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/resolver.py @@ -0,0 +1,160 @@ +import asyncio +import socket +from typing import Any, Dict, List, Optional, Type, Union + +from .abc import AbstractResolver +from .helpers import get_running_loop + +__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver") + +try: + import aiodns + + # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname') +except ImportError: # pragma: no cover + aiodns = None + +aiodns_default = False + + +class ThreadedResolver(AbstractResolver): + """Threaded resolver. + + Uses an Executor for synchronous getaddrinfo() calls. + concurrent.futures.ThreadPoolExecutor is used by default. + """ + + def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: + self._loop = get_running_loop(loop) + + async def resolve( + self, hostname: str, port: int = 0, family: int = socket.AF_INET + ) -> List[Dict[str, Any]]: + infos = await self._loop.getaddrinfo( + hostname, + port, + type=socket.SOCK_STREAM, + family=family, + flags=socket.AI_ADDRCONFIG, + ) + + hosts = [] + for family, _, proto, _, address in infos: + if family == socket.AF_INET6: + if len(address) < 3: + # IPv6 is not supported by Python build, + # or IPv6 is not enabled in the host + continue + if address[3]: # type: ignore[misc] + # This is essential for link-local IPv6 addresses. + # LL IPv6 is a VERY rare case. Strictly speaking, we should use + # getnameinfo() unconditionally, but performance makes sense. 
+ host, _port = socket.getnameinfo( + address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV + ) + port = int(_port) + else: + host, port = address[:2] + else: # IPv4 + assert family == socket.AF_INET + host, port = address # type: ignore[misc] + hosts.append( + { + "hostname": hostname, + "host": host, + "port": port, + "family": family, + "proto": proto, + "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, + } + ) + + return hosts + + async def close(self) -> None: + pass + + +class AsyncResolver(AbstractResolver): + """Use the `aiodns` package to make asynchronous DNS lookups""" + + def __init__( + self, + loop: Optional[asyncio.AbstractEventLoop] = None, + *args: Any, + **kwargs: Any + ) -> None: + if aiodns is None: + raise RuntimeError("Resolver requires aiodns library") + + self._loop = get_running_loop(loop) + self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs) + + if not hasattr(self._resolver, "gethostbyname"): + # aiodns 1.1 is not available, fallback to DNSResolver.query + self.resolve = self._resolve_with_query # type: ignore + + async def resolve( + self, host: str, port: int = 0, family: int = socket.AF_INET + ) -> List[Dict[str, Any]]: + try: + resp = await self._resolver.gethostbyname(host, family) + except aiodns.error.DNSError as exc: + msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed" + raise OSError(msg) from exc + hosts = [] + for address in resp.addresses: + hosts.append( + { + "hostname": host, + "host": address, + "port": port, + "family": family, + "proto": 0, + "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, + } + ) + + if not hosts: + raise OSError("DNS lookup failed") + + return hosts + + async def _resolve_with_query( + self, host: str, port: int = 0, family: int = socket.AF_INET + ) -> List[Dict[str, Any]]: + if family == socket.AF_INET6: + qtype = "AAAA" + else: + qtype = "A" + + try: + resp = await self._resolver.query(host, qtype) + except aiodns.error.DNSError as exc: + msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed" + raise OSError(msg) from exc + + hosts = [] + for rr in resp: + hosts.append( + { + "hostname": host, + "host": rr.host, + "port": port, + "family": family, + "proto": 0, + "flags": socket.AI_NUMERICHOST, + } + ) + + if not hosts: + raise OSError("DNS lookup failed") + + return hosts + + async def close(self) -> None: + self._resolver.cancel() + + +_DefaultType = Type[Union[AsyncResolver, ThreadedResolver]] +DefaultResolver: _DefaultType = AsyncResolver if aiodns_default else ThreadedResolver diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/streams.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..055848877e3e43a1595971fa24ea2b4ed2b825eb --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/streams.py @@ -0,0 +1,660 @@ +import asyncio +import collections +import warnings +from typing import Awaitable, Callable, Deque, Generic, List, Optional, Tuple, TypeVar + +from .base_protocol import BaseProtocol +from .helpers import BaseTimerContext, set_exception, set_result +from .log import internal_logger +from .typedefs import Final + +__all__ = ( + "EMPTY_PAYLOAD", + "EofStream", + "StreamReader", + "DataQueue", + "FlowControlDataQueue", +) + +_T = TypeVar("_T") + + +class EofStream(Exception): + """eof stream indication.""" + + +class AsyncStreamIterator(Generic[_T]): + def __init__(self, 
read_func: Callable[[], Awaitable[_T]]) -> None: + self.read_func = read_func + + def __aiter__(self) -> "AsyncStreamIterator[_T]": + return self + + async def __anext__(self) -> _T: + try: + rv = await self.read_func() + except EofStream: + raise StopAsyncIteration + if rv == b"": + raise StopAsyncIteration + return rv + + +class ChunkTupleAsyncStreamIterator: + def __init__(self, stream: "StreamReader") -> None: + self._stream = stream + + def __aiter__(self) -> "ChunkTupleAsyncStreamIterator": + return self + + async def __anext__(self) -> Tuple[bytes, bool]: + rv = await self._stream.readchunk() + if rv == (b"", False): + raise StopAsyncIteration + return rv + + +class AsyncStreamReaderMixin: + def __aiter__(self) -> AsyncStreamIterator[bytes]: + return AsyncStreamIterator(self.readline) # type: ignore[attr-defined] + + def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]: + """Returns an asynchronous iterator that yields chunks of size n. + + Python-3.5 available for Python 3.5+ only + """ + return AsyncStreamIterator( + lambda: self.read(n) # type: ignore[attr-defined,no-any-return] + ) + + def iter_any(self) -> AsyncStreamIterator[bytes]: + """Yield all available data as soon as it is received. + + Python-3.5 available for Python 3.5+ only + """ + return AsyncStreamIterator(self.readany) # type: ignore[attr-defined] + + def iter_chunks(self) -> ChunkTupleAsyncStreamIterator: + """Yield chunks of data as they are received by the server. + + The yielded objects are tuples + of (bytes, bool) as returned by the StreamReader.readchunk method. + + Python-3.5 available for Python 3.5+ only + """ + return ChunkTupleAsyncStreamIterator(self) # type: ignore[arg-type] + + +class StreamReader(AsyncStreamReaderMixin): + """An enhancement of asyncio.StreamReader. + + Supports asynchronous iteration by line, chunk or as available:: + + async for line in reader: + ... + async for chunk in reader.iter_chunked(1024): + ... + async for slice in reader.iter_any(): + ... 
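+        # and, when chunked transfer encoding is used:
+        async for data, end_of_http_chunk in reader.iter_chunks():
+            ...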
+ + """ + + total_bytes = 0 + + def __init__( + self, + protocol: BaseProtocol, + limit: int, + *, + timer: Optional[BaseTimerContext] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + ) -> None: + self._protocol = protocol + self._low_water = limit + self._high_water = limit * 2 + if loop is None: + loop = asyncio.get_event_loop() + self._loop = loop + self._size = 0 + self._cursor = 0 + self._http_chunk_splits = None # type: Optional[List[int]] + self._buffer = collections.deque() # type: Deque[bytes] + self._buffer_offset = 0 + self._eof = False + self._waiter = None # type: Optional[asyncio.Future[None]] + self._eof_waiter = None # type: Optional[asyncio.Future[None]] + self._exception = None # type: Optional[BaseException] + self._timer = timer + self._eof_callbacks = [] # type: List[Callable[[], None]] + + def __repr__(self) -> str: + info = [self.__class__.__name__] + if self._size: + info.append("%d bytes" % self._size) + if self._eof: + info.append("eof") + if self._low_water != 2 ** 16: # default limit + info.append("low=%d high=%d" % (self._low_water, self._high_water)) + if self._waiter: + info.append("w=%r" % self._waiter) + if self._exception: + info.append("e=%r" % self._exception) + return "<%s>" % " ".join(info) + + def get_read_buffer_limits(self) -> Tuple[int, int]: + return (self._low_water, self._high_water) + + def exception(self) -> Optional[BaseException]: + return self._exception + + def set_exception(self, exc: BaseException) -> None: + self._exception = exc + self._eof_callbacks.clear() + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_exception(waiter, exc) + + waiter = self._eof_waiter + if waiter is not None: + self._eof_waiter = None + set_exception(waiter, exc) + + def on_eof(self, callback: Callable[[], None]) -> None: + if self._eof: + try: + callback() + except Exception: + internal_logger.exception("Exception in eof callback") + else: + self._eof_callbacks.append(callback) + + def feed_eof(self) -> None: + self._eof = True + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_result(waiter, None) + + waiter = self._eof_waiter + if waiter is not None: + self._eof_waiter = None + set_result(waiter, None) + + for cb in self._eof_callbacks: + try: + cb() + except Exception: + internal_logger.exception("Exception in eof callback") + + self._eof_callbacks.clear() + + def is_eof(self) -> bool: + """Return True if 'feed_eof' was called.""" + return self._eof + + def at_eof(self) -> bool: + """Return True if the buffer is empty and 'feed_eof' was called.""" + return self._eof and not self._buffer + + async def wait_eof(self) -> None: + if self._eof: + return + + assert self._eof_waiter is None + self._eof_waiter = self._loop.create_future() + try: + await self._eof_waiter + finally: + self._eof_waiter = None + + def unread_data(self, data: bytes) -> None: + """rollback reading some data from stream, inserting it to buffer head.""" + warnings.warn( + "unread_data() is deprecated " + "and will be removed in future releases (#3260)", + DeprecationWarning, + stacklevel=2, + ) + if not data: + return + + if self._buffer_offset: + self._buffer[0] = self._buffer[0][self._buffer_offset :] + self._buffer_offset = 0 + self._size += len(data) + self._cursor -= len(data) + self._buffer.appendleft(data) + self._eof_counter = 0 + + # TODO: size is ignored, remove the param later + def feed_data(self, data: bytes, size: int = 0) -> None: + assert not self._eof, "feed_data after feed_eof" + + if not data: + return 
+
+        self._size += len(data)
+        self._buffer.append(data)
+        self.total_bytes += len(data)
+
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            set_result(waiter, None)
+
+        if self._size > self._high_water and not self._protocol._reading_paused:
+            self._protocol.pause_reading()
+
+    def begin_http_chunk_receiving(self) -> None:
+        if self._http_chunk_splits is None:
+            if self.total_bytes:
+                raise RuntimeError(
+                    "Called begin_http_chunk_receiving when "
+                    "some data was already fed"
+                )
+            self._http_chunk_splits = []
+
+    def end_http_chunk_receiving(self) -> None:
+        if self._http_chunk_splits is None:
+            raise RuntimeError(
+                "Called end_http_chunk_receiving without calling "
+                "begin_http_chunk_receiving first"
+            )
+
+        # self._http_chunk_splits contains logical byte offsets from start of
+        # the body transfer. Each offset is the offset of the end of a chunk.
+        # "Logical" means bytes, accessible for a user.
+        # If no chunks containing logical data were received, the current
+        # position is definitely zero.
+        pos = self._http_chunk_splits[-1] if self._http_chunk_splits else 0
+
+        if self.total_bytes == pos:
+            # We should not add empty chunks here. So we check for that.
+            # Note, when chunked + gzip is used, we can receive a chunk
+            # of compressed data, but that data may not be enough for gzip FSM
+            # to yield any uncompressed data. That's why current position may
+            # not change after receiving a chunk.
+            return
+
+        self._http_chunk_splits.append(self.total_bytes)
+
+        # wake up readchunk when end of http chunk received
+        waiter = self._waiter
+        if waiter is not None:
+            self._waiter = None
+            set_result(waiter, None)
+
+    async def _wait(self, func_name: str) -> None:
+        # StreamReader uses a future to link the protocol feed_data() method
+        # to a read coroutine. Running two read coroutines at the same time
+        # would have an unexpected behaviour. It would not be possible to know
+        # which coroutine would get the next data.
+        if self._waiter is not None:
+            raise RuntimeError(
+                "%s() called while another coroutine is "
+                "already waiting for incoming data" % func_name
+            )
+
+        waiter = self._waiter = self._loop.create_future()
+        try:
+            if self._timer:
+                with self._timer:
+                    await waiter
+            else:
+                await waiter
+        finally:
+            self._waiter = None
+
+    async def readline(self) -> bytes:
+        return await self.readuntil()
+
+    async def readuntil(self, separator: bytes = b"\n") -> bytes:
+        seplen = len(separator)
+        if seplen == 0:
+            raise ValueError("Separator should be at least one-byte string")
+
+        if self._exception is not None:
+            raise self._exception
+
+        chunk = b""
+        chunk_size = 0
+        not_enough = True
+
+        while not_enough:
+            while self._buffer and not_enough:
+                offset = self._buffer_offset
+                ichar = self._buffer[0].find(separator, offset) + 1
+                # Read from current offset to found separator or to the end.
+                data = self._read_nowait_chunk(ichar - offset if ichar else -1)
+                chunk += data
+                chunk_size += len(data)
+                if ichar:
+                    not_enough = False
+
+                if chunk_size > self._high_water:
+                    raise ValueError("Chunk too big")
+
+            if self._eof:
+                break
+
+            if not_enough:
+                await self._wait("readuntil")
+
+        return chunk
+
+    async def read(self, n: int = -1) -> bytes:
+        if self._exception is not None:
+            raise self._exception
+
+        # migration problem; with DataQueue you have to catch the EofStream
+        # exception, so a common pattern is to run payload.read() inside an
+        # infinite loop. With StreamReader that pattern can turn into a real
+        # infinite loop, so let's keep this guard for one major release.
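+        # e.g. (illustrative) `while True: await payload.read()` carried over
+        # from DataQueue would spin on b"" at eof instead of raising
+        # EofStream; the __debug__ guard below tries to detect that.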
+ if __debug__: + if self._eof and not self._buffer: + self._eof_counter = getattr(self, "_eof_counter", 0) + 1 + if self._eof_counter > 5: + internal_logger.warning( + "Multiple access to StreamReader in eof state, " + "might be infinite loop.", + stack_info=True, + ) + + if not n: + return b"" + + if n < 0: + # This used to just loop creating a new waiter hoping to + # collect everything in self._buffer, but that would + # deadlock if the subprocess sends more than self.limit + # bytes. So just call self.readany() until EOF. + blocks = [] + while True: + block = await self.readany() + if not block: + break + blocks.append(block) + return b"".join(blocks) + + # TODO: should be `if` instead of `while` + # because waiter maybe triggered on chunk end, + # without feeding any data + while not self._buffer and not self._eof: + await self._wait("read") + + return self._read_nowait(n) + + async def readany(self) -> bytes: + if self._exception is not None: + raise self._exception + + # TODO: should be `if` instead of `while` + # because waiter maybe triggered on chunk end, + # without feeding any data + while not self._buffer and not self._eof: + await self._wait("readany") + + return self._read_nowait(-1) + + async def readchunk(self) -> Tuple[bytes, bool]: + """Returns a tuple of (data, end_of_http_chunk). + + When chunked transfer + encoding is used, end_of_http_chunk is a boolean indicating if the end + of the data corresponds to the end of a HTTP chunk , otherwise it is + always False. + """ + while True: + if self._exception is not None: + raise self._exception + + while self._http_chunk_splits: + pos = self._http_chunk_splits.pop(0) + if pos == self._cursor: + return (b"", True) + if pos > self._cursor: + return (self._read_nowait(pos - self._cursor), True) + internal_logger.warning( + "Skipping HTTP chunk end due to data " + "consumption beyond chunk boundary" + ) + + if self._buffer: + return (self._read_nowait_chunk(-1), False) + # return (self._read_nowait(-1), False) + + if self._eof: + # Special case for signifying EOF. + # (b'', True) is not a final return value actually. + return (b"", False) + + await self._wait("readchunk") + + async def readexactly(self, n: int) -> bytes: + if self._exception is not None: + raise self._exception + + blocks = [] # type: List[bytes] + while n > 0: + block = await self.read(n) + if not block: + partial = b"".join(blocks) + raise asyncio.IncompleteReadError(partial, len(partial) + n) + blocks.append(block) + n -= len(block) + + return b"".join(blocks) + + def read_nowait(self, n: int = -1) -> bytes: + # default was changed to be consistent with .read(-1) + # + # I believe the most users don't know about the method and + # they are not affected. + if self._exception is not None: + raise self._exception + + if self._waiter and not self._waiter.done(): + raise RuntimeError( + "Called while some coroutine is waiting for incoming data." 
+ ) + + return self._read_nowait(n) + + def _read_nowait_chunk(self, n: int) -> bytes: + first_buffer = self._buffer[0] + offset = self._buffer_offset + if n != -1 and len(first_buffer) - offset > n: + data = first_buffer[offset : offset + n] + self._buffer_offset += n + + elif offset: + self._buffer.popleft() + data = first_buffer[offset:] + self._buffer_offset = 0 + + else: + data = self._buffer.popleft() + + self._size -= len(data) + self._cursor += len(data) + + chunk_splits = self._http_chunk_splits + # Prevent memory leak: drop useless chunk splits + while chunk_splits and chunk_splits[0] < self._cursor: + chunk_splits.pop(0) + + if self._size < self._low_water and self._protocol._reading_paused: + self._protocol.resume_reading() + return data + + def _read_nowait(self, n: int) -> bytes: + """Read not more than n bytes, or whole buffer if n == -1""" + chunks = [] + + while self._buffer: + chunk = self._read_nowait_chunk(n) + chunks.append(chunk) + if n != -1: + n -= len(chunk) + if n == 0: + break + + return b"".join(chunks) if chunks else b"" + + +class EmptyStreamReader(StreamReader): # lgtm [py/missing-call-to-init] + def __init__(self) -> None: + pass + + def exception(self) -> Optional[BaseException]: + return None + + def set_exception(self, exc: BaseException) -> None: + pass + + def on_eof(self, callback: Callable[[], None]) -> None: + try: + callback() + except Exception: + internal_logger.exception("Exception in eof callback") + + def feed_eof(self) -> None: + pass + + def is_eof(self) -> bool: + return True + + def at_eof(self) -> bool: + return True + + async def wait_eof(self) -> None: + return + + def feed_data(self, data: bytes, n: int = 0) -> None: + pass + + async def readline(self) -> bytes: + return b"" + + async def read(self, n: int = -1) -> bytes: + return b"" + + # TODO add async def readuntil + + async def readany(self) -> bytes: + return b"" + + async def readchunk(self) -> Tuple[bytes, bool]: + return (b"", True) + + async def readexactly(self, n: int) -> bytes: + raise asyncio.IncompleteReadError(b"", n) + + def read_nowait(self, n: int = -1) -> bytes: + return b"" + + +EMPTY_PAYLOAD: Final[StreamReader] = EmptyStreamReader() + + +class DataQueue(Generic[_T]): + """DataQueue is a general-purpose blocking queue with one reader.""" + + def __init__(self, loop: asyncio.AbstractEventLoop) -> None: + self._loop = loop + self._eof = False + self._waiter = None # type: Optional[asyncio.Future[None]] + self._exception = None # type: Optional[BaseException] + self._size = 0 + self._buffer = collections.deque() # type: Deque[Tuple[_T, int]] + + def __len__(self) -> int: + return len(self._buffer) + + def is_eof(self) -> bool: + return self._eof + + def at_eof(self) -> bool: + return self._eof and not self._buffer + + def exception(self) -> Optional[BaseException]: + return self._exception + + def set_exception(self, exc: BaseException) -> None: + self._eof = True + self._exception = exc + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_exception(waiter, exc) + + def feed_data(self, data: _T, size: int = 0) -> None: + self._size += size + self._buffer.append((data, size)) + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_result(waiter, None) + + def feed_eof(self) -> None: + self._eof = True + + waiter = self._waiter + if waiter is not None: + self._waiter = None + set_result(waiter, None) + + async def read(self) -> _T: + if not self._buffer and not self._eof: + assert not self._waiter + self._waiter = 
self._loop.create_future() + try: + await self._waiter + except (asyncio.CancelledError, asyncio.TimeoutError): + self._waiter = None + raise + + if self._buffer: + data, size = self._buffer.popleft() + self._size -= size + return data + else: + if self._exception is not None: + raise self._exception + else: + raise EofStream + + def __aiter__(self) -> AsyncStreamIterator[_T]: + return AsyncStreamIterator(self.read) + + +class FlowControlDataQueue(DataQueue[_T]): + """FlowControlDataQueue resumes and pauses an underlying stream. + + It is a destination for parsed data. + """ + + def __init__( + self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop + ) -> None: + super().__init__(loop=loop) + + self._protocol = protocol + self._limit = limit * 2 + + def feed_data(self, data: _T, size: int = 0) -> None: + super().feed_data(data, size) + + if self._size > self._limit and not self._protocol._reading_paused: + self._protocol.pause_reading() + + async def read(self) -> _T: + try: + return await super().read() + finally: + if self._size < self._limit and self._protocol._reading_paused: + self._protocol.resume_reading() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/tracing.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..0e118a399713726e445e5c15788b05f3176f1025 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/tracing.py @@ -0,0 +1,472 @@ +from types import SimpleNamespace +from typing import TYPE_CHECKING, Awaitable, Optional, Type, TypeVar + +import attr +from aiosignal import Signal +from multidict import CIMultiDict +from yarl import URL + +from .client_reqrep import ClientResponse + +if TYPE_CHECKING: # pragma: no cover + from .client import ClientSession + from .typedefs import Protocol + + _ParamT_contra = TypeVar("_ParamT_contra", contravariant=True) + + class _SignalCallback(Protocol[_ParamT_contra]): + def __call__( + self, + __client_session: ClientSession, + __trace_config_ctx: SimpleNamespace, + __params: _ParamT_contra, + ) -> Awaitable[None]: + ... 
+ + +__all__ = ( + "TraceConfig", + "TraceRequestStartParams", + "TraceRequestEndParams", + "TraceRequestExceptionParams", + "TraceConnectionQueuedStartParams", + "TraceConnectionQueuedEndParams", + "TraceConnectionCreateStartParams", + "TraceConnectionCreateEndParams", + "TraceConnectionReuseconnParams", + "TraceDnsResolveHostStartParams", + "TraceDnsResolveHostEndParams", + "TraceDnsCacheHitParams", + "TraceDnsCacheMissParams", + "TraceRequestRedirectParams", + "TraceRequestChunkSentParams", + "TraceResponseChunkReceivedParams", + "TraceRequestHeadersSentParams", +) + + +class TraceConfig: + """First-class used to trace requests launched via ClientSession objects.""" + + def __init__( + self, trace_config_ctx_factory: Type[SimpleNamespace] = SimpleNamespace + ) -> None: + self._on_request_start = Signal( + self + ) # type: Signal[_SignalCallback[TraceRequestStartParams]] + self._on_request_chunk_sent = Signal( + self + ) # type: Signal[_SignalCallback[TraceRequestChunkSentParams]] + self._on_response_chunk_received = Signal( + self + ) # type: Signal[_SignalCallback[TraceResponseChunkReceivedParams]] + self._on_request_end = Signal( + self + ) # type: Signal[_SignalCallback[TraceRequestEndParams]] + self._on_request_exception = Signal( + self + ) # type: Signal[_SignalCallback[TraceRequestExceptionParams]] + self._on_request_redirect = Signal( + self + ) # type: Signal[_SignalCallback[TraceRequestRedirectParams]] + self._on_connection_queued_start = Signal( + self + ) # type: Signal[_SignalCallback[TraceConnectionQueuedStartParams]] + self._on_connection_queued_end = Signal( + self + ) # type: Signal[_SignalCallback[TraceConnectionQueuedEndParams]] + self._on_connection_create_start = Signal( + self + ) # type: Signal[_SignalCallback[TraceConnectionCreateStartParams]] + self._on_connection_create_end = Signal( + self + ) # type: Signal[_SignalCallback[TraceConnectionCreateEndParams]] + self._on_connection_reuseconn = Signal( + self + ) # type: Signal[_SignalCallback[TraceConnectionReuseconnParams]] + self._on_dns_resolvehost_start = Signal( + self + ) # type: Signal[_SignalCallback[TraceDnsResolveHostStartParams]] + self._on_dns_resolvehost_end = Signal( + self + ) # type: Signal[_SignalCallback[TraceDnsResolveHostEndParams]] + self._on_dns_cache_hit = Signal( + self + ) # type: Signal[_SignalCallback[TraceDnsCacheHitParams]] + self._on_dns_cache_miss = Signal( + self + ) # type: Signal[_SignalCallback[TraceDnsCacheMissParams]] + self._on_request_headers_sent = Signal( + self + ) # type: Signal[_SignalCallback[TraceRequestHeadersSentParams]] + + self._trace_config_ctx_factory = trace_config_ctx_factory + + def trace_config_ctx( + self, trace_request_ctx: Optional[SimpleNamespace] = None + ) -> SimpleNamespace: + """Return a new trace_config_ctx instance""" + return self._trace_config_ctx_factory(trace_request_ctx=trace_request_ctx) + + def freeze(self) -> None: + self._on_request_start.freeze() + self._on_request_chunk_sent.freeze() + self._on_response_chunk_received.freeze() + self._on_request_end.freeze() + self._on_request_exception.freeze() + self._on_request_redirect.freeze() + self._on_connection_queued_start.freeze() + self._on_connection_queued_end.freeze() + self._on_connection_create_start.freeze() + self._on_connection_create_end.freeze() + self._on_connection_reuseconn.freeze() + self._on_dns_resolvehost_start.freeze() + self._on_dns_resolvehost_end.freeze() + self._on_dns_cache_hit.freeze() + self._on_dns_cache_miss.freeze() + self._on_request_headers_sent.freeze() + + 
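+    # Usage sketch (illustrative only; mirrors the public tracing API):
+    #
+    #     async def on_request_start(session, trace_config_ctx, params):
+    #         print("Request start:", params.url)
+    #
+    #     trace_config = TraceConfig()
+    #     trace_config.on_request_start.append(on_request_start)
+    #     session = ClientSession(trace_configs=[trace_config])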
@property + def on_request_start(self) -> "Signal[_SignalCallback[TraceRequestStartParams]]": + return self._on_request_start + + @property + def on_request_chunk_sent( + self, + ) -> "Signal[_SignalCallback[TraceRequestChunkSentParams]]": + return self._on_request_chunk_sent + + @property + def on_response_chunk_received( + self, + ) -> "Signal[_SignalCallback[TraceResponseChunkReceivedParams]]": + return self._on_response_chunk_received + + @property + def on_request_end(self) -> "Signal[_SignalCallback[TraceRequestEndParams]]": + return self._on_request_end + + @property + def on_request_exception( + self, + ) -> "Signal[_SignalCallback[TraceRequestExceptionParams]]": + return self._on_request_exception + + @property + def on_request_redirect( + self, + ) -> "Signal[_SignalCallback[TraceRequestRedirectParams]]": + return self._on_request_redirect + + @property + def on_connection_queued_start( + self, + ) -> "Signal[_SignalCallback[TraceConnectionQueuedStartParams]]": + return self._on_connection_queued_start + + @property + def on_connection_queued_end( + self, + ) -> "Signal[_SignalCallback[TraceConnectionQueuedEndParams]]": + return self._on_connection_queued_end + + @property + def on_connection_create_start( + self, + ) -> "Signal[_SignalCallback[TraceConnectionCreateStartParams]]": + return self._on_connection_create_start + + @property + def on_connection_create_end( + self, + ) -> "Signal[_SignalCallback[TraceConnectionCreateEndParams]]": + return self._on_connection_create_end + + @property + def on_connection_reuseconn( + self, + ) -> "Signal[_SignalCallback[TraceConnectionReuseconnParams]]": + return self._on_connection_reuseconn + + @property + def on_dns_resolvehost_start( + self, + ) -> "Signal[_SignalCallback[TraceDnsResolveHostStartParams]]": + return self._on_dns_resolvehost_start + + @property + def on_dns_resolvehost_end( + self, + ) -> "Signal[_SignalCallback[TraceDnsResolveHostEndParams]]": + return self._on_dns_resolvehost_end + + @property + def on_dns_cache_hit(self) -> "Signal[_SignalCallback[TraceDnsCacheHitParams]]": + return self._on_dns_cache_hit + + @property + def on_dns_cache_miss(self) -> "Signal[_SignalCallback[TraceDnsCacheMissParams]]": + return self._on_dns_cache_miss + + @property + def on_request_headers_sent( + self, + ) -> "Signal[_SignalCallback[TraceRequestHeadersSentParams]]": + return self._on_request_headers_sent + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestStartParams: + """Parameters sent by the `on_request_start` signal""" + + method: str + url: URL + headers: "CIMultiDict[str]" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestChunkSentParams: + """Parameters sent by the `on_request_chunk_sent` signal""" + + method: str + url: URL + chunk: bytes + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceResponseChunkReceivedParams: + """Parameters sent by the `on_response_chunk_received` signal""" + + method: str + url: URL + chunk: bytes + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestEndParams: + """Parameters sent by the `on_request_end` signal""" + + method: str + url: URL + headers: "CIMultiDict[str]" + response: ClientResponse + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestExceptionParams: + """Parameters sent by the `on_request_exception` signal""" + + method: str + url: URL + headers: "CIMultiDict[str]" + exception: BaseException + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class 
TraceRequestRedirectParams: + """Parameters sent by the `on_request_redirect` signal""" + + method: str + url: URL + headers: "CIMultiDict[str]" + response: ClientResponse + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceConnectionQueuedStartParams: + """Parameters sent by the `on_connection_queued_start` signal""" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceConnectionQueuedEndParams: + """Parameters sent by the `on_connection_queued_end` signal""" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceConnectionCreateStartParams: + """Parameters sent by the `on_connection_create_start` signal""" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceConnectionCreateEndParams: + """Parameters sent by the `on_connection_create_end` signal""" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceConnectionReuseconnParams: + """Parameters sent by the `on_connection_reuseconn` signal""" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceDnsResolveHostStartParams: + """Parameters sent by the `on_dns_resolvehost_start` signal""" + + host: str + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceDnsResolveHostEndParams: + """Parameters sent by the `on_dns_resolvehost_end` signal""" + + host: str + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceDnsCacheHitParams: + """Parameters sent by the `on_dns_cache_hit` signal""" + + host: str + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceDnsCacheMissParams: + """Parameters sent by the `on_dns_cache_miss` signal""" + + host: str + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class TraceRequestHeadersSentParams: + """Parameters sent by the `on_request_headers_sent` signal""" + + method: str + url: URL + headers: "CIMultiDict[str]" + + +class Trace: + """Internal dependency holder class. + + Used to keep together the main dependencies used + at the moment of send a signal. 
+ """ + + def __init__( + self, + session: "ClientSession", + trace_config: TraceConfig, + trace_config_ctx: SimpleNamespace, + ) -> None: + self._trace_config = trace_config + self._trace_config_ctx = trace_config_ctx + self._session = session + + async def send_request_start( + self, method: str, url: URL, headers: "CIMultiDict[str]" + ) -> None: + return await self._trace_config.on_request_start.send( + self._session, + self._trace_config_ctx, + TraceRequestStartParams(method, url, headers), + ) + + async def send_request_chunk_sent( + self, method: str, url: URL, chunk: bytes + ) -> None: + return await self._trace_config.on_request_chunk_sent.send( + self._session, + self._trace_config_ctx, + TraceRequestChunkSentParams(method, url, chunk), + ) + + async def send_response_chunk_received( + self, method: str, url: URL, chunk: bytes + ) -> None: + return await self._trace_config.on_response_chunk_received.send( + self._session, + self._trace_config_ctx, + TraceResponseChunkReceivedParams(method, url, chunk), + ) + + async def send_request_end( + self, + method: str, + url: URL, + headers: "CIMultiDict[str]", + response: ClientResponse, + ) -> None: + return await self._trace_config.on_request_end.send( + self._session, + self._trace_config_ctx, + TraceRequestEndParams(method, url, headers, response), + ) + + async def send_request_exception( + self, + method: str, + url: URL, + headers: "CIMultiDict[str]", + exception: BaseException, + ) -> None: + return await self._trace_config.on_request_exception.send( + self._session, + self._trace_config_ctx, + TraceRequestExceptionParams(method, url, headers, exception), + ) + + async def send_request_redirect( + self, + method: str, + url: URL, + headers: "CIMultiDict[str]", + response: ClientResponse, + ) -> None: + return await self._trace_config._on_request_redirect.send( + self._session, + self._trace_config_ctx, + TraceRequestRedirectParams(method, url, headers, response), + ) + + async def send_connection_queued_start(self) -> None: + return await self._trace_config.on_connection_queued_start.send( + self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams() + ) + + async def send_connection_queued_end(self) -> None: + return await self._trace_config.on_connection_queued_end.send( + self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams() + ) + + async def send_connection_create_start(self) -> None: + return await self._trace_config.on_connection_create_start.send( + self._session, self._trace_config_ctx, TraceConnectionCreateStartParams() + ) + + async def send_connection_create_end(self) -> None: + return await self._trace_config.on_connection_create_end.send( + self._session, self._trace_config_ctx, TraceConnectionCreateEndParams() + ) + + async def send_connection_reuseconn(self) -> None: + return await self._trace_config.on_connection_reuseconn.send( + self._session, self._trace_config_ctx, TraceConnectionReuseconnParams() + ) + + async def send_dns_resolvehost_start(self, host: str) -> None: + return await self._trace_config.on_dns_resolvehost_start.send( + self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host) + ) + + async def send_dns_resolvehost_end(self, host: str) -> None: + return await self._trace_config.on_dns_resolvehost_end.send( + self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host) + ) + + async def send_dns_cache_hit(self, host: str) -> None: + return await self._trace_config.on_dns_cache_hit.send( + self._session, self._trace_config_ctx, 
TraceDnsCacheHitParams(host) + ) + + async def send_dns_cache_miss(self, host: str) -> None: + return await self._trace_config.on_dns_cache_miss.send( + self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host) + ) + + async def send_request_headers( + self, method: str, url: URL, headers: "CIMultiDict[str]" + ) -> None: + return await self._trace_config._on_request_headers_sent.send( + self._session, + self._trace_config_ctx, + TraceRequestHeadersSentParams(method, url, headers), + ) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/typedefs.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/typedefs.py new file mode 100644 index 0000000000000000000000000000000000000000..84283d9a4634a4836cd50cabe34efd2ae5915f56 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/typedefs.py @@ -0,0 +1,64 @@ +import json +import os +import sys +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Iterable, + Mapping, + Tuple, + Union, +) + +from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy, istr +from yarl import URL + +# These are for other modules to use (to avoid repeating the conditional import). +if sys.version_info >= (3, 8): + from typing import Final as Final, Protocol as Protocol, TypedDict as TypedDict +else: + from typing_extensions import ( # noqa: F401 + Final, + Protocol as Protocol, + TypedDict as TypedDict, + ) + +DEFAULT_JSON_ENCODER = json.dumps +DEFAULT_JSON_DECODER = json.loads + +if TYPE_CHECKING: # pragma: no cover + _CIMultiDict = CIMultiDict[str] + _CIMultiDictProxy = CIMultiDictProxy[str] + _MultiDict = MultiDict[str] + _MultiDictProxy = MultiDictProxy[str] + from http.cookies import BaseCookie, Morsel + + from .web import Request, StreamResponse +else: + _CIMultiDict = CIMultiDict + _CIMultiDictProxy = CIMultiDictProxy + _MultiDict = MultiDict + _MultiDictProxy = MultiDictProxy + +Byteish = Union[bytes, bytearray, memoryview] +JSONEncoder = Callable[[Any], str] +JSONDecoder = Callable[[str], Any] +LooseHeaders = Union[Mapping[Union[str, istr], str], _CIMultiDict, _CIMultiDictProxy] +RawHeaders = Tuple[Tuple[bytes, bytes], ...] 
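A note on the tracing API vendored above: the TraceConfig signals are plain append-only lists of coroutine callbacks, and a session fires them with the frozen params classes defined above. A minimal usage sketch, assuming this vendored aiohttp is importable; the callback names and URL are placeholders, only TraceConfig, ClientSession and its trace_configs parameter come from the library itself:

import asyncio

import aiohttp


async def on_request_start(session, ctx, params):
    # params is the frozen TraceRequestStartParams(method, url, headers)
    print("request start:", params.method, params.url)


async def on_request_end(session, ctx, params):
    # params.response is the ClientResponse carried by TraceRequestEndParams
    print("request end:", params.response.status)


async def main() -> None:
    trace_config = aiohttp.TraceConfig()
    trace_config.on_request_start.append(on_request_start)
    trace_config.on_request_end.append(on_request_end)
    async with aiohttp.ClientSession(trace_configs=[trace_config]) as session:
        async with session.get("https://example.com") as resp:
            await resp.read()


asyncio.run(main())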
+StrOrURL = Union[str, URL] + +LooseCookiesMappings = Mapping[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]] +LooseCookiesIterables = Iterable[ + Tuple[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]] +] +LooseCookies = Union[ + LooseCookiesMappings, + LooseCookiesIterables, + "BaseCookie[str]", +] + +Handler = Callable[["Request"], Awaitable["StreamResponse"]] + +PathLike = Union[str, "os.PathLike[str]"] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_middlewares.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_middlewares.py new file mode 100644 index 0000000000000000000000000000000000000000..fabcc449a2107211fd99cd59f576a2d855d0e042 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_middlewares.py @@ -0,0 +1,119 @@ +import re +from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar + +from .typedefs import Handler +from .web_exceptions import HTTPPermanentRedirect, _HTTPMove +from .web_request import Request +from .web_response import StreamResponse +from .web_urldispatcher import SystemRoute + +__all__ = ( + "middleware", + "normalize_path_middleware", +) + +if TYPE_CHECKING: # pragma: no cover + from .web_app import Application + +_Func = TypeVar("_Func") + + +async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]: + alt_request = request.clone(rel_url=path) + + match_info = await request.app.router.resolve(alt_request) + alt_request._match_info = match_info + + if match_info.http_exception is None: + return True, alt_request + + return False, request + + +def middleware(f: _Func) -> _Func: + f.__middleware_version__ = 1 # type: ignore[attr-defined] + return f + + +_Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] + + +def normalize_path_middleware( + *, + append_slash: bool = True, + remove_slash: bool = False, + merge_slashes: bool = True, + redirect_class: Type[_HTTPMove] = HTTPPermanentRedirect, +) -> _Middleware: + """Factory for producing a middleware that normalizes the path of a request. + + Normalizing means: + - Add or remove a trailing slash to the path. + - Double slashes are replaced by one. + + The middleware returns as soon as it finds a path that resolves + correctly. The order if both merge and append/remove are enabled is + 1) merge slashes + 2) append/remove slash + 3) both merge slashes and append/remove slash. + If the path resolves with at least one of those conditions, it will + redirect to the new path. + + Only one of `append_slash` and `remove_slash` can be enabled. If both + are `True` the factory will raise an assertion error + + If `append_slash` is `True` the middleware will append a slash when + needed. If a resource is defined with trailing slash and the request + comes without it, it will append it automatically. + + If `remove_slash` is `True`, `append_slash` must be `False`. When enabled + the middleware will remove trailing slashes and redirect if the resource + is defined + + If merge_slashes is True, merge multiple consecutive slashes in the + path into one. + """ + correct_configuration = not (append_slash and remove_slash) + assert correct_configuration, "Cannot both remove and append slash" + + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + if isinstance(request.match_info.route, SystemRoute): + paths_to_check = [] + if "?" 
in request.raw_path: + path, query = request.raw_path.split("?", 1) + query = "?" + query + else: + query = "" + path = request.raw_path + + if merge_slashes: + paths_to_check.append(re.sub("//+", "/", path)) + if append_slash and not request.path.endswith("/"): + paths_to_check.append(path + "/") + if remove_slash and request.path.endswith("/"): + paths_to_check.append(path[:-1]) + if merge_slashes and append_slash: + paths_to_check.append(re.sub("//+", "/", path + "/")) + if merge_slashes and remove_slash: + merged_slashes = re.sub("//+", "/", path) + paths_to_check.append(merged_slashes[:-1]) + + for path in paths_to_check: + path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg + resolves, request = await _check_request_resolves(request, path) + if resolves: + raise redirect_class(request.raw_path + query) + + return await handler(request) + + return impl + + +def _fix_request_current_app(app: "Application") -> _Middleware: + @middleware + async def impl(request: Request, handler: Handler) -> StreamResponse: + with request.match_info.set_current_app(app): + return await handler(request) + + return impl diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_protocol.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..ad0c0498e39562e30f1f8275a719b554a813b06c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_protocol.py @@ -0,0 +1,681 @@ +import asyncio +import asyncio.streams +import traceback +import warnings +from collections import deque +from contextlib import suppress +from html import escape as html_escape +from http import HTTPStatus +from logging import Logger +from typing import ( + TYPE_CHECKING, + Any, + Awaitable, + Callable, + Deque, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +import attr +import yarl + +from .abc import AbstractAccessLogger, AbstractStreamWriter +from .base_protocol import BaseProtocol +from .helpers import ceil_timeout +from .http import ( + HttpProcessingError, + HttpRequestParser, + HttpVersion10, + RawRequestMessage, + StreamWriter, +) +from .log import access_logger, server_logger +from .streams import EMPTY_PAYLOAD, StreamReader +from .tcp_helpers import tcp_keepalive +from .web_exceptions import HTTPException +from .web_log import AccessLogger +from .web_request import BaseRequest +from .web_response import Response, StreamResponse + +__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError") + +if TYPE_CHECKING: # pragma: no cover + from .web_server import Server + + +_RequestFactory = Callable[ + [ + RawRequestMessage, + StreamReader, + "RequestHandler", + AbstractStreamWriter, + "asyncio.Task[None]", + ], + BaseRequest, +] + +_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]] + +ERROR = RawRequestMessage( + "UNKNOWN", + "/", + HttpVersion10, + {}, # type: ignore[arg-type] + {}, # type: ignore[arg-type] + True, + None, + False, + False, + yarl.URL("/"), +) + + +class RequestPayloadError(Exception): + """Payload parsing error.""" + + +class PayloadAccessError(Exception): + """Payload was accessed after response was sent.""" + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class _ErrInfo: + status: int + exc: BaseException + message: str + + +_MsgType = Tuple[Union[RawRequestMessage, _ErrInfo], StreamReader] + + +class RequestHandler(BaseProtocol): + """HTTP protocol 
implementation. + + RequestHandler handles incoming HTTP request. It reads request line, + request headers and request payload and calls handle_request() method. + By default it always returns with 404 response. + + RequestHandler handles errors in incoming request, like bad + status line, bad headers or incomplete payload. If any error occurs, + connection gets closed. + + keepalive_timeout -- number of seconds before closing + keep-alive connection + + tcp_keepalive -- TCP keep-alive is on, default is on + + debug -- enable debug mode + + logger -- custom logger object + + access_log_class -- custom class for access_logger + + access_log -- custom logging object + + access_log_format -- access log format string + + loop -- Optional event loop + + max_line_size -- Optional maximum header line size + + max_field_size -- Optional maximum header field size + + max_headers -- Optional maximum header size + + """ + + KEEPALIVE_RESCHEDULE_DELAY = 1 + + __slots__ = ( + "_request_count", + "_keepalive", + "_manager", + "_request_handler", + "_request_factory", + "_tcp_keepalive", + "_keepalive_time", + "_keepalive_handle", + "_keepalive_timeout", + "_lingering_time", + "_messages", + "_message_tail", + "_waiter", + "_task_handler", + "_upgrade", + "_payload_parser", + "_request_parser", + "_reading_paused", + "logger", + "debug", + "access_log", + "access_logger", + "_close", + "_force_close", + "_current_request", + ) + + def __init__( + self, + manager: "Server", + *, + loop: asyncio.AbstractEventLoop, + keepalive_timeout: float = 75.0, # NGINX default is 75 secs + tcp_keepalive: bool = True, + logger: Logger = server_logger, + access_log_class: Type[AbstractAccessLogger] = AccessLogger, + access_log: Logger = access_logger, + access_log_format: str = AccessLogger.LOG_FORMAT, + debug: bool = False, + max_line_size: int = 8190, + max_headers: int = 32768, + max_field_size: int = 8190, + lingering_time: float = 10.0, + read_bufsize: int = 2 ** 16, + auto_decompress: bool = True, + ): + super().__init__(loop) + + self._request_count = 0 + self._keepalive = False + self._current_request = None # type: Optional[BaseRequest] + self._manager = manager # type: Optional[Server] + self._request_handler: Optional[_RequestHandler] = manager.request_handler + self._request_factory: Optional[_RequestFactory] = manager.request_factory + + self._tcp_keepalive = tcp_keepalive + # placeholder to be replaced on keepalive timeout setup + self._keepalive_time = 0.0 + self._keepalive_handle = None # type: Optional[asyncio.Handle] + self._keepalive_timeout = keepalive_timeout + self._lingering_time = float(lingering_time) + + self._messages: Deque[_MsgType] = deque() + self._message_tail = b"" + + self._waiter = None # type: Optional[asyncio.Future[None]] + self._task_handler = None # type: Optional[asyncio.Task[None]] + + self._upgrade = False + self._payload_parser = None # type: Any + self._request_parser = HttpRequestParser( + self, + loop, + read_bufsize, + max_line_size=max_line_size, + max_field_size=max_field_size, + max_headers=max_headers, + payload_exception=RequestPayloadError, + auto_decompress=auto_decompress, + ) # type: Optional[HttpRequestParser] + + self.logger = logger + self.debug = debug + self.access_log = access_log + if access_log: + self.access_logger = access_log_class( + access_log, access_log_format + ) # type: Optional[AbstractAccessLogger] + else: + self.access_logger = None + + self._close = False + self._force_close = False + + def __repr__(self) -> str: + return "<{} {}>".format( + 
self.__class__.__name__, + "connected" if self.transport is not None else "disconnected", + ) + + @property + def keepalive_timeout(self) -> float: + return self._keepalive_timeout + + async def shutdown(self, timeout: Optional[float] = 15.0) -> None: + """Do worker process exit preparations. + + We need to clean up everything and stop accepting requests. + It is especially important for keep-alive connections. + """ + self._force_close = True + + if self._keepalive_handle is not None: + self._keepalive_handle.cancel() + + if self._waiter: + self._waiter.cancel() + + # wait for handlers + with suppress(asyncio.CancelledError, asyncio.TimeoutError): + async with ceil_timeout(timeout): + if self._current_request is not None: + self._current_request._cancel(asyncio.CancelledError()) + + if self._task_handler is not None and not self._task_handler.done(): + await self._task_handler + + # force-close non-idle handler + if self._task_handler is not None: + self._task_handler.cancel() + + if self.transport is not None: + self.transport.close() + self.transport = None + + def connection_made(self, transport: asyncio.BaseTransport) -> None: + super().connection_made(transport) + + real_transport = cast(asyncio.Transport, transport) + if self._tcp_keepalive: + tcp_keepalive(real_transport) + + self._task_handler = self._loop.create_task(self.start()) + assert self._manager is not None + self._manager.connection_made(self, real_transport) + + def connection_lost(self, exc: Optional[BaseException]) -> None: + if self._manager is None: + return + self._manager.connection_lost(self, exc) + + super().connection_lost(exc) + + self._manager = None + self._force_close = True + self._request_factory = None + self._request_handler = None + self._request_parser = None + + if self._keepalive_handle is not None: + self._keepalive_handle.cancel() + + if self._current_request is not None: + if exc is None: + exc = ConnectionResetError("Connection lost") + self._current_request._cancel(exc) + + if self._task_handler is not None: + self._task_handler.cancel() + if self._waiter is not None: + self._waiter.cancel() + + self._task_handler = None + + if self._payload_parser is not None: + self._payload_parser.feed_eof() + self._payload_parser = None + + def set_parser(self, parser: Any) -> None: + # Actual type is WebReader + assert self._payload_parser is None + + self._payload_parser = parser + + if self._message_tail: + self._payload_parser.feed_data(self._message_tail) + self._message_tail = b"" + + def eof_received(self) -> None: + pass + + def data_received(self, data: bytes) -> None: + if self._force_close or self._close: + return + # parse http messages + messages: Sequence[_MsgType] + if self._payload_parser is None and not self._upgrade: + assert self._request_parser is not None + try: + messages, upgraded, tail = self._request_parser.feed_data(data) + except HttpProcessingError as exc: + messages = [ + (_ErrInfo(status=400, exc=exc, message=exc.message), EMPTY_PAYLOAD) + ] + upgraded = False + tail = b"" + + for msg, payload in messages or (): + self._request_count += 1 + self._messages.append((msg, payload)) + + waiter = self._waiter + if messages and waiter is not None and not waiter.done(): + # don't set result twice + waiter.set_result(None) + + self._upgrade = upgraded + if upgraded and tail: + self._message_tail = tail + + # no parser, just store + elif self._payload_parser is None and self._upgrade and data: + self._message_tail += data + + # feed payload + elif data: + eof, tail = 
self._payload_parser.feed_data(data) + if eof: + self.close() + + def keep_alive(self, val: bool) -> None: + """Set keep-alive connection mode. + + :param bool val: new state. + """ + self._keepalive = val + if self._keepalive_handle: + self._keepalive_handle.cancel() + self._keepalive_handle = None + + def close(self) -> None: + """Close connection. + + Stop accepting new pipelining messages and close + connection when handlers done processing messages. + """ + self._close = True + if self._waiter: + self._waiter.cancel() + + def force_close(self) -> None: + """Forcefully close connection.""" + self._force_close = True + if self._waiter: + self._waiter.cancel() + if self.transport is not None: + self.transport.close() + self.transport = None + + def log_access( + self, request: BaseRequest, response: StreamResponse, time: float + ) -> None: + if self.access_logger is not None: + self.access_logger.log(request, response, self._loop.time() - time) + + def log_debug(self, *args: Any, **kw: Any) -> None: + if self.debug: + self.logger.debug(*args, **kw) + + def log_exception(self, *args: Any, **kw: Any) -> None: + self.logger.exception(*args, **kw) + + def _process_keepalive(self) -> None: + if self._force_close or not self._keepalive: + return + + next = self._keepalive_time + self._keepalive_timeout + + # handler in idle state + if self._waiter: + if self._loop.time() > next: + self.force_close() + return + + # not all request handlers are done, + # reschedule itself to next second + self._keepalive_handle = self._loop.call_later( + self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive + ) + + async def _handle_request( + self, + request: BaseRequest, + start_time: float, + request_handler: Callable[[BaseRequest], Awaitable[StreamResponse]], + ) -> Tuple[StreamResponse, bool]: + assert self._request_handler is not None + try: + try: + self._current_request = request + resp = await request_handler(request) + finally: + self._current_request = None + except HTTPException as exc: + resp = exc + reset = await self.finish_response(request, resp, start_time) + except asyncio.CancelledError: + raise + except asyncio.TimeoutError as exc: + self.log_debug("Request handler timed out.", exc_info=exc) + resp = self.handle_error(request, 504) + reset = await self.finish_response(request, resp, start_time) + except Exception as exc: + resp = self.handle_error(request, 500, exc) + reset = await self.finish_response(request, resp, start_time) + else: + # Deprecation warning (See #2415) + if getattr(resp, "__http_exception__", False): + warnings.warn( + "returning HTTPException object is deprecated " + "(#2415) and will be removed, " + "please raise the exception instead", + DeprecationWarning, + ) + + reset = await self.finish_response(request, resp, start_time) + + return resp, reset + + async def start(self) -> None: + """Process incoming request. + + It reads request line, request headers and request payload, then + calls handle_request() method. Subclass has to override + handle_request(). start() handles various exceptions in request + or response handling. Connection is being closed always unless + keep_alive(True) specified. 
+ """ + loop = self._loop + handler = self._task_handler + assert handler is not None + manager = self._manager + assert manager is not None + keepalive_timeout = self._keepalive_timeout + resp = None + assert self._request_factory is not None + assert self._request_handler is not None + + while not self._force_close: + if not self._messages: + try: + # wait for next request + self._waiter = loop.create_future() + await self._waiter + except asyncio.CancelledError: + break + finally: + self._waiter = None + + message, payload = self._messages.popleft() + + start = loop.time() + + manager.requests_count += 1 + writer = StreamWriter(self, loop) + if isinstance(message, _ErrInfo): + # make request_factory work + request_handler = self._make_error_handler(message) + message = ERROR + else: + request_handler = self._request_handler + + request = self._request_factory(message, payload, self, writer, handler) + try: + # a new task is used for copy context vars (#3406) + task = self._loop.create_task( + self._handle_request(request, start, request_handler) + ) + try: + resp, reset = await task + except (asyncio.CancelledError, ConnectionError): + self.log_debug("Ignored premature client disconnection") + break + + # Drop the processed task from asyncio.Task.all_tasks() early + del task + if reset: + self.log_debug("Ignored premature client disconnection 2") + break + + # notify server about keep-alive + self._keepalive = bool(resp.keep_alive) + + # check payload + if not payload.is_eof(): + lingering_time = self._lingering_time + if not self._force_close and lingering_time: + self.log_debug( + "Start lingering close timer for %s sec.", lingering_time + ) + + now = loop.time() + end_t = now + lingering_time + + with suppress(asyncio.TimeoutError, asyncio.CancelledError): + while not payload.is_eof() and now < end_t: + async with ceil_timeout(end_t - now): + # read and ignore + await payload.readany() + now = loop.time() + + # if payload still uncompleted + if not payload.is_eof() and not self._force_close: + self.log_debug("Uncompleted request.") + self.close() + + payload.set_exception(PayloadAccessError()) + + except asyncio.CancelledError: + self.log_debug("Ignored premature client disconnection ") + break + except RuntimeError as exc: + if self.debug: + self.log_exception("Unhandled runtime exception", exc_info=exc) + self.force_close() + except Exception as exc: + self.log_exception("Unhandled exception", exc_info=exc) + self.force_close() + finally: + if self.transport is None and resp is not None: + self.log_debug("Ignored premature client disconnection.") + elif not self._force_close: + if self._keepalive and not self._close: + # start keep-alive timer + if keepalive_timeout is not None: + now = self._loop.time() + self._keepalive_time = now + if self._keepalive_handle is None: + self._keepalive_handle = loop.call_at( + now + keepalive_timeout, self._process_keepalive + ) + else: + break + + # remove handler, close transport if no handlers left + if not self._force_close: + self._task_handler = None + if self.transport is not None: + self.transport.close() + + async def finish_response( + self, request: BaseRequest, resp: StreamResponse, start_time: float + ) -> bool: + """Prepare the response and write_eof, then log access. + + This has to + be called within the context of any exception so the access logger + can get exception information. Returns True if the client disconnects + prematurely. 
+ """ + if self._request_parser is not None: + self._request_parser.set_upgraded(False) + self._upgrade = False + if self._message_tail: + self._request_parser.feed_data(self._message_tail) + self._message_tail = b"" + try: + prepare_meth = resp.prepare + except AttributeError: + if resp is None: + raise RuntimeError("Missing return " "statement on request handler") + else: + raise RuntimeError( + "Web-handler should return " + "a response instance, " + "got {!r}".format(resp) + ) + try: + await prepare_meth(request) + await resp.write_eof() + except ConnectionError: + self.log_access(request, resp, start_time) + return True + else: + self.log_access(request, resp, start_time) + return False + + def handle_error( + self, + request: BaseRequest, + status: int = 500, + exc: Optional[BaseException] = None, + message: Optional[str] = None, + ) -> StreamResponse: + """Handle errors. + + Returns HTTP response with specific status code. Logs additional + information. It always closes current connection. + """ + self.log_exception("Error handling request", exc_info=exc) + + # some data already got sent, connection is broken + if request.writer.output_size > 0: + raise ConnectionError( + "Response is sent already, cannot send another response " + "with the error message" + ) + + ct = "text/plain" + if status == HTTPStatus.INTERNAL_SERVER_ERROR: + title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR) + msg = HTTPStatus.INTERNAL_SERVER_ERROR.description + tb = None + if self.debug: + with suppress(Exception): + tb = traceback.format_exc() + + if "text/html" in request.headers.get("Accept", ""): + if tb: + tb = html_escape(tb) + msg = f"

Traceback:

\n
{tb}
" + message = ( + "" + "{title}" + "\n

{title}

" + "\n{msg}\n\n" + ).format(title=title, msg=msg) + ct = "text/html" + else: + if tb: + msg = tb + message = title + "\n\n" + msg + + resp = Response(status=status, text=message, content_type=ct) + resp.force_close() + + return resp + + def _make_error_handler( + self, err_info: _ErrInfo + ) -> Callable[[BaseRequest], Awaitable[StreamResponse]]: + async def handler(request: BaseRequest) -> StreamResponse: + return self.handle_error( + request, err_info.status, err_info.exc, err_info.message + ) + + return handler diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_routedef.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_routedef.py new file mode 100644 index 0000000000000000000000000000000000000000..671e5c7f464a0d05ddc010cd6d8c7cc6a8b68709 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_routedef.py @@ -0,0 +1,213 @@ +import abc +import os # noqa +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterator, + List, + Optional, + Sequence, + Type, + Union, + overload, +) + +import attr + +from . import hdrs +from .abc import AbstractView +from .typedefs import Handler, PathLike + +if TYPE_CHECKING: # pragma: no cover + from .web_request import Request + from .web_response import StreamResponse + from .web_urldispatcher import AbstractRoute, UrlDispatcher +else: + Request = StreamResponse = UrlDispatcher = AbstractRoute = None + + +__all__ = ( + "AbstractRouteDef", + "RouteDef", + "StaticDef", + "RouteTableDef", + "head", + "options", + "get", + "post", + "patch", + "put", + "delete", + "route", + "view", + "static", +) + + +class AbstractRouteDef(abc.ABC): + @abc.abstractmethod + def register(self, router: UrlDispatcher) -> List[AbstractRoute]: + pass # pragma: no cover + + +_HandlerType = Union[Type[AbstractView], Handler] + + +@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True) +class RouteDef(AbstractRouteDef): + method: str + path: str + handler: _HandlerType + kwargs: Dict[str, Any] + + def __repr__(self) -> str: + info = [] + for name, value in sorted(self.kwargs.items()): + info.append(f", {name}={value!r}") + return " {handler.__name__!r}" "{info}>".format( + method=self.method, path=self.path, handler=self.handler, info="".join(info) + ) + + def register(self, router: UrlDispatcher) -> List[AbstractRoute]: + if self.method in hdrs.METH_ALL: + reg = getattr(router, "add_" + self.method.lower()) + return [reg(self.path, self.handler, **self.kwargs)] + else: + return [ + router.add_route(self.method, self.path, self.handler, **self.kwargs) + ] + + +@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True) +class StaticDef(AbstractRouteDef): + prefix: str + path: PathLike + kwargs: Dict[str, Any] + + def __repr__(self) -> str: + info = [] + for name, value in sorted(self.kwargs.items()): + info.append(f", {name}={value!r}") + return " {path}" "{info}>".format( + prefix=self.prefix, path=self.path, info="".join(info) + ) + + def register(self, router: UrlDispatcher) -> List[AbstractRoute]: + resource = router.add_static(self.prefix, self.path, **self.kwargs) + routes = resource.get_info().get("routes", {}) + return list(routes.values()) + + +def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return RouteDef(method, path, handler, kwargs) + + +def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return route(hdrs.METH_HEAD, path, handler, **kwargs) + + +def 
options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return route(hdrs.METH_OPTIONS, path, handler, **kwargs) + + +def get( + path: str, + handler: _HandlerType, + *, + name: Optional[str] = None, + allow_head: bool = True, + **kwargs: Any, +) -> RouteDef: + return route( + hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs + ) + + +def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return route(hdrs.METH_POST, path, handler, **kwargs) + + +def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return route(hdrs.METH_PUT, path, handler, **kwargs) + + +def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return route(hdrs.METH_PATCH, path, handler, **kwargs) + + +def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: + return route(hdrs.METH_DELETE, path, handler, **kwargs) + + +def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef: + return route(hdrs.METH_ANY, path, handler, **kwargs) + + +def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef: + return StaticDef(prefix, path, kwargs) + + +_Deco = Callable[[_HandlerType], _HandlerType] + + +class RouteTableDef(Sequence[AbstractRouteDef]): + """Route definition table""" + + def __init__(self) -> None: + self._items = [] # type: List[AbstractRouteDef] + + def __repr__(self) -> str: + return f"<RouteTableDef count={len(self._items)}>" + + @overload + def __getitem__(self, index: int) -> AbstractRouteDef: + ... + + @overload + def __getitem__(self, index: slice) -> List[AbstractRouteDef]: + ... + + def __getitem__(self, index): # type: ignore[no-untyped-def] + return self._items[index] + + def __iter__(self) -> Iterator[AbstractRouteDef]: + return iter(self._items) + + def __len__(self) -> int: + return len(self._items) + + def __contains__(self, item: object) -> bool: + return item in self._items + + def route(self, method: str, path: str, **kwargs: Any) -> _Deco: + def inner(handler: _HandlerType) -> _HandlerType: + self._items.append(RouteDef(method, path, handler, kwargs)) + return handler + + return inner + + def head(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_HEAD, path, **kwargs) + + def get(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_GET, path, **kwargs) + + def post(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_POST, path, **kwargs) + + def put(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_PUT, path, **kwargs) + + def patch(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_PATCH, path, **kwargs) + + def delete(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_DELETE, path, **kwargs) + + def view(self, path: str, **kwargs: Any) -> _Deco: + return self.route(hdrs.METH_ANY, path, **kwargs) + + def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None: + self._items.append(StaticDef(prefix, path, kwargs)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_runner.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_runner.py new file mode 100644 index 0000000000000000000000000000000000000000..f4a64bff662dbfe8e4730a932e106cb03e90907d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_runner.py @@ -0,0 +1,381 @@ +import asyncio +import signal +import socket +from abc import ABC, abstractmethod +from typing import Any, List, Optional, Set + 
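Before the runner machinery below, a quick illustration of how the route table vendored above is consumed. A minimal sketch using the decorator style, assuming the public web.Application entry points; the path and handler are illustrative:

from aiohttp import web

routes = web.RouteTableDef()


@routes.get("/hello/{name}")
async def hello(request: web.Request) -> web.Response:
    # the RouteDef created by the decorator is registered via app.add_routes()
    return web.Response(text="Hello, " + request.match_info["name"])


app = web.Application()
app.add_routes(routes)
web.run_app(app)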
+from yarl import URL + +from .web_app import Application +from .web_server import Server + +try: + from ssl import SSLContext +except ImportError: + SSLContext = object # type: ignore[misc,assignment] + + +__all__ = ( + "BaseSite", + "TCPSite", + "UnixSite", + "NamedPipeSite", + "SockSite", + "BaseRunner", + "AppRunner", + "ServerRunner", + "GracefulExit", +) + + +class GracefulExit(SystemExit): + code = 1 + + +def _raise_graceful_exit() -> None: + raise GracefulExit() + + +class BaseSite(ABC): + __slots__ = ("_runner", "_shutdown_timeout", "_ssl_context", "_backlog", "_server") + + def __init__( + self, + runner: "BaseRunner", + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + if runner.server is None: + raise RuntimeError("Call runner.setup() before making a site") + self._runner = runner + self._shutdown_timeout = shutdown_timeout + self._ssl_context = ssl_context + self._backlog = backlog + self._server = None # type: Optional[asyncio.AbstractServer] + + @property + @abstractmethod + def name(self) -> str: + pass # pragma: no cover + + @abstractmethod + async def start(self) -> None: + self._runner._reg_site(self) + + async def stop(self) -> None: + self._runner._check_site(self) + if self._server is None: + self._runner._unreg_site(self) + return # not started yet + self._server.close() + # named pipes do not have wait_closed property + if hasattr(self._server, "wait_closed"): + await self._server.wait_closed() + await self._runner.shutdown() + assert self._runner.server + await self._runner.server.shutdown(self._shutdown_timeout) + self._runner._unreg_site(self) + + +class TCPSite(BaseSite): + __slots__ = ("_host", "_port", "_reuse_address", "_reuse_port") + + def __init__( + self, + runner: "BaseRunner", + host: Optional[str] = None, + port: Optional[int] = None, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + reuse_address: Optional[bool] = None, + reuse_port: Optional[bool] = None, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._host = host + if port is None: + port = 8443 if self._ssl_context else 8080 + self._port = port + self._reuse_address = reuse_address + self._reuse_port = reuse_port + + @property + def name(self) -> str: + scheme = "https" if self._ssl_context else "http" + host = "0.0.0.0" if self._host is None else self._host + return str(URL.build(scheme=scheme, host=host, port=self._port)) + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_server( + server, + self._host, + self._port, + ssl=self._ssl_context, + backlog=self._backlog, + reuse_address=self._reuse_address, + reuse_port=self._reuse_port, + ) + + +class UnixSite(BaseSite): + __slots__ = ("_path",) + + def __init__( + self, + runner: "BaseRunner", + path: str, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._path = path + + @property + def name(self) -> str: + scheme = "https" if self._ssl_context else "http" + return f"{scheme}://unix:{self._path}:" + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = 
self._runner.server + assert server is not None + self._server = await loop.create_unix_server( + server, self._path, ssl=self._ssl_context, backlog=self._backlog + ) + + +class NamedPipeSite(BaseSite): + __slots__ = ("_path",) + + def __init__( + self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0 + ) -> None: + loop = asyncio.get_event_loop() + if not isinstance( + loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] + ): + raise RuntimeError( + "Named Pipes only available in proactor" "loop under windows" + ) + super().__init__(runner, shutdown_timeout=shutdown_timeout) + self._path = path + + @property + def name(self) -> str: + return self._path + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + _server = await loop.start_serving_pipe( # type: ignore[attr-defined] + server, self._path + ) + self._server = _server[0] + + +class SockSite(BaseSite): + __slots__ = ("_sock", "_name") + + def __init__( + self, + runner: "BaseRunner", + sock: socket.socket, + *, + shutdown_timeout: float = 60.0, + ssl_context: Optional[SSLContext] = None, + backlog: int = 128, + ) -> None: + super().__init__( + runner, + shutdown_timeout=shutdown_timeout, + ssl_context=ssl_context, + backlog=backlog, + ) + self._sock = sock + scheme = "https" if self._ssl_context else "http" + if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX: + name = f"{scheme}://unix:{sock.getsockname()}:" + else: + host, port = sock.getsockname()[:2] + name = str(URL.build(scheme=scheme, host=host, port=port)) + self._name = name + + @property + def name(self) -> str: + return self._name + + async def start(self) -> None: + await super().start() + loop = asyncio.get_event_loop() + server = self._runner.server + assert server is not None + self._server = await loop.create_server( + server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog + ) + + +class BaseRunner(ABC): + __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites") + + def __init__(self, *, handle_signals: bool = False, **kwargs: Any) -> None: + self._handle_signals = handle_signals + self._kwargs = kwargs + self._server = None # type: Optional[Server] + self._sites = [] # type: List[BaseSite] + + @property + def server(self) -> Optional[Server]: + return self._server + + @property + def addresses(self) -> List[Any]: + ret = [] # type: List[Any] + for site in self._sites: + server = site._server + if server is not None: + sockets = server.sockets + if sockets is not None: + for sock in sockets: + ret.append(sock.getsockname()) + return ret + + @property + def sites(self) -> Set[BaseSite]: + return set(self._sites) + + async def setup(self) -> None: + loop = asyncio.get_event_loop() + + if self._handle_signals: + try: + loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit) + loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit) + except NotImplementedError: # pragma: no cover + # add_signal_handler is not implemented on Windows + pass + + self._server = await self._make_server() + + @abstractmethod + async def shutdown(self) -> None: + pass # pragma: no cover + + async def cleanup(self) -> None: + loop = asyncio.get_event_loop() + + # The loop over sites is intentional, an exception on gather() + # leaves self._sites in unpredictable state. 
+ # The loop guaranties that a site is either deleted on success or + # still present on failure + for site in list(self._sites): + await site.stop() + await self._cleanup_server() + self._server = None + if self._handle_signals: + try: + loop.remove_signal_handler(signal.SIGINT) + loop.remove_signal_handler(signal.SIGTERM) + except NotImplementedError: # pragma: no cover + # remove_signal_handler is not implemented on Windows + pass + + @abstractmethod + async def _make_server(self) -> Server: + pass # pragma: no cover + + @abstractmethod + async def _cleanup_server(self) -> None: + pass # pragma: no cover + + def _reg_site(self, site: BaseSite) -> None: + if site in self._sites: + raise RuntimeError(f"Site {site} is already registered in runner {self}") + self._sites.append(site) + + def _check_site(self, site: BaseSite) -> None: + if site not in self._sites: + raise RuntimeError(f"Site {site} is not registered in runner {self}") + + def _unreg_site(self, site: BaseSite) -> None: + if site not in self._sites: + raise RuntimeError(f"Site {site} is not registered in runner {self}") + self._sites.remove(site) + + +class ServerRunner(BaseRunner): + """Low-level web server runner""" + + __slots__ = ("_web_server",) + + def __init__( + self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any + ) -> None: + super().__init__(handle_signals=handle_signals, **kwargs) + self._web_server = web_server + + async def shutdown(self) -> None: + pass + + async def _make_server(self) -> Server: + return self._web_server + + async def _cleanup_server(self) -> None: + pass + + +class AppRunner(BaseRunner): + """Web Application runner""" + + __slots__ = ("_app",) + + def __init__( + self, app: Application, *, handle_signals: bool = False, **kwargs: Any + ) -> None: + super().__init__(handle_signals=handle_signals, **kwargs) + if not isinstance(app, Application): + raise TypeError( + "The first argument should be web.Application " + "instance, got {!r}".format(app) + ) + self._app = app + + @property + def app(self) -> Application: + return self._app + + async def shutdown(self) -> None: + await self._app.shutdown() + + async def _make_server(self) -> Server: + loop = asyncio.get_event_loop() + self._app._set_loop(loop) + self._app.on_startup.freeze() + await self._app.startup() + self._app.freeze() + + return self._app._make_handler(loop=loop, **self._kwargs) + + async def _cleanup_server(self) -> None: + await self._app.cleanup() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_server.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_server.py new file mode 100644 index 0000000000000000000000000000000000000000..5657ed9c800874ef3e6e05250fe6a255c7d07fde --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_server.py @@ -0,0 +1,62 @@ +"""Low level HTTP server.""" +import asyncio +from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa + +from .abc import AbstractStreamWriter +from .helpers import get_running_loop +from .http_parser import RawRequestMessage +from .streams import StreamReader +from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler +from .web_request import BaseRequest + +__all__ = ("Server",) + + +class Server: + def __init__( + self, + handler: _RequestHandler, + *, + request_factory: Optional[_RequestFactory] = None, + loop: Optional[asyncio.AbstractEventLoop] = None, + **kwargs: Any + ) -> None: + self._loop = 
get_running_loop(loop) + self._connections = {} # type: Dict[RequestHandler, asyncio.Transport] + self._kwargs = kwargs + self.requests_count = 0 + self.request_handler = handler + self.request_factory = request_factory or self._make_request + + @property + def connections(self) -> List[RequestHandler]: + return list(self._connections.keys()) + + def connection_made( + self, handler: RequestHandler, transport: asyncio.Transport + ) -> None: + self._connections[handler] = transport + + def connection_lost( + self, handler: RequestHandler, exc: Optional[BaseException] = None + ) -> None: + if handler in self._connections: + del self._connections[handler] + + def _make_request( + self, + message: RawRequestMessage, + payload: StreamReader, + protocol: RequestHandler, + writer: AbstractStreamWriter, + task: "asyncio.Task[None]", + ) -> BaseRequest: + return BaseRequest(message, payload, protocol, writer, task, self._loop) + + async def shutdown(self, timeout: Optional[float] = None) -> None: + coros = [conn.shutdown(timeout) for conn in self._connections] + await asyncio.gather(*coros) + self._connections.clear() + + def __call__(self) -> RequestHandler: + return RequestHandler(self, loop=self._loop, **self._kwargs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_ws.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_ws.py new file mode 100644 index 0000000000000000000000000000000000000000..16b0a1747cf186ed447cbe7be8a890d4ed528d1d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/aiohttp/web_ws.py @@ -0,0 +1,487 @@ +import asyncio +import base64 +import binascii +import hashlib +import json +from typing import Any, Iterable, Optional, Tuple, cast + +import async_timeout +import attr +from multidict import CIMultiDict + +from . 
import hdrs +from .abc import AbstractStreamWriter +from .helpers import call_later, set_result +from .http import ( + WS_CLOSED_MESSAGE, + WS_CLOSING_MESSAGE, + WS_KEY, + WebSocketError, + WebSocketReader, + WebSocketWriter, + WSCloseCode, + WSMessage, + WSMsgType as WSMsgType, + ws_ext_gen, + ws_ext_parse, +) +from .log import ws_logger +from .streams import EofStream, FlowControlDataQueue +from .typedefs import Final, JSONDecoder, JSONEncoder +from .web_exceptions import HTTPBadRequest, HTTPException +from .web_request import BaseRequest +from .web_response import StreamResponse + +__all__ = ( + "WebSocketResponse", + "WebSocketReady", + "WSMsgType", +) + +THRESHOLD_CONNLOST_ACCESS: Final[int] = 5 + + +@attr.s(auto_attribs=True, frozen=True, slots=True) +class WebSocketReady: + ok: bool + protocol: Optional[str] + + def __bool__(self) -> bool: + return self.ok + + +class WebSocketResponse(StreamResponse): + + _length_check = False + + def __init__( + self, + *, + timeout: float = 10.0, + receive_timeout: Optional[float] = None, + autoclose: bool = True, + autoping: bool = True, + heartbeat: Optional[float] = None, + protocols: Iterable[str] = (), + compress: bool = True, + max_msg_size: int = 4 * 1024 * 1024, + ) -> None: + super().__init__(status=101) + self._protocols = protocols + self._ws_protocol = None # type: Optional[str] + self._writer = None # type: Optional[WebSocketWriter] + self._reader = None # type: Optional[FlowControlDataQueue[WSMessage]] + self._closed = False + self._closing = False + self._conn_lost = 0 + self._close_code = None # type: Optional[int] + self._loop = None # type: Optional[asyncio.AbstractEventLoop] + self._waiting = None # type: Optional[asyncio.Future[bool]] + self._exception = None # type: Optional[BaseException] + self._timeout = timeout + self._receive_timeout = receive_timeout + self._autoclose = autoclose + self._autoping = autoping + self._heartbeat = heartbeat + self._heartbeat_cb: Optional[asyncio.TimerHandle] = None + if heartbeat is not None: + self._pong_heartbeat = heartbeat / 2.0 + self._pong_response_cb: Optional[asyncio.TimerHandle] = None + self._compress = compress + self._max_msg_size = max_msg_size + + def _cancel_heartbeat(self) -> None: + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = None + + if self._heartbeat_cb is not None: + self._heartbeat_cb.cancel() + self._heartbeat_cb = None + + def _reset_heartbeat(self) -> None: + self._cancel_heartbeat() + + if self._heartbeat is not None: + assert self._loop is not None + self._heartbeat_cb = call_later( + self._send_heartbeat, self._heartbeat, self._loop + ) + + def _send_heartbeat(self) -> None: + if self._heartbeat is not None and not self._closed: + assert self._loop is not None + # fire-and-forget a task is not perfect but maybe ok for + # sending ping. Otherwise we need a long-living heartbeat + # task in the class. 
+ self._loop.create_task(self._writer.ping()) # type: ignore[union-attr] + + if self._pong_response_cb is not None: + self._pong_response_cb.cancel() + self._pong_response_cb = call_later( + self._pong_not_received, self._pong_heartbeat, self._loop + ) + + def _pong_not_received(self) -> None: + if self._req is not None and self._req.transport is not None: + self._closed = True + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = asyncio.TimeoutError() + self._req.transport.close() + + async def prepare(self, request: BaseRequest) -> AbstractStreamWriter: + # make pre-check to don't hide it by do_handshake() exceptions + if self._payload_writer is not None: + return self._payload_writer + + protocol, writer = self._pre_start(request) + payload_writer = await super().prepare(request) + assert payload_writer is not None + self._post_start(request, protocol, writer) + await payload_writer.drain() + return payload_writer + + def _handshake( + self, request: BaseRequest + ) -> Tuple["CIMultiDict[str]", str, bool, bool]: + headers = request.headers + if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip(): + raise HTTPBadRequest( + text=( + "No WebSocket UPGRADE hdr: {}\n Can " + '"Upgrade" only to "WebSocket".' + ).format(headers.get(hdrs.UPGRADE)) + ) + + if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower(): + raise HTTPBadRequest( + text="No CONNECTION upgrade hdr: {}".format( + headers.get(hdrs.CONNECTION) + ) + ) + + # find common sub-protocol between client and server + protocol = None + if hdrs.SEC_WEBSOCKET_PROTOCOL in headers: + req_protocols = [ + str(proto.strip()) + for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") + ] + + for proto in req_protocols: + if proto in self._protocols: + protocol = proto + break + else: + # No overlap found: Return no protocol as per spec + ws_logger.warning( + "Client protocols %r don’t overlap server-known ones %r", + req_protocols, + self._protocols, + ) + + # check supported version + version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "") + if version not in ("13", "8", "7"): + raise HTTPBadRequest(text=f"Unsupported version: {version}") + + # check client handshake for validity + key = headers.get(hdrs.SEC_WEBSOCKET_KEY) + try: + if not key or len(base64.b64decode(key)) != 16: + raise HTTPBadRequest(text=f"Handshake error: {key!r}") + except binascii.Error: + raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None + + accept_val = base64.b64encode( + hashlib.sha1(key.encode() + WS_KEY).digest() + ).decode() + response_headers = CIMultiDict( # type: ignore[var-annotated] + { + hdrs.UPGRADE: "websocket", # type: ignore[arg-type] + hdrs.CONNECTION: "upgrade", + hdrs.SEC_WEBSOCKET_ACCEPT: accept_val, + } + ) + + notakeover = False + compress = 0 + if self._compress: + extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS) + # Server side always get return with no exception. 
+ # If something happened, just drop compress extension + compress, notakeover = ws_ext_parse(extensions, isserver=True) + if compress: + enabledext = ws_ext_gen( + compress=compress, isserver=True, server_notakeover=notakeover + ) + response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext + + if protocol: + response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol + return ( + response_headers, + protocol, + compress, + notakeover, + ) # type: ignore[return-value] + + def _pre_start(self, request: BaseRequest) -> Tuple[str, WebSocketWriter]: + self._loop = request._loop + + headers, protocol, compress, notakeover = self._handshake(request) + + self.set_status(101) + self.headers.update(headers) + self.force_close() + self._compress = compress + transport = request._protocol.transport + assert transport is not None + writer = WebSocketWriter( + request._protocol, transport, compress=compress, notakeover=notakeover + ) + + return protocol, writer + + def _post_start( + self, request: BaseRequest, protocol: str, writer: WebSocketWriter + ) -> None: + self._ws_protocol = protocol + self._writer = writer + + self._reset_heartbeat() + + loop = self._loop + assert loop is not None + self._reader = FlowControlDataQueue(request._protocol, 2 ** 16, loop=loop) + request.protocol.set_parser( + WebSocketReader(self._reader, self._max_msg_size, compress=self._compress) + ) + # disable HTTP keepalive for WebSocket + request.protocol.keep_alive(False) + + def can_prepare(self, request: BaseRequest) -> WebSocketReady: + if self._writer is not None: + raise RuntimeError("Already started") + try: + _, protocol, _, _ = self._handshake(request) + except HTTPException: + return WebSocketReady(False, None) + else: + return WebSocketReady(True, protocol) + + @property + def closed(self) -> bool: + return self._closed + + @property + def close_code(self) -> Optional[int]: + return self._close_code + + @property + def ws_protocol(self) -> Optional[str]: + return self._ws_protocol + + @property + def compress(self) -> bool: + return self._compress + + def exception(self) -> Optional[BaseException]: + return self._exception + + async def ping(self, message: bytes = b"") -> None: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + await self._writer.ping(message) + + async def pong(self, message: bytes = b"") -> None: + # unsolicited pong + if self._writer is None: + raise RuntimeError("Call .prepare() first") + await self._writer.pong(message) + + async def send_str(self, data: str, compress: Optional[bool] = None) -> None: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + if not isinstance(data, str): + raise TypeError("data argument must be str (%r)" % type(data)) + await self._writer.send(data, binary=False, compress=compress) + + async def send_bytes(self, data: bytes, compress: Optional[bool] = None) -> None: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError("data argument must be byte-ish (%r)" % type(data)) + await self._writer.send(data, binary=True, compress=compress) + + async def send_json( + self, + data: Any, + compress: Optional[bool] = None, + *, + dumps: JSONEncoder = json.dumps, + ) -> None: + await self.send_str(dumps(data), compress=compress) + + async def write_eof(self) -> None: # type: ignore[override] + if self._eof_sent: + return + if self._payload_writer is None: + raise RuntimeError("Response has not been started") + + await self.close() + 
self._eof_sent = True + + async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool: + if self._writer is None: + raise RuntimeError("Call .prepare() first") + + self._cancel_heartbeat() + reader = self._reader + assert reader is not None + + # we need to break `receive()` cycle first, + # `close()` may be called from different task + if self._waiting is not None and not self._closed: + reader.feed_data(WS_CLOSING_MESSAGE, 0) + await self._waiting + + if not self._closed: + self._closed = True + try: + await self._writer.close(code, message) + writer = self._payload_writer + assert writer is not None + await writer.drain() + except (asyncio.CancelledError, asyncio.TimeoutError): + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + raise + except Exception as exc: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + return True + + if self._closing: + return True + + reader = self._reader + assert reader is not None + try: + async with async_timeout.timeout(self._timeout): + msg = await reader.read() + except asyncio.CancelledError: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + raise + except Exception as exc: + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = exc + return True + + if msg.type == WSMsgType.CLOSE: + self._close_code = msg.data + return True + + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + self._exception = asyncio.TimeoutError() + return True + else: + return False + + async def receive(self, timeout: Optional[float] = None) -> WSMessage: + if self._reader is None: + raise RuntimeError("Call .prepare() first") + + loop = self._loop + assert loop is not None + while True: + if self._waiting is not None: + raise RuntimeError("Concurrent call to receive() is not allowed") + + if self._closed: + self._conn_lost += 1 + if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS: + raise RuntimeError("WebSocket connection is closed.") + return WS_CLOSED_MESSAGE + elif self._closing: + return WS_CLOSING_MESSAGE + + try: + self._waiting = loop.create_future() + try: + async with async_timeout.timeout(timeout or self._receive_timeout): + msg = await self._reader.read() + self._reset_heartbeat() + finally: + waiter = self._waiting + set_result(waiter, True) + self._waiting = None + except (asyncio.CancelledError, asyncio.TimeoutError): + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + raise + except EofStream: + self._close_code = WSCloseCode.OK + await self.close() + return WSMessage(WSMsgType.CLOSED, None, None) + except WebSocketError as exc: + self._close_code = exc.code + await self.close(code=exc.code) + return WSMessage(WSMsgType.ERROR, exc, None) + except Exception as exc: + self._exception = exc + self._closing = True + self._close_code = WSCloseCode.ABNORMAL_CLOSURE + await self.close() + return WSMessage(WSMsgType.ERROR, exc, None) + + if msg.type == WSMsgType.CLOSE: + self._closing = True + self._close_code = msg.data + if not self._closed and self._autoclose: + await self.close() + elif msg.type == WSMsgType.CLOSING: + self._closing = True + elif msg.type == WSMsgType.PING and self._autoping: + await self.pong(msg.data) + continue + elif msg.type == WSMsgType.PONG and self._autoping: + continue + + return msg + + async def receive_str(self, *, timeout: Optional[float] = None) -> str: + msg = await self.receive(timeout) + if msg.type != WSMsgType.TEXT: + raise TypeError( + "Received message {}:{!r} is not WSMsgType.TEXT".format( + msg.type, msg.data + ) + ) + return cast(str, msg.data) + + async def 
receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: + msg = await self.receive(timeout) + if msg.type != WSMsgType.BINARY: + raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes") + return cast(bytes, msg.data) + + async def receive_json( + self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None + ) -> Any: + data = await self.receive_str(timeout=timeout) + return loads(data) + + async def write(self, data: bytes) -> None: + raise RuntimeError("Cannot call .write() for websocket") + + def __aiter__(self) -> "WebSocketResponse": + return self + + async def __anext__(self) -> WSMessage: + msg = await self.receive() + if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): + raise StopAsyncIteration + return msg + + def _cancel(self, exc: BaseException) -> None: + if self._reader is not None: + self._reader.set_exception(exc) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e7c616206861b0bb05ebd720c4850748e0dcc538 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/METADATA @@ -0,0 +1,10 @@ +Metadata-Version: 2.1 +Name: antlr4-python3-runtime +Version: 4.9.3 +Summary: ANTLR 4.9.3 runtime for Python 3.7 +Home-page: http://www.antlr.org +Author: Eric Vergnaud, Terence Parr, Sam Harwell +Author-email: eric.vergnaud@wanadoo.fr +License: BSD +Requires-Dist: typing ; python_version < "3.5" + diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..7db52979fc12e3d2e3fdc6066e5e98de4f2e5f61 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/RECORD @@ -0,0 +1,119 @@ +../../../bin/pygrun,sha256=MBVjE9upRGiOwLc7_ldLSW9L_KPHA_5riogxOL9AcJU,6139 +antlr4/BufferedTokenStream.py,sha256=_BwmzOH1TO6yL2yC_ZaUzkghq8wzc0UPHfI3UpnZUwM,10780 +antlr4/CommonTokenFactory.py,sha256=Tv16zg_pWD1Dv3IphsxFu8nwWdLeXYcqJ8CC5yHwjH8,2110 +antlr4/CommonTokenStream.py,sha256=NNJHXwRg2_Zn46ZhJyDxZtvZzsPWhb6JjXa7BjM45eg,2770 +antlr4/FileStream.py,sha256=-ZR_-jl_If9IIBYLINIwlQrlTSmu5k1VUKDc3ie7WR4,868 +antlr4/InputStream.py,sha256=sggjE2jEGvSgQmxFvqeeuT3aOVgcH5tS7mMybW8wKS4,2334 +antlr4/IntervalSet.py,sha256=Cd0WKhd_kYbiLYKkDNncgSM19GAuS7OaTOC4-5Yubs4,5965 +antlr4/LL1Analyzer.py,sha256=oJBvO7_S8cAlb_D4qWNxd2IlK0qP4ka-oeoDxx16CZ4,7752 +antlr4/Lexer.py,sha256=C72hqayfkympxb46AcSnhPD9kVZ0quWgboGxa6gcIcg,11542 +antlr4/ListTokenSource.py,sha256=IffLMo7YQnD_CjKryrrgNWSk0q5QSYd7puZyyUk7vOk,5356 
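A minimal sketch of how the WebSocketResponse class vendored above is typically driven from an aiohttp handler; the handler name and route here are illustrative, not taken from the vendored sources:

    from aiohttp import web, WSMsgType

    async def ws_handler(request: web.Request) -> web.WebSocketResponse:
        ws = web.WebSocketResponse(heartbeat=30.0)  # heartbeat drives the ping/_pong_not_received cycle
        await ws.prepare(request)                   # runs the _handshake()/_pre_start()/_post_start() path
        async for msg in ws:                        # __aiter__/__anext__ stop on CLOSE/CLOSING/CLOSED
            if msg.type == WSMsgType.TEXT:
                await ws.send_str("echo: " + msg.data)  # raises RuntimeError if called before prepare()
            elif msg.type == WSMsgType.ERROR:
                break
        return ws

    app = web.Application()
    app.add_routes([web.get("/ws", ws_handler)])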
+antlr4/Parser.py,sha256=F2Q25z0-__KHfa354KQhDu3ZOVzLFfag3s2ixJ4dl_o,22883 +antlr4/ParserInterpreter.py,sha256=-QU9kn4x3WCQ-LSA99R231HoicTqakiHZ5KM72l-hIo,7206 +antlr4/ParserRuleContext.py,sha256=wHAVdOxMAO5jkUqloTXVzn_xYnJhiHbvvuhZpth0ZF8,6762 +antlr4/PredictionContext.py,sha256=cb4KI6EGpS7sRzJ8UvPEkxphINZuWhyiZ95752g3prI,22977 +antlr4/Recognizer.py,sha256=vmKAtSjIgR9LQr5YzuK5OmPZWMJ3x69OuVZQ_FTzQHE,5383 +antlr4/RuleContext.py,sha256=GiviRv2k_al1IBgdJOEEoD0ohJaVd-_h5T_CPG_Bsmg,8099 +antlr4/StdinStream.py,sha256=MMSH4zN8T6i_nu-3_TlN-3E4nPM4b5KgK4GT6n_FUQA,303 +antlr4/Token.py,sha256=OtWCab4Ut52X_nLLAA-8x4Zl6xaF6TEN-0033uaoaEo,5206 +antlr4/TokenStreamRewriter.py,sha256=cuErQTrXwC_0kqVv3MsTWGZSm-E1Vy1yzA-3SOhKd_s,10324 +antlr4/Utils.py,sha256=Oyg8CJCRL1TrF_QSB_LLlVdWOB4loVcKOgFNT-icO7c,931 +antlr4/__init__.py,sha256=g8UGpflnlMWcAyLtihejzrgAP1Uo3b9GhwfI8QnZjtw,1125 +antlr4/__pycache__/BufferedTokenStream.cpython-38.pyc,, +antlr4/__pycache__/CommonTokenFactory.cpython-38.pyc,, +antlr4/__pycache__/CommonTokenStream.cpython-38.pyc,, +antlr4/__pycache__/FileStream.cpython-38.pyc,, +antlr4/__pycache__/InputStream.cpython-38.pyc,, +antlr4/__pycache__/IntervalSet.cpython-38.pyc,, +antlr4/__pycache__/LL1Analyzer.cpython-38.pyc,, +antlr4/__pycache__/Lexer.cpython-38.pyc,, +antlr4/__pycache__/ListTokenSource.cpython-38.pyc,, +antlr4/__pycache__/Parser.cpython-38.pyc,, +antlr4/__pycache__/ParserInterpreter.cpython-38.pyc,, +antlr4/__pycache__/ParserRuleContext.cpython-38.pyc,, +antlr4/__pycache__/PredictionContext.cpython-38.pyc,, +antlr4/__pycache__/Recognizer.cpython-38.pyc,, +antlr4/__pycache__/RuleContext.cpython-38.pyc,, +antlr4/__pycache__/StdinStream.cpython-38.pyc,, +antlr4/__pycache__/Token.cpython-38.pyc,, +antlr4/__pycache__/TokenStreamRewriter.cpython-38.pyc,, +antlr4/__pycache__/Utils.cpython-38.pyc,, +antlr4/__pycache__/__init__.cpython-38.pyc,, +antlr4/atn/ATN.py,sha256=LYE8kT-D8FpUd5fpOtyOLqvXLFkUSa83TVFowhCWAiY,5789 +antlr4/atn/ATNConfig.py,sha256=tNdIC6_GrxXllHBx3npAWyDh6KrohLZDV_XyPrydRMY,6565 +antlr4/atn/ATNConfigSet.py,sha256=qRzVsBeMqk2txjG3DrGptwF6Vb2hHC5w3umkSL0GNJw,8312 +antlr4/atn/ATNDeserializationOptions.py,sha256=lUV_bGW6mxj7t20esda5Yv-X9m-U_x1-0xaLifhXIPo,1010 +antlr4/atn/ATNDeserializer.py,sha256=aYLDDtQ-wyo3gId6A-wD1E3QmpfrPZlXxj4_IDm-mUY,22252 +antlr4/atn/ATNSimulator.py,sha256=mDc-G3GF3kSeqpfGDabUOLJ0WLVTqibxZlkvXQYmBRk,2298 +antlr4/atn/ATNState.py,sha256=NbndISWUwFDF_vuBfbTiZZ8GPHoQa6UXdqbD-yjJE7c,7663 +antlr4/atn/ATNType.py,sha256=xgv8AMVU7tc07U73_hRTm1AiZ7MvGhoaP5fTiOrrCGg,422 +antlr4/atn/LexerATNSimulator.py,sha256=kYXRwUvHptSRU8T_K9pSrGlCk9YypWeHlAcjgry1VVo,25465 +antlr4/atn/LexerAction.py,sha256=KUeJwKekBch0m1poSPskHIh-15dcKAG4lR7zlq98tzc,10014 +antlr4/atn/LexerActionExecutor.py,sha256=7rlg17THcwLsuTmh7NsLrTbRH4DTrm8qIdW9_235CEc,6420 +antlr4/atn/ParserATNSimulator.py,sha256=IKCzsDLcznROSVojU-daAygKr3svl0DmK5DhkUllASY,80365 +antlr4/atn/PredictionMode.py,sha256=i8B7MULA7v-qbXeCY_xp6sgi21kHM6kybqIrG6rSrro,22486 +antlr4/atn/SemanticContext.py,sha256=ds0TmM4qenb0LN-rl2Fp_N_xB959abN67I19EF6rs8o,10495 +antlr4/atn/Transition.py,sha256=ZAsEFpa5I_n-zxD6U-DauM5_33jFK65x3PWu6-NW0RA,8762 +antlr4/atn/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/atn/__pycache__/ATN.cpython-38.pyc,, +antlr4/atn/__pycache__/ATNConfig.cpython-38.pyc,, +antlr4/atn/__pycache__/ATNConfigSet.cpython-38.pyc,, +antlr4/atn/__pycache__/ATNDeserializationOptions.cpython-38.pyc,, +antlr4/atn/__pycache__/ATNDeserializer.cpython-38.pyc,, 
+antlr4/atn/__pycache__/ATNSimulator.cpython-38.pyc,, +antlr4/atn/__pycache__/ATNState.cpython-38.pyc,, +antlr4/atn/__pycache__/ATNType.cpython-38.pyc,, +antlr4/atn/__pycache__/LexerATNSimulator.cpython-38.pyc,, +antlr4/atn/__pycache__/LexerAction.cpython-38.pyc,, +antlr4/atn/__pycache__/LexerActionExecutor.cpython-38.pyc,, +antlr4/atn/__pycache__/ParserATNSimulator.cpython-38.pyc,, +antlr4/atn/__pycache__/PredictionMode.cpython-38.pyc,, +antlr4/atn/__pycache__/SemanticContext.cpython-38.pyc,, +antlr4/atn/__pycache__/Transition.cpython-38.pyc,, +antlr4/atn/__pycache__/__init__.cpython-38.pyc,, +antlr4/dfa/DFA.py,sha256=weIh0uaRfakP12mFvHo7U0tqO3GONV3-nHFkc2xk-ZE,5388 +antlr4/dfa/DFASerializer.py,sha256=1st_HO85yXLYy7gInTEnkztgA6am4CT-yReh-mazp9E,2518 +antlr4/dfa/DFAState.py,sha256=R7JwKf0GtAEs9J_MD_Y0WKcuzdt0BVX1sow-uv9yFYc,5583 +antlr4/dfa/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/dfa/__pycache__/DFA.cpython-38.pyc,, +antlr4/dfa/__pycache__/DFASerializer.cpython-38.pyc,, +antlr4/dfa/__pycache__/DFAState.cpython-38.pyc,, +antlr4/dfa/__pycache__/__init__.cpython-38.pyc,, +antlr4/error/DiagnosticErrorListener.py,sha256=EwS2D_Ox6CmvCa16NPJ9ud4QYPHmlPXt6-Wdn1h5Kg8,4430 +antlr4/error/ErrorListener.py,sha256=yP_MDguol4Cj0_pEPyNzeH3v4ZvUjW5iwDjhYTVAHbE,2722 +antlr4/error/ErrorStrategy.py,sha256=0mhzFL57ZVnjKkGrtadta93Zm3NXdF-HW10DVD07VXs,30391 +antlr4/error/Errors.py,sha256=hlKngclBfXdkDiAymhYsvh2OCXlvmHM2kTl_A1vgp-w,6759 +antlr4/error/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/error/__pycache__/DiagnosticErrorListener.cpython-38.pyc,, +antlr4/error/__pycache__/ErrorListener.cpython-38.pyc,, +antlr4/error/__pycache__/ErrorStrategy.cpython-38.pyc,, +antlr4/error/__pycache__/Errors.cpython-38.pyc,, +antlr4/error/__pycache__/__init__.cpython-38.pyc,, +antlr4/tree/Chunk.py,sha256=oCIZjolLq9xkxtVDROEDxfUGgndcEnsDW0eUmLM7Gpk,695 +antlr4/tree/ParseTreeMatch.py,sha256=Dc6GVWSUqoIAFXUaUZqUwCUlZfTcgUbGLGzNf6QxQvE,4485 +antlr4/tree/ParseTreePattern.py,sha256=ASBNaQORh3f7f8KnFeZJC2yWFFx4uQlxvC2Y55ifhY0,2825 +antlr4/tree/ParseTreePatternMatcher.py,sha256=HtE9yi1Urr2QPLGLJDBvr0lxv6bjuj9CHl-4clahSe8,16388 +antlr4/tree/RuleTagToken.py,sha256=n4zXcmrrfsGyl91pj5ZYcc_CeKMhPrvYkUdppgMBpbY,2022 +antlr4/tree/TokenTagToken.py,sha256=S3o3DJhfzL5kpClxsKyI-Il-xvuuZQiBAIsLCKFjRHo,1576 +antlr4/tree/Tree.py,sha256=ZI7U_5IxBLm_IrnfJOtb12BCPIWyzfeZtLnhHKVVZIw,5572 +antlr4/tree/Trees.py,sha256=JtQ7cYWmKwI9TIBP6y9XIgjlNS4mYjv3ARwOfwWc5Vg,3968 +antlr4/tree/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +antlr4/tree/__pycache__/Chunk.cpython-38.pyc,, +antlr4/tree/__pycache__/ParseTreeMatch.cpython-38.pyc,, +antlr4/tree/__pycache__/ParseTreePattern.cpython-38.pyc,, +antlr4/tree/__pycache__/ParseTreePatternMatcher.cpython-38.pyc,, +antlr4/tree/__pycache__/RuleTagToken.cpython-38.pyc,, +antlr4/tree/__pycache__/TokenTagToken.cpython-38.pyc,, +antlr4/tree/__pycache__/Tree.cpython-38.pyc,, +antlr4/tree/__pycache__/Trees.cpython-38.pyc,, +antlr4/tree/__pycache__/__init__.cpython-38.pyc,, +antlr4/xpath/XPath.py,sha256=O9s4-EDvLbAbYytP_bae9Z2khLl0iAtRzPAtVbuWUM4,13015 +antlr4/xpath/__init__.py,sha256=gsnQdtTH8IUgCiVUpQfzhxx2pFRvksW76SjwIk3fYSk,28 +antlr4/xpath/__pycache__/XPath.cpython-38.pyc,, +antlr4/xpath/__pycache__/__init__.cpython-38.pyc,, +antlr4_python3_runtime-4.9.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +antlr4_python3_runtime-4.9.3.dist-info/METADATA,sha256=dTjDfhCFn6_28g1-_VewZKwBUrfG-ttt11anMui4jRU,291 
+antlr4_python3_runtime-4.9.3.dist-info/RECORD,, +antlr4_python3_runtime-4.9.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +antlr4_python3_runtime-4.9.3.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +antlr4_python3_runtime-4.9.3.dist-info/top_level.txt,sha256=OsoZsh9bb30wgXb2zBUjdDwYg46MfV-RVZA6Pk8pcB0,7 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/REQUESTED b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..3dee19f3e13c2cbbb49b818e6ac67c63f77b865f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/antlr4_python3_runtime-4.9.3.dist-info/top_level.txt @@ -0,0 +1 @@ +antlr4 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/__init__.pyi b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..c0a21265036a6f6ceb35eeff504e5d1189db6400 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/__init__.pyi @@ -0,0 +1,484 @@ +import sys + +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . 
import validators as validators +from ._version_info import VersionInfo + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = Union[bool, Callable[[Any], Any]] +_ValidatorType = Callable[[Any, Attribute[_T], _T], Any] +_ConverterType = Callable[[Any], Any] +_FilterType = Callable[[Attribute[_T], _T], bool] +_ReprType = Callable[[Any], str] +_ReprArgType = Union[bool, _ReprType] +_OnSetAttrType = Callable[[Any, Attribute[Any], Any], Any] +_OnSetAttrArgType = Union[ + _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType +] +_FieldTransformer = Callable[ + [type, List[Attribute[Any]]], List[Attribute[Any]] +] +_CompareWithType = Callable[[Any, Any], bool] +# FIXME: in reality, if multiple validators are passed they must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# _make -- + +NOTHING: object + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. +if sys.version_info >= (3, 8): + from typing import Literal + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], + ) -> _T: ... + @overload + def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], + ) -> _T: ... + +else: + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., + ) -> _T: ... + +# Static type inference support via __dataclass_transform__ implemented as per: +# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md +# This annotation must be applied to all overloads of "define" and "attrs" +# +# NOTE: This is a typing construct and does not exist at runtime. Extensions +# wrapping attrs decorators should declare a separate __dataclass_transform__ +# signature in the extension module using the specification linked above to +# provide pyright support. +def __dataclass_transform__( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (()), +) -> Callable[[_T], _T]: ... + +class Attribute(Generic[_T]): + name: str + default: Optional[_T] + validator: Optional[_ValidatorType[_T]] + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: Optional[bool] + init: bool + converter: Optional[_ConverterType] + metadata: Dict[Any, Any] + type: Optional[Type[_T]] + kw_only: bool + on_setattr: _OnSetAttrType + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. 
attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def attrib( + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. 
+@overload +def field( + *, + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: _ReprArgType = ..., + hash: Optional[bool] = ..., + init: bool = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + converter: Optional[_ConverterType] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., +) -> Any: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... 
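The paired overloads above (one taking `maybe_cls: _C`, one taking `maybe_cls: None` and returning `Callable[[_C], _C]`) model the two ways the decorator is applied; a minimal sketch of both forms:

    import attr

    @attr.s                             # bare form: first overload, returns the class itself
    class Point:
        x = attr.ib(default=0)
        y = attr.ib(default=0)

    @attr.s(frozen=True, slots=True)    # parameterized form: second overload, returns a decorator
    class FrozenPoint:
        x = attr.ib()
        y = attr.ib()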
+@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@__dataclass_transform__(field_descriptors=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: Optional[Dict[str, Any]] = ..., + repr: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[bool] = ..., + order: Optional[bool] = ..., + auto_detect: bool = ..., + getstate_setstate: Optional[bool] = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define +frozen = define # they differ only in their defaults + +# TODO: add support for returning NamedTuple from the mypy plugin +class _Fields(Tuple[Attribute[Any], ...]): + def __getattr__(self, name: str) -> Attribute[Any]: ... + +def fields(cls: type) -> _Fields: ... +def fields_dict(cls: type) -> Dict[str, Attribute[Any]]: ... +def validate(inst: Any) -> None: ... +def resolve_types( + cls: _C, + globalns: Optional[Dict[str, Any]] = ..., + localns: Optional[Dict[str, Any]] = ..., + attribs: Optional[List[Attribute[Any]]] = ..., +) -> _C: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: Optional[_EqOrderType] = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: Optional[_EqOrderType] = ..., + order: Optional[_EqOrderType] = ..., + collect_by_mro: bool = ..., + on_setattr: Optional[_OnSetAttrArgType] = ..., + field_transformer: Optional[_FieldTransformer] = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Optional[ + Callable[[type, Attribute[Any], Any], Any] + ] = ..., + tuple_keys: Optional[bool] = ..., +) -> Dict[str, Any]: ... 
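A short sketch of the helpers stubbed above in use; note that `attr.fields()` returning the `_Fields` tuple subclass with `__getattr__` is what makes the attribute-style access below type-check:

    import attr

    C = attr.make_class("C", ["x", "y"])        # dynamic class creation
    inst = C(1, 2)
    assert attr.has(C)
    assert attr.fields(C).x.name == "x"         # _Fields allows fields(C).x as well as indexing
    assert attr.asdict(inst) == {"x": 1, "y": 2}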
+ +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType[Any]] = ..., + tuple_factory: Type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> bool: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... + +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_cmp.pyi b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000000000000000000000000000000000000..e71aaff7a19b7a60ae49738bd81348e80af53a13 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Type + +from . import _CompareWithType + +def cmp_using( + eq: Optional[_CompareWithType], + lt: Optional[_CompareWithType], + le: Optional[_CompareWithType], + gt: Optional[_CompareWithType], + ge: Optional[_CompareWithType], + require_same_type: bool, + class_name: str, +) -> Type: ... diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_config.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_config.py new file mode 100644 index 0000000000000000000000000000000000000000..fc9be29d0081225dfa01fb6dcd7188d665e4b101 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_config.py @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_make.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_make.py new file mode 100644 index 0000000000000000000000000000000000000000..d46f8a3e7a42338af1f6e8763b8df0c1b64e3d89 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/_make.py @@ -0,0 +1,3173 @@ +# SPDX-License-Identifier: MIT + +from __future__ import absolute_import, division, print_function + +import copy +import inspect +import linecache +import sys +import warnings + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . 
import _compat, _config, setters +from ._compat import ( + HAS_F_STRINGS, + PY2, + PY310, + PYPY, + isclass, + iteritems, + metadata_proxy, + new_class, + ordered_dict, + set_closure_cell, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + PythonTooOldError, + UnannotatedAttributeError, +) + + +if not PY2: + import typing + + +# This is used at least twice, so cache it here. +_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_{}" +_tuple_property_pat = ( + " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" +) +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = metadata_proxy({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(object): + """ + Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + + ``_Nothing`` is a singleton. There is only ever one of it. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + """ + + _singleton = None + + def __new__(cls): + if _Nothing._singleton is None: + _Nothing._singleton = super(_Nothing, cls).__new__(cls) + return _Nothing._singleton + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + def __len__(self): + return 0 # __bool__ for Python 2 + + +NOTHING = _Nothing() +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + if PY2: + # For some reason `type(None)` isn't callable in Python 2, but we don't + # actually need a constructor for None objects, we just need any + # available function that returns None. + def __reduce__(self, _none_constructor=getattr, _args=(0, "", None)): + return _none_constructor, _args + + else: + + def __reduce__(self, _none_constructor=type(None), _args=()): + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + `attr.s`! + + :param default: A value that is used if an ``attrs``-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` + will be raised. 
+ + The default can also be set using decorator notation as shown below. + + :type default: Any value + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by ``attrs``-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `get_run_validators`. + + The validator can also be set using decorator notation as shown below. + + :type validator: `callable` or a `list` of `callable`\\ s. + + :param repr: Include this attribute in the generated ``__repr__`` + method. If ``True``, include the attribute; if ``False``, omit it. By + default, the built-in ``repr()`` function is used. To override how the + attribute value is formatted, pass a ``callable`` that takes a single + value and returns a string. Note that the resulting string is used + as-is, i.e. it will be used directly *instead* of calling ``repr()`` + (the default). + :type repr: a `bool` or a `callable` to use a custom function. + + :param eq: If ``True`` (default), include this attribute in the + generated ``__eq__`` and ``__ne__`` methods that check two instances + for equality. To override how the attribute value is compared, + pass a ``callable`` that takes a single value and returns the value + to be compared. + :type eq: a `bool` or a `callable`. + + :param order: If ``True`` (default), include this attributes in the + generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. + To override how the attribute value is ordered, + pass a ``callable`` that takes a single value and returns the value + to be ordered. + :type order: a `bool` or a `callable`. + + :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the + same value. Must not be mixed with *eq* or *order*. + :type cmp: a `bool` or a `callable`. + + :param Optional[bool] hash: Include this attribute in the generated + ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This + is the correct behavior according the Python spec. Setting this value + to anything else than ``None`` is *discouraged*. + :param bool init: Include this attribute in the generated ``__init__`` + method. It is possible to set this to ``False`` and set a default + value. In that case this attributed is unconditionally initialized + with the specified default value or factory. + :param callable converter: `callable` that is called by + ``attrs``-generated ``__init__`` methods to convert attribute's value + to the desired format. It is given the passed-in value, and the + returned value will be used as the new value of the attribute. The + value is converted before being passed to the validator, if any. + :param metadata: An arbitrary mapping, to be used by third-party + components. See `extending_metadata`. + :param type: The type of the attribute. In Python 3.6 or greater, the + preferred method to specify the type is using a variable annotation + (see `PEP 526 `_). + This argument is provided for backward compatibility. + Regardless of the approach used, the type will be stored on + ``Attribute.type``. + + Please note that ``attrs`` doesn't do anything with this metadata by + itself. 
You can use it as part of your own code or for + `static type checking `. + :param kw_only: Make this attribute keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param on_setattr: Allows to overwrite the *on_setattr* setting from + `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. + Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `attr.s`. + :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is ``None`` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + + if factory is not None: + if default is not NOTHING: + raise ValueError( + "The `default` and `factory` arguments are mutually " + "exclusive." + ) + if not callable(factory): + raise ValueError("The `factory` argument must be a callable.") + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + ) + + +def _compile_and_eval(script, globs, locs=None, filename=""): + """ + "Exec" the script with the given global (globs) and local (locs) variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _make_method(name, script, filename, globs=None): + """ + Create the method with the script given and return the method object. + """ + locs = {} + if globs is None: + globs = {} + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. 
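A minimal sketch combining the `attrib()` options documented above (`validator`, `converter`, and the `factory` sugar for `default=attr.Factory(...)`); the class is illustrative:

    import attr

    @attr.s
    class Server:
        host = attr.ib(validator=attr.validators.instance_of(str))  # mandatory, validated after init
        port = attr.ib(default=8080, converter=int)                 # converter runs before any validator
        tags = attr.ib(factory=list)                                # sugar for default=attr.Factory(list)

    s = Server("localhost", port="9000")  # converter turns "9000" into 9000
    assert s.port == 9000 and s.tags == []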
+ count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + else: + filename = "{}-{}>".format(base_filename[:-1], count) + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs[name] + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = "{}Attributes".format(cls_name) + attr_class_template = [ + "class {}(tuple):".format(attr_class_name), + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + _tuple_property_pat.format(index=i, attr_name=attr_name) + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_classvar_prefixes) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + + Requires Python 3. + """ + attr = getattr(cls, attrib_name, _sentinel) + if attr is _sentinel: + return False + + for base_cls in cls.__mro__[1:]: + a = getattr(base_cls, attrib_name, None) + if attr is a: + return False + + return True + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + if _has_own_attribute(cls, "__annotations__"): + return cls.__annotations__ + + return {} + + +def _counter_getter(e): + """ + Key function for sorting to avoid re-creating a lambda for every class. + """ + return e[1].counter + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. 
+ filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + *collect_by_mro* is True, collect them in the correct MRO order, otherwise + use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = [(name, ca) for name, ca in iteritems(these)] + + if not isinstance(these, ordered_dict): + ca_list.sort(key=_counter_getter) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." + ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + raise ValueError( + "No mandatory attributes allowed after an attribute with a " + "default value or factory. 
Attribute in question: %r" % (a,) + ) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +if PYPY: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + +else: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder(object): + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = set(a.name for a in base_attrs) + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. 
+ self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + else: + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + try: + delattr(cls, name) + except AttributeError: + # This can happen if a base class defines a class + # variable and we want to set an attribute with the + # same name by using only a type annotation. + pass + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = object.__setattr__ + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in iteritems(self._cls_dict) + if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = object.__setattr__ + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = dict() + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + # There are slots for attributes from current class + # that are defined in parent classes. 
+ # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in iteritems(existing_slots) + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_hash_cache_field) + cd["__slots__"] = tuple(slot_names) + + qualname = getattr(self._cls, "__qualname__", None) + if qualname is not None: + cd["__qualname__"] = qualname + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # <https://github.com/python-attrs/attrs/issues/102>. On Python 3, + # if a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in cls.__dict__.values(): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # ValueError: Cell is empty + pass + else: + if match: + set_closure_cell(cell, cls) + + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + raise ValueError( + "__str__ can only be generated if a __repr__ exists." + ) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return tuple(getattr(self, name) for name in state_attr_names) + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss.
+ if hash_caching_enabled: + __bound_setattr(_hash_cache_field, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + raise ValueError( + "Can't combine custom __setattr__ with on_setattr hooks." + ) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _obj_setattr(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + try: + method.__module__ = self._cls.__module__ + except AttributeError: + pass + + try: + method.__qualname__ = ".".join( + (self._cls.__qualname__, method.__name__) + ) + except AttributeError: + pass + + try: + method.__doc__ = "Method generated by attrs for class %s." % ( + self._cls.__qualname__, + ) + except AttributeError: + pass + + return method + + +_CMP_DEPRECATION = ( + "The usage of `cmp` is deprecated and will be removed on or after " + "2021-06-01. Please use `eq` and `order` instead." +) + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + # cmp takes precedence due to bw-compatibility. 
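+ # For example, cmp=True resolves to eq=True and order=True, while eq=True + # with order left at None resolves to order=True because order mirrors eq.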
+ if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signals that the user has implemented it themselves. + + Return *default* if no reason for either for or against is found. + + auto_detect must be False on Python 2. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. + for dunder in dunders: + if _has_own_attribute(cls, dunder): + return False + + return default + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=None, + cmp=None, + hash=None, + init=None, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, + auto_exc=False, + eq=None, + order=None, + auto_detect=False, + collect_by_mro=False, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + A class decorator that adds `dunder + <https://nedbatchelder.com/blog/200605/dunder.html>`_\ -methods according to the + specified attributes using `attr.ib` or the *these* argument. + + :param these: A dictionary of name to `attr.ib` mappings. This is + useful to avoid the definition of your attributes within the class body + because you can't (e.g. if you want to add ``__repr__`` methods to + Django models) or don't want to. + + If *these* is not ``None``, ``attrs`` will *not* search the class body + for attributes and will *not* remove any attributes from it. + + If *these* is an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the attributes inside *these*. Otherwise the order + of the definition of the attributes is used.
+ + :type these: `dict` of `str` to `attr.ib` + + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the + namespace explicitly for a more meaningful ``repr`` output. + :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, + *order*, and *hash* arguments explicitly, assume they are set to + ``True`` **unless any** of the involved methods for one of the + arguments is implemented in the *current* class (i.e. it is *not* + inherited from some base class). + + So for example by implementing ``__eq__`` on a class yourself, + ``attrs`` will deduce ``eq=False`` and will create *neither* + ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible + ``__ne__`` by default, so it *should* be enough to only implement + ``__eq__`` in most cases). + + .. warning:: + + If you prevent ``attrs`` from creating the ordering methods for you + (``order=False``, e.g. by implementing ``__le__``), it becomes + *your* responsibility to make sure its ordering is sound. The best + way is to use the `functools.total_ordering` decorator. + + + Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, + *cmp*, or *hash* overrides whatever *auto_detect* would determine. + + *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises + an `attrs.exceptions.PythonTooOldError`. + + :param bool repr: Create a ``__repr__`` method with a human-readable + representation of ``attrs`` attributes. + :param bool str: Create a ``__str__`` method that is identical to + ``__repr__``. This is usually not necessary except for + `Exception`\ s. + :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` + and ``__ne__`` methods that check two instances for equality. + + They compare the instances as if they were tuples of their ``attrs`` + attributes if and only if the types of both classes are *identical*! + :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, + ``__gt__``, and ``__ge__`` methods that behave like *eq* above and + allow instances to be ordered. If ``None`` (default) mirror value of + *eq*. + :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* + and *order* to the same value. Must not be mixed with *eq* or *order*. + :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method + is generated according to how *eq* and *frozen* are set. + + 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to + None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning the + ``__hash__`` method of the base class will be used (if base class is + ``object``, this means it will fall back to id-based hashing.). + + Although not recommended, you can decide for yourself and force + ``attrs`` to create one (e.g. if the class is immutable even though you + didn't freeze it programmatically) by passing ``True`` or not. Both of + these cases are rather special and should be used carefully. + + See our documentation on `hashing`, Python's documentation on + `object.__hash__`, and the `GitHub issue that led to the default \ + behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more + details. + :param bool init: Create a ``__init__`` method that initializes the + ``attrs`` attributes. Leading underscores are stripped for the argument + name. If a ``__attrs_pre_init__`` method exists on the class, it will + be called before the class is initialized.
If a ``__attrs_post_init__`` + method exists on the class, it will be called after the class is fully + initialized. + + If ``init`` is ``False``, an ``__attrs_init__`` method will be + injected instead. This allows you to define a custom ``__init__`` + method that can do pre-init work such as ``super().__init__()``, + and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. + :param bool slots: Create a `slotted class <slotted classes>` that's more + memory-efficient. Slotted classes are generally superior to the default + dict classes, but have some gotchas you should know about, so we + encourage you to read the `glossary entry <slotted classes>`. + :param bool frozen: Make instances immutable after initialization. If + someone attempts to modify a frozen instance, + `attr.exceptions.FrozenInstanceError` is raised. + + .. note:: + + 1. This is achieved by installing a custom ``__setattr__`` method + on your class, so you can't implement your own. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor runtime performance `impact + <how-frozen>` when initializing new instances. In other words: + ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You can + circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + 5. Subclasses of a frozen class are frozen too. + + :param bool weakref_slot: Make instances weak-referenceable. This has no + effect unless ``slots`` is also enabled. + :param bool auto_attribs: If ``True``, collect `PEP 526`_-annotated + attributes (Python 3.6 and later only) from the class body. + + In this case, you **must** annotate every field. If ``attrs`` + encounters a field that is set to an `attr.ib` but lacks a type + annotation, an `attr.exceptions.UnannotatedAttributeError` is + raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't + want to set a type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value as if it were passed using + ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also + works as expected in most cases (see warning below). + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `attr.ib` are **ignored**. + + .. warning:: + For features that use the attribute name to create decorators (e.g. + `validators <examples_validators>`), you still *must* assign `attr.ib` to + them. Otherwise Python will either not find the name or try to use + the default value to call e.g. ``validator`` on it. + + These errors can be quite confusing and are probably the most common bug + report on our bug tracker. + + .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ + :param bool kw_only: Make all attributes keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param bool cache_hash: Ensure that the object's hash code is computed + only once and stored on the object. If this is set to ``True``, + hashing must be either explicitly or implicitly enabled for this + class. If the hash code is cached, avoid any reassignments of + fields involved in hash code computation or mutations of the objects + those fields point to after object creation. If such changes occur, + the behavior of the object's hash code is undefined.
+ :param bool auto_exc: If the class subclasses `BaseException` + (which implicitly includes any subclass of any exception), the + following happens so that it behaves like a well-behaved Python exception + class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids (N.B. ``attrs`` will + *not* remove existing implementations of ``__hash__`` or the equality + methods. It just won't add its own.), + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the ``args`` + attribute, + - the value of *str* is ignored leaving ``__str__`` to base classes. + :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` + collects attributes from base classes. The default behavior is + incorrect in certain cases of multiple inheritance. It should be on by + default but is kept off for backward-compatibility. + + See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for + more details. + + :param Optional[bool] getstate_setstate: + .. note:: + This is usually only interesting for slotted classes and you should + probably just set *auto_detect* to `True`. + + If `True`, ``__getstate__`` and + ``__setstate__`` are generated and attached to the class. This is + necessary for slotted classes to be pickleable. If left `None`, it's + `True` by default for slotted classes and ``False`` for dict classes. + + If *auto_detect* is `True`, and *getstate_setstate* is left `None`, + and **either** ``__getstate__`` or ``__setstate__`` is detected directly + on the class (i.e. not inherited), it is set to `False` (this is usually + what you want). + + :param on_setattr: A callable that is run whenever the user attempts to set + an attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments + as validators: the instance, the attribute that is being modified, and + the new value. + + If no exception is raised, the attribute is set to the return value of + the callable. + + If a list of callables is passed, they're automatically wrapped in an + `attrs.setters.pipe`. + + :param Optional[callable] field_transformer: + A function that is called with the original class object and all + fields right before ``attrs`` finalizes the class. You can use + this, e.g., to automatically add converters or validators to + fields based on their types. See `transform-fields` for more details. + + :param bool match_args: + If `True` (default), set ``__match_args__`` on the class to support + `PEP 634 <https://www.python.org/dev/peps/pep-0634/>`_ (Structural + Pattern Matching). It is a tuple of all non-keyword-only ``__init__`` + parameter names on Python 3.10 and later. Ignored on older Python + versions. + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str* + .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. + .. versionchanged:: 17.1.0 + *hash* supports ``None`` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* + .. versionchanged:: 18.1.0 + If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. versionadded:: 18.2.0 *weakref_slot* + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + `DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses + to each other. + ..
versionchanged:: 19.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider + subclasses comparable anymore. + .. versionadded:: 18.2.0 *kw_only* + .. versionadded:: 18.2.0 *cache_hash* + .. versionadded:: 19.1.0 *auto_exc* + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + """ + if auto_detect and PY2: + raise PythonTooOldError( + "auto_detect only works on Python 3 and later." + ) + + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + hash_ = hash # work around the lack of nonlocal + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + + if getattr(cls, "__class__", None) is None: + raise TypeError("attrs only works with new-style classes.") + + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + raise ValueError("Can't freeze a class with a custom __setattr__.") + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + if ( + hash_ is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + else: + hash = hash_ + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." 
+ ) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " init must be True." + ) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +if PY2: + + def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return ( + getattr(cls.__setattr__, "__module__", None) + == _frozen_setattrs.__module__ + and cls.__setattr__.__name__ == _frozen_setattrs.__name__ + ) + +else: + + def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ == _frozen_setattrs + + +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. + """ + unique_filename = "<attrs generated {0} {1}.{2}>".format( + func_name, + cls.__module__, + getattr(cls, "__qualname__", cls.__name__), + ) + return unique_filename + + +def _make_hash(cls, attrs, frozen, cache_hash): + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + unique_filename = _generate_unique_filename(cls, "hash") + type_hash = hash(unique_filename) + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + if not PY2: + hash_def += ", *" + + hash_def += ( + ", _cache_wrapper=" + + "__import__('attr._make')._make._CacheHashWrapper):" + ) + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + " %d," % (type_hash,), + ] + ) + + for a in attrs: + method_lines.append(indent + " self.%s," % a.name) + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) + if frozen: + append_hash_computation_lines( + "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + "self.%s = " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab + "return self.%s" % _hash_cache_field) + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return _make_method("__hash__", script, unique_filename) + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) + return cls + + +def _make_ne(): + """ + Create __ne__ method. + """ + + def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated.
+ """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + return __ne__ + + +def _make_eq(cls, attrs): + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + unique_filename = _generate_unique_filename(cls, "eq") + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. + globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = "_%s_key" % (a.name,) + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + " %s(self.%s)," + % ( + cmp_name, + a.name, + ) + ) + others.append( + " %s(other.%s)," + % ( + cmp_name, + a.name, + ) + ) + else: + lines.append(" self.%s," % (a.name,)) + others.append(" other.%s," % (a.name,)) + + lines += others + [" )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +if HAS_F_STRINGS: + + def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r + for name, r, _ in attr_names_with_reprs + if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." 
+ name + if i + else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = ( + '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + ) + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + " return f'%s(%s)'" % (cls_name_fragment, repr_fragment), + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + +else: + + def _make_repr(attrs, ns, _): + """ + Make a repr method that includes relevant *attrs*, adding *ns* to the + full name. + """ + + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, repr if a.repr is True else a.repr) + for a in attrs + if a.repr is not False + ) + + def __repr__(self): + """ + Automatically created by attrs. + """ + try: + already_repring = _compat.repr_context.already_repring + except AttributeError: + already_repring = set() + _compat.repr_context.already_repring = already_repring + + if id(self) in already_repring: + return "..." + real_cls = self.__class__ + if ns is None: + qualname = getattr(real_cls, "__qualname__", None) + if qualname is not None: # pragma: no cover + # This case only happens on Python 3.5 and 3.6. We exclude + # it from coverage, because we don't want to slow down our + # test suite by running them under coverage too for this + # one line. + class_name = qualname.rsplit(">.", 1)[-1] + else: + class_name = real_cls.__name__ + else: + class_name = ns + "." + real_cls.__name__ + + # Since 'self' remains on the stack (i.e.: strongly referenced) + # for the duration of this call, it's safe to depend on id(...) + # stability, and not need to track the instance and therefore + # worry about properties like weakref- or hash-ability. + already_repring.add(id(self)) + try: + result = [class_name, "("] + first = True + for name, attr_repr in attr_names_with_reprs: + if first: + first = False + else: + result.append(", ") + result.extend( + (name, "=", attr_repr(getattr(self, name, NOTHING))) + ) + return "".join(result) + ")" + finally: + already_repring.remove(id(self)) + + return __repr__ + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of ``attrs`` attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. 
+ """ + if not isclass(cls): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of ``attrs`` attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: an ordered dict where keys are attribute names and values are + `attrs.Attribute`\\ s. This will be a `dict` if it's + naturally ordered like on Python 3.6+ or an + :class:`~collections.OrderedDict` otherwise. + + .. versionadded:: 18.1.0 + """ + if not isclass(cls): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return ordered_dict(((a.name, a) for a in attrs)) + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Lets all exceptions propagate. + + :param inst: Instance of a class with ``attrs`` attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr"] = _obj_setattr + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*.
+ """ + return "_setattr('%s', %s)" % (attr_name, value_var) + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr('%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return "self.%s = %s" % (attr_name, value) + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +if PY2: + + def _unpack_kw_only_py2(attr_name, default=None): + """ + Unpack *attr_name* from _kw_only dict. + """ + if default is not None: + arg_default = ", %s" % default + else: + arg_default = "" + return "%s = _kw_only.pop('%s'%s)" % ( + attr_name, + attr_name, + arg_default, + ) + + def _unpack_kw_only_lines_py2(kw_only_args): + """ + Unpack all *kw_only_args* from _kw_only dict and handle errors. + + Given a list of strings "{attr_name}" and "{attr_name}={default}" + generates list of lines of code that pop attrs from _kw_only dict and + raise TypeError similar to builtin if required attr is missing or + extra key is passed. + + >>> print("\n".join(_unpack_kw_only_lines_py2(["a", "b=42"]))) + try: + a = _kw_only.pop('a') + b = _kw_only.pop('b', 42) + except KeyError as _key_error: + raise TypeError( + ... + if _kw_only: + raise TypeError( + ... + """ + lines = ["try:"] + lines.extend( + " " + _unpack_kw_only_py2(*arg.split("=")) + for arg in kw_only_args + ) + lines += """\ +except KeyError as _key_error: + raise TypeError( + '__init__() missing required keyword-only argument: %s' % _key_error + ) +if _kw_only: + raise TypeError( + '__init__() got an unexpected keyword argument %r' + % next(iter(_kw_only)) + ) +""".split( + "\n" + ) + return lines + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. + # Note _setattr will be used again below if cache_hash is True + "_setattr = _cached_setattr.__get__(self, self.__class__)" + ) + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. 
+ # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return "_inst_dict['%s'] = %s" % (attr_name, value_var) + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + arg_name = a.name.lstrip("_") + + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = "%s=NOTHING" % (arg_name,) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append("if %s is not NOTHING:" % (arg_name,)) + + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self 
+ ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and a.converter is None: + annotations[arg_name] = a.type + elif a.converter is not None and not PY2: + # Try to get the type from the converter. + sig = None + try: + sig = inspect.signature(a.converter) + except (ValueError, TypeError): # inspect failed + pass + if sig: + sig_params = list(sig.parameters.values()) + if ( + sig_params + and sig_params[0].annotation + is not inspect.Parameter.empty + ): + annotations[arg_name] = sig_params[0].annotation + + if attrs_to_validate: # we can skip this if there are no validators. + names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append( + " %s(self, %s, self.%s)" % (val_name, attr_name, a.name) + ) + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr('%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join("self." + a.name for a in attrs if a.init) + + lines.append("BaseException.__init__(self, %s)" % (vals,)) + + args = ", ".join(args) + if kw_only_args: + if PY2: + lines = _unpack_kw_only_lines_py2(kw_only_args) + lines + + args += "%s**_kw_only" % (", " if args else "",) # leading comma + else: + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) + return ( + """\ +def {init_name}(self, {args}): + {lines} +""".format( + init_name=("__attrs_init__" if attrs_init else "__init__"), + args=args, + lines="\n ".join(lines) if lines else "pass", + ), + names_for_globals, + annotations, + ) + + +class Attribute(object): + """ + *Read-only* representation of an attribute. + + The class has *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)`` plus the + following: + + - ``name`` (`str`): The name of the attribute. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. 
See also :ref:`comparison customization + <custom-comparison>`. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer <transform-fields>` hook receives a list of + them. + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. + hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self, Attribute) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + metadata_proxy(metadata) + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. Deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + raise ValueError( + "Type annotation and type argument cannot both be present" + ) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict + ) + + @property + def cmp(self): + """ + Simulate the presence of a cmp attribute and warn. + """ + warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2) + + return self.eq and self.order + + # Don't use attr.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attr.evolve` but that function does not work + with ``Attribute``. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle.
+ """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + metadata_proxy(value) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr(object): + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into one is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + ) + __attrs_attrs__ = tuple( + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + ) + ) + ( + Attribute( + name="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows setting the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory(object): + """ + Stores a factory callable.
+ + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + """ + `Factory` is part of the default machinery so if we want a default + value here, we have to implement it ourselves. + """ + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + """ + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + attributes. + + If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the names or attributes inside *attrs*. Otherwise the + order of the definition of the attributes is used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = dict((a, attrib()) for a in attrs) + else: + raise TypeError("attrs argument must be a dict or a list.") + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + except (AttributeError, ValueError): + pass + + # We do it here for proper warnings with meaningful stacklevel. 
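+ # Pop the legacy *cmp* argument and translate it into explicit *eq*/*order* + # entries, so the _attrs() call below only ever sees the modern arguments.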
+ cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator(object): + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if + they have any. + + :param callables converters: Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + def pipe_converter(val): + for converter in converters: + val = converter(val) + + return val + + if not PY2: + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__ = {"val": A, "return": A} + else: + # Get parameter type. + sig = None + try: + sig = inspect.signature(converters[0]) + except (ValueError, TypeError): # inspect failed + pass + if sig: + params = list(sig.parameters.values()) + if ( + params + and params[0].annotation is not inspect.Parameter.empty + ): + pipe_converter.__annotations__["val"] = params[ + 0 + ].annotation + # Get return type. + sig = None + try: + sig = inspect.signature(converters[-1]) + except (ValueError, TypeError): # inspect failed + pass + if sig and sig.return_annotation is not inspect.Signature().empty: + pipe_converter.__annotations__[ + "return" + ] = sig.return_annotation + + return pipe_converter diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/converters.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/converters.py new file mode 100644 index 0000000000000000000000000000000000000000..1fb6c05d7bb8893fcdd8ed81f2a708cbf21d98f6 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/converters.py @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + +from __future__ import absolute_import, division, print_function + +from ._compat import PY2 +from ._make import NOTHING, Factory, pipe + + +if not PY2: + import inspect + import typing + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. 
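A short usage sketch of `make_class` as defined above, assuming the vendored `attr` package is importable (the class names are arbitrary):

import attr

# list form: attribute order follows the list
C = attr.make_class("C", ["x", "y"])
c = C(1, 2)
assert (c.x, c.y) == (1, 2)

# dict form allows per-attribute configuration; extra kwargs go to attr.s
D = attr.make_class("D", {"x": attr.ib(default=0)}, frozen=True)
assert D().x == 0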
versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + if not PY2: + sig = None + try: + sig = inspect.signature(converter) + except (ValueError, TypeError): # inspect failed + pass + if sig: + params = list(sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + optional_converter.__annotations__["val"] = typing.Optional[ + params[0].annotation + ] + if sig.return_annotation is not inspect.Signature.empty: + optional_converter.__annotations__["return"] = typing.Optional[ + sig.return_annotation + ] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + raise TypeError("Must pass either `default` or `factory`.") + + if default is not NOTHING and factory is not None: + raise TypeError( + "Must pass either `default` or `factory` but not both." + ) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + raise ValueError( + "`takes_self` is not supported by default_if_none." + ) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + raise ValueError("Cannot convert value to bool: {}".format(val)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/exceptions.pyi b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f2680118b404db8f5227d04d27e8439331341c4d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... 
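The converters above compose naturally with `attr.ib`. A brief sketch, assuming the vendored `attr` package is importable (the `Config` class is hypothetical):

import attr
from attr.converters import default_if_none, optional, pipe

@attr.s
class Config(object):
    # None passes through untouched; other values go through int()
    port = attr.ib(converter=optional(int), default=None)
    # None is replaced by a fresh list from the factory
    tags = attr.ib(converter=default_if_none(factory=list), default=None)
    # pipe() runs converters left to right and returns the last value
    name = attr.ib(converter=pipe(str, str.strip, str.lower), default=" X ")

c = Config(port="8080")
assert (c.port, c.tags, c.name) == (8080, [], "x")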
+class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2fb831d474274d92edf7d2f181d050c1a76ba9dd --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/__init__.py @@ -0,0 +1,40 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Initialize the object database module""" + +import sys +import os + +#{ Initialization + + +def _init_externals(): + """Initialize external projects by putting them into the path""" + for module in ('smmap',): + if 'PYOXIDIZER' not in os.environ: + sys.path.append(os.path.join(os.path.dirname(__file__), 'ext', module)) + + try: + __import__(module) + except ImportError as e: + raise ImportError("'%s' could not be imported, assure it is located in your PYTHONPATH" % module) from e + # END verify import + # END handel imports + +#} END initialization + +_init_externals() + +__author__ = "Sebastian Thiel" +__contact__ = "byronimo@gmail.com" +__homepage__ = "https://github.com/gitpython-developers/gitdb" +version_info = (4, 0, 9) +__version__ = '.'.join(str(i) for i in version_info) + + +# default imports +from gitdb.base import * +from gitdb.db import * +from gitdb.stream import * diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/base.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/base.py new file mode 100644 index 0000000000000000000000000000000000000000..42e71d0fa42831fcb97515df5b3cc9fb421df782 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/base.py @@ -0,0 +1,315 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Module with basic data structures - they are designed to be lightweight and fast""" +from gitdb.util import bin_to_hex + +from gitdb.fun import ( + type_id_to_type_map, + type_to_type_id_map +) + +__all__ = ('OInfo', 'OPackInfo', 'ODeltaPackInfo', + 'OStream', 'OPackStream', 'ODeltaPackStream', + 'IStream', 'InvalidOInfo', 'InvalidOStream') + +#{ ODB Bases + + +class OInfo(tuple): + + """Carries information about an object in an ODB, providing information + about the binary sha of the object, the type_string as well as the uncompressed size + in bytes. 
+ + It can be accessed using tuple notation and using attribute access notation:: + + assert dbi[0] == dbi.binsha + assert dbi[1] == dbi.type + assert dbi[2] == dbi.size + + The type is designed to be as lightweight as possible.""" + __slots__ = tuple() + + def __new__(cls, sha, type, size): + return tuple.__new__(cls, (sha, type, size)) + + def __init__(self, *args): + tuple.__init__(self) + + #{ Interface + @property + def binsha(self): + """:return: our sha as binary, 20 bytes""" + return self[0] + + @property + def hexsha(self): + """:return: our sha, hex encoded, 40 bytes""" + return bin_to_hex(self[0]) + + @property + def type(self): + return self[1] + + @property + def type_id(self): + return type_to_type_id_map[self[1]] + + @property + def size(self): + return self[2] + #} END interface + + +class OPackInfo(tuple): + + """As OInfo, but provides a type_id property to retrieve the numerical type id, and + does not include a sha. + + Additionally, the pack_offset is the absolute offset into the packfile at which + all object information is located. The data_offset property points to the absolute + location in the pack at which that actual data stream can be found.""" + __slots__ = tuple() + + def __new__(cls, packoffset, type, size): + return tuple.__new__(cls, (packoffset, type, size)) + + def __init__(self, *args): + tuple.__init__(self) + + #{ Interface + + @property + def pack_offset(self): + return self[0] + + @property + def type(self): + return type_id_to_type_map[self[1]] + + @property + def type_id(self): + return self[1] + + @property + def size(self): + return self[2] + + #} END interface + + +class ODeltaPackInfo(OPackInfo): + + """Adds delta specific information, + Either the 20 byte sha which points to some object in the database, + or the negative offset from the pack_offset, so that pack_offset - delta_info yields + the pack offset of the base object""" + __slots__ = tuple() + + def __new__(cls, packoffset, type, size, delta_info): + return tuple.__new__(cls, (packoffset, type, size, delta_info)) + + #{ Interface + @property + def delta_info(self): + return self[3] + #} END interface + + +class OStream(OInfo): + + """Base for object streams retrieved from the database, providing additional + information about the stream. 
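A stdlib-only sketch of the slotted tuple-subclass pattern these info types share (the `MiniInfo` name is made up):

class MiniInfo(tuple):
    # no __dict__, so instances stay as small as a plain tuple
    __slots__ = tuple()

    def __new__(cls, binsha, type, size):
        return tuple.__new__(cls, (binsha, type, size))

    @property
    def type(self):
        return self[1]

info = MiniInfo(b'\x00' * 20, b'blob', 42)
assert info[1] == info.type == b'blob'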
+ Generally, ODB streams are read-only as objects are immutable""" + __slots__ = tuple() + + def __new__(cls, sha, type, size, stream, *args, **kwargs): + """Helps with the initialization of subclasses""" + return tuple.__new__(cls, (sha, type, size, stream)) + + def __init__(self, *args, **kwargs): + tuple.__init__(self) + + #{ Stream Reader Interface + + def read(self, size=-1): + return self[3].read(size) + + @property + def stream(self): + return self[3] + + #} END stream reader interface + + +class ODeltaStream(OStream): + + """Uses size info of its stream, delaying reads""" + + def __new__(cls, sha, type, size, stream, *args, **kwargs): + """Helps with the initialization of subclasses""" + return tuple.__new__(cls, (sha, type, size, stream)) + + #{ Stream Reader Interface + + @property + def size(self): + return self[3].size + + #} END stream reader interface + + +class OPackStream(OPackInfo): + + """Next to pack object information, a stream outputting an undeltified base object + is provided""" + __slots__ = tuple() + + def __new__(cls, packoffset, type, size, stream, *args): + """Helps with the initialization of subclasses""" + return tuple.__new__(cls, (packoffset, type, size, stream)) + + #{ Stream Reader Interface + def read(self, size=-1): + return self[3].read(size) + + @property + def stream(self): + return self[3] + #} END stream reader interface + + +class ODeltaPackStream(ODeltaPackInfo): + + """Provides a stream outputting the uncompressed offset delta information""" + __slots__ = tuple() + + def __new__(cls, packoffset, type, size, delta_info, stream): + return tuple.__new__(cls, (packoffset, type, size, delta_info, stream)) + + #{ Stream Reader Interface + def read(self, size=-1): + return self[4].read(size) + + @property + def stream(self): + return self[4] + #} END stream reader interface + + +class IStream(list): + + """Represents an input content stream to be fed into the ODB. It is mutable to allow + the ODB to record information about the operations outcome right in this instance. + + It provides interfaces for the OStream and a StreamReader to allow the instance + to blend in without prior conversion. 
+ + The only method your content stream must support is 'read'""" + __slots__ = tuple() + + def __new__(cls, type, size, stream, sha=None): + return list.__new__(cls, (sha, type, size, stream, None)) + + def __init__(self, type, size, stream, sha=None): + list.__init__(self, (sha, type, size, stream, None)) + + #{ Interface + @property + def hexsha(self): + """:return: our sha, hex encoded, 40 bytes""" + return bin_to_hex(self[0]) + + def _error(self): + """:return: the error that occurred when processing the stream, or None""" + return self[4] + + def _set_error(self, exc): + """Set this input stream to the given exc, may be None to reset the error""" + self[4] = exc + + error = property(_error, _set_error) + + #} END interface + + #{ Stream Reader Interface + + def read(self, size=-1): + """Implements a simple stream reader interface, passing the read call on + to our internal stream""" + return self[3].read(size) + + #} END stream reader interface + + #{ interface + + def _set_binsha(self, binsha): + self[0] = binsha + + def _binsha(self): + return self[0] + + binsha = property(_binsha, _set_binsha) + + def _type(self): + return self[1] + + def _set_type(self, type): + self[1] = type + + type = property(_type, _set_type) + + def _size(self): + return self[2] + + def _set_size(self, size): + self[2] = size + + size = property(_size, _set_size) + + def _stream(self): + return self[3] + + def _set_stream(self, stream): + self[3] = stream + + stream = property(_stream, _set_stream) + + #} END odb info interface + + +class InvalidOInfo(tuple): + + """Carries information about a sha identifying an object which is invalid in + the queried database. The exception attribute provides more information about + the cause of the issue""" + __slots__ = tuple() + + def __new__(cls, sha, exc): + return tuple.__new__(cls, (sha, exc)) + + def __init__(self, sha, exc): + tuple.__init__(self, (sha, exc)) + + @property + def binsha(self): + return self[0] + + @property + def hexsha(self): + return bin_to_hex(self[0]) + + @property + def error(self): + """:return: exception instance explaining the failure""" + return self[1] + + +class InvalidOStream(InvalidOInfo): + + """Carries information about an invalid ODB stream""" + __slots__ = tuple() + +#} END ODB Bases diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/const.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/const.py new file mode 100644 index 0000000000000000000000000000000000000000..6391d796f3fa3714802c8858960f5cfb3947c230 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/const.py @@ -0,0 +1,4 @@ +BYTE_SPACE = b' ' +NULL_BYTE = b'\0' +NULL_HEX_SHA = "0" * 40 +NULL_BIN_SHA = NULL_BYTE * 20 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/exc.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/exc.py new file mode 100644 index 0000000000000000000000000000000000000000..947e5d8bfce2314702905671db6e1a60e4db40fe --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/exc.py @@ -0,0 +1,46 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Module with common exceptions""" +from gitdb.util import to_hex_sha + + +class ODBError(Exception): + """All errors thrown by the object 
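For illustration, how an `IStream` is fed to an object database and written back to, assuming the vendored gitdb package is importable (the content and sha values are made up):

from io import BytesIO
from gitdb.base import IStream

content = b'hello'
istream = IStream(b'blob', len(content), BytesIO(content))
assert istream.binsha is None          # not yet computed
istream.binsha = b'\x01' * 20          # an ODB would record the real sha here
assert istream.hexsha == b'01' * 20
assert istream.read() == content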
database""" + + +class InvalidDBRoot(ODBError): + """Thrown if an object database cannot be initialized at the given path""" + + +class BadObject(ODBError): + """The object with the given SHA does not exist. Instantiate with the + failed sha""" + + def __str__(self): + return "BadObject: %s" % to_hex_sha(self.args[0]) + + +class BadName(ODBError): + """A name provided to rev_parse wasn't understood""" + + def __str__(self): + return "Ref '%s' did not resolve to an object" % self.args[0] + + +class ParseError(ODBError): + """Thrown if the parsing of a file failed due to an invalid format""" + + +class AmbiguousObjectName(ODBError): + """Thrown if a possibly shortened name does not uniquely represent a single object + in the database""" + + +class BadObjectType(ODBError): + """The object had an unsupported type""" + + +class UnsupportedOperation(ODBError): + """Thrown if the given operation cannot be supported by the object database""" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/fun.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/fun.py new file mode 100644 index 0000000000000000000000000000000000000000..98465975d034d700f5fed45d34f057c0327b781c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/fun.py @@ -0,0 +1,704 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Contains basic c-functions which usually contain performance critical code +Keeping this code separate from the beginning makes it easier to out-source +it into c later, if required""" + +import zlib +from gitdb.util import byte_ord +decompressobj = zlib.decompressobj + +import mmap +from itertools import islice +from functools import reduce + +from gitdb.const import NULL_BYTE, BYTE_SPACE +from gitdb.utils.encoding import force_text +from gitdb.typ import ( + str_blob_type, + str_commit_type, + str_tree_type, + str_tag_type, +) + +from io import StringIO + +# INVARIANTS +OFS_DELTA = 6 +REF_DELTA = 7 +delta_types = (OFS_DELTA, REF_DELTA) + +type_id_to_type_map = { + 0: b'', # EXT 1 + 1: str_commit_type, + 2: str_tree_type, + 3: str_blob_type, + 4: str_tag_type, + 5: b'', # EXT 2 + OFS_DELTA: "OFS_DELTA", # OFFSET DELTA + REF_DELTA: "REF_DELTA" # REFERENCE DELTA +} + +type_to_type_id_map = { + str_commit_type: 1, + str_tree_type: 2, + str_blob_type: 3, + str_tag_type: 4, + "OFS_DELTA": OFS_DELTA, + "REF_DELTA": REF_DELTA, +} + +# used when dealing with larger streams +chunk_size = 1000 * mmap.PAGESIZE + +__all__ = ('is_loose_object', 'loose_object_header_info', 'msb_size', 'pack_object_header_info', + 'write_object', 'loose_object_header', 'stream_copy', 'apply_delta_data', + 'is_equal_canonical_sha', 'connect_deltas', 'DeltaChunkList', 'create_pack_object_header') + + +#{ Structures + +def _set_delta_rbound(d, size): + """Truncate the given delta to the given size + :param size: size relative to our target offset, may not be 0, must be smaller or equal + to our size + :return: d""" + d.ts = size + + # NOTE: data is truncated automatically when applying the delta + # MUST NOT DO THIS HERE + return d + + +def _move_delta_lbound(d, bytes): + """Move the delta by the given amount of bytes, reducing its size so that its + right bound stays static + :param bytes: amount of bytes to move, must be smaller than delta size + :return: d""" + if bytes == 0: 
+ return + + d.to += bytes + d.so += bytes + d.ts -= bytes + if d.data is not None: + d.data = d.data[bytes:] + # END handle data + + return d + + +def delta_duplicate(src): + return DeltaChunk(src.to, src.ts, src.so, src.data) + + +def delta_chunk_apply(dc, bbuf, write): + """Apply own data to the target buffer + :param bbuf: buffer providing source bytes for copy operations + :param write: write method to call with data to write""" + if dc.data is None: + # COPY DATA FROM SOURCE + write(bbuf[dc.so:dc.so + dc.ts]) + else: + # APPEND DATA + # whats faster: if + 4 function calls or just a write with a slice ? + # Considering data can be larger than 127 bytes now, it should be worth it + if dc.ts < len(dc.data): + write(dc.data[:dc.ts]) + else: + write(dc.data) + # END handle truncation + # END handle chunk mode + + +class DeltaChunk(object): + + """Represents a piece of a delta, it can either add new data, or copy existing + one from a source buffer""" + __slots__ = ( + 'to', # start offset in the target buffer in bytes + 'ts', # size of this chunk in the target buffer in bytes + 'so', # start offset in the source buffer in bytes or None + 'data', # chunk of bytes to be added to the target buffer, + # DeltaChunkList to use as base, or None + ) + + def __init__(self, to, ts, so, data): + self.to = to + self.ts = ts + self.so = so + self.data = data + + def __repr__(self): + return "DeltaChunk(%i, %i, %s, %s)" % (self.to, self.ts, self.so, self.data or "") + + #{ Interface + + def rbound(self): + return self.to + self.ts + + def has_data(self): + """:return: True if the instance has data to add to the target stream""" + return self.data is not None + + #} END interface + + +def _closest_index(dcl, absofs): + """:return: index at which the given absofs should be inserted. The index points + to the DeltaChunk with a target buffer absofs that equals or is greater than + absofs. + **Note:** global method for performance only, it belongs to DeltaChunkList""" + lo = 0 + hi = len(dcl) + while lo < hi: + mid = (lo + hi) / 2 + dc = dcl[mid] + if dc.to > absofs: + hi = mid + elif dc.rbound() > absofs or dc.to == absofs: + return mid + else: + lo = mid + 1 + # END handle bound + # END for each delta absofs + return len(dcl) - 1 + + +def delta_list_apply(dcl, bbuf, write): + """Apply the chain's changes and write the final result using the passed + write function. + :param bbuf: base buffer containing the base of all deltas contained in this + list. It will only be used if the chunk in question does not have a base + chain. + :param write: function taking a string of bytes to write to the output""" + for dc in dcl: + delta_chunk_apply(dc, bbuf, write) + # END for each dc + + +def delta_list_slice(dcl, absofs, size, ndcl): + """:return: Subsection of this list at the given absolute offset, with the given + size in bytes. 
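A small stdlib-only sketch of the two chunk kinds `delta_chunk_apply` distinguishes above, copy-from-source versus literal add (the buffer contents are made up):

from io import BytesIO

base = b'the quick brown fox'
out = BytesIO()

# copy chunk: data is None, so ts bytes come from base at source offset so
so, ts = 4, 5
out.write(base[so:so + ts])                        # -> b'quick'

# add chunk: data carries the literal bytes, truncated to ts if longer
data, ts = b' jumps!', 6
out.write(data[:ts] if ts < len(data) else data)   # -> b' jumps'

assert out.getvalue() == b'quick jumps'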
+ :return: None""" + cdi = _closest_index(dcl, absofs) # delta start index + cd = dcl[cdi] + slen = len(dcl) + lappend = ndcl.append + + if cd.to != absofs: + tcd = DeltaChunk(cd.to, cd.ts, cd.so, cd.data) + _move_delta_lbound(tcd, absofs - cd.to) + tcd.ts = min(tcd.ts, size) + lappend(tcd) + size -= tcd.ts + cdi += 1 + # END lbound overlap handling + + while cdi < slen and size: + # are we larger than the current block + cd = dcl[cdi] + if cd.ts <= size: + lappend(DeltaChunk(cd.to, cd.ts, cd.so, cd.data)) + size -= cd.ts + else: + tcd = DeltaChunk(cd.to, cd.ts, cd.so, cd.data) + tcd.ts = size + lappend(tcd) + size -= tcd.ts + break + # END hadle size + cdi += 1 + # END for each chunk + + +class DeltaChunkList(list): + + """List with special functionality to deal with DeltaChunks. + There are two types of lists we represent. The one was created bottom-up, working + towards the latest delta, the other kind was created top-down, working from the + latest delta down to the earliest ancestor. This attribute is queryable + after all processing with is_reversed.""" + + __slots__ = tuple() + + def rbound(self): + """:return: rightmost extend in bytes, absolute""" + if len(self) == 0: + return 0 + return self[-1].rbound() + + def lbound(self): + """:return: leftmost byte at which this chunklist starts""" + if len(self) == 0: + return 0 + return self[0].to + + def size(self): + """:return: size of bytes as measured by our delta chunks""" + return self.rbound() - self.lbound() + + def apply(self, bbuf, write): + """Only used by public clients, internally we only use the global routines + for performance""" + return delta_list_apply(self, bbuf, write) + + def compress(self): + """Alter the list to reduce the amount of nodes. Currently we concatenate + add-chunks + :return: self""" + slen = len(self) + if slen < 2: + return self + i = 0 + + first_data_index = None + while i < slen: + dc = self[i] + i += 1 + if dc.data is None: + if first_data_index is not None and i - 2 - first_data_index > 1: + # if first_data_index is not None: + nd = StringIO() # new data + so = self[first_data_index].to # start offset in target buffer + for x in range(first_data_index, i - 1): + xdc = self[x] + nd.write(xdc.data[:xdc.ts]) + # END collect data + + del(self[first_data_index:i - 1]) + buf = nd.getvalue() + self.insert(first_data_index, DeltaChunk(so, len(buf), 0, buf)) + + slen = len(self) + i = first_data_index + 1 + + # END concatenate data + first_data_index = None + continue + # END skip non-data chunks + + if first_data_index is None: + first_data_index = i - 1 + # END iterate list + + # if slen_orig != len(self): + # print "INFO: Reduced delta list len to %f %% of former size" % ((float(len(self)) / slen_orig) * 100) + return self + + def check_integrity(self, target_size=-1): + """Verify the list has non-overlapping chunks only, and the total size matches + target_size + :param target_size: if not -1, the total size of the chain must be target_size + :raise AssertionError: if the size doen't match""" + if target_size > -1: + assert self[-1].rbound() == target_size + assert reduce(lambda x, y: x + y, (d.ts for d in self), 0) == target_size + # END target size verification + + if len(self) < 2: + return + + # check data + for dc in self: + assert dc.ts > 0 + if dc.has_data(): + assert len(dc.data) >= dc.ts + # END for each dc + + left = islice(self, 0, len(self) - 1) + right = iter(self) + right.next() + # this is very pythonic - we might have just use index based access here, + # but this could actually be 
faster + for lft, rgt in zip(left, right): + assert lft.rbound() == rgt.to + assert lft.to + lft.ts == rgt.to + # END for each pair + + +class TopdownDeltaChunkList(DeltaChunkList): + + """Represents a list which is generated by feeding its ancestor streams one by + one""" + __slots__ = tuple() + + def connect_with_next_base(self, bdcl): + """Connect this chain with the next level of our base delta chunklist. + The goal in this game is to mark as many of our chunks rigid, hence they + cannot be changed by any of the upcoming bases anymore. Once all our + chunks are marked like that, we can stop all processing + :param bdcl: data chunk list being one of our bases. They must be fed in + consequtively and in order, towards the earliest ancestor delta + :return: True if processing was done. Use it to abort processing of + remaining streams if False is returned""" + nfc = 0 # number of frozen chunks + dci = 0 # delta chunk index + slen = len(self) # len of self + ccl = list() # temporary list + while dci < slen: + dc = self[dci] + dci += 1 + + # all add-chunks which are already topmost don't need additional processing + if dc.data is not None: + nfc += 1 + continue + # END skip add chunks + + # copy chunks + # integrate the portion of the base list into ourselves. Lists + # dont support efficient insertion ( just one at a time ), but for now + # we live with it. Internally, its all just a 32/64bit pointer, and + # the portions of moved memory should be smallish. Maybe we just rebuild + # ourselves in order to reduce the amount of insertions ... + del(ccl[:]) + delta_list_slice(bdcl, dc.so, dc.ts, ccl) + + # move the target bounds into place to match with our chunk + ofs = dc.to - dc.so + for cdc in ccl: + cdc.to += ofs + # END update target bounds + + if len(ccl) == 1: + self[dci - 1] = ccl[0] + else: + # maybe try to compute the expenses here, and pick the right algorithm + # It would normally be faster than copying everything physically though + # TODO: Use a deque here, and decide by the index whether to extend + # or extend left ! + post_dci = self[dci:] + del(self[dci - 1:]) # include deletion of dc + self.extend(ccl) + self.extend(post_dci) + + slen = len(self) + dci += len(ccl) - 1 # deleted dc, added rest + + # END handle chunk replacement + # END for each chunk + + if nfc == slen: + return False + # END handle completeness + return True + + +#} END structures + +#{ Routines + +def is_loose_object(m): + """ + :return: True the file contained in memory map m appears to be a loose object. + Only the first two bytes are needed""" + b0, b1 = map(ord, m[:2]) + word = (b0 << 8) + b1 + return b0 == 0x78 and (word % 31) == 0 + + +def loose_object_header_info(m): + """ + :return: tuple(type_string, uncompressed_size_in_bytes) the type string of the + object as well as its uncompressed size in bytes. 
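A quick check of the loose-object test above: a zlib stream begins with 0x78, and its first two bytes read as a big-endian word are divisible by 31. The second assert exercises the header parser, assuming the vendored gitdb package is importable:

import zlib
from gitdb.fun import loose_object_header_info

loose = zlib.compress(b'blob 5\x00hello')
b0, b1 = loose[0], loose[1]
assert b0 == 0x78 and ((b0 << 8) + b1) % 31 == 0
assert loose_object_header_info(loose) == (b'blob', 5)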
+ :param m: memory map from which to read the compressed object data""" + decompress_size = 8192 # is used in cgit as well + hdr = decompressobj().decompress(m, decompress_size) + type_name, size = hdr[:hdr.find(NULL_BYTE)].split(BYTE_SPACE) + + return type_name, int(size) + + +def pack_object_header_info(data): + """ + :return: tuple(type_id, uncompressed_size_in_bytes, byte_offset) + The type_id should be interpreted according to the ``type_id_to_type_map`` map + The byte-offset specifies the start of the actual zlib compressed datastream + :param m: random-access memory, like a string or memory map""" + c = byte_ord(data[0]) # first byte + i = 1 # next char to read + type_id = (c >> 4) & 7 # numeric type + size = c & 15 # starting size + s = 4 # starting bit-shift size + while c & 0x80: + c = byte_ord(data[i]) + i += 1 + size += (c & 0x7f) << s + s += 7 + # END character loop + # end performance at expense of maintenance ... + return (type_id, size, i) + + +def create_pack_object_header(obj_type, obj_size): + """ + :return: string defining the pack header comprised of the object type + and its incompressed size in bytes + + :param obj_type: pack type_id of the object + :param obj_size: uncompressed size in bytes of the following object stream""" + c = 0 # 1 byte + hdr = bytearray() # output string + + c = (obj_type << 4) | (obj_size & 0xf) + obj_size >>= 4 + while obj_size: + hdr.append(c | 0x80) + c = obj_size & 0x7f + obj_size >>= 7 + # END until size is consumed + hdr.append(c) + # end handle interpreter + return hdr + + +def msb_size(data, offset=0): + """ + :return: tuple(read_bytes, size) read the msb size from the given random + access data starting at the given byte offset""" + size = 0 + i = 0 + l = len(data) + hit_msb = False + while i < l: + c = data[i + offset] + size |= (c & 0x7f) << i * 7 + i += 1 + if not c & 0x80: + hit_msb = True + break + # END check msb bit + # END while in range + # end performance ... 
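The two pack-header routines above are inverses of one another; a small round-trip check, assuming the vendored gitdb package is importable:

from gitdb.fun import create_pack_object_header, pack_object_header_info

hdr = create_pack_object_header(1, 300)        # type id 1 == commit
type_id, size, consumed = pack_object_header_info(bytes(hdr))
assert (type_id, size, consumed) == (1, 300, len(hdr))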
+ if not hit_msb: + raise AssertionError("Could not find terminating MSB byte in data stream") + return i + offset, size + + +def loose_object_header(type, size): + """ + :return: bytes representing the loose object header, which is immediately + followed by the content stream of size 'size'""" + return ('%s %i\0' % (force_text(type), size)).encode('ascii') + + +def write_object(type, size, read, write, chunk_size=chunk_size): + """ + Write the object as identified by type, size and source_stream into the + target_stream + + :param type: type string of the object + :param size: amount of bytes to write from source_stream + :param read: read method of a stream providing the content data + :param write: write method of the output stream + :param close_target_stream: if True, the target stream will be closed when + the routine exits, even if an error is thrown + :return: The actual amount of bytes written to stream, which includes the header and a trailing newline""" + tbw = 0 # total num bytes written + + # WRITE HEADER: type SP size NULL + tbw += write(loose_object_header(type, size)) + tbw += stream_copy(read, write, size, chunk_size) + + return tbw + + +def stream_copy(read, write, size, chunk_size): + """ + Copy a stream up to size bytes using the provided read and write methods, + in chunks of chunk_size + + **Note:** its much like stream_copy utility, but operates just using methods""" + dbw = 0 # num data bytes written + + # WRITE ALL DATA UP TO SIZE + while True: + cs = min(chunk_size, size - dbw) + # NOTE: not all write methods return the amount of written bytes, like + # mmap.write. Its bad, but we just deal with it ... perhaps its not + # even less efficient + # data_len = write(read(cs)) + # dbw += data_len + data = read(cs) + data_len = len(data) + dbw += data_len + write(data) + if data_len < cs or dbw == size: + break + # END check for stream end + # END duplicate data + return dbw + + +def connect_deltas(dstreams): + """ + Read the condensed delta chunk information from dstream and merge its information + into a list of existing delta chunks + + :param dstreams: iterable of delta stream objects, the delta to be applied last + comes first, then all its ancestors in order + :return: DeltaChunkList, containing all operations to apply""" + tdcl = None # topmost dcl + + dcl = tdcl = TopdownDeltaChunkList() + for dsi, ds in enumerate(dstreams): + # print "Stream", dsi + db = ds.read() + delta_buf_size = ds.size + + # read header + i, base_size = msb_size(db) + i, target_size = msb_size(db, i) + + # interpret opcodes + tbw = 0 # amount of target bytes written + while i < delta_buf_size: + c = ord(db[i]) + i += 1 + if c & 0x80: + cp_off, cp_size = 0, 0 + if (c & 0x01): + cp_off = ord(db[i]) + i += 1 + if (c & 0x02): + cp_off |= (ord(db[i]) << 8) + i += 1 + if (c & 0x04): + cp_off |= (ord(db[i]) << 16) + i += 1 + if (c & 0x08): + cp_off |= (ord(db[i]) << 24) + i += 1 + if (c & 0x10): + cp_size = ord(db[i]) + i += 1 + if (c & 0x20): + cp_size |= (ord(db[i]) << 8) + i += 1 + if (c & 0x40): + cp_size |= (ord(db[i]) << 16) + i += 1 + + if not cp_size: + cp_size = 0x10000 + + rbound = cp_off + cp_size + if (rbound < cp_size or + rbound > base_size): + break + + dcl.append(DeltaChunk(tbw, cp_size, cp_off, None)) + tbw += cp_size + elif c: + # NOTE: in C, the data chunks should probably be concatenated here. 
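A short usage sketch of `write_object` and the loose header format it emits (type, space, decimal size, NUL), assuming the vendored gitdb package is importable:

from io import BytesIO
from gitdb.fun import write_object

body = b'hello world'
src, dst = BytesIO(body), BytesIO()
written = write_object(b'blob', len(body), src.read, dst.write)
assert dst.getvalue() == b'blob 11\x00hello world'
assert written == len(dst.getvalue())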
+ # In python, we do it as a post-process + dcl.append(DeltaChunk(tbw, c, 0, db[i:i + c])) + i += c + tbw += c + else: + raise ValueError("unexpected delta opcode 0") + # END handle command byte + # END while processing delta data + + dcl.compress() + + # merge the lists ! + if dsi > 0: + if not tdcl.connect_with_next_base(dcl): + break + # END handle merge + + # prepare next base + dcl = DeltaChunkList() + # END for each delta stream + + return tdcl + + +def apply_delta_data(src_buf, src_buf_size, delta_buf, delta_buf_size, write): + """ + Apply data from a delta buffer using a source buffer to the target file + + :param src_buf: random access data from which the delta was created + :param src_buf_size: size of the source buffer in bytes + :param delta_buf_size: size fo the delta buffer in bytes + :param delta_buf: random access delta data + :param write: write method taking a chunk of bytes + + **Note:** transcribed to python from the similar routine in patch-delta.c""" + i = 0 + db = delta_buf + while i < delta_buf_size: + c = db[i] + i += 1 + if c & 0x80: + cp_off, cp_size = 0, 0 + if (c & 0x01): + cp_off = db[i] + i += 1 + if (c & 0x02): + cp_off |= (db[i] << 8) + i += 1 + if (c & 0x04): + cp_off |= (db[i] << 16) + i += 1 + if (c & 0x08): + cp_off |= (db[i] << 24) + i += 1 + if (c & 0x10): + cp_size = db[i] + i += 1 + if (c & 0x20): + cp_size |= (db[i] << 8) + i += 1 + if (c & 0x40): + cp_size |= (db[i] << 16) + i += 1 + + if not cp_size: + cp_size = 0x10000 + + rbound = cp_off + cp_size + if (rbound < cp_size or + rbound > src_buf_size): + break + write(src_buf[cp_off:cp_off + cp_size]) + elif c: + write(db[i:i + c]) + i += c + else: + raise ValueError("unexpected delta opcode 0") + # END handle command byte + # END while processing delta data + + # yes, lets use the exact same error message that git uses :) + assert i == delta_buf_size, "delta replay has gone wild" + + +def is_equal_canonical_sha(canonical_length, match, sha1): + """ + :return: True if the given lhs and rhs 20 byte binary shas + The comparison will take the canonical_length of the match sha into account, + hence the comparison will only use the last 4 bytes for uneven canonical representations + :param match: less than 20 byte sha + :param sha1: 20 byte sha""" + binary_length = canonical_length // 2 + if match[:binary_length] != sha1[:binary_length]: + return False + + if canonical_length - binary_length and \ + (byte_ord(match[-1]) ^ byte_ord(sha1[len(match) - 1])) & 0xf0: + return False + # END handle uneven canonnical length + return True + +#} END routines + + +try: + from gitdb_speedups._perf import connect_deltas +except ImportError: + pass diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/pack.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/pack.py new file mode 100644 index 0000000000000000000000000000000000000000..a38468e37f899d6448643f3a5bce7114b7dd8e80 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/pack.py @@ -0,0 +1,1031 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Contains PackIndexFile and PackFile implementations""" +import zlib + +from gitdb.exc import ( + BadObject, + AmbiguousObjectName, + UnsupportedOperation, + ParseError +) + +from gitdb.util import ( + mman, + LazyMixin, + unpack_from, + bin_to_hex, + 
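A stdlib-only sketch of the copy-opcode layout `apply_delta_data` parses above: the high bit marks a copy, bits 0-3 say which offset bytes follow, bits 4-6 which size bytes (the opcode bytes are made up):

op = bytes([0x80 | 0x01 | 0x10, 0x04, 0x05])   # one offset byte, one size byte
c, i = op[0], 1
cp_off = cp_size = 0
if c & 0x01:
    cp_off = op[i]
    i += 1
if c & 0x10:
    cp_size = op[i]
    i += 1
assert (cp_off, cp_size) == (4, 5)   # copy 5 bytes from source offset 4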
byte_ord, +) + +from gitdb.fun import ( + create_pack_object_header, + pack_object_header_info, + is_equal_canonical_sha, + type_id_to_type_map, + write_object, + stream_copy, + chunk_size, + delta_types, + OFS_DELTA, + REF_DELTA, + msb_size +) + +try: + from gitdb_speedups._perf import PackIndexFile_sha_to_index +except ImportError: + pass +# END try c module + +from gitdb.base import ( # Amazing ! + OInfo, + OStream, + OPackInfo, + OPackStream, + ODeltaStream, + ODeltaPackInfo, + ODeltaPackStream, +) + +from gitdb.stream import ( + DecompressMemMapReader, + DeltaApplyReader, + Sha1Writer, + NullStream, + FlexibleSha1Writer +) + +from struct import pack +from binascii import crc32 + +from gitdb.const import NULL_BYTE + +import tempfile +import array +import os +import sys + +__all__ = ('PackIndexFile', 'PackFile', 'PackEntity') + + +#{ Utilities + +def pack_object_at(cursor, offset, as_stream): + """ + :return: Tuple(abs_data_offset, PackInfo|PackStream) + an object of the correct type according to the type_id of the object. + If as_stream is True, the object will contain a stream, allowing the + data to be read decompressed. + :param data: random accessible data containing all required information + :parma offset: offset in to the data at which the object information is located + :param as_stream: if True, a stream object will be returned that can read + the data, otherwise you receive an info object only""" + data = cursor.use_region(offset).buffer() + type_id, uncomp_size, data_rela_offset = pack_object_header_info(data) + total_rela_offset = None # set later, actual offset until data stream begins + delta_info = None + + # OFFSET DELTA + if type_id == OFS_DELTA: + i = data_rela_offset + c = byte_ord(data[i]) + i += 1 + delta_offset = c & 0x7f + while c & 0x80: + c = byte_ord(data[i]) + i += 1 + delta_offset += 1 + delta_offset = (delta_offset << 7) + (c & 0x7f) + # END character loop + delta_info = delta_offset + total_rela_offset = i + # REF DELTA + elif type_id == REF_DELTA: + total_rela_offset = data_rela_offset + 20 + delta_info = data[data_rela_offset:total_rela_offset] + # BASE OBJECT + else: + # assume its a base object + total_rela_offset = data_rela_offset + # END handle type id + abs_data_offset = offset + total_rela_offset + if as_stream: + stream = DecompressMemMapReader(data[total_rela_offset:], False, uncomp_size) + if delta_info is None: + return abs_data_offset, OPackStream(offset, type_id, uncomp_size, stream) + else: + return abs_data_offset, ODeltaPackStream(offset, type_id, uncomp_size, delta_info, stream) + else: + if delta_info is None: + return abs_data_offset, OPackInfo(offset, type_id, uncomp_size) + else: + return abs_data_offset, ODeltaPackInfo(offset, type_id, uncomp_size, delta_info) + # END handle info + # END handle stream + + +def write_stream_to_pack(read, write, zstream, base_crc=None): + """Copy a stream as read from read function, zip it, and write the result. + Count the number of written bytes and return it + :param base_crc: if not None, the crc will be the base for all compressed data + we consecutively write and generate a crc32 from. 
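`write_stream_to_pack` threads a running crc through successive chunks; `binascii.crc32` supports exactly this incremental use, as a quick check shows:

from binascii import crc32

data = b'abcdefgh'
whole = crc32(data, 0)
chained = crc32(data[4:], crc32(data[:4], 0))
assert whole == chained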
If None, no crc will be generated + :return: tuple(no bytes read, no bytes written, crc32) crc might be 0 if base_crc + was false""" + br = 0 # bytes read + bw = 0 # bytes written + want_crc = base_crc is not None + crc = 0 + if want_crc: + crc = base_crc + # END initialize crc + + while True: + chunk = read(chunk_size) + br += len(chunk) + compressed = zstream.compress(chunk) + bw += len(compressed) + write(compressed) # cannot assume return value + + if want_crc: + crc = crc32(compressed, crc) + # END handle crc + + if len(chunk) != chunk_size: + break + # END copy loop + + compressed = zstream.flush() + bw += len(compressed) + write(compressed) + if want_crc: + crc = crc32(compressed, crc) + # END handle crc + + return (br, bw, crc) + + +#} END utilities + + +class IndexWriter(object): + + """Utility to cache index information, allowing to write all information later + in one go to the given stream + **Note:** currently only writes v2 indices""" + __slots__ = '_objs' + + def __init__(self): + self._objs = list() + + def append(self, binsha, crc, offset): + """Append one piece of object information""" + self._objs.append((binsha, crc, offset)) + + def write(self, pack_sha, write): + """Write the index file using the given write method + :param pack_sha: binary sha over the whole pack that we index + :return: sha1 binary sha over all index file contents""" + # sort for sha1 hash + self._objs.sort(key=lambda o: o[0]) + + sha_writer = FlexibleSha1Writer(write) + sha_write = sha_writer.write + sha_write(PackIndexFile.index_v2_signature) + sha_write(pack(">L", PackIndexFile.index_version_default)) + + # fanout + tmplist = list((0,) * 256) # fanout or list with 64 bit offsets + for t in self._objs: + tmplist[byte_ord(t[0][0])] += 1 + # END prepare fanout + for i in range(255): + v = tmplist[i] + sha_write(pack('>L', v)) + tmplist[i + 1] += v + # END write each fanout entry + sha_write(pack('>L', tmplist[255])) + + # sha1 ordered + # save calls, that is push them into c + sha_write(b''.join(t[0] for t in self._objs)) + + # crc32 + for t in self._objs: + sha_write(pack('>L', t[1] & 0xffffffff)) + # END for each crc + + tmplist = list() + # offset 32 + for t in self._objs: + ofs = t[2] + if ofs > 0x7fffffff: + tmplist.append(ofs) + ofs = 0x80000000 + len(tmplist) - 1 + # END hande 64 bit offsets + sha_write(pack('>L', ofs & 0xffffffff)) + # END for each offset + + # offset 64 + for ofs in tmplist: + sha_write(pack(">Q", ofs)) + # END for each offset + + # trailer + assert(len(pack_sha) == 20) + sha_write(pack_sha) + sha = sha_writer.sha(as_hex=False) + write(sha) + return sha + + +class PackIndexFile(LazyMixin): + + """A pack index provides offsets into the corresponding pack, allowing to find + locations for offsets faster.""" + + # Dont use slots as we dynamically bind functions for each version, need a dict for this + # The slots you see here are just to keep track of our instance variables + # __slots__ = ('_indexpath', '_fanout_table', '_cursor', '_version', + # '_sha_list_offset', '_crc_list_offset', '_pack_offset', '_pack_64_offset') + + # used in v2 indices + _sha_list_offset = 8 + 1024 + index_v2_signature = b'\xfftOc' + index_version_default = 2 + + def __init__(self, indexpath): + super(PackIndexFile, self).__init__() + self._indexpath = indexpath + + def close(self): + mman.force_map_handle_removal_win(self._indexpath) + self._cursor = None + + def _set_cache_(self, attr): + if attr == "_packfile_checksum": + self._packfile_checksum = self._cursor.map()[-40:-20] + elif attr == 
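A stdlib-only sketch of the cumulative fanout table `IndexWriter.write` emits above: entry b counts how many objects have a first sha byte less than or equal to b (the shas are made up):

shas = sorted([b'\x00' + b'\xaa' * 19, b'\x00' + b'\xbb' * 19,
               b'\x05' + b'\x01' * 19])

fanout = [0] * 256
for s in shas:
    fanout[s[0]] += 1
for b in range(1, 256):
    fanout[b] += fanout[b - 1]       # make the counts cumulative

assert (fanout[0], fanout[5], fanout[255]) == (2, 3, 3)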
"_packfile_checksum": + self._packfile_checksum = self._cursor.map()[-20:] + elif attr == "_cursor": + # Note: We don't lock the file when reading as we cannot be sure + # that we can actually write to the location - it could be a read-only + # alternate for instance + self._cursor = mman.make_cursor(self._indexpath).use_region() + # We will assume that the index will always fully fit into memory ! + if mman.window_size() > 0 and self._cursor.file_size() > mman.window_size(): + raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). This is a limitation of the implementation" % ( + self._indexpath, self._cursor.file_size(), mman.window_size())) + # END assert window size + else: + # now its time to initialize everything - if we are here, someone wants + # to access the fanout table or related properties + + # CHECK VERSION + mmap = self._cursor.map() + self._version = (mmap[:4] == self.index_v2_signature and 2) or 1 + if self._version == 2: + version_id = unpack_from(">L", mmap, 4)[0] + assert version_id == self._version, "Unsupported index version: %i" % version_id + # END assert version + + # SETUP FUNCTIONS + # setup our functions according to the actual version + for fname in ('entry', 'offset', 'sha', 'crc'): + setattr(self, fname, getattr(self, "_%s_v%i" % (fname, self._version))) + # END for each function to initialize + + # INITIALIZE DATA + # byte offset is 8 if version is 2, 0 otherwise + self._initialize() + # END handle attributes + + #{ Access V1 + + def _entry_v1(self, i): + """:return: tuple(offset, binsha, 0)""" + return unpack_from(">L20s", self._cursor.map(), 1024 + i * 24) + (0, ) + + def _offset_v1(self, i): + """see ``_offset_v2``""" + return unpack_from(">L", self._cursor.map(), 1024 + i * 24)[0] + + def _sha_v1(self, i): + """see ``_sha_v2``""" + base = 1024 + (i * 24) + 4 + return self._cursor.map()[base:base + 20] + + def _crc_v1(self, i): + """unsupported""" + return 0 + + #} END access V1 + + #{ Access V2 + def _entry_v2(self, i): + """:return: tuple(offset, binsha, crc)""" + return (self._offset_v2(i), self._sha_v2(i), self._crc_v2(i)) + + def _offset_v2(self, i): + """:return: 32 or 64 byte offset into pack files. 64 byte offsets will only + be returned if the pack is larger than 4 GiB, or 2^32""" + offset = unpack_from(">L", self._cursor.map(), self._pack_offset + i * 4)[0] + + # if the high-bit is set, this indicates that we have to lookup the offset + # in the 64 bit region of the file. 
The current offset ( lower 31 bits ) + # are the index into it + if offset & 0x80000000: + offset = unpack_from(">Q", self._cursor.map(), self._pack_64_offset + (offset & ~0x80000000) * 8)[0] + # END handle 64 bit offset + + return offset + + def _sha_v2(self, i): + """:return: sha at the given index of this file index instance""" + base = self._sha_list_offset + i * 20 + return self._cursor.map()[base:base + 20] + + def _crc_v2(self, i): + """:return: 4 bytes crc for the object at index i""" + return unpack_from(">L", self._cursor.map(), self._crc_list_offset + i * 4)[0] + + #} END access V2 + + #{ Initialization + + def _initialize(self): + """initialize base data""" + self._fanout_table = self._read_fanout((self._version == 2) * 8) + + if self._version == 2: + self._crc_list_offset = self._sha_list_offset + self.size() * 20 + self._pack_offset = self._crc_list_offset + self.size() * 4 + self._pack_64_offset = self._pack_offset + self.size() * 4 + # END setup base + + def _read_fanout(self, byte_offset): + """Generate a fanout table from our data""" + d = self._cursor.map() + out = list() + append = out.append + for i in range(256): + append(unpack_from('>L', d, byte_offset + i * 4)[0]) + # END for each entry + return out + + #} END initialization + + #{ Properties + def version(self): + return self._version + + def size(self): + """:return: amount of objects referred to by this index""" + return self._fanout_table[255] + + def path(self): + """:return: path to the packindexfile""" + return self._indexpath + + def packfile_checksum(self): + """:return: 20 byte sha representing the sha1 hash of the pack file""" + return self._cursor.map()[-40:-20] + + def indexfile_checksum(self): + """:return: 20 byte sha representing the sha1 hash of this index file""" + return self._cursor.map()[-20:] + + def offsets(self): + """:return: sequence of all offsets in the order in which they were written + + **Note:** return value can be random accessed, but may be immmutable""" + if self._version == 2: + # read stream to array, convert to tuple + a = array.array('I') # 4 byte unsigned int, long are 8 byte on 64 bit it appears + a.frombytes(self._cursor.map()[self._pack_offset:self._pack_64_offset]) + + # networkbyteorder to something array likes more + if sys.byteorder == 'little': + a.byteswap() + return a + else: + return tuple(self.offset(index) for index in range(self.size())) + # END handle version + + def sha_to_index(self, sha): + """ + :return: index usable with the ``offset`` or ``entry`` method, or None + if the sha was not found in this pack index + :param sha: 20 byte sha to lookup""" + first_byte = byte_ord(sha[0]) + get_sha = self.sha + lo = 0 # lower index, the left bound of the bisection + if first_byte != 0: + lo = self._fanout_table[first_byte - 1] + hi = self._fanout_table[first_byte] # the upper, right bound of the bisection + + # bisect until we have the sha + while lo < hi: + mid = (lo + hi) // 2 + mid_sha = get_sha(mid) + if sha < mid_sha: + hi = mid + elif sha == mid_sha: + return mid + else: + lo = mid + 1 + # END handle midpoint + # END bisect + return None + + def partial_sha_to_index(self, partial_bin_sha, canonical_length): + """ + :return: index as in `sha_to_index` or None if the sha was not found in this + index file + :param partial_bin_sha: an at least two bytes of a partial binary sha as bytes + :param canonical_length: length of the original hexadecimal representation of the + given partial binary sha + :raise AmbiguousObjectName:""" + if len(partial_bin_sha) < 2: + 
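The fanout table narrows the bisection in `sha_to_index` to shas sharing the first byte; a stdlib sketch with `bisect` (the shas are made up):

import bisect

shas = sorted(bytes([b]) + bytes(19) for b in (3, 5, 7, 9))
fanout = [0] * 256
for s in shas:
    fanout[s[0]] += 1
for b in range(1, 256):
    fanout[b] += fanout[b - 1]

target = bytes([7]) + bytes(19)
lo = fanout[target[0] - 1]            # left bound (target[0] != 0 here)
hi = fanout[target[0]]                # right bound
assert bisect.bisect_left(shas, target, lo, hi) == 2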
raise ValueError("Require at least 2 bytes of partial sha") + + assert isinstance(partial_bin_sha, bytes), "partial_bin_sha must be bytes" + first_byte = byte_ord(partial_bin_sha[0]) + + get_sha = self.sha + lo = 0 # lower index, the left bound of the bisection + if first_byte != 0: + lo = self._fanout_table[first_byte - 1] + hi = self._fanout_table[first_byte] # the upper, right bound of the bisection + + # fill the partial to full 20 bytes + filled_sha = partial_bin_sha + NULL_BYTE * (20 - len(partial_bin_sha)) + + # find lowest + while lo < hi: + mid = (lo + hi) // 2 + mid_sha = get_sha(mid) + if filled_sha < mid_sha: + hi = mid + elif filled_sha == mid_sha: + # perfect match + lo = mid + break + else: + lo = mid + 1 + # END handle midpoint + # END bisect + + if lo < self.size(): + cur_sha = get_sha(lo) + if is_equal_canonical_sha(canonical_length, partial_bin_sha, cur_sha): + next_sha = None + if lo + 1 < self.size(): + next_sha = get_sha(lo + 1) + if next_sha and next_sha == cur_sha: + raise AmbiguousObjectName(partial_bin_sha) + return lo + # END if we have a match + # END if we found something + return None + + if 'PackIndexFile_sha_to_index' in globals(): + # NOTE: Its just about 25% faster, the major bottleneck might be the attr + # accesses + def sha_to_index(self, sha): + return PackIndexFile_sha_to_index(self, sha) + # END redefine heavy-hitter with c version + + #} END properties + + +class PackFile(LazyMixin): + + """A pack is a file written according to the Version 2 for git packs + + As we currently use memory maps, it could be assumed that the maximum size of + packs therefor is 32 bit on 32 bit systems. On 64 bit systems, this should be + fine though. + + **Note:** at some point, this might be implemented using streams as well, or + streams are an alternate path in the case memory maps cannot be created + for some reason - one clearly doesn't want to read 10GB at once in that + case""" + + __slots__ = ('_packpath', '_cursor', '_size', '_version') + pack_signature = 0x5041434b # 'PACK' + pack_version_default = 2 + + # offset into our data at which the first object starts + first_object_offset = 3 * 4 # header bytes + footer_size = 20 # final sha + + def __init__(self, packpath): + self._packpath = packpath + + def close(self): + mman.force_map_handle_removal_win(self._packpath) + self._cursor = None + + def _set_cache_(self, attr): + # we fill the whole cache, whichever attribute gets queried first + self._cursor = mman.make_cursor(self._packpath).use_region() + + # read the header information + type_id, self._version, self._size = unpack_from(">LLL", self._cursor.map(), 0) + + # TODO: figure out whether we should better keep the lock, or maybe + # add a .keep file instead ? 
+ if type_id != self.pack_signature: + raise ParseError("Invalid pack signature: %i" % type_id) + + def _iter_objects(self, start_offset, as_stream=True): + """Handle the actual iteration of objects within this pack""" + c = self._cursor + content_size = c.file_size() - self.footer_size + cur_offset = start_offset or self.first_object_offset + + null = NullStream() + while cur_offset < content_size: + data_offset, ostream = pack_object_at(c, cur_offset, True) + # scrub the stream to the end - this decompresses the object, but yields + # the amount of compressed bytes we need to get to the next offset + + stream_copy(ostream.read, null.write, ostream.size, chunk_size) + assert ostream.stream._br == ostream.size + cur_offset += (data_offset - ostream.pack_offset) + ostream.stream.compressed_bytes_read() + + # if a stream is requested, reset it beforehand + # Otherwise return the Stream object directly, its derived from the + # info object + if as_stream: + ostream.stream.seek(0) + yield ostream + # END until we have read everything + + #{ Pack Information + + def size(self): + """:return: The amount of objects stored in this pack""" + return self._size + + def version(self): + """:return: the version of this pack""" + return self._version + + def data(self): + """ + :return: read-only data of this pack. It provides random access and usually + is a memory map. + :note: This method is unsafe as it returns a window into a file which might be larger than than the actual window size""" + # can use map as we are starting at offset 0. Otherwise we would have to use buffer() + return self._cursor.use_region().map() + + def checksum(self): + """:return: 20 byte sha1 hash on all object sha's contained in this file""" + return self._cursor.use_region(self._cursor.file_size() - 20).buffer()[:] + + def path(self): + """:return: path to the packfile""" + return self._packpath + #} END pack information + + #{ Pack Specific + + def collect_streams(self, offset): + """ + :return: list of pack streams which are required to build the object + at the given offset. The first entry of the list is the object at offset, + the last one is either a full object, or a REF_Delta stream. The latter + type needs its reference object to be locked up in an ODB to form a valid + delta chain. + If the object at offset is no delta, the size of the list is 1. + :param offset: specifies the first byte of the object within this pack""" + out = list() + c = self._cursor + while True: + ostream = pack_object_at(c, offset, True)[1] + out.append(ostream) + if ostream.type_id == OFS_DELTA: + offset = ostream.pack_offset - ostream.delta_info + else: + # the only thing we can lookup are OFFSET deltas. 
Everything + # else is either an object, or a ref delta, in the latter + # case someone else has to find it + break + # END handle type + # END while chaining streams + return out + + #} END pack specific + + #{ Read-Database like Interface + + def info(self, offset): + """Retrieve information about the object at the given file-absolute offset + + :param offset: byte offset + :return: OPackInfo instance, the actual type differs depending on the type_id attribute""" + return pack_object_at(self._cursor, offset or self.first_object_offset, False)[1] + + def stream(self, offset): + """Retrieve an object at the given file-absolute offset as stream along with its information + + :param offset: byte offset + :return: OPackStream instance, the actual type differs depending on the type_id attribute""" + return pack_object_at(self._cursor, offset or self.first_object_offset, True)[1] + + def stream_iter(self, start_offset=0): + """ + :return: iterator yielding OPackStream compatible instances, allowing + one to access the data in the pack directly. + :param start_offset: offset to the first object to iterate. If 0, iteration + starts at the very first object in the pack. + + **Note:** Iterating a pack directly is costly as the datastream has to be decompressed + to determine the bounds between the objects""" + return self._iter_objects(start_offset, as_stream=True) + + #} END Read-Database like Interface + + +class PackEntity(LazyMixin): + + """Combines the PackIndexFile and the PackFile into one, allowing the + actual objects to be resolved and iterated""" + + __slots__ = ('_index', # our index file + '_pack', # our pack file + '_offset_map' # on demand dict mapping one offset to the next consecutive one + ) + + IndexFileCls = PackIndexFile + PackFileCls = PackFile + + def __init__(self, pack_or_index_path): + """Initialize ourselves with the path to the respective pack or index file""" + basename, ext = os.path.splitext(pack_or_index_path) + self._index = self.IndexFileCls("%s.idx" % basename) # PackIndexFile instance + self._pack = self.PackFileCls("%s.pack" % basename) # corresponding PackFile instance + + def close(self): + self._index.close() + self._pack.close() + + def _set_cache_(self, attr): + # currently this can only be _offset_map + # TODO: make this a simple sorted offset array which can be bisected + # to find the respective entry, from which we can take a +1 easily + # This might be slower, but should also be much lighter in memory!
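+ # Editor's illustration (hypothetical numbers): for sorted offsets [12, 40, 100] in a + # pack whose object data ends at byte 200 (just before the 20 byte sha footer), the code + # below yields {12: 40, 40: 100, 100: 200} - each object's start offset maps to the start + # of its successor, so an entry's compressed size falls out as next_offset - offset.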
+ offsets_sorted = sorted(self._index.offsets()) + last_offset = len(self._pack.data()) - self._pack.footer_size + assert offsets_sorted, "Cannot handle empty indices" + + offset_map = None + if len(offsets_sorted) == 1: + offset_map = {offsets_sorted[0]: last_offset} + else: + iter_offsets = iter(offsets_sorted) + iter_offsets_plus_one = iter(offsets_sorted) + next(iter_offsets_plus_one) + consecutive = zip(iter_offsets, iter_offsets_plus_one) + + offset_map = dict(consecutive) + + # the last offset is not yet set + offset_map[offsets_sorted[-1]] = last_offset + # END handle offset amount + self._offset_map = offset_map + + def _sha_to_index(self, sha): + """:return: index for the given sha, or raise""" + index = self._index.sha_to_index(sha) + if index is None: + raise BadObject(sha) + return index + + def _iter_objects(self, as_stream): + """Iterate over all objects in our index and yield their OInfo or OStream instances""" + _sha = self._index.sha + _object = self._object + for index in range(self._index.size()): + yield _object(_sha(index), as_stream, index) + # END for each index + + def _object(self, sha, as_stream, index=-1): + """:return: OInfo or OStream object providing information about the given sha + :param index: if not -1, it's assumed to be the sha's index in the IndexFile""" + # it's a little bit redundant here, but it needs to be efficient + if index < 0: + index = self._sha_to_index(sha) + if sha is None: + sha = self._index.sha(index) + # END assure sha is present ( in output ) + offset = self._index.offset(index) + type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer()) + if as_stream: + if type_id not in delta_types: + packstream = self._pack.stream(offset) + return OStream(sha, packstream.type, packstream.size, packstream.stream) + # END handle non-deltas + + # produce a delta stream containing all info + # To prevent it from applying the deltas when querying the size, + # we extract it from the delta stream ourselves + streams = self.collect_streams_at_offset(offset) + dstream = DeltaApplyReader.new(streams) + + return ODeltaStream(sha, dstream.type, None, dstream) + else: + if type_id not in delta_types: + return OInfo(sha, type_id_to_type_map[type_id], uncomp_size) + # END handle non-deltas + + # deltas are a little tougher - unpack the first bytes to obtain + # the actual target size, as opposed to the size of the delta data + streams = self.collect_streams_at_offset(offset) + buf = streams[0].read(512) + offset, src_size = msb_size(buf) + offset, target_size = msb_size(buf, offset) + + # collect the streams to obtain the actual object type + if streams[-1].type_id in delta_types: + raise BadObject(sha, "Could not resolve delta object") + return OInfo(sha, streams[-1].type, target_size) + # END handle stream + + #{ Read-Database like Interface + + def info(self, sha): + """Retrieve information about the object identified by the given sha + + :param sha: 20 byte sha1 + :raise BadObject: + :return: OInfo instance, with 20 byte sha""" + return self._object(sha, False) + + def stream(self, sha): + """Retrieve an object stream along with its information as identified by the given sha + + :param sha: 20 byte sha1 + :raise BadObject: + :return: OStream instance, with 20 byte sha""" + return self._object(sha, True) + + def info_at_index(self, index): + """As ``info``, but uses a PackIndexFile compatible index to refer to the object""" + return self._object(None, False, index) + + def stream_at_index(self, index):
"""As ``stream``, but uses a PackIndexFile compatible index to refer to the + object""" + return self._object(None, True, index) + + #} END Read-Database like Interface + + #{ Interface + + def pack(self): + """:return: the underlying pack file instance""" + return self._pack + + def index(self): + """:return: the underlying pack index file instance""" + return self._index + + def is_valid_stream(self, sha, use_crc=False): + """ + Verify that the stream at the given sha is valid. + + :param use_crc: if True, the index' crc is run over the compressed stream of + the object, which is much faster than checking the sha1. It is also + more prone to unnoticed corruption or manipulation. + :param sha: 20 byte sha1 of the object whose stream to verify + whether the compressed stream of the object is valid. If it is + a delta, this only verifies that the delta's data is valid, not the + data of the actual undeltified object, as it depends on more than + just this stream. + If False, the object will be decompressed and the sha generated. It must + match the given sha + + :return: True if the stream is valid + :raise UnsupportedOperation: If the index is version 1 only + :raise BadObject: sha was not found""" + if use_crc: + if self._index.version() < 2: + raise UnsupportedOperation("Version 1 indices do not contain crc's, verify by sha instead") + # END handle index version + + index = self._sha_to_index(sha) + offset = self._index.offset(index) + next_offset = self._offset_map[offset] + crc_value = self._index.crc(index) + + # create the current crc value, on the compressed object data + # Read it in chunks, without copying the data + crc_update = zlib.crc32 + pack_data = self._pack.data() + cur_pos = offset + this_crc_value = 0 + while cur_pos < next_offset: + rbound = min(cur_pos + chunk_size, next_offset) + size = rbound - cur_pos + this_crc_value = crc_update(pack_data[cur_pos:cur_pos + size], this_crc_value) + cur_pos += size + # END window size loop + + # crc returns signed 32 bit numbers, the AND op forces it into unsigned + # mode ... wow, sneaky, from dulwich. + return (this_crc_value & 0xffffffff) == crc_value + else: + shawriter = Sha1Writer() + stream = self._object(sha, as_stream=True) + # write a loose object, which is the basis for the sha + write_object(stream.type, stream.size, stream.read, shawriter.write) + + assert shawriter.sha(as_hex=False) == sha + return shawriter.sha(as_hex=False) == sha + # END handle crc/sha verification + return True + + def info_iter(self): + """ + :return: Iterator over all objects in this pack. The iterator yields + OInfo instances""" + return self._iter_objects(as_stream=False) + + def stream_iter(self): + """ + :return: iterator over all objects in this pack. The iterator yields + OStream instances""" + return self._iter_objects(as_stream=True) + + def collect_streams_at_offset(self, offset): + """ + As the version in the PackFile, but can resolve REF deltas within this pack + For more info, see ``collect_streams`` + + :param offset: offset into the pack file at which the object can be found""" + streams = self._pack.collect_streams(offset) + + # try to resolve the last one if needed. It is assumed to be either + # a REF delta, or a base object, as OFFSET deltas are resolved by the pack + if streams[-1].type_id == REF_DELTA: + stream = streams[-1] + while stream.type_id in delta_types: + if stream.type_id == REF_DELTA: + # smmap can return memory view objects, which can't be compared as buffers/bytes can ... 
+ if isinstance(stream.delta_info, memoryview): + sindex = self._index.sha_to_index(stream.delta_info.tobytes()) + else: + sindex = self._index.sha_to_index(stream.delta_info) + if sindex is None: + break + stream = self._pack.stream(self._index.offset(sindex)) + streams.append(stream) + else: + # must be another OFS DELTA - this could happen if a REF + # delta we resolved previously points to an OFS delta. Who + # would do that ;) ? We can handle it though + stream = self._pack.stream(stream.delta_info) + streams.append(stream) + # END handle ref delta + # END resolve ref streams + # END resolve streams + + return streams + + def collect_streams(self, sha): + """ + As ``PackFile.collect_streams``, but takes a sha instead of an offset. + Additionally, ref_delta streams will be resolved within this pack. + If this is not possible, the stream will be left alone, hence it is advised + to check for unresolved ref-deltas and resolve them before attempting to + construct a delta stream. + + :param sha: 20 byte sha1 specifying the object whose related streams you want to collect + :return: list of streams, first being the actual object delta, the last being + a possibly unresolved base object. + :raise BadObject:""" + return self.collect_streams_at_offset(self._index.offset(self._sha_to_index(sha))) + + @classmethod + def write_pack(cls, object_iter, pack_write, index_write=None, + object_count=None, zlib_compression=zlib.Z_BEST_SPEED): + """ + Create a new pack by putting all objects obtained from the object_iter iterator + into a pack which is written using the pack_write method. + The respective index is produced as well if index_write is not None. + + :param object_iter: iterator yielding odb output objects + :param pack_write: function to receive strings to write into the pack stream + :param index_write: if not None, the function writes the index file corresponding + to the pack. + :param object_count: if you can provide the amount of objects in your iteration, + this would be the place to put it. Otherwise we have to pre-iterate and store + all items into a list to get the number, which uses more memory than necessary. + :param zlib_compression: the zlib compression level to use + :return: tuple(pack_sha, index_binsha) - binary shas over all the contents of the pack + and over all contents of the index. If index_write was None, index_binsha will be None + + **Note:** The destination of the write functions is up to the user.
It could + be a socket, or a file for instance + + **Note:** writes only undeltified objects""" + objs = object_iter + if not object_count: + if not isinstance(object_iter, (tuple, list)): + objs = list(object_iter) + # END handle list type + object_count = len(objs) + # END handle object count + + pack_writer = FlexibleSha1Writer(pack_write) + pwrite = pack_writer.write + ofs = 0 # current offset into the pack file + index = None + wants_index = index_write is not None + + # write header + pwrite(pack('>LLL', PackFile.pack_signature, PackFile.pack_version_default, object_count)) + ofs += 12 + + if wants_index: + index = IndexWriter() + # END handle index header + + actual_count = 0 + for obj in objs: + actual_count += 1 + crc = 0 + + # object header + hdr = create_pack_object_header(obj.type_id, obj.size) + if wants_index: + crc = crc32(hdr) + else: + crc = None + # END handle crc + pwrite(hdr) + + # data stream + zstream = zlib.compressobj(zlib_compression) + ostream = obj.stream + br, bw, crc = write_stream_to_pack(ostream.read, pwrite, zstream, base_crc=crc) + assert(br == obj.size) + if wants_index: + index.append(obj.binsha, crc, ofs) + # END handle index + + ofs += len(hdr) + bw + if actual_count == object_count: + break + # END abort once we are done + # END for each object + + if actual_count != object_count: + raise ValueError( + "Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count)) + # END count assertion + + # write footer + pack_sha = pack_writer.sha(as_hex=False) + assert len(pack_sha) == 20 + pack_write(pack_sha) + ofs += len(pack_sha) # just for completeness ;) + + index_sha = None + if wants_index: + index_sha = index.write(pack_sha, index_write) + # END handle index + + return pack_sha, index_sha + + @classmethod + def create(cls, object_iter, base_dir, object_count=None, zlib_compression=zlib.Z_BEST_SPEED): + """Create a new on-disk entity comprised of a properly named pack file and a properly named + and corresponding index file. The pack contains all OStream objects contained in object_iter.
+ :param base_dir: directory which is to contain the files + :return: PackEntity instance initialized with the new pack + + **Note:** for more information on the other parameters see the write_pack method""" + pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir) + index_fd, index_path = tempfile.mkstemp('', 'index', base_dir) + pack_write = lambda d: os.write(pack_fd, d) + index_write = lambda d: os.write(index_fd, d) + + pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression) + os.close(pack_fd) + os.close(index_fd) + + fmt = "pack-%s.%s" + new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'pack')) + new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'idx')) + os.rename(pack_path, new_pack_path) + os.rename(index_path, new_index_path) + + return cls(new_pack_path) + + #} END interface diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/stream.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..d58d1a63ee0d686438cb9518285db0ae1ee8b325 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/stream.py @@ -0,0 +1,730 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php + +from io import BytesIO + +import mmap +import os +import sys +import zlib + +from gitdb.fun import ( + msb_size, + stream_copy, + apply_delta_data, + connect_deltas, + delta_types +) + +from gitdb.util import ( + allocate_memory, + LazyMixin, + make_sha, + write, + close, +) + +from gitdb.const import NULL_BYTE, BYTE_SPACE +from gitdb.utils.encoding import force_bytes + +has_perf_mod = False +try: + from gitdb_speedups._perf import apply_delta as c_apply_delta + has_perf_mod = True +except ImportError: + pass + +__all__ = ('DecompressMemMapReader', 'FDCompressedSha1Writer', 'DeltaApplyReader', + 'Sha1Writer', 'FlexibleSha1Writer', 'ZippedStoreShaWriter', + 'FDStream', 'NullStream') + + +#{ RO Streams + +class DecompressMemMapReader(LazyMixin): + + """Reads data in chunks from a memory map and decompresses it. The client sees + only the uncompressed data, the respective file-like read calls handle on-demand + buffered decompression accordingly + + A constraint on the total size of bytes is activated, simulating + a logical file within a possibly larger physical memory area + + To read efficiently, you clearly don't want to read individual bytes, instead, + read a few kilobytes at least. + + **Note:** The chunk-size should be carefully selected as it will involve quite a bit + of string copying due to the way the zlib is implemented. It's very wasteful, + hence we try to find a good tradeoff between allocation time and number of + times we actually allocate. A custom zlib implementation would be good here + to better support streamed reading - it would only need to keep the mmap + and decompress it into chunks, that's all ...
""" + __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close', + '_cbr', '_phi') + + max_read_size = 512 * 1024 # currently unused + + def __init__(self, m, close_on_deletion, size=None): + """Initialize with mmap for stream reading + :param m: must be content data - use new if you have object data and no size""" + self._m = m + self._zip = zlib.decompressobj() + self._buf = None # buffer of decompressed bytes + self._buflen = 0 # length of bytes in buffer + if size is not None: + self._s = size # size of uncompressed data to read in total + self._br = 0 # num uncompressed bytes read + self._cws = 0 # start byte of compression window + self._cwe = 0 # end byte of compression window + self._cbr = 0 # number of compressed bytes read + self._phi = False # is True if we parsed the header info + self._close = close_on_deletion # close the memmap on deletion ? + + def _set_cache_(self, attr): + assert attr == '_s' + # only happens for size, which is a marker to indicate we still + # have to parse the header from the stream + self._parse_header_info() + + def __del__(self): + self.close() + + def _parse_header_info(self): + """If this stream contains object data, parse the header info and skip the + stream to a point where each read will yield object content + + :return: parsed type_string, size""" + # read header + # should really be enough, cgit uses 8192 I believe + # And for good reason !! This needs to be that high for the header to be read correctly in all cases + maxb = 8192 + self._s = maxb + hdr = self.read(maxb) + hdrend = hdr.find(NULL_BYTE) + typ, size = hdr[:hdrend].split(BYTE_SPACE) + size = int(size) + self._s = size + + # adjust internal state to match actual header length that we ignore + # The buffer will be depleted first on future reads + self._br = 0 + hdrend += 1 + self._buf = BytesIO(hdr[hdrend:]) + self._buflen = len(hdr) - hdrend + + self._phi = True + + return typ, size + + #{ Interface + + @classmethod + def new(self, m, close_on_deletion=False): + """Create a new DecompressMemMapReader instance for acting as a read-only stream + This method parses the object header from m and returns the parsed + type and size, as well as the created stream instance. + + :param m: memory map on which to operate. It must be object data ( header + contents ) + :param close_on_deletion: if True, the memory map will be closed once we are + being deleted""" + inst = DecompressMemMapReader(m, close_on_deletion, 0) + typ, size = inst._parse_header_info() + return typ, size, inst + + def data(self): + """:return: random access compatible data we are working on""" + return self._m + + def close(self): + """Close our underlying stream of compressed bytes if this was allowed during initialization + :return: True if we closed the underlying stream + :note: can be called safely + """ + if self._close: + if hasattr(self._m, 'close'): + self._m.close() + self._close = False + # END handle resource freeing + + def compressed_bytes_read(self): + """ + :return: number of compressed bytes read. This includes the bytes it + took to decompress the header ( if there was one )""" + # ABSTRACT: When decompressing a byte stream, it can be that the first + # x bytes which were requested match the first x bytes in the loosely + # compressed datastream. This is the worst-case assumption that the reader + # does, it assumes that it will get at least X bytes from X compressed bytes + # in call cases. 
+ # The caveat is that the object, according to our known uncompressed size, + # is already complete, but there are still some bytes left in the compressed + # stream that contribute to the amount of compressed bytes. + # How can we know that we are truly done, and have read all bytes we need + # to read ? + # Without help, we cannot know, as we need to obtain the status of the + # decompression. If it is not finished, we need to decompress more data + # until it is finished, to yield the actual number of compressed bytes + # belonging to the decompressed object + # We are using a custom zlib module for this, if it's not present, + # we try to put in additional bytes up for decompression if feasible + # and check for the unused_data. + + # Only scrub the stream forward if we are officially done with the + # bytes we were to have. + if self._br == self._s and not self._zip.unused_data: + # manipulate the bytes-read to allow our own read method to continue + # but keep the window at its current position + self._br = 0 + if hasattr(self._zip, 'status'): + while self._zip.status == zlib.Z_OK: + self.read(mmap.PAGESIZE) + # END scrub-loop custom zlib + else: + # pass in additional pages, until we have unused data + while not self._zip.unused_data and self._cbr != len(self._m): + self.read(mmap.PAGESIZE) + # END scrub-loop default zlib + # END handle stream scrubbing + + # reset bytes read, just to be sure + self._br = self._s + # END handle stream scrubbing + + # unused data ends up in the unconsumed tail, which was removed + # from the count already + return self._cbr + + #} END interface + + def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)): + """Allows resetting the stream to restart reading + :raise ValueError: If offset and whence are not 0""" + if offset != 0 or whence != getattr(os, 'SEEK_SET', 0): + raise ValueError("Can only seek to position 0") + # END handle offset + + self._zip = zlib.decompressobj() + self._br = self._cws = self._cwe = self._cbr = 0 + if self._phi: + self._phi = False + del(self._s) # trigger header parsing on first access + # END skip header + + def read(self, size=-1): + if size < 1: + size = self._s - self._br + else: + size = min(size, self._s - self._br) + # END clamp size + + if size == 0: + return bytes() + # END handle depletion + + # deplete the buffer, then just continue using the decompress object + # which has an own buffer. We just need this to transparently parse the + # header from the zlib stream + dat = bytes() + if self._buf: + if self._buflen >= size: + # have enough data + dat = self._buf.read(size) + self._buflen -= size + self._br += size + return dat + else: + dat = self._buf.read() # ouch, duplicates data + size -= self._buflen + self._br += self._buflen + + self._buflen = 0 + self._buf = None + # END handle buffer len + # END handle buffer + + # decompress some data + # Abstract: zlib needs to operate on chunks of our memory map ( which may + # be large ), as it will otherwise and always fill in the 'unconsumed_tail' + # attribute which possibly reads our whole map to the end, forcing + # everything to be read from disk even though just a portion was requested. + # As this would be a no-go, we workaround it by passing only chunks of data, + # moving the window into the memory map along as we decompress, which keeps + # the tail smaller than our chunk-size. This causes 'only' the chunk to be + # copied once, and another copy of a part of it when it creates the unconsumed + # tail.
We have to use it to hand in the appropriate amount of bytes during + # the next read. + tail = self._zip.unconsumed_tail + if tail: + # move the window, make it as large as size demands. For code-clarity, + # we just take the chunk from our map again instead of reusing the unconsumed + # tail. The latter one would save some memory copying, but we could end up + # with not getting enough data uncompressed, so we had to sort that out as well. + # Now we just assume the worst case, hence the data is uncompressed and the window + # needs to be as large as the uncompressed bytes we want to read. + self._cws = self._cwe - len(tail) + self._cwe = self._cws + size + else: + cws = self._cws + self._cws = self._cwe + self._cwe = cws + size + # END handle tail + + # if window is too small, make it larger so zip can decompress something + if self._cwe - self._cws < 8: + self._cwe = self._cws + 8 + # END adjust winsize + + # takes a slice, but doesn't copy the data, it says ... + indata = self._m[self._cws:self._cwe] + + # get the actual window end to be sure we don't use it for computations + self._cwe = self._cws + len(indata) + dcompdat = self._zip.decompress(indata, size) + # update the amount of compressed bytes read + # We feed possibly overlapping chunks, which is why the unconsumed tail + # has to be taken into consideration, as well as the unused data + # if we hit the end of the stream + # NOTE: Behavior changed from Python 2.7 onward, which requires special handling to make the tests work properly. + # They are thorough, and I assume it is truly working. + # Why is this logic as convoluted as it is ? Please look at the table in + # https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results. + # Basically, on py2.6, you want to use branch 1, whereas on all other python versions, the second branch + # will be the one that works. + # However, the zlib versions as well as the platform check are used to further match the entries in the + # table in the github issue. This is it ... it was the only way I could make this work everywhere. + # It's certainly going to bite us in the future ... . + if zlib.ZLIB_VERSION in ('1.2.7', '1.2.5') and not sys.platform == 'darwin': + unused_datalen = len(self._zip.unconsumed_tail) + else: + unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data) + # # end handle very special case ... + + self._cbr += len(indata) - unused_datalen + self._br += len(dcompdat) + + if dat: + dcompdat = dat + dcompdat + # END prepend our cached data + + # it can happen, depending on the compression, that we get less bytes + # than ordered as it needs the final portion of the data as well. + # Recursively resolve that. + # Note: dcompdat can be empty even though we still appear to have bytes + # to read, if we are called by compressed_bytes_read - it manipulates + # us to empty the stream + if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s: + dcompdat += self.read(size - len(dcompdat)) + # END handle special case + return dcompdat + + +class DeltaApplyReader(LazyMixin): + + """A reader which dynamically applies pack deltas to a base object, keeping the + memory demands to a minimum. + + The size of the final object is only obtainable once all deltas have been + applied, unless it is retrieved from a pack index.
+ + The uncompressed Delta has the following layout (MSB denoting a dynamic size + encoded with most-significant-bit continuation): + + * MSB Source Size - the size of the base against which the delta was created + * MSB Target Size - the size of the resulting data after the delta was applied + * A list of one byte commands (cmd) which are followed by a specific protocol: + + * cmd & 0x80 - copy source_data[offset:offset+size] + + * Followed by an encoded offset into the source data + * Followed by an encoded size of the chunk to copy + + * cmd & 0x7f - insert + + * insert cmd bytes from the delta buffer into the output stream + + * cmd == 0 - invalid operation ( or error in delta stream ) + """ + __slots__ = ( + "_bstream", # base stream to which to apply the deltas + "_dstreams", # tuple of delta stream readers + "_mm_target", # memory map of the delta-applied data + "_size", # actual number of bytes in _mm_target + "_br" # number of bytes read + ) + + #{ Configuration + k_max_memory_move = 250 * 1000 * 1000 + #} END configuration + + def __init__(self, stream_list): + """Initialize this instance with a list of streams, the first stream being + the delta to apply on top of all following deltas, the last stream being the + base object onto which to apply the deltas""" + assert len(stream_list) > 1, "Need at least one delta and one base stream" + + self._bstream = stream_list[-1] + self._dstreams = tuple(stream_list[:-1]) + self._br = 0 + + def _set_cache_too_slow_without_c(self, attr): + # the direct algorithm is fastest and most direct if there is only one + # delta. Also, the extra overhead might not be worth it for items smaller + # than X - definitely the case in python, every function call costs + # huge amounts of time + # if len(self._dstreams) * self._bstream.size < self.k_max_memory_move: + if len(self._dstreams) == 1: + return self._set_cache_brute_(attr) + + # Aggregate all deltas into one delta in reverse order. Hence we take + # the last delta, and reverse-merge its ancestor delta, until we receive + # the final delta data stream. + dcl = connect_deltas(self._dstreams) + + # call len directly, as the (optional) c version doesn't implement the sequence + # protocol + if dcl.rbound() == 0: + self._size = 0 + self._mm_target = allocate_memory(0) + return + # END handle empty list + + self._size = dcl.rbound() + self._mm_target = allocate_memory(self._size) + + bbuf = allocate_memory(self._bstream.size) + stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE) + + # APPLY CHUNKS + write = self._mm_target.write + dcl.apply(bbuf, write) + + self._mm_target.seek(0) + + def _set_cache_brute_(self, attr): + """If we are here, we apply the actual deltas""" + # TODO: There should be a special case if there is only one stream + # Then the default-git algorithm should perform a tad faster, as the + # delta is not peeked into, causing less overhead.
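+ # Editor's illustration of the MSB size headers read below (assuming git's little-endian + # base-128 varint with a continuation bit): a size of 300 is encoded as b'\xac\x02', since + # 0xac carries the low 7 bits (44) with the continuation bit set and 0x02 contributes + # 2 << 7 == 256; msb_size would report 2 bytes consumed and a decoded size of 300.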
+ buffer_info_list = list() + max_target_size = 0 + for dstream in self._dstreams: + buf = dstream.read(512) # read the header information + X + offset, src_size = msb_size(buf) + offset, target_size = msb_size(buf, offset) + buffer_info_list.append((buf[offset:], offset, src_size, target_size)) + max_target_size = max(max_target_size, target_size) + # END for each delta stream + + # sanity check - the first delta to apply should have the same source + # size as our actual base stream + base_size = self._bstream.size + target_size = max_target_size + + # if we have more than 1 delta to apply, we will swap buffers, hence we must + # assure that all buffers we use are large enough to hold all the results + if len(self._dstreams) > 1: + base_size = target_size = max(base_size, max_target_size) + # END adjust buffer sizes + + # Allocate private memory map big enough to hold the first base buffer + # We need random access to it + bbuf = allocate_memory(base_size) + stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE) + + # allocate memory map large enough for the largest (intermediate) target + # We will use it as scratch space for all delta ops. If the final + # target buffer is smaller than our allocated space, we just use parts + # of it upon return. + tbuf = allocate_memory(target_size) + + # for each delta to apply, memory map the decompressed delta and + # work on the op-codes to reconstruct everything. + # For the actual copying, we use a seek and write pattern of buffer + # slices. + final_target_size = None + for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)): + # allocate a buffer to hold all delta data - fill in the data for + # fast access. We do this as we know that reading individual bytes + # from our stream would be slower than necessary ( although possible ) + # The dbuf buffer contains commands after the first two MSB sizes, the + # offset specifies the amount of bytes read to get the sizes. + ddata = allocate_memory(dstream.size - offset) + ddata.write(dbuf) + # read the rest from the stream. The size we give is larger than necessary + stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE) + + ####################################################################### + if 'c_apply_delta' in globals(): + c_apply_delta(bbuf, ddata, tbuf) + else: + apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write) + ####################################################################### + + # finally, swap out source and target buffers. The target is now the + # base for the next delta to apply + bbuf, tbuf = tbuf, bbuf + bbuf.seek(0) + tbuf.seek(0) + final_target_size = target_size + # END for each delta to apply + + # it's already seeked to 0, constrain it to the actual size + # NOTE: at the end of the loop, it swaps buffers, hence our target buffer + # is not tbuf, but bbuf !
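+ # Editor's example of the swap above: for two deltas D2(D1(base)), pass one applies D1 + # to the base into tbuf and swaps; pass two reads that intermediate result from bbuf and + # applies D2 into the fresh tbuf, swapping once more - so the final data ends up in bbuf.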
+ self._mm_target = bbuf + self._size = final_target_size + + #{ Configuration + if not has_perf_mod: + _set_cache_ = _set_cache_brute_ + else: + _set_cache_ = _set_cache_too_slow_without_c + + #} END configuration + + def read(self, count=0): + bl = self._size - self._br # bytes left + if count < 1 or count > bl: + count = bl + # NOTE: we could check for certain size limits, and possibly + # return buffers instead of strings to prevent byte copying + data = self._mm_target.read(count) + self._br += len(data) + return data + + def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)): + """Allows resetting the stream to restart reading + + :raise ValueError: If offset and whence are not 0""" + if offset != 0 or whence != getattr(os, 'SEEK_SET', 0): + raise ValueError("Can only seek to position 0") + # END handle offset + self._br = 0 + self._mm_target.seek(0) + + #{ Interface + + @classmethod + def new(cls, stream_list): + """ + Convert the given list of streams into a stream which resolves deltas + when reading from it. + + :param stream_list: two or more stream objects, first stream is a Delta + to the object that you want to resolve, followed by N additional delta + streams. The list's last stream must be a non-delta stream. + + :return: Non-Delta OPackStream object whose stream can be used to obtain + the decompressed resolved data + :raise ValueError: if the stream list cannot be handled""" + if len(stream_list) < 2: + raise ValueError("Need at least two streams") + # END single object special handling + + if stream_list[-1].type_id in delta_types: + raise ValueError( + "Cannot resolve deltas if there is no base object stream, last one was type: %s" % stream_list[-1].type) + # END check stream + return cls(stream_list) + + #} END interface + + #{ OInfo like Interface + + @property + def type(self): + return self._bstream.type + + @property + def type_id(self): + return self._bstream.type_id + + @property + def size(self): + """:return: number of uncompressed bytes in the stream""" + return self._size + + #} END oinfo like interface + + +#} END RO streams + + +#{ W Streams + +class Sha1Writer(object): + + """Simple stream writer which produces a sha whenever you like as it digests + everything it is supposed to write""" + __slots__ = "sha1" + + def __init__(self): + self.sha1 = make_sha() + + #{ Stream Interface + + def write(self, data): + """:raise IOError: If not all bytes could be written + :param data: byte object + :return: length of incoming data""" + + self.sha1.update(data) + + return len(data) + + #} END stream interface + + #{ Interface + + def sha(self, as_hex=False): + """:return: sha so far + :param as_hex: if True, sha will be hex-encoded, binary otherwise""" + if as_hex: + return self.sha1.hexdigest() + return self.sha1.digest() + + #} END interface + + +class FlexibleSha1Writer(Sha1Writer): + + """Writer producing a sha1 while passing on the written bytes to the given + write function""" + __slots__ = 'writer' + + def __init__(self, writer): + Sha1Writer.__init__(self) + self.writer = writer + + def write(self, data): + Sha1Writer.write(self, data) + self.writer(data) + + +class ZippedStoreShaWriter(Sha1Writer): + + """Remembers everything someone writes to it and generates a sha""" + __slots__ = ('buf', 'zip') + + def __init__(self): + Sha1Writer.__init__(self) + self.buf = BytesIO() + self.zip = zlib.compressobj(zlib.Z_BEST_SPEED) + + def __getattr__(self, attr): + return getattr(self.buf, attr) + + def write(self, data): + alen = Sha1Writer.write(self, data) +
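+ # editor's note: the sha above covers the raw input bytes, while only the deflated + # form is retained in the in-memory buffer written next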
self.buf.write(self.zip.compress(data)) + + return alen + + def close(self): + self.buf.write(self.zip.flush()) + + def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)): + """Seeking currently only supports rewinding written data + Multiple writes are not supported""" + if offset != 0 or whence != getattr(os, 'SEEK_SET', 0): + raise ValueError("Can only seek to position 0") + # END handle offset + self.buf.seek(0) + + def getvalue(self): + """:return: string value from the current stream position to the end""" + return self.buf.getvalue() + + +class FDCompressedSha1Writer(Sha1Writer): + + """Digests data written to it, making the sha available, then compresses the + data and writes it to the file descriptor + + **Note:** operates on raw file descriptors + **Note:** for this to work, you have to use the close-method of this instance""" + __slots__ = ("fd", "sha1", "zip") + + # default exception + exc = IOError("Failed to write all bytes to filedescriptor") + + def __init__(self, fd): + super(FDCompressedSha1Writer, self).__init__() + self.fd = fd + self.zip = zlib.compressobj(zlib.Z_BEST_SPEED) + + #{ Stream Interface + + def write(self, data): + """:raise IOError: If not all bytes could be written + :return: length of incoming data""" + self.sha1.update(data) + cdata = self.zip.compress(data) + bytes_written = write(self.fd, cdata) + + if bytes_written != len(cdata): + raise self.exc + + return len(data) + + def close(self): + remainder = self.zip.flush() + if write(self.fd, remainder) != len(remainder): + raise self.exc + return close(self.fd) + + #} END stream interface + + +class FDStream(object): + + """A simple wrapper providing the most basic functions on a file descriptor + with the fileobject interface. Cannot use os.fdopen as the resulting stream + takes ownership""" + __slots__ = ("_fd", '_pos') + + def __init__(self, fd): + self._fd = fd + self._pos = 0 + + def write(self, data): + self._pos += len(data) + os.write(self._fd, data) + + def read(self, count=0): + if count == 0: + # no file path is stored on this instance - stat the descriptor instead + count = os.fstat(self._fd).st_size + # END handle read everything + + bytes = os.read(self._fd, count) + self._pos += len(bytes) + return bytes + + def fileno(self): + return self._fd + + def tell(self): + return self._pos + + def close(self): + close(self._fd) + + +class NullStream(object): + + """A stream that does nothing but provide a stream interface.
+ Use it like /dev/null""" + __slots__ = tuple() + + def read(self, size=0): + return b'' + + def close(self): + pass + + def write(self, data): + return len(data) + + +#} END W streams diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/typ.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/typ.py new file mode 100644 index 0000000000000000000000000000000000000000..98d15f3ecdb70a1ebba00a3ea22fc0153a353d39 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/typ.py @@ -0,0 +1,10 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +"""Module containing information about types known to the database""" + +str_blob_type = b'blob' +str_commit_type = b'commit' +str_tree_type = b'tree' +str_tag_type = b'tag' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/util.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/util.py new file mode 100644 index 0000000000000000000000000000000000000000..c4cafecc61cbdf88c2c7582106e4f964f66f292f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/gitdb/util.py @@ -0,0 +1,398 @@ +# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors +# +# This module is part of GitDB and is released under +# the New BSD License: http://www.opensource.org/licenses/bsd-license.php +import binascii +import os +import mmap +import sys
import time +import errno + +from io import BytesIO + +from smmap import ( + StaticWindowMapManager, + SlidingWindowMapManager, + SlidingWindowMapBuffer +) + +# initialize our global memory manager instance +# Use it to free cached (and unused) resources. +mman = SlidingWindowMapManager() +# END handle mman + +import hashlib + +try: + from struct import unpack_from +except ImportError: + from struct import unpack, calcsize + __calcsize_cache = dict() + + def unpack_from(fmt, data, offset=0): + try: + size = __calcsize_cache[fmt] + except KeyError: + size = calcsize(fmt) + __calcsize_cache[fmt] = size + # END exception handling + return unpack(fmt, data[offset: offset + size]) + # END own unpack_from implementation + + +#{ Aliases + +hex_to_bin = binascii.a2b_hex +bin_to_hex = binascii.b2a_hex + +# errors +ENOENT = errno.ENOENT + +# os shortcuts +exists = os.path.exists +mkdir = os.mkdir +chmod = os.chmod +isdir = os.path.isdir +isfile = os.path.isfile +rename = os.rename +dirname = os.path.dirname +basename = os.path.basename +join = os.path.join +read = os.read +write = os.write +close = os.close +fsync = os.fsync + + +def _retry(func, *args, **kwargs): + # Wrapper around functions that are problematic on Windows: sometimes + # the OS or another process still has a handle on the file + if sys.platform == "win32": + for _ in range(10): + try: + return func(*args, **kwargs) + except Exception: + time.sleep(0.1) + return func(*args, **kwargs) + else: + return func(*args, **kwargs) + + +def remove(*args, **kwargs): + return _retry(os.remove, *args, **kwargs) + + +# Backwards compatibility imports +from gitdb.const import ( + NULL_BIN_SHA, + NULL_HEX_SHA +) + +#} END Aliases + +#{ compatibility stuff ... + + +class _RandomAccessBytesIO(object): + + """Wrapper to provide required functionality in case memory maps cannot or may + not be used.
This is only really required in python 2.4""" + __slots__ = '_sio' + + def __init__(self, buf=''): + self._sio = BytesIO(buf) + + def __getattr__(self, attr): + return getattr(self._sio, attr) + + def __len__(self): + return len(self.getvalue()) + + def __getitem__(self, i): + return self.getvalue()[i] + + def __getslice__(self, start, end): + return self.getvalue()[start:end] + + +def byte_ord(b): + """ + Return the integer representation of the byte string. This supports Python + 3 byte arrays as well as standard strings. + """ + try: + return ord(b) + except TypeError: + return b + +#} END compatibility stuff ... + +#{ Routines + + +def make_sha(source=''.encode("ascii")): + """A python2.4 workaround for the sha/hashlib module fiasco + + **Note** From the dulwich project """ + try: + return hashlib.sha1(source) + except NameError: + import sha + sha1 = sha.sha(source) + return sha1 + + +def allocate_memory(size): + """:return: a file-protocol accessible memory block of the given size""" + if size == 0: + return _RandomAccessBytesIO(b'') + # END handle empty chunks gracefully + + try: + return mmap.mmap(-1, size) # read-write by default + except EnvironmentError: + # set up real memory instead + # this of course may fail if the amount of memory is not available in + # one chunk - would only be the case in python 2.4, being more likely on + # 32 bit systems. + return _RandomAccessBytesIO(b"\0" * size) + # END handle memory allocation + + +def file_contents_ro(fd, stream=False, allow_mmap=True): + """:return: read-only contents of the file represented by the file descriptor fd + + :param fd: file descriptor opened for reading + :param stream: if False, random access is provided, otherwise the stream interface + is provided. + :param allow_mmap: if True, it's allowed to map the contents into memory, which + allows large files to be handled and accessed efficiently. The file-descriptor + will change its position if this is False""" + try: + if allow_mmap: + # supports stream and random access + try: + return mmap.mmap(fd, 0, access=mmap.ACCESS_READ) + except EnvironmentError: + # python 2.4 issue, 0 wants to be the actual size + return mmap.mmap(fd, os.fstat(fd).st_size, access=mmap.ACCESS_READ) + # END handle python 2.4 + except OSError: + pass + # END exception handling + + # read manually + contents = os.read(fd, os.fstat(fd).st_size) + if stream: + return _RandomAccessBytesIO(contents) + return contents + + +def file_contents_ro_filepath(filepath, stream=False, allow_mmap=True, flags=0): + """Get the file contents at filepath as fast as possible + + :return: random access compatible memory of the given filepath + :param stream: see ``file_contents_ro`` + :param allow_mmap: see ``file_contents_ro`` + :param flags: additional flags to pass to os.open + :raise OSError: If the file could not be opened + + **Note** for now we don't try to use O_NOATIME directly as the right value needs to be + shared per database in fact.
It only makes a real difference for loose object + databases anyway, and they use it with the help of the ``flags`` parameter""" + fd = os.open(filepath, os.O_RDONLY | getattr(os, 'O_BINARY', 0) | flags) + try: + return file_contents_ro(fd, stream, allow_mmap) + finally: + close(fd) + # END assure file is closed + + +def sliding_ro_buffer(filepath, flags=0): + """ + :return: a buffer compatible object which uses our mapped memory manager internally + ready to read the whole given filepath""" + return SlidingWindowMapBuffer(mman.make_cursor(filepath), flags=flags) + + +def to_hex_sha(sha): + """:return: hexified version of sha""" + if len(sha) == 40: + return sha + return bin_to_hex(sha) + + +def to_bin_sha(sha): + if len(sha) == 20: + return sha + return hex_to_bin(sha) + + +#} END routines + + +#{ Utilities + +class LazyMixin(object): + + """ + Base class providing an interface to lazily retrieve attribute values upon + first access. If slots are used, memory will only be reserved once the attribute + is actually accessed and retrieved the first time. All future accesses will + return the cached value as stored in the instance's dict or slot. + """ + + __slots__ = tuple() + + def __getattr__(self, attr): + """ + Whenever an attribute is requested that we do not know, we allow it + to be created and set. Next time the same attribute is requested, it is simply + returned from our dict/slots. """ + self._set_cache_(attr) + # will raise in case the cache was not created + return object.__getattribute__(self, attr) + + def _set_cache_(self, attr): + """ + This method should be overridden in the derived class. + It should check whether the attribute named by attr can be created + and cached. Do nothing if you do not know the attribute, or delegate + to the base class. + + The derived class may create as many additional attributes as it deems + necessary in case a git command returns more information than represented + in the single attribute.""" + pass + + +class LockedFD(object): + + """ + This class facilitates a safe read and write operation to a file on disk. + If we write to 'file', we obtain a lock file at 'file.lock' and write to + that instead. If we succeed, the lock file will be renamed to overwrite + the original file. + + When reading, we obtain a lock file as well, to prevent other writers from + succeeding while we are reading the file. + + This type handles errors correctly in that it will assure a consistent state + on destruction. + + **note** with this setup, parallel reading is not possible""" + __slots__ = ("_filepath", '_fd', '_write') + + def __init__(self, filepath): + """Initialize an instance with the given filepath""" + self._filepath = filepath + self._fd = None + self._write = None # if True, we write a file + + def __del__(self): + # will do nothing if the file descriptor is already closed + if self._fd is not None: + self.rollback() + + def _lockfilepath(self): + return "%s.lock" % self._filepath + + def open(self, write=False, stream=False): + """ + Open the file descriptor for reading or writing, both in binary mode. + + :param write: if True, the file descriptor will be opened for writing. Otherwise + it will be opened read-only. + :param stream: if True, the file descriptor will be wrapped into a simple stream + object which supports only reading or writing + :return: fd to read from or write to.
It is still maintained by this instance + and must not be closed directly + :raise IOError: if the lock could not be retrieved + :raise OSError: If the actual file could not be opened for reading + + **note** must only be called once""" + if self._write is not None: + raise AssertionError("Called %s multiple times" % self.open) + + self._write = write + + # try to open the lock file + binary = getattr(os, 'O_BINARY', 0) + lockmode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | binary + try: + fd = os.open(self._lockfilepath(), lockmode, int("600", 8)) + if not write: + os.close(fd) + else: + self._fd = fd + # END handle file descriptor + except OSError as e: + raise IOError("Lock at %r could not be obtained" % self._lockfilepath()) from e + # END handle lock retrieval + + # open actual file if required + if self._fd is None: + # we could specify exclusive here, as we obtained the lock anyway + try: + self._fd = os.open(self._filepath, os.O_RDONLY | binary) + except: + # assure we release our lockfile + remove(self._lockfilepath()) + raise + # END handle lockfile + # END open descriptor for reading + + if stream: + # need delayed import + from gitdb.stream import FDStream + return FDStream(self._fd) + else: + return self._fd + # END handle stream + + def commit(self): + """When done writing, call this function to commit your changes into the + actual file. + The file descriptor will be closed, and the lockfile handled. + + **Note** can be called multiple times""" + self._end_writing(successful=True) + + def rollback(self): + """Abort your operation without any changes. The file descriptor will be + closed, and the lock released. + + **Note** can be called multiple times""" + self._end_writing(successful=False) + + def _end_writing(self, successful=True): + """Handle the lock according to the write mode """ + if self._write is None: + raise AssertionError("Cannot end operation if it wasn't started yet") + + if self._fd is None: + return + + os.close(self._fd) + self._fd = None + + lockfile = self._lockfilepath() + if self._write and successful: + # on windows, rename does not silently overwrite the existing one + if sys.platform == "win32": + if isfile(self._filepath): + remove(self._filepath) + # END remove if exists + # END win32 special handling + os.rename(lockfile, self._filepath) + + # assure others can at least read the file - the tmpfile left it at rw-- + # We may also write that file, on windows that boils down to a remove- + # protection as well + chmod(self._filepath, int("644", 8)) + else: + # just delete the file so far, we failed + remove(lockfile) + # END successful handling + +#} END utilities diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/_proxy.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/_proxy.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6591ecf5434f8aeb60fab32d053f52a7b6c9f831 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/_proxy.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67f77a59361389216643d54c6f9bf38e83b4583cf45d1f96599af196e22db881 +size 113016 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5z.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5z.cpython-38-x86_64-linux-gnu.so new file mode 100644 index
0000000000000000000000000000000000000000..250c69aec9b8a4bb2af1903605de26299599ef91 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5z.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99b3d967ef4ae35b57e6797e8079d43c71a0aa146fa76e592f2b05499905d4b9 +size 104872 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_qhull.cpython-38-x86_64-linux-gnu.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_qhull.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1195bc5fbc0a76f8f3323af3949b57eb80c84f93 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/matplotlib/_qhull.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a1dcc9c9db6d1bd1713b8d40b4b3211af3a7bb2be660e2740aba98aad37750d +size 1935422 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c94e71bf05f670cdbc1121ba1cec31150f68792b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/__init__.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- + +# Entry point for Pytorch TensorBoard plugin package. + +__version__ = '0.4.0' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/consts.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/consts.py new file mode 100644 index 0000000000000000000000000000000000000000..df881b05aba5400fbdead9b464a4951e99ea854a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/consts.py @@ -0,0 +1,74 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- +import re +from collections import namedtuple + +PLUGIN_NAME = 'pytorch_profiler' + +WORKER_PATTERN = re.compile(r"""^(.*?) # worker name + (\.\d+)? # optional timestamp like 1619499959628 used as span name + \.pt\.trace\.json # the ending suffix + (?:\.gz)?$""", re.X) # optional .gz extension + +NODE_PROCESS_PATTERN = re.compile(r"""^(.*)_(\d+)""") +MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS = 10 +MAX_GPU_PER_NODE = 64 + +View = namedtuple('View', 'id, name, display_name') +OVERALL_VIEW = View(1, 'overall', 'Overview') +OP_VIEW = View(2, 'operator', 'Operator') +KERNEL_VIEW = View(3, 'kernel', 'Kernel') +TRACE_VIEW = View(4, 'trace', 'Trace') +DISTRIBUTED_VIEW = View(5, 'distributed', 'Distributed') +MEMORY_VIEW = View(6, 'memory', 'Memory') +MODULE_VIEW = View(7, 'module', 'Module') +LIGHTNING_VIEW = View(8, 'lightning', 'Lightning') + +TOOLTIP_GPU_UTIL = \ + 'GPU Utilization:\n' \ + 'GPU busy time / All steps time. The higher, the better. ' \ + 'GPU busy time is the time during which there is at least one GPU kernel running on it. 
' \ 'All steps time is the total time of all profiler steps (also called iterations).\n' +TOOLTIP_SM_EFFICIENCY = \ 'Est. SM Efficiency:\n' \ 'Estimated Stream Multiprocessor Efficiency. The higher, the better. ' \ 'For one kernel, SM_Eff_K = min(blocks of this kernel / SM number of this GPU, 100%). ' \ "This overall number is the sum of all kernels' SM_Eff_K weighted by each kernel's execution duration, " \ 'divided by all steps time.\n' +TOOLTIP_OCCUPANCY_COMMON = \ 'Est. Achieved Occupancy:\n' \ 'For most cases such as memory bandwidth bound kernels, the higher the better. ' \ 'Occupancy is the ratio of active warps on an SM ' \ 'to the maximum number of active warps supported by the SM. ' \ 'The theoretical occupancy of a kernel is the upper limit of occupancy for this kernel, ' \ 'limited by multiple factors such as kernel shape, kernel resource usage, ' \ 'and the GPU compute capability.\n' \ 'Est. Achieved Occupancy of a kernel, OCC_K = ' \ 'min(threads of the kernel / SM number / max threads per SM, theoretical occupancy of the kernel). ' +TOOLTIP_OCCUPANCY_OVERVIEW = \ "This overall number is the weighted average of all kernels' OCC_K " \ "using each kernel's execution duration as weight. " \ 'It shows fine-grained low-level GPU utilization.\n' +TOOLTIP_TENSOR_CORES = \ 'Kernel using Tensor Cores:\n' \ 'Total GPU Time for Tensor Core kernels / Total GPU Time for all kernels.\n' +TOOLTIP_OCCUPANCY_TABLE = \ "This \"Mean\" number is the weighted average of all calls' OCC_K of the kernel, " \ "using each call's execution duration as weight. " \ 'It shows fine-grained low-level GPU utilization.' +TOOLTIP_BLOCKS_PER_SM = \ 'Blocks Per SM = blocks of this kernel / SM number of this GPU.\n' \ 'If this number is less than 1, it indicates the GPU multiprocessors are not fully utilized.\n' \ '\"Mean Blocks per SM\" is the weighted average of all calls of this kernel, ' \ "using each call's execution duration as weight." +TOOLTIP_OP_TC_ELIGIBLE = \ 'Whether this operator is eligible to use Tensor Cores.' +TOOLTIP_OP_TC_SELF = \ 'Time of self-kernels with Tensor Cores / Time of self-kernels.' +TOOLTIP_OP_TC_TOTAL = \ 'Time of kernels with Tensor Cores / Time of kernels.' +TOOLTIP_KERNEL_USES_TC = \ 'Whether this kernel uses Tensor Cores.' +TOOLTIP_KERNEL_OP_TC_ELIGIBLE = \ 'Whether the operator that launched this kernel is eligible to use Tensor Cores.' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/multiprocessing.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/multiprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..b71773505c4473934340a0e573ebfcfe3db6f6a4 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/multiprocessing.py @@ -0,0 +1,13 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved.
+# ------------------------------------------------------------------------- +import multiprocessing as mp +import os + + +def get_start_method(): + return os.getenv('TORCH_PROFILER_START_METHOD', 'spawn') + + +__all__ = [x for x in dir(mp.get_context(get_start_method())) if not x.startswith('_')] +globals().update((name, getattr(mp.get_context(get_start_method()), name)) for name in __all__) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/plugin.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/plugin.py new file mode 100644 index 0000000000000000000000000000000000000000..c93aedb88e0b43d50dbe71ee61f0ef4fdbf9134b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/plugin.py @@ -0,0 +1,557 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- +import atexit +import gzip +import json +import os +import shutil +import sys +import tempfile +import threading +import time +from collections import OrderedDict +from queue import Queue + +import werkzeug +from tensorboard.plugins import base_plugin +from werkzeug import exceptions, wrappers + +from . import consts, io, utils +from .profiler import RunLoader +from .run import DistributedRunProfile, Run, RunProfile + +logger = utils.get_logger() + + +def decorate_headers(func): + def wrapper(*args, **kwargs): + headers = func(*args, **kwargs) + headers.extend(TorchProfilerPlugin.headers) + return headers + return wrapper + + +exceptions.HTTPException.get_headers = decorate_headers(exceptions.HTTPException.get_headers) + + +class TorchProfilerPlugin(base_plugin.TBPlugin): + """TensorBoard plugin for Torch Profiler.""" + + plugin_name = consts.PLUGIN_NAME + headers = [('X-Content-Type-Options', 'nosniff')] + CONTENT_TYPE = 'application/json' + + def __init__(self, context: base_plugin.TBContext): + """Instantiates TorchProfilerPlugin. + Args: + context: A base_plugin.TBContext instance. + """ + super(TorchProfilerPlugin, self).__init__(context) + self.logdir = io.abspath(context.logdir.rstrip('/')) + + self._load_lock = threading.Lock() + self._load_threads = [] + + self._runs = OrderedDict() + self._runs_lock = threading.Lock() + + self._temp_dir = tempfile.mkdtemp() + self._cache = io.Cache(self._temp_dir) + self._queue = Queue() + self._gpu_metrics_file_dict = {} + monitor_runs = threading.Thread(target=self._monitor_runs, name='monitor_runs', daemon=True) + monitor_runs.start() + + receive_runs = threading.Thread(target=self._receive_runs, name='receive_runs', daemon=True) + receive_runs.start() + + self.diff_run_cache = {} + self.diff_run_flatten_cache = {} + + def clean(): + logger.debug('starting cleanup...') + self._cache.__exit__(*sys.exc_info()) + logger.debug('remove temporary cache directory %s' % self._temp_dir) + shutil.rmtree(self._temp_dir) + + atexit.register(clean) + + def is_active(self): + """Returns whether there is relevant data for the plugin to process. 
+ If there is no any pending run, hide the plugin + """ + if self.is_loading: + return True + else: + with self._runs_lock: + return bool(self._runs) + + def get_plugin_apps(self): + return { + '/index.js': self.static_file_route, + '/index.html': self.static_file_route, + '/trace_viewer_full.html': self.static_file_route, + '/trace_embedding.html': self.static_file_route, + '/runs': self.runs_route, + '/views': self.views_route, + '/workers': self.workers_route, + '/spans': self.spans_route, + '/overview': self.overview_route, + '/operation': self.operation_pie_route, + '/operation/table': self.operation_table_route, + '/operation/stack': self.operation_stack_route, + '/kernel': self.kernel_pie_route, + '/kernel/table': self.kernel_table_route, + '/kernel/tc_pie': self.kernel_tc_route, + '/trace': self.trace_route, + '/distributed/gpuinfo': self.dist_gpu_info_route, + '/distributed/overlap': self.comm_overlap_route, + '/distributed/waittime': self.comm_wait_route, + '/distributed/commops': self.comm_ops_route, + '/memory': self.memory_route, + '/memory_curve': self.memory_curve_route, + '/memory_events': self.memory_events_route, + '/module': self.module_route, + '/tree': self.op_tree_route, + '/diff': self.diff_run_route, + '/diffnode': self.diff_run_node_route, + } + + def frontend_metadata(self): + return base_plugin.FrontendMetadata(es_module_path='/index.js', disable_reload=True) + + @wrappers.Request.application + def runs_route(self, request: werkzeug.Request): + with self._runs_lock: + names = list(self._runs.keys()) + + data = { + 'runs': names, + 'loading': self.is_loading + } + return self.respond_as_json(data) + + @wrappers.Request.application + def views_route(self, request: werkzeug.Request): + name = request.args.get('run') + self._validate(run=name) + run = self._get_run(name) + views_list = [view.display_name for view in run.views] + return self.respond_as_json(views_list) + + @wrappers.Request.application + def workers_route(self, request: werkzeug.Request): + name = request.args.get('run') + view = request.args.get('view') + self._validate(run=name, view=view) + run = self._get_run(name) + return self.respond_as_json(run.get_workers(view)) + + @wrappers.Request.application + def spans_route(self, request: werkzeug.Request): + name = request.args.get('run') + worker = request.args.get('worker') + self._validate(run=name, worker=worker) + run = self._get_run(name) + return self.respond_as_json(run.get_spans(worker)) + + @wrappers.Request.application + def overview_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + name = request.args.get('run') + run = self._get_run(name) + data = profile.overview + is_gpu_used = profile.has_runtime or profile.has_kernel or profile.has_memcpy_or_memset + normal_workers = [worker for worker in run.workers if worker != 'All'] + data['environments'] = [{'title': 'Number of Worker(s)', 'value': str(len(normal_workers))}, + {'title': 'Device Type', 'value': 'GPU' if is_gpu_used else 'CPU'}] + if profile.gpu_summary and profile.gpu_tooltip: + data['gpu_metrics'] = {'title': 'GPU Summary', + 'data': profile.gpu_summary, + 'tooltip': profile.gpu_tooltip} + + return self.respond_as_json(data) + + @wrappers.Request.application + def operation_pie_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + group_by = request.args.get('group_by') + if group_by == 'OperationAndInputShape': + return self.respond_as_json(profile.operation_pie_by_name_input) + else: + return 
self.respond_as_json(profile.operation_pie_by_name) + + @wrappers.Request.application + def operation_table_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + group_by = request.args.get('group_by') + if group_by == 'OperationAndInputShape': + return self.respond_as_json(profile.operation_table_by_name_input) + else: + return self.respond_as_json(profile.operation_table_by_name) + + @wrappers.Request.application + def operation_stack_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + op_name = request.args.get('op_name') + self._validate(op_name=op_name) + group_by = request.args.get('group_by') + input_shape = request.args.get('input_shape') + if group_by == 'OperationAndInputShape': + return self.respond_as_json(profile.operation_stack_by_name_input[str(op_name)+'###'+str(input_shape)]) + else: + return self.respond_as_json(profile.operation_stack_by_name[str(op_name)]) + + @wrappers.Request.application + def kernel_pie_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + return self.respond_as_json(profile.kernel_pie) + + @wrappers.Request.application + def kernel_table_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + group_by = request.args.get('group_by') + if group_by == 'Kernel': + return self.respond_as_json(profile.kernel_table) + else: + return self.respond_as_json(profile.kernel_op_table) + + @wrappers.Request.application + def kernel_tc_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + return self.respond_as_json(profile.tc_pie) + + @wrappers.Request.application + def trace_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + + if not profile.has_kernel: # Pure CPU. 
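+            # Annotation (not upstream code): pure-CPU traces are read from
+            # the local cache and, unless the file is already a .gz, compressed
+            # once at gzip level 1, because the response below is always sent
+            # with a Content-Encoding: gzip header.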
+ raw_data = self._cache.read(profile.trace_file_path) + if not profile.trace_file_path.endswith('.gz'): + raw_data = gzip.compress(raw_data, 1) + else: + file_with_gpu_metrics = self._gpu_metrics_file_dict.get(profile.trace_file_path) + if file_with_gpu_metrics: + raw_data = io.read(file_with_gpu_metrics) + else: + raw_data = self._cache.read(profile.trace_file_path) + if profile.trace_file_path.endswith('.gz'): + raw_data = gzip.decompress(raw_data) + raw_data = profile.append_gpu_metrics(raw_data) + + # write the data to temp file + fp = tempfile.NamedTemporaryFile('w+b', suffix='.json.gz', dir=self._temp_dir, delete=False) + fp.close() + # Already compressed, no need to gzip.open + with open(fp.name, mode='wb') as file: + file.write(raw_data) + self._gpu_metrics_file_dict[profile.trace_file_path] = fp.name + + headers = [('Content-Encoding', 'gzip')] + headers.extend(TorchProfilerPlugin.headers) + return werkzeug.Response(raw_data, content_type=TorchProfilerPlugin.CONTENT_TYPE, headers=headers) + + @wrappers.Request.application + def dist_gpu_info_route(self, request: werkzeug.Request): + profile = self._get_distributed_profile_for_request(request) + return self.respond_as_json(profile.gpu_info) + + @wrappers.Request.application + def comm_overlap_route(self, request: werkzeug.Request): + profile = self._get_distributed_profile_for_request(request) + return self.respond_as_json(profile.steps_to_overlap) + + @wrappers.Request.application + def comm_wait_route(self, request: werkzeug.Request): + profile = self._get_distributed_profile_for_request(request) + return self.respond_as_json(profile.steps_to_wait) + + @wrappers.Request.application + def comm_ops_route(self, request: werkzeug.Request): + profile = self._get_distributed_profile_for_request(request) + return self.respond_as_json(profile.comm_ops) + + @wrappers.Request.application + def memory_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + start_ts = request.args.get('start_ts', None) + end_ts = request.args.get('end_ts', None) + memory_metric = request.args.get('memory_metric', 'KB') + if start_ts is not None: + start_ts = int(start_ts) + if end_ts is not None: + end_ts = int(end_ts) + + return self.respond_as_json( + profile.get_memory_stats(start_ts=start_ts, end_ts=end_ts, memory_metric=memory_metric), True) + + @wrappers.Request.application + def memory_curve_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + time_metric = request.args.get('time_metric', 'ms') + memory_metric = request.args.get('memory_metric', 'MB') + return self.respond_as_json( + profile.get_memory_curve(time_metric=time_metric, memory_metric=memory_metric), True) + + @wrappers.Request.application + def memory_events_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + start_ts = request.args.get('start_ts', None) + end_ts = request.args.get('end_ts', None) + time_metric = request.args.get('time_metric', 'ms') + memory_metric = request.args.get('memory_metric', 'KB') + if start_ts is not None: + start_ts = int(start_ts) + if end_ts is not None: + end_ts = int(end_ts) + + return self.respond_as_json( + profile.get_memory_events(start_ts, end_ts, time_metric=time_metric, + memory_metric=memory_metric), True) + + @wrappers.Request.application + def module_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + content = profile.get_module_view() + if content: + return self.respond_as_json(content, 
True) + else: + name = request.args.get('run') + worker = request.args.get('worker') + span = request.args.get('span') + raise exceptions.NotFound('could not find the run for %s/%s/%s' % (name, worker, span)) + + @wrappers.Request.application + def op_tree_route(self, request: werkzeug.Request): + profile = self._get_profile_for_request(request) + content = profile.get_operator_tree() + return self.respond_as_json(content, True) + + @wrappers.Request.application + def diff_run_route(self, request: werkzeug.Request): + base, exp = self.get_diff_runs(request) + diff_stats = self.get_diff_status(base, exp) + content = diff_stats.get_diff_tree_summary() + return self.respond_as_json(content, True) + + @wrappers.Request.application + def diff_run_node_route(self, request: werkzeug.Request): + base, exp = self.get_diff_runs(request) + path = request.args.get('path', '0') + stats_dict = self.get_diff_stats_dict(base, exp) + diff_stat = stats_dict.get(path) + if diff_stat is None: + raise exceptions.NotFound('could not find diff run for %s' % (path)) + content = diff_stat.get_diff_node_summary(path) + return self.respond_as_json(content, True) + + @wrappers.Request.application + def static_file_route(self, request: werkzeug.Request): + filename = os.path.basename(request.path) + extension = os.path.splitext(filename)[1] + if extension == '.html': + mimetype = 'text/html' + elif extension == '.css': + mimetype = 'text/css' + elif extension == '.js': + mimetype = 'application/javascript' + else: + mimetype = 'application/octet-stream' + filepath = os.path.join(os.path.dirname(__file__), 'static', filename) + try: + with open(filepath, 'rb') as infile: + contents = infile.read() + except IOError: + raise exceptions.NotFound('404 Not Found') + return werkzeug.Response( + contents, content_type=mimetype, headers=TorchProfilerPlugin.headers + ) + + @staticmethod + def respond_as_json(obj, compress: bool = False): + content = json.dumps(obj) + headers = [] + headers.extend(TorchProfilerPlugin.headers) + if compress: + content_bytes = content.encode('utf-8') + raw_data = gzip.compress(content_bytes, 1) + headers.append(('Content-Encoding', 'gzip')) + return werkzeug.Response(raw_data, content_type=TorchProfilerPlugin.CONTENT_TYPE, headers=headers) + else: + return werkzeug.Response(content, content_type=TorchProfilerPlugin.CONTENT_TYPE, headers=headers) + + @property + def is_loading(self): + with self._load_lock: + return bool(self._load_threads) + + def get_diff_runs(self, request: werkzeug.Request): + name = request.args.get('run') + span = request.args.get('span') + worker = request.args.get('worker') + self._validate(run=name, worker=worker, span=span) + base = self._get_profile(name, worker, span) + + exp_name = request.args.get('exp_run') + exp_span = request.args.get('exp_span') + exp_worker = request.args.get('exp_worker') + self._validate(exp_run=exp_name, exp_worker=exp_worker, exp_span=exp_span) + exp = self._get_profile(exp_name, exp_worker, exp_span) + + return base, exp + + def get_diff_status(self, base: RunProfile, exp: RunProfile): + key = (base, exp) + diff_stats = self.diff_run_cache.get(key) + if diff_stats is None: + diff_stats = base.compare_run(exp) + self.diff_run_cache[key] = diff_stats + + return diff_stats + + def get_diff_stats_dict(self, base: RunProfile, exp: RunProfile): + key = (base, exp) + stats_dict = self.diff_run_flatten_cache.get(key) + if stats_dict is None: + diff_stats = self.get_diff_status(base, exp) + stats_dict = diff_stats.flatten_diff_tree() + 
self.diff_run_flatten_cache[key] = stats_dict + return stats_dict + + def _monitor_runs(self): + logger.info('Monitor runs begin') + + try: + touched = set() + while True: + try: + logger.debug('Scan run dir') + run_dirs = self._get_run_dirs() + + has_dir = False + # Assume no deletion on run directories, trigger async load if find a new run + for run_dir in run_dirs: + has_dir = True + if run_dir not in touched: + touched.add(run_dir) + logger.info('Find run directory %s', run_dir) + # Use threading to avoid UI stall and reduce data parsing time + t = threading.Thread(target=self._load_run, args=(run_dir,)) + t.start() + with self._load_lock: + self._load_threads.append(t) + + if not has_dir: + # handle directory removed case. + self._runs.clear() + except Exception as ex: + logger.warning('Failed to scan runs. Exception=%s', ex, exc_info=True) + + time.sleep(consts.MONITOR_RUN_REFRESH_INTERNAL_IN_SECONDS) + except Exception: + logger.exception('Failed to start monitor_runs') + + def _receive_runs(self): + while True: + run: Run = self._queue.get() + if run is None: + continue + + logger.info('Add run %s', run.name) + with self._runs_lock: + is_new = run.name not in self._runs + self._runs[run.name] = run + if is_new: + self._runs = OrderedDict(sorted(self._runs.items())) + + def _get_run_dirs(self): + """Scan logdir, find PyTorch Profiler run directories. + A directory is considered to be a run if it contains 1 or more *.pt.trace.json[.gz]. + E.g. there are 2 runs: run1, run2 + /run1 + /[worker1].pt.trace.json.gz + /[worker2].pt.trace.json.gz + /run2 + /[worker1].pt.trace.json + """ + for root, _, files in io.walk(self.logdir): + for file in files: + if utils.is_chrome_trace_file(file): + yield root + break + + def _load_run(self, run_dir): + try: + name = self._get_run_name(run_dir) + logger.info('Load run %s', name) + # Currently, assume run data is immutable, so just load once + loader = RunLoader(name, run_dir, self._cache) + run = loader.load() + logger.info('Run %s loaded', name) + self._queue.put(run) + except Exception as ex: + logger.warning('Failed to load run %s. 
Exception=%s', ex, name, exc_info=True) + + t = threading.current_thread() + with self._load_lock: + try: + self._load_threads.remove(t) + except ValueError: + logger.warning('could not find the thread {}'.format(run_dir)) + + def _get_run(self, name) -> Run: + with self._runs_lock: + run = self._runs.get(name, None) + + if run is None: + raise exceptions.NotFound('could not find the run for %s' % (name)) + + return run + + def _get_run_name(self, run_dir): + logdir = io.abspath(self.logdir) + if run_dir == logdir: + name = io.basename(run_dir) + else: + name = io.relpath(run_dir, logdir) + return name + + def _get_profile_for_request(self, request: werkzeug.Request) -> RunProfile: + name = request.args.get('run') + span = request.args.get('span') + worker = request.args.get('worker') + self._validate(run=name, worker=worker) + profile = self._get_profile(name, worker, span) + if not isinstance(profile, RunProfile): + raise exceptions.BadRequest('Get an unexpected profile type %s for %s/%s' % (type(profile), name, worker)) + + return profile + + def _get_distributed_profile_for_request(self, request: werkzeug.Request) -> DistributedRunProfile: + name = request.args.get('run') + span = request.args.get('span') + self._validate(run=name) + profile = self._get_profile(name, 'All', span) + if not isinstance(profile, DistributedRunProfile): + raise exceptions.BadRequest('Get an unexpected distributed profile type %s for %s' % (type(profile), name)) + + return profile + + def _get_profile(self, name, worker, span): + run = self._get_run(name) + profile = run.get_profile(worker, span) + if profile is None: + raise exceptions.NotFound('could not find the profile for %s/%s/%s ' % (name, worker, span)) + return profile + + def _validate(self, **kwargs): + for name, v in kwargs.items(): + if v is None: + raise exceptions.BadRequest('Must specify %s in request url' % (name)) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/run.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/run.py new file mode 100644 index 0000000000000000000000000000000000000000..e54109eeb0628ff4de8313e3b29a1a76c9986c97 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/run.py @@ -0,0 +1,485 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- +from collections import defaultdict +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +from . import consts, utils +from .profiler.diffrun import compare_op_tree, diff_summary +from .profiler.memory_parser import MemoryMetrics, MemoryRecord, MemorySnapshot +from .profiler.module_op import Stats +from .profiler.node import OperatorNode +from .utils import Canonicalizer, DisplayRounder + +logger = utils.get_logger() + + +class Run(object): + """ A profiler run. For visualization purpose only. + May contain profiling results from multiple workers. E.g. distributed scenario. 
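+    Illustrative sketch (the run/worker names are invented for the example;
+    real Run objects are built by profiler.RunLoader):
+        run = Run('resnet50', './logs/resnet50')
+        run.add_profile(profile)               # RunProfile or DistributedRunProfile
+        run.get_profile('worker0', 'default')  # keyed by (worker, span)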
+ """ + + def __init__(self, name, run_dir): + self.name = name + self.run_dir = run_dir + self.profiles: Dict[Tuple[str, str], RunProfile] = {} + + @property + def workers(self): + # get full worker list and remove the duplicated + worker_list, _ = zip(*self.profiles.keys()) + worker_list = sorted(list(dict.fromkeys(worker_list))) + return worker_list + + @property + def views(self) -> List[consts.View]: + view_set = set() + for profile in self.profiles.values(): + view_set.update(profile.views) + return sorted(list(view_set), key=lambda x: x.id) + + def get_workers(self, view): + worker_set = set() + for profile in self.profiles.values(): + for v in profile.views: + if v.display_name == view: + worker_set.add(profile.worker) + break + return sorted(list(worker_set)) + + def get_spans(self, worker=None): + if worker is not None: + spans = [s for w, s in self.profiles.keys() if w == worker] + else: + spans = [s for _, s in self.profiles.keys()] + + spans = list(set(spans)) + if len(spans) == 1 and spans[0] is None: + return None + else: + return sorted(spans) + + def add_profile(self, profile: Union['DistributedRunProfile', 'RunProfile']): + span = profile.span + if span is None: + span = 'default' + else: + span = str(span) + self.profiles[(profile.worker, span)] = profile + + def get_profile(self, worker, span) -> Union['DistributedRunProfile', 'RunProfile']: + if worker is None: + raise ValueError('the worker parameter is mandatory') + + if len(self.profiles) == 0: + return None + + return self.profiles.get((worker, span), None) + + def get_profiles(self, *, worker=None, span=None) \ + -> Optional[Union[List['RunProfile'], List['DistributedRunProfile']]]: + # Note: we could not use if span to check it is None or not + # since the span 0 will be skipped at this case. + if worker is not None and span is not None: + return self.profiles.get((worker, span), None) + elif worker is not None: + return [p for (w, s), p in self.profiles.items() if worker == w] + elif span is not None: + return [p for (w, s), p in self.profiles.items() if span == s] + else: + return self.profiles.values() + + +class RunProfile(object): + """ Cooked profiling result for a worker. For visualization purpose only. 
+ """ + + def __init__(self, worker, span): + self.worker = worker + self.span = span + self.views: List[consts.View] = [] + self.is_pytorch_lightning = False + self.has_runtime = False + self.has_kernel = False + self.has_communication = False + self.has_memcpy_or_memset = False + self.profiler_start_ts = float('inf') + self.overview = None + self.operation_pie_by_name = None + self.operation_table_by_name = None + self.operation_stack_by_name: Dict = None + self.operation_pie_by_name_input = None + self.operation_table_by_name_input = None + self.operation_stack_by_name_input: Dict = None + self.kernel_op_table = None + self.kernel_pie = None + self.kernel_table = None + self.tc_pie = None + self.trace_file_path: str = None + + self.gpu_metrics = None + + self.gpu_summary = None + self.gpu_tooltip = None + + # for memory stats and curve + self.memory_snapshot: Optional[MemorySnapshot] = None + self.tid2tree: Dict[int, OperatorNode] = None + self.pl_tid2tree: Dict[int, OperatorNode] = None + + self.module_stats: Optional[List(Stats)] = None + self.pl_module_stats: Optional[List(Stats)] = None + + def append_gpu_metrics(self, raw_data: bytes): + counter_json_str = ', {}'.format(', '.join(self.gpu_metrics)) + counter_json_bytes = bytes(counter_json_str, 'utf-8') + + raw_data_without_tail = raw_data[: raw_data.rfind(b']')] + raw_data = b''.join([raw_data_without_tail, counter_json_bytes, b']}']) + + import gzip + raw_data = gzip.compress(raw_data, 1) + return raw_data + + @staticmethod + def _filtered_by_ts(events: Iterable[MemoryRecord], start_ts, end_ts): + """Returns time-ordered events of memory allocation and free""" + if start_ts is not None and end_ts is not None: + events = [e for e in events if start_ts <= e.ts and e.ts <= end_ts] + elif start_ts is not None: + events = [e for e in events if start_ts <= e.ts] + elif end_ts is not None: + events = [e for e in events if e.ts <= end_ts] + + return events + + def get_memory_stats(self, start_ts=None, end_ts=None, memory_metric='K'): + cano = Canonicalizer(memory_metric=memory_metric) + round = DisplayRounder(ndigits=2) + + stats = self.memory_snapshot.get_memory_statistics(self.tid2tree, start_ts=start_ts, end_ts=end_ts) + + result = { + 'metadata': { + 'title': 'Memory View', + 'default_device': 'CPU', + 'search': 'Operator Name', + 'sort': f'Self Size Increase ({cano.memory_metric})' + }, + 'columns': [ + {'name': 'Operator Name', 'type': 'string'}, + {'name': 'Calls', 'type': 'number', 'tooltip': '# of calls of the operator.'}, + {'name': f'Size Increase ({cano.memory_metric})', 'type': 'number', + 'tooltip': 'The memory increase size include all children operators.'}, + {'name': f'Self Size Increase ({cano.memory_metric})', 'type': 'number', + 'tooltip': 'The memory increase size associated with the operator itself.'}, + {'name': 'Allocation Count', 'type': 'number', + 'tooltip': 'The allocation count including all chidren operators.'}, + {'name': 'Self Allocation Count', 'type': 'number', + 'tooltip': 'The allocation count belonging to the operator itself.'}, + {'name': f'Allocation Size ({cano.memory_metric})', 'type': 'number', + 'tooltip': 'The allocation size including all children operators.'}, + {'name': f'Self Allocation Size ({cano.memory_metric})', 'type': 'number', + 'tooltip': ('The allocation size belonging to the operator itself.\n' + 'It will sum up all allocation bytes without considering the memory free.')}, + ], + 'rows': {} + } + + for name in stats: + these_rows = [] + result['rows'][name] = these_rows + + memory 
= stats[name] + for op_name, stat in sorted(memory.items()): + these_rows.append([ + op_name, + stat[6], + round(cano.convert_memory(stat[MemoryMetrics.IncreaseSize])), + round(cano.convert_memory(stat[MemoryMetrics.SelfIncreaseSize])), + stat[MemoryMetrics.AllocationCount], + stat[MemoryMetrics.SelfAllocationCount], + round(cano.convert_memory(stat[MemoryMetrics.AllocationSize])), + round(cano.convert_memory(stat[MemoryMetrics.SelfAllocationSize])), + ]) + + for dev_name in sorted(stats.keys()): + if dev_name.startswith('GPU'): + result['metadata']['default_device'] = dev_name + break + + return result + + def get_memory_curve( + self, + time_metric: str = 'ms', + memory_metric: str = 'K', + patch_for_step_plot=True): + def get_curves_and_peaks(records: List[MemoryRecord], cano: Canonicalizer): + """Inputs: + records: Sorted list of MemoryRecord + + For example: + ```py + { + 'CPU': [# Timestamp, Total Allocated, Total Reserved, Device Total Memory, operator + [1, 4, 4, 1000000, 'aten::add'], + [2, 16, 16, 1000000, "aten::empty], + [4, 4, 16, 1000000, '...'], + ], + 'GPU0': ... + } + ```""" + curves = defaultdict(list) + peaks = defaultdict(float) + for r in records: + if r.addr is None: + continue + dev = r.device_name + ts = r.ts + ta = r.total_allocated + tr = r.total_reserved + + if ta != ta or tr != tr: # isnan + continue + + curves[dev].append([ + cano.convert_time(ts - self.profiler_start_ts), + cano.convert_memory(ta), + cano.convert_memory(tr), + ]) + peaks[dev] = max(peaks[dev], ta) + + for dev in curves: + if len(curves[dev]) == 0: + del curves[dev] + del peaks[dev] + + return curves, peaks + + # NOTE: this should have been occured in frontend + def patch_curves_for_step_plot(curves: Dict[str, List]): + # For example, if a curve is [(0, 0), (1, 1), (2,2)], the line plot + # is a stright line. Interpolating it as [(0, 0), (1, 0), (1, 1), + # (2,1) (2,2)], then the line plot will work as step plot. 
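+            # That is: before every point after the first, repeat the previous
+            # sample's y-values at the new timestamp (p[:1] + new_curve[-1][1:]),
+            # so each value holds until the next event, as in a step plot.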
+ new_curves = defaultdict(list) + for dev, curve in curves.items(): + new_curve = [] + for i, p in enumerate(curve): + if i != 0: + new_curve.append(p[:1] + new_curve[-1][1:]) + new_curve.append(p) + new_curves[dev] = new_curve + return new_curves + + cano = Canonicalizer(time_metric, memory_metric) + + curves, peaks = get_curves_and_peaks(self.memory_snapshot.memory_records, cano) + if patch_for_step_plot: + curves = patch_curves_for_step_plot(curves) + peaks_formatted = {} + totals = {} + for dev, value in peaks.items(): + peaks_formatted[dev] = 'Peak Memory Usage: {:.1f}{}'.format(cano.convert_memory(value), cano.memory_metric) + if dev != 'CPU': + try: + totals[dev] = cano.convert_memory(self.gpu_infos[int(dev[3:])]['Memory Raw']) + except BaseException: + pass + + devices: List[str] = sorted(list(curves.keys())) + default_device = 'CPU' + for dev in devices: + if dev.startswith('GPU'): + default_device = dev + break + + return { + 'metadata': { + 'default_device': default_device, + 'devices': devices, + 'peaks': peaks_formatted, + 'totals': totals, + 'first_ts': self.profiler_start_ts, + 'time_metric': cano.time_metric, + 'memory_metric': cano.memory_metric, + 'time_factor': cano.time_factor, + 'memory_factor': cano.memory_factor, + }, + 'columns': [ + {'name': f'Time ({cano.time_metric})', 'type': 'number', 'tooltip': 'Time since profiler starts.'}, + {'name': f'Allocated ({cano.memory_metric})', 'type': 'number', 'tooltip': 'Total memory in use.'}, + {'name': f'Reserved ({cano.memory_metric})', 'type': 'number', + 'tooltip': 'Total reserved memory by allocator, both used and unused.'}, + ], + 'rows': curves, + } + + def get_memory_events( + self, + start_ts=None, + end_ts=None, + time_metric: str = 'ms', + memory_metric: str = 'K'): + def get_op_name_or_ctx(record: MemoryRecord): + name = record.op_name_or_unknown + if name.startswith('aten::empty') and record.parent_op_name: + # aten::empty can be treated as the 'malloc' in pytorch + name = f'{record.parent_op_name} ({name})' + return name + + cano = Canonicalizer(time_metric=time_metric, memory_metric=memory_metric) + round = DisplayRounder(ndigits=2) + + profiler_start_ts = self.profiler_start_ts + memory_records = RunProfile._filtered_by_ts(self.memory_snapshot.memory_records, start_ts, end_ts) + + events = defaultdict(list) + alloc = {} # allocation events may or may not have paired free event + free = {} # free events that does not have paired alloc event + prev_ts = float('-inf') # ensure ordered memory records is ordered + for i, r in enumerate(memory_records): + if r.addr is None: + # profile json data prior to pytorch 1.10 do not have addr + # we should ignore them + continue + assert prev_ts <= r.ts + prev_ts = r.ts + addr = r.addr + size = r.bytes + if r.is_allocation: + # to be matched with a release event + alloc[addr] = i + else: + if addr in alloc: + alloc_r = memory_records[alloc[addr]] + alloc_ts = alloc_r.ts + free_ts = r.ts + events[alloc_r.device_name].append([ + get_op_name_or_ctx(alloc_r), + round(cano.convert_memory(-size)), + round(cano.convert_time(alloc_ts - profiler_start_ts)), + round(cano.convert_time(free_ts - profiler_start_ts)), + round(cano.convert_time(free_ts - alloc_ts)), + ]) + del alloc[addr] + else: + if addr in free: + logger.warning(f'Address {addr} is freed multiple times') + free[addr] = i + + for i in alloc.values(): + r = memory_records[i] + events[r.device_name].append([ + get_op_name_or_ctx(r), + round(cano.convert_memory(r.bytes)), + round(cano.convert_time(r.ts - 
profiler_start_ts)), + None, + None, + ]) + + for i in free.values(): + r = memory_records[i] + events[r.device_name].append([ + get_op_name_or_ctx(r), + round(cano.convert_memory(-r.bytes)), + None, + round(cano.convert_time(r.ts - profiler_start_ts)), + None, + ]) + + default_device = 'CPU' + for dev_name in sorted(events.keys()): + if dev_name.startswith('GPU'): + default_device = dev_name + break + + return { + 'metadata': { + 'title': 'Memory Events', + 'default_device': default_device, + }, + 'columns': [ + {'name': 'Operator', 'type': 'string', 'tooltip': ''}, + {'name': f'Size ({cano.memory_metric})', 'type': 'number', 'tooltip': ''}, + {'name': f'Allocation Time ({cano.time_metric})', 'type': 'number', 'tooltip': ''}, + {'name': f'Release Time ({cano.time_metric})', 'type': 'number', 'tooltip': ''}, + {'name': f'Duration ({cano.time_metric})', 'type': 'number', 'tooltip': ''}, + ], + 'rows': events, # in the form of { 'CPU': [...], 'GPU0': [...], ... } + } + + def get_module_view(self): + if self.is_pytorch_lightning and self.pl_module_stats: + module_stats = self.pl_module_stats + elif self.module_stats: + module_stats = self.module_stats + else: + return None + + result = { + 'columns': [ + {'name': 'Module Name', 'type': 'string', 'key': 'name'}, + {'name': 'Occurences', 'type': 'number', 'key': 'occurences'}, + {'name': 'Operators', 'type': 'number', 'key': 'operators'}, + {'name': 'Host Total Time', 'type': 'number', 'key': 'host_duration'}, + {'name': 'Host Self Time', 'type': 'number', 'key': 'self_host_duration'}, + {'name': 'Device Total Time', 'type': 'number', 'key': 'device_duration'}, + {'name': 'Device Self Time', 'type': 'number', 'key': 'self_device_duration'} + ], + 'data': [] + } + + def process_modules_stats(parent: List[Any], modules_stats: List[Stats]): + for stats in modules_stats: + d = stats._asdict() + d['children'] = [] + parent.append(d) + process_modules_stats(d['children'], stats.children) + + process_modules_stats(result['data'], module_stats) + return result + + def get_operator_tree(self): + if self.is_pytorch_lightning: + root = next(iter(self.pl_tid2tree.values())) + else: + root = next(iter(self.tid2tree.values())) + + result = [] + + def traverse_node(parent: List, node: OperatorNode): + d = { + 'name': node.name, + 'start_time': node.start_time, + 'end_time': node.end_time, + 'type': node.type, + 'tid': node.tid, + 'children': [] + } + parent.append(d) + for child in node.children: + traverse_node(d['children'], child) + traverse_node(result, root) + return result[0] + + def compare_run(self, exp: 'RunProfile'): + base_root = next(iter(self.tid2tree.values())) + exp_root = next(iter(exp.tid2tree.values())) + diff_root = compare_op_tree(base_root, exp_root) + diff_stats = diff_summary(diff_root) + return diff_stats + + +class DistributedRunProfile(object): + """ Profiling all workers in a view. 
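+    Illustrative note: one instance aggregates all workers of a span (its
+    worker is fixed to 'All'); gpu_info, steps_to_overlap, steps_to_wait
+    and comm_ops are filled in by the loader and served verbatim by the
+    distributed/* routes in plugin.py.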
+ """ + + def __init__(self, span: str): + self.worker = 'All' + self.span = span + self.views = [] + self.gpu_info = None + self.steps_to_overlap = None + self.steps_to_wait = None + self.comm_ops = None diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c40ce9befc30b1f894a12e7554b1ee4f325ae605 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torch_tb_profiler/utils.py @@ -0,0 +1,122 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# -------------------------------------------------------------------------- +import logging +import math +import os +import time +from contextlib import contextmanager +from math import pow + +from . import consts + + +def get_logging_level(): + log_level = os.environ.get('TORCH_PROFILER_LOG_LEVEL', 'INFO').upper() + if log_level not in logging._levelToName.values(): + log_level = logging.getLevelName(logging.INFO) + return log_level + + +logger = None + + +def get_logger(): + global logger + if logger is None: + logger = logging.getLogger(consts.PLUGIN_NAME) + logger.setLevel(get_logging_level()) + return logger + + +def is_chrome_trace_file(path): + return consts.WORKER_PATTERN.match(path) + + +def href(text, url): + """"return html formatted hyperlink string + + Note: + target="_blank" causes this link to be opened in new tab if clicked. + """ + return f'{text}' + + +class Canonicalizer: + def __init__( + self, + time_metric='us', + memory_metric='B', + *, + input_time_metric='us', + input_memory_metric='B'): + # raw timestamp is in microsecond + # https://github.com/pytorch/pytorch/blob/v1.9.0/torch/csrc/autograd/profiler_kineto.cpp#L33 + time_metric_to_factor = { + 'us': 1, + 'ms': 1e3, + 's': 1e6, + } + # raw memory is in bytes + memory_metric_to_factor = { + 'B': pow(1024, 0), + 'KB': pow(1024, 1), + 'MB': pow(1024, 2), + 'GB': pow(1024, 3), + } + + # canonicalize the memory metric to a string + self.canonical_time_metrics = { + 'micro': 'us', 'microsecond': 'us', 'us': 'us', + 'milli': 'ms', 'millisecond': 'ms', 'ms': 'ms', + '': 's', 'second': 's', 's': 's', + } + # canonicalize the memory metric to a string + self.canonical_memory_metrics = { + '': 'B', 'B': 'B', + 'K': 'KB', 'KB': 'KB', + 'M': 'MB', 'MB': 'MB', + 'G': 'GB', 'GB': 'GB', + } + + self.time_metric = self.canonical_time_metrics[time_metric] + self.memory_metric = self.canonical_memory_metrics[memory_metric] + + # scale factor scale input to output + self.time_factor = time_metric_to_factor[self.canonical_time_metrics[input_time_metric]] /\ + time_metric_to_factor[self.time_metric] + self.memory_factor = memory_metric_to_factor[self.canonical_memory_metrics[input_memory_metric]] /\ + memory_metric_to_factor[self.memory_metric] + + def convert_time(self, t): + return self.time_factor * t + + def convert_memory(self, m): + return self.memory_factor * m + + +class DisplayRounder: + """Round a value for display purpose.""" + + def __init__(self, ndigits): + self.ndigits = ndigits + self.precision = pow(10, -ndigits) + + def __call__(self, v: float): + _v = abs(v) + if _v >= self.precision or v == 0: + return round(v, 2) + else: + ndigit = abs(math.floor(math.log10(_v))) + return round(v, ndigit) + + +@contextmanager +def timing(description: 
str, force: bool = False) -> None: + if force or os.environ.get('TORCH_PROFILER_BENCHMARK', '0') == '1': + start = time.time() + yield + elapsed_time = time.time() - start + logger.info(f'{description}: {elapsed_time}') + else: + yield diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_torchaudio_ffmpeg.so b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_torchaudio_ffmpeg.so new file mode 100644 index 0000000000000000000000000000000000000000..a9e8488da245b0b1c1fbc211fef0974dbbb3e0c0 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_torchaudio_ffmpeg.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82a72601a994b96a8c476a252325028d012dd9aa011bfb4954f9797b740cdadd +size 219928 diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a021d16e9a54eb18384d2c678675eca917753848 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/__init__.py @@ -0,0 +1,41 @@ +from ._monitor import TMonitor, TqdmSynchronisationWarning +from ._tqdm_pandas import tqdm_pandas +from .cli import main # TODO: remove in v5.0.0 +from .gui import tqdm as tqdm_gui # TODO: remove in v5.0.0 +from .gui import trange as tgrange # TODO: remove in v5.0.0 +from .std import ( + TqdmDeprecationWarning, TqdmExperimentalWarning, TqdmKeyError, TqdmMonitorWarning, + TqdmTypeError, TqdmWarning, tqdm, trange) +from .version import __version__ + +__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas', + 'tqdm_notebook', 'tnrange', 'main', 'TMonitor', + 'TqdmTypeError', 'TqdmKeyError', + 'TqdmWarning', 'TqdmDeprecationWarning', + 'TqdmExperimentalWarning', + 'TqdmMonitorWarning', 'TqdmSynchronisationWarning', + '__version__'] + + +def tqdm_notebook(*args, **kwargs): # pragma: no cover + """See tqdm.notebook.tqdm for full documentation""" + from warnings import warn + + from .notebook import tqdm as _tqdm_notebook + warn("This function will be removed in tqdm==5.0.0\n" + "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`", + TqdmDeprecationWarning, stacklevel=2) + return _tqdm_notebook(*args, **kwargs) + + +def tnrange(*args, **kwargs): # pragma: no cover + """ + A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`. + On Python3+, `range` is used instead of `xrange`. 
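+    Example (illustrative only; this alias is deprecated):
+        >>> from tqdm import tnrange
+        >>> for i in tnrange(10):  # emits TqdmDeprecationWarning
+        ...     pass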
+ """ + from warnings import warn + + from .notebook import trange as _tnrange + warn("Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`", + TqdmDeprecationWarning, stacklevel=2) + return _tnrange(*args, **kwargs) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_dist_ver.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_dist_ver.py new file mode 100644 index 0000000000000000000000000000000000000000..1c020c67e87e2c7154ebd489b70107987ace54ea --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_dist_ver.py @@ -0,0 +1 @@ +__version__ = '4.64.0' diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_main.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_main.py new file mode 100644 index 0000000000000000000000000000000000000000..04fdeeff17b5cc84b210f445b54b87d5b99e3748 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_main.py @@ -0,0 +1,9 @@ +from warnings import warn + +from .cli import * # NOQA +from .cli import __all__ # NOQA +from .std import TqdmDeprecationWarning + +warn("This function will be removed in tqdm==5.0.0\n" + "Please use `tqdm.cli.*` instead of `tqdm._main.*`", + TqdmDeprecationWarning, stacklevel=2) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_monitor.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_monitor.py new file mode 100644 index 0000000000000000000000000000000000000000..f71aa56817ca77eba5df4a2dd11cb0c4a9a7ea1c --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_monitor.py @@ -0,0 +1,95 @@ +import atexit +from threading import Event, Thread, current_thread +from time import time +from warnings import warn + +__all__ = ["TMonitor", "TqdmSynchronisationWarning"] + + +class TqdmSynchronisationWarning(RuntimeWarning): + """tqdm multi-thread/-process errors which may cause incorrect nesting + but otherwise no adverse effects""" + pass + + +class TMonitor(Thread): + """ + Monitoring thread for tqdm bars. + Monitors if tqdm bars are taking too much time to display + and readjusts miniters automatically if necessary. + + Parameters + ---------- + tqdm_cls : class + tqdm class to use (can be core tqdm or a submodule). + sleep_interval : float + Time to sleep between monitoring checks. + """ + _test = {} # internal vars for unit testing + + def __init__(self, tqdm_cls, sleep_interval): + Thread.__init__(self) + self.daemon = True # kill thread when main killed (KeyboardInterrupt) + self.woken = 0 # last time woken up, to sync with monitor + self.tqdm_cls = tqdm_cls + self.sleep_interval = sleep_interval + self._time = self._test.get("time", time) + self.was_killed = self._test.get("Event", Event)() + atexit.register(self.exit) + self.start() + + def exit(self): + self.was_killed.set() + if self is not current_thread(): + self.join() + return self.report() + + def get_instances(self): + # returns a copy of started `tqdm_cls` instances + return [i for i in self.tqdm_cls._instances.copy() + # Avoid race by checking that the instance started + if hasattr(i, 'start_t')] + + def run(self): + cur_t = self._time() + while True: + # After processing and before sleeping, notify that we woke + # Need to be done just before sleeping + self.woken = cur_t + # Sleep some time... 
+ self.was_killed.wait(self.sleep_interval) + # Quit if killed + if self.was_killed.is_set(): + return + # Then monitor! + # Acquire lock (to access _instances) + with self.tqdm_cls.get_lock(): + cur_t = self._time() + # Check tqdm instances are waiting too long to print + instances = self.get_instances() + for instance in instances: + # Check event in loop to reduce blocking time on exit + if self.was_killed.is_set(): + return + # Only if mininterval > 1 (else iterations are just slow) + # and last refresh exceeded maxinterval + if ( + instance.miniters > 1 + and (cur_t - instance.last_print_t) >= instance.maxinterval + ): + # force bypassing miniters on next iteration + # (dynamic_miniters adjusts mininterval automatically) + instance.miniters = 1 + # Refresh now! (works only for manual tqdm) + instance.refresh(nolock=True) + # Remove accidental long-lived strong reference + del instance + if instances != self.get_instances(): # pragma: nocover + warn("Set changed size during iteration" + + " (see https://github.com/tqdm/tqdm/issues/481)", + TqdmSynchronisationWarning, stacklevel=2) + # Remove accidental long-lived strong references + del instances + + def report(self): + return not self.was_killed.is_set() diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_tqdm_gui.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_tqdm_gui.py new file mode 100644 index 0000000000000000000000000000000000000000..f32aa894f54b3a5b47a0fbf4263c2fd20df56c9d --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_tqdm_gui.py @@ -0,0 +1,9 @@ +from warnings import warn + +from .gui import * # NOQA +from .gui import __all__ # NOQA +from .std import TqdmDeprecationWarning + +warn("This function will be removed in tqdm==5.0.0\n" + "Please use `tqdm.gui.*` instead of `tqdm._tqdm_gui.*`", + TqdmDeprecationWarning, stacklevel=2) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_tqdm_pandas.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_tqdm_pandas.py new file mode 100644 index 0000000000000000000000000000000000000000..c4fe6efdc603579e7f8acfa27ac10dccdf3e94ce --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_tqdm_pandas.py @@ -0,0 +1,24 @@ +import sys + +__author__ = "github.com/casperdcl" +__all__ = ['tqdm_pandas'] + + +def tqdm_pandas(tclass, **tqdm_kwargs): + """ + Registers the given `tqdm` instance with + `pandas.core.groupby.DataFrameGroupBy.progress_apply`. 
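+    Example (illustrative only; prefer `tqdm.pandas(...)`, of which this
+    helper is a deprecated wrapper):
+        >>> import pandas as pd
+        >>> from tqdm import tqdm, tqdm_pandas
+        >>> tqdm_pandas(tqdm, desc='apply')  # doctest: +SKIP
+        >>> pd.DataFrame({'a': range(9)}).groupby('a').progress_apply(len)  # doctest: +SKIP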
+ """ + from tqdm import TqdmDeprecationWarning + + if isinstance(tclass, type) or (getattr(tclass, '__name__', '').startswith( + 'tqdm_')): # delayed adapter case + TqdmDeprecationWarning( + "Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm, ...)`.", + fp_write=getattr(tqdm_kwargs.get('file', None), 'write', sys.stderr.write)) + tclass.pandas(**tqdm_kwargs) + else: + TqdmDeprecationWarning( + "Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm(...))`.", + fp_write=getattr(tclass.fp, 'write', sys.stderr.write)) + type(tclass).pandas(deprecated_t=tclass) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2cf10909f5cd5ee27d7cc23cbce3e47a51665212 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/_utils.py @@ -0,0 +1,12 @@ +from warnings import warn + +from .std import TqdmDeprecationWarning +from .utils import ( # NOQA, pylint: disable=unused-import + CUR_OS, IS_NIX, IS_WIN, RE_ANSI, Comparable, FormatReplace, SimpleTextIOWrapper, _basestring, + _environ_cols_wrapper, _is_ascii, _is_utf, _range, _screen_shape_linux, _screen_shape_tput, + _screen_shape_windows, _screen_shape_wrapper, _supports_unicode, _term_move_up, _unich, + _unicode, colorama) + +warn("This function will be removed in tqdm==5.0.0\n" + "Please use `tqdm.utils.*` instead of `tqdm._utils.*`", + TqdmDeprecationWarning, stacklevel=2) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/auto.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/auto.py new file mode 100644 index 0000000000000000000000000000000000000000..cffca206ffe014bea6e758df858a1d640392fe96 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/auto.py @@ -0,0 +1,44 @@ +""" +Enables multiple commonly used features. + +Method resolution order: + +- `tqdm.autonotebook` without import warnings +- `tqdm.asyncio` on Python3.6+ +- `tqdm.std` base class + +Usage: +>>> from tqdm.auto import trange, tqdm +>>> for i in trange(10): +... ... +""" +import sys +import warnings + +from .std import TqdmExperimentalWarning + +with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=TqdmExperimentalWarning) + from .autonotebook import tqdm as notebook_tqdm + from .autonotebook import trange as notebook_trange + +if sys.version_info[:2] < (3, 6): + tqdm = notebook_tqdm + trange = notebook_trange +else: # Python3.6+ + from .asyncio import tqdm as asyncio_tqdm + from .std import tqdm as std_tqdm + + if notebook_tqdm != std_tqdm: + class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro + pass + else: + tqdm = asyncio_tqdm + + def trange(*args, **kwargs): + """ + A shortcut for `tqdm.auto.tqdm(range(*args), **kwargs)`. + """ + return tqdm(range(*args), **kwargs) + +__all__ = ["tqdm", "trange"] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/autonotebook.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/autonotebook.py new file mode 100644 index 0000000000000000000000000000000000000000..a09f2ec4b8c95f12b8c7b7774f84d5ec55826334 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/autonotebook.py @@ -0,0 +1,29 @@ +""" +Automatically choose between `tqdm.notebook` and `tqdm.std`. 
+ +Usage: +>>> from tqdm.autonotebook import trange, tqdm +>>> for i in trange(10): +... ... +""" +import sys +from warnings import warn + +try: + get_ipython = sys.modules['IPython'].get_ipython + if 'IPKernelApp' not in get_ipython().config: # pragma: no cover + raise ImportError("console") + from .notebook import WARN_NOIPYW, IProgress + if IProgress is None: + from .std import TqdmWarning + warn(WARN_NOIPYW, TqdmWarning, stacklevel=2) + raise ImportError('ipywidgets') +except Exception: + from .std import tqdm, trange +else: # pragma: no cover + from .notebook import tqdm, trange + from .std import TqdmExperimentalWarning + warn("Using `tqdm.autonotebook.tqdm` in notebook mode." + " Use `tqdm.tqdm` instead to force console mode" + " (e.g. in jupyter console)", TqdmExperimentalWarning, stacklevel=2) +__all__ = ["tqdm", "trange"] diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/cli.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..3ed25fbb441607f911c558556416f2218b4a4696 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/cli.py @@ -0,0 +1,315 @@ +""" +Module version for monitoring CLI pipes (`... | python -m tqdm | ...`). +""" +import logging +import re +import sys +from ast import literal_eval as numeric + +from .std import TqdmKeyError, TqdmTypeError, tqdm +from .version import __version__ + +__all__ = ["main"] +log = logging.getLogger(__name__) + + +def cast(val, typ): + log.debug((val, typ)) + if " or " in typ: + for t in typ.split(" or "): + try: + return cast(val, t) + except TqdmTypeError: + pass + raise TqdmTypeError(val + ' : ' + typ) + + # sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n') + if typ == 'bool': + if (val == 'True') or (val == ''): + return True + elif val == 'False': + return False + else: + raise TqdmTypeError(val + ' : ' + typ) + try: + return eval(typ + '("' + val + '")') + except Exception: + if typ == 'chr': + return chr(ord(eval('"' + val + '"'))).encode() + else: + raise TqdmTypeError(val + ' : ' + typ) + + +def posix_pipe(fin, fout, delim=b'\\n', buf_size=256, + callback=lambda float: None, callback_len=True): + """ + Params + ------ + fin : binary file with `read(buf_size : int)` method + fout : binary file with `write` (and optionally `flush`) methods. + callback : function(float), e.g.: `tqdm.update` + callback_len : If (default: True) do `callback(len(buffer))`. + Otherwise, do `callback(data) for data in buffer.split(delim)`. + """ + fp_write = fout.write + + if not delim: + while True: + tmp = fin.read(buf_size) + + # flush at EOF + if not tmp: + getattr(fout, 'flush', lambda: None)() + return + + fp_write(tmp) + callback(len(tmp)) + # return + + buf = b'' + len_delim = len(delim) + # n = 0 + while True: + tmp = fin.read(buf_size) + + # flush at EOF + if not tmp: + if buf: + fp_write(buf) + if callback_len: + # n += 1 + buf.count(delim) + callback(1 + buf.count(delim)) + else: + for i in buf.split(delim): + callback(i) + getattr(fout, 'flush', lambda: None)() + return # n + + while True: + i = tmp.find(delim) + if i < 0: + buf += tmp + break + fp_write(buf + tmp[:i + len(delim)]) + # n += 1 + callback(1 if callback_len else (buf + tmp[:i])) + buf = b'' + tmp = tmp[i + len_delim:] + + +# ((opt, type), ... ) +RE_OPTS = re.compile(r'\n {8}(\S+)\s{2,}:\s*([^,]+)') +# better split method assuming no positional args +RE_SHLEX = re.compile(r'\s*(? 
: \2', d) + split = RE_OPTS.split(d) + opt_types_desc = zip(split[1::3], split[2::3], split[3::3]) + d = ''.join(('\n --{0} : {2}{3}' if otd[1] == 'bool' else + '\n --{0}=<{1}> : {2}{3}').format( + otd[0].replace('_', '-'), otd[0], *otd[1:]) + for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS) + + help_short = "Usage:\n tqdm [--help | options]\n" + d = help_short + """ +Options: + -h, --help Print this help and exit. + -v, --version Print version and exit. +""" + d.strip('\n') + '\n' + + # opts = docopt(d, version=__version__) + if any(v in argv for v in ('-v', '--version')): + sys.stdout.write(__version__ + '\n') + sys.exit(0) + elif any(v in argv for v in ('-h', '--help')): + sys.stdout.write(d + '\n') + sys.exit(0) + elif argv and argv[0][:2] != '--': + sys.stderr.write( + "Error:Unknown argument:{0}\n{1}".format(argv[0], help_short)) + + argv = RE_SHLEX.split(' '.join(["tqdm"] + argv)) + opts = dict(zip(argv[1::3], argv[3::3])) + + log.debug(opts) + opts.pop('log', True) + + tqdm_args = {'file': fp} + try: + for (o, v) in opts.items(): + o = o.replace('-', '_') + try: + tqdm_args[o] = cast(v, opt_types[o]) + except KeyError as e: + raise TqdmKeyError(str(e)) + log.debug('args:' + str(tqdm_args)) + + delim_per_char = tqdm_args.pop('bytes', False) + update = tqdm_args.pop('update', False) + update_to = tqdm_args.pop('update_to', False) + if sum((delim_per_char, update, update_to)) > 1: + raise TqdmKeyError("Can only have one of --bytes --update --update_to") + except Exception: + fp.write("\nError:\n" + help_short) + stdin, stdout_write = sys.stdin, sys.stdout.write + for i in stdin: + stdout_write(i) + raise + else: + buf_size = tqdm_args.pop('buf_size', 256) + delim = tqdm_args.pop('delim', b'\\n') + tee = tqdm_args.pop('tee', False) + manpath = tqdm_args.pop('manpath', None) + comppath = tqdm_args.pop('comppath', None) + if tqdm_args.pop('null', False): + class stdout(object): + @staticmethod + def write(_): + pass + else: + stdout = sys.stdout + stdout = getattr(stdout, 'buffer', stdout) + stdin = getattr(sys.stdin, 'buffer', sys.stdin) + if manpath or comppath: + from os import path + from shutil import copyfile + try: # py<3.7 + import importlib_resources as resources + except ImportError: + from importlib import resources + + def cp(name, dst): + """copy resource `name` to `dst`""" + if hasattr(resources, 'files'): + copyfile(str(resources.files('tqdm') / name), dst) + else: # py<3.9 + with resources.path('tqdm', name) as src: + copyfile(str(src), dst) + log.info("written:%s", dst) + if manpath is not None: + cp('tqdm.1', path.join(manpath, 'tqdm.1')) + if comppath is not None: + cp('completion.sh', path.join(comppath, 'tqdm_completion.sh')) + sys.exit(0) + if tee: + stdout_write = stdout.write + fp_write = getattr(fp, 'buffer', fp).write + + class stdout(object): # pylint: disable=function-redefined + @staticmethod + def write(x): + with tqdm.external_write_mode(file=fp): + fp_write(x) + stdout_write(x) + if delim_per_char: + tqdm_args.setdefault('unit', 'B') + tqdm_args.setdefault('unit_scale', True) + tqdm_args.setdefault('unit_divisor', 1024) + log.debug(tqdm_args) + with tqdm(**tqdm_args) as t: + posix_pipe(stdin, stdout, '', buf_size, t.update) + elif delim == b'\\n': + log.debug(tqdm_args) + write = stdout.write + if update or update_to: + with tqdm(**tqdm_args) as t: + if update: + def callback(i): + t.update(numeric(i.decode())) + else: # update_to + def callback(i): + t.update(numeric(i.decode()) - t.n) + for i in stdin: + write(i) + callback(i) + else: + for i 
in tqdm(stdin, **tqdm_args): + write(i) + else: + log.debug(tqdm_args) + with tqdm(**tqdm_args) as t: + callback_len = False + if update: + def callback(i): + t.update(numeric(i.decode())) + elif update_to: + def callback(i): + t.update(numeric(i.decode()) - t.n) + else: + callback = t.update + callback_len = True + posix_pipe(stdin, stdout, delim, buf_size, callback, callback_len) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/completion.sh b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/completion.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f61c7f14bb8c1f6099b9eb75dce28ece6a7ae96 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/completion.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +_tqdm(){ + local cur prv + cur="${COMP_WORDS[COMP_CWORD]}" + prv="${COMP_WORDS[COMP_CWORD - 1]}" + + case ${prv} in + --bar_format|--buf_size|--colour|--comppath|--delay|--delim|--desc|--initial|--lock_args|--manpath|--maxinterval|--mininterval|--miniters|--ncols|--nrows|--position|--postfix|--smoothing|--total|--unit|--unit_divisor) + # await user input + ;; + "--log") + COMPREPLY=($(compgen -W 'CRITICAL FATAL ERROR WARN WARNING INFO DEBUG NOTSET' -- ${cur})) + ;; + *) + COMPREPLY=($(compgen -W '--ascii --bar_format --buf_size --bytes --colour --comppath --delay --delim --desc --disable --dynamic_ncols --help --initial --leave --lock_args --log --manpath --maxinterval --mininterval --miniters --ncols --nrows --null --position --postfix --smoothing --tee --total --unit --unit_divisor --unit_scale --update --update_to --version --write_bytes -h -v' -- ${cur})) + ;; + esac +} +complete -F _tqdm tqdm diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/dask.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/dask.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc7504c79a37872388a99e2abe498e9e9c5b41a --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/dask.py @@ -0,0 +1,46 @@ +from __future__ import absolute_import + +from functools import partial + +from dask.callbacks import Callback + +from .auto import tqdm as tqdm_auto + +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['TqdmCallback'] + + +class TqdmCallback(Callback): + """Dask callback for task progress.""" + def __init__(self, start=None, pretask=None, tqdm_class=tqdm_auto, + **tqdm_kwargs): + """ + Parameters + ---------- + tqdm_class : optional + `tqdm` class to use for bars [default: `tqdm.auto.tqdm`]. + tqdm_kwargs : optional + Any other arguments used for all bars. 
+ """ + super(TqdmCallback, self).__init__(start=start, pretask=pretask) + if tqdm_kwargs: + tqdm_class = partial(tqdm_class, **tqdm_kwargs) + self.tqdm_class = tqdm_class + + def _start_state(self, _, state): + self.pbar = self.tqdm_class(total=sum( + len(state[k]) for k in ['ready', 'waiting', 'running', 'finished'])) + + def _posttask(self, *_, **__): + self.pbar.update() + + def _finish(self, *_, **__): + self.pbar.close() + + def display(self): + """Displays in the current cell in Notebooks.""" + container = getattr(self.bar, 'container', None) + if container is None: + return + from .notebook import display + display(container) diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/gui.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/gui.py new file mode 100644 index 0000000000000000000000000000000000000000..4612701d2a4dd7ab0175f3f8924c9d600d9a7220 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/gui.py @@ -0,0 +1,191 @@ +""" +Matplotlib GUI progressbar decorator for iterators. + +Usage: +>>> from tqdm.gui import trange, tqdm +>>> for i in trange(10): +... ... +""" +# future division is important to divide integers and get as +# a result precise floating numbers (instead of truncated int) +from __future__ import absolute_import, division + +import re +from warnings import warn + +# to inherit from the tqdm class +from .std import TqdmExperimentalWarning +from .std import tqdm as std_tqdm +# import compatibility functions and utilities +from .utils import _range + +__author__ = {"github.com/": ["casperdcl", "lrq3000"]} +__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange'] + + +class tqdm_gui(std_tqdm): # pragma: no cover + """Experimental Matplotlib GUI version of tqdm!""" + # TODO: @classmethod: write() on GUI? 
+ def __init__(self, *args, **kwargs): + from collections import deque + + import matplotlib as mpl + import matplotlib.pyplot as plt + kwargs = kwargs.copy() + kwargs['gui'] = True + colour = kwargs.pop('colour', 'g') + super(tqdm_gui, self).__init__(*args, **kwargs) + + if self.disable: + return + + warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2) + self.mpl = mpl + self.plt = plt + + # Remember if external environment uses toolbars + self.toolbar = self.mpl.rcParams['toolbar'] + self.mpl.rcParams['toolbar'] = 'None' + + self.mininterval = max(self.mininterval, 0.5) + self.fig, ax = plt.subplots(figsize=(9, 2.2)) + # self.fig.subplots_adjust(bottom=0.2) + total = self.__len__() # avoids TypeError on None #971 + if total is not None: + self.xdata = [] + self.ydata = [] + self.zdata = [] + else: + self.xdata = deque([]) + self.ydata = deque([]) + self.zdata = deque([]) + self.line1, = ax.plot(self.xdata, self.ydata, color='b') + self.line2, = ax.plot(self.xdata, self.zdata, color='k') + ax.set_ylim(0, 0.001) + if total is not None: + ax.set_xlim(0, 100) + ax.set_xlabel("percent") + self.fig.legend((self.line1, self.line2), ("cur", "est"), + loc='center right') + # progressbar + self.hspan = plt.axhspan(0, 0.001, xmin=0, xmax=0, color=colour) + else: + # ax.set_xlim(-60, 0) + ax.set_xlim(0, 60) + ax.invert_xaxis() + ax.set_xlabel("seconds") + ax.legend(("cur", "est"), loc='lower left') + ax.grid() + # ax.set_xlabel('seconds') + ax.set_ylabel((self.unit if self.unit else "it") + "/s") + if self.unit_scale: + plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) + ax.yaxis.get_offset_text().set_x(-0.15) + + # Remember if external environment is interactive + self.wasion = plt.isinteractive() + plt.ion() + self.ax = ax + + def close(self): + if self.disable: + return + + self.disable = True + + with self.get_lock(): + self._instances.remove(self) + + # Restore toolbars + self.mpl.rcParams['toolbar'] = self.toolbar + # Return to non-interactive mode + if not self.wasion: + self.plt.ioff() + if self.leave: + self.display() + else: + self.plt.close(self.fig) + + def clear(self, *_, **__): + pass + + def display(self, *_, **__): + n = self.n + cur_t = self._time() + elapsed = cur_t - self.start_t + delta_it = n - self.last_print_n + delta_t = cur_t - self.last_print_t + + # Inline due to multiple calls + total = self.total + xdata = self.xdata + ydata = self.ydata + zdata = self.zdata + ax = self.ax + line1 = self.line1 + line2 = self.line2 + # instantaneous rate + y = delta_it / delta_t + # overall rate + z = n / elapsed + # update line data + xdata.append(n * 100.0 / total if total else cur_t) + ydata.append(y) + zdata.append(z) + + # Discard old values + # xmin, xmax = ax.get_xlim() + # if (not total) and elapsed > xmin * 1.1: + if (not total) and elapsed > 66: + xdata.popleft() + ydata.popleft() + zdata.popleft() + + ymin, ymax = ax.get_ylim() + if y > ymax or z > ymax: + ymax = 1.1 * y + ax.set_ylim(ymin, ymax) + ax.figure.canvas.draw() + + if total: + line1.set_data(xdata, ydata) + line2.set_data(xdata, zdata) + try: + poly_lims = self.hspan.get_xy() + except AttributeError: + self.hspan = self.plt.axhspan(0, 0.001, xmin=0, xmax=0, color='g') + poly_lims = self.hspan.get_xy() + poly_lims[0, 1] = ymin + poly_lims[1, 1] = ymax + poly_lims[2] = [n / total, ymax] + poly_lims[3] = [poly_lims[2, 0], ymin] + if len(poly_lims) > 4: + poly_lims[4, 1] = ymin + self.hspan.set_xy(poly_lims) + else: + t_ago = [cur_t - i for i in xdata] + line1.set_data(t_ago, ydata) + 
line2.set_data(t_ago, zdata) + + d = self.format_dict + # remove {bar} + d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace( + "{bar}", "<bar/>") + msg = self.format_meter(**d) + if '<bar/>' in msg: + msg = "".join(re.split(r'\|?<bar/>\|?', msg, 1)) + ax.set_title(msg, fontname="DejaVu Sans Mono", fontsize=11) + self.plt.pause(1e-9) + + +def tgrange(*args, **kwargs): + """ + A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`. + On Python3+, `range` is used instead of `xrange`. + """ + return tqdm_gui(_range(*args), **kwargs) + + +# Aliases
tqdm = tqdm_gui +trange = tgrange diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/keras.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/keras.py new file mode 100644 index 0000000000000000000000000000000000000000..523e62e94753523f52ebc0a704240c4095321e44 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/keras.py @@ -0,0 +1,124 @@ +from __future__ import absolute_import, division + +from copy import copy +from functools import partial + +from .auto import tqdm as tqdm_auto + +try: + import keras +except (ImportError, AttributeError) as e: + try: + from tensorflow import keras + except ImportError: + raise e +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['TqdmCallback'] + + +class TqdmCallback(keras.callbacks.Callback): + """Keras callback for epoch and batch progress.""" + @staticmethod + def bar2callback(bar, pop=None, delta=(lambda logs: 1)): + def callback(_, logs=None): + n = delta(logs) + if logs: + if pop: + logs = copy(logs) + [logs.pop(i, 0) for i in pop] + bar.set_postfix(logs, refresh=False) + bar.update(n) + + return callback + + def __init__(self, epochs=None, data_size=None, batch_size=None, verbose=1, + tqdm_class=tqdm_auto, **tqdm_kwargs): + """ + Parameters + ---------- + epochs : int, optional + data_size : int, optional + Number of training pairs. + batch_size : int, optional + Number of training pairs per batch. + verbose : int + 0: epoch, 1: batch (transient), 2: batch. [default: 1]. + Will be set to `0` unless both `data_size` and `batch_size` + are given. + tqdm_class : optional + `tqdm` class to use for bars [default: `tqdm.auto.tqdm`]. + tqdm_kwargs : optional + Any other arguments used for all bars.
+ """ + if tqdm_kwargs: + tqdm_class = partial(tqdm_class, **tqdm_kwargs) + self.tqdm_class = tqdm_class + self.epoch_bar = tqdm_class(total=epochs, unit='epoch') + self.on_epoch_end = self.bar2callback(self.epoch_bar) + if data_size and batch_size: + self.batches = batches = (data_size + batch_size - 1) // batch_size + else: + self.batches = batches = None + self.verbose = verbose + if verbose == 1: + self.batch_bar = tqdm_class(total=batches, unit='batch', leave=False) + self.on_batch_end = self.bar2callback( + self.batch_bar, pop=['batch', 'size'], + delta=lambda logs: logs.get('size', 1)) + + def on_train_begin(self, *_, **__): + params = self.params.get + auto_total = params('epochs', params('nb_epoch', None)) + if auto_total is not None and auto_total != self.epoch_bar.total: + self.epoch_bar.reset(total=auto_total) + + def on_epoch_begin(self, epoch, *_, **__): + if self.epoch_bar.n < epoch: + ebar = self.epoch_bar + ebar.n = ebar.last_print_n = ebar.initial = epoch + if self.verbose: + params = self.params.get + total = params('samples', params( + 'nb_sample', params('steps', None))) or self.batches + if self.verbose == 2: + if hasattr(self, 'batch_bar'): + self.batch_bar.close() + self.batch_bar = self.tqdm_class( + total=total, unit='batch', leave=True, + unit_scale=1 / (params('batch_size', 1) or 1)) + self.on_batch_end = self.bar2callback( + self.batch_bar, pop=['batch', 'size'], + delta=lambda logs: logs.get('size', 1)) + elif self.verbose == 1: + self.batch_bar.unit_scale = 1 / (params('batch_size', 1) or 1) + self.batch_bar.reset(total=total) + else: + raise KeyError('Unknown verbosity') + + def on_train_end(self, *_, **__): + if self.verbose: + self.batch_bar.close() + self.epoch_bar.close() + + def display(self): + """Displays in the current cell in Notebooks.""" + container = getattr(self.epoch_bar, 'container', None) + if container is None: + return + from .notebook import display + display(container) + batch_bar = getattr(self, 'batch_bar', None) + if batch_bar is not None: + display(batch_bar.container) + + @staticmethod + def _implements_train_batch_hooks(): + return True + + @staticmethod + def _implements_test_batch_hooks(): + return True + + @staticmethod + def _implements_predict_batch_hooks(): + return True diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/tk.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/tk.py new file mode 100644 index 0000000000000000000000000000000000000000..92adb51db3b1c5834ee610532de1403a70aed6a3 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/tk.py @@ -0,0 +1,207 @@ +""" +Tkinter GUI progressbar decorator for iterators. + +Usage: +>>> from tqdm.tk import trange, tqdm +>>> for i in trange(10): +... ... +""" +from __future__ import absolute_import, division + +import re +import sys +from warnings import warn + +try: + import tkinter + import tkinter.ttk as ttk +except ImportError: + import Tkinter as tkinter + import ttk as ttk + +from .std import TqdmExperimentalWarning, TqdmWarning +from .std import tqdm as std_tqdm +from .utils import _range + +__author__ = {"github.com/": ["richardsheridan", "casperdcl"]} +__all__ = ['tqdm_tk', 'ttkrange', 'tqdm', 'trange'] + + +class tqdm_tk(std_tqdm): # pragma: no cover + """ + Experimental Tkinter GUI version of tqdm! + + Note: Window interactivity suffers if `tqdm_tk` is not running within + a Tkinter mainloop and values are generated infrequently. 
In this case, + consider calling `tqdm_tk.refresh()` frequently in the Tk thread. + """ + + # TODO: @classmethod: write()? + + def __init__(self, *args, **kwargs): + """ + This class accepts the following parameters *in addition* to + the parameters accepted by `tqdm`. + + Parameters + ---------- + grab : bool, optional + Grab the input across all windows of the process. + tk_parent : `tkinter.Wm`, optional + Parent Tk window. + cancel_callback : Callable, optional + Create a cancel button and set `cancel_callback` to be called + when the cancel or window close button is clicked. + """ + kwargs = kwargs.copy() + kwargs['gui'] = True + # convert disable = None to False + kwargs['disable'] = bool(kwargs.get('disable', False)) + self._warn_leave = 'leave' in kwargs + grab = kwargs.pop('grab', False) + tk_parent = kwargs.pop('tk_parent', None) + self._cancel_callback = kwargs.pop('cancel_callback', None) + super(tqdm_tk, self).__init__(*args, **kwargs) + + if self.disable: + return + + if tk_parent is None: # Discover parent widget + try: + tk_parent = tkinter._default_root + except AttributeError: + raise AttributeError( + "`tk_parent` required when using `tkinter.NoDefaultRoot()`") + if tk_parent is None: # use new default root window as display + self._tk_window = tkinter.Tk() + else: # some other windows already exist + self._tk_window = tkinter.Toplevel() + else: + self._tk_window = tkinter.Toplevel(tk_parent) + + warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2) + self._tk_dispatching = self._tk_dispatching_helper() + + self._tk_window.protocol("WM_DELETE_WINDOW", self.cancel) + self._tk_window.wm_title(self.desc) + self._tk_window.wm_attributes("-topmost", 1) + self._tk_window.after(0, lambda: self._tk_window.wm_attributes("-topmost", 0)) + self._tk_n_var = tkinter.DoubleVar(self._tk_window, value=0) + self._tk_text_var = tkinter.StringVar(self._tk_window) + pbar_frame = ttk.Frame(self._tk_window, padding=5) + pbar_frame.pack() + _tk_label = ttk.Label(pbar_frame, textvariable=self._tk_text_var, + wraplength=600, anchor="center", justify="center") + _tk_label.pack() + self._tk_pbar = ttk.Progressbar( + pbar_frame, variable=self._tk_n_var, length=450) + if self.total is not None: + self._tk_pbar.configure(maximum=self.total) + else: + self._tk_pbar.configure(mode="indeterminate") + self._tk_pbar.pack() + if self._cancel_callback is not None: + _tk_button = ttk.Button(pbar_frame, text="Cancel", command=self.cancel) + _tk_button.pack() + if grab: + self._tk_window.grab_set() + + def close(self): + if self.disable: + return + + self.disable = True + + with self.get_lock(): + self._instances.remove(self) + + def _close(): + self._tk_window.after('idle', self._tk_window.destroy) + if not self._tk_dispatching: + self._tk_window.update() + + self._tk_window.protocol("WM_DELETE_WINDOW", _close) + + # if leave is set but we are self-dispatching, the left window is + # totally unresponsive unless the user manually dispatches + if not self.leave: + _close() + elif not self._tk_dispatching: + if self._warn_leave: + warn("leave flag ignored if not in tkinter mainloop", + TqdmWarning, stacklevel=2) + _close() + + def clear(self, *_, **__): + pass + + def display(self, *_, **__): + self._tk_n_var.set(self.n) + d = self.format_dict + # remove {bar} + d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace( + "{bar}", "<bar/>") + msg = self.format_meter(**d) + if '<bar/>' in msg: + msg = "".join(re.split(r'\|?<bar/>\|?', msg, 1)) + self._tk_text_var.set(msg) + if not self._tk_dispatching: + 
self._tk_window.update() + + def set_description(self, desc=None, refresh=True): + self.set_description_str(desc, refresh) + + def set_description_str(self, desc=None, refresh=True): + self.desc = desc + if not self.disable: + self._tk_window.wm_title(desc) + if refresh and not self._tk_dispatching: + self._tk_window.update() + + def cancel(self): + """ + `cancel_callback()` followed by `close()` + when close/cancel buttons clicked. + """ + if self._cancel_callback is not None: + self._cancel_callback() + self.close() + + def reset(self, total=None): + """ + Resets to 0 iterations for repeated use. + + Parameters + ---------- + total : int or float, optional. Total to use for the new bar. + """ + if hasattr(self, '_tk_pbar'): + if total is None: + self._tk_pbar.configure(maximum=100, mode="indeterminate") + else: + self._tk_pbar.configure(maximum=total, mode="determinate") + super(tqdm_tk, self).reset(total=total) + + @staticmethod + def _tk_dispatching_helper(): + """determine if Tkinter mainloop is dispatching events""" + codes = {tkinter.mainloop.__code__, tkinter.Misc.mainloop.__code__} + for frame in sys._current_frames().values(): + while frame: + if frame.f_code in codes: + return True + frame = frame.f_back + return False + + +def ttkrange(*args, **kwargs): + """ + A shortcut for `tqdm.tk.tqdm(xrange(*args), **kwargs)`. + On Python3+, `range` is used instead of `xrange`. + """ + return tqdm_tk(_range(*args), **kwargs) + + +# Aliases +tqdm = tqdm_tk +trange = ttkrange diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/tqdm.1 b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/tqdm.1 new file mode 100644 index 0000000000000000000000000000000000000000..0533198ca51425532ac1e5e96bf7af978081a9b8 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/tqdm.1 @@ -0,0 +1,316 @@ +.\" Automatically generated by Pandoc 1.19.2 +.\" +.TH "TQDM" "1" "2015\-2021" "tqdm User Manuals" "" +.hy +.SH NAME +.PP +tqdm \- fast, extensible progress bar for Python and CLI +.SH SYNOPSIS +.PP +tqdm [\f[I]options\f[]] +.SH DESCRIPTION +.PP +See . +Can be used as a pipe: +.IP +.nf +\f[C] +$\ #\ count\ lines\ of\ code +$\ cat\ *.py\ |\ tqdm\ |\ wc\ \-l +327it\ [00:00,\ 981773.38it/s] +327 + +$\ #\ find\ all\ files +$\ find\ .\ \-name\ "*.py"\ |\ tqdm\ |\ wc\ \-l +432it\ [00:00,\ 833842.30it/s] +432 + +#\ ...\ and\ more\ info +$\ find\ .\ \-name\ \[aq]*.py\[aq]\ \-exec\ wc\ \-l\ \\{}\ \\;\ \\ +\ \ |\ tqdm\ \-\-total\ 432\ \-\-unit\ files\ \-\-desc\ counting\ \\ +\ \ |\ awk\ \[aq]{\ sum\ +=\ $1\ };\ END\ {\ print\ sum\ }\[aq] +counting:\ 100%|█████████|\ 432/432\ [00:00<00:00,\ 794361.83files/s] +131998 +\f[] +.fi +.SH OPTIONS +.TP +.B \-h, \-\-help +Print this help and exit. +.RS +.RE +.TP +.B \-v, \-\-version +Print version and exit. +.RS +.RE +.TP +.B \-\-desc=\f[I]desc\f[] +str, optional. +Prefix for the progressbar. +.RS +.RE +.TP +.B \-\-total=\f[I]total\f[] +int or float, optional. +The number of expected iterations. +If unspecified, len(iterable) is used if possible. +If float("inf") or as a last resort, only basic progress statistics are +displayed (no ETA, no progressbar). +If \f[C]gui\f[] is True and this parameter needs subsequent updating, +specify an initial arbitrary large positive number, e.g. +9e9. +.RS +.RE +.TP +.B \-\-leave +bool, optional. +If [default: True], keeps all traces of the progressbar upon termination +of iteration. +If \f[C]None\f[], will leave only if \f[C]position\f[] is \f[C]0\f[]. 
+.RS +.RE +.TP +.B \-\-ncols=\f[I]ncols\f[] +int, optional. +The width of the entire output message. +If specified, dynamically resizes the progressbar to stay within this +bound. +If unspecified, attempts to use environment width. +The fallback is a meter width of 10 and no limit for the counter and +statistics. +If 0, will not print any meter (only stats). +.RS +.RE +.TP +.B \-\-mininterval=\f[I]mininterval\f[] +float, optional. +Minimum progress display update interval [default: 0.1] seconds. +.RS +.RE +.TP +.B \-\-maxinterval=\f[I]maxinterval\f[] +float, optional. +Maximum progress display update interval [default: 10] seconds. +Automatically adjusts \f[C]miniters\f[] to correspond to +\f[C]mininterval\f[] after long display update lag. +Only works if \f[C]dynamic_miniters\f[] or monitor thread is enabled. +.RS +.RE +.TP +.B \-\-miniters=\f[I]miniters\f[] +int or float, optional. +Minimum progress display update interval, in iterations. +If 0 and \f[C]dynamic_miniters\f[], will automatically adjust to equal +\f[C]mininterval\f[] (more CPU efficient, good for tight loops). +If > 0, will skip display of specified number of iterations. +Tweak this and \f[C]mininterval\f[] to get very efficient loops. +If your progress is erratic with both fast and slow iterations (network, +skipping items, etc) you should set miniters=1. +.RS +.RE +.TP +.B \-\-ascii=\f[I]ascii\f[] +bool or str, optional. +If unspecified or False, use unicode (smooth blocks) to fill the meter. +The fallback is to use ASCII characters " 123456789#". +.RS +.RE +.TP +.B \-\-disable +bool, optional. +Whether to disable the entire progressbar wrapper [default: False]. +If set to None, disable on non\-TTY. +.RS +.RE +.TP +.B \-\-unit=\f[I]unit\f[] +str, optional. +String that will be used to define the unit of each iteration [default: +it]. +.RS +.RE +.TP +.B \-\-unit\-scale=\f[I]unit_scale\f[] +bool or int or float, optional. +If 1 or True, the number of iterations will be reduced/scaled +automatically and a metric prefix following the International System of +Units standard will be added (kilo, mega, etc.) [default: False]. +If any other non\-zero number, will scale \f[C]total\f[] and \f[C]n\f[]. +.RS +.RE +.TP +.B \-\-dynamic\-ncols +bool, optional. +If set, constantly alters \f[C]ncols\f[] and \f[C]nrows\f[] to the +environment (allowing for window resizes) [default: False]. +.RS +.RE +.TP +.B \-\-smoothing=\f[I]smoothing\f[] +float, optional. +Exponential moving average smoothing factor for speed estimates (ignored +in GUI mode). +Ranges from 0 (average speed) to 1 (current/instantaneous speed) +[default: 0.3]. +.RS +.RE +.TP +.B \-\-bar\-format=\f[I]bar_format\f[] +str, optional. +Specify a custom bar string formatting. +May impact performance. +[default: \[aq]{l_bar}{bar}{r_bar}\[aq]], where l_bar=\[aq]{desc}: +{percentage:3.0f}%|\[aq] and r_bar=\[aq]| {n_fmt}/{total_fmt} +[{elapsed}<{remaining}, \[aq] \[aq]{rate_fmt}{postfix}]\[aq] Possible +vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, +elapsed, elapsed_s, ncols, nrows, desc, unit, rate, rate_fmt, +rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, +unit_divisor, remaining, remaining_s, eta. +Note that a trailing ": " is automatically removed after {desc} if the +latter is empty. +.RS +.RE +.TP +.B \-\-initial=\f[I]initial\f[] +int or float, optional. +The initial counter value. +Useful when restarting a progress bar [default: 0]. 
+If using float, consider specifying \f[C]{n:.3f}\f[] or similar in +\f[C]bar_format\f[], or specifying \f[C]unit_scale\f[]. +.RS +.RE +.TP +.B \-\-position=\f[I]position\f[] +int, optional. +Specify the line offset to print this bar (starting from 0) Automatic if +unspecified. +Useful to manage multiple bars at once (eg, from threads). +.RS +.RE +.TP +.B \-\-postfix=\f[I]postfix\f[] +dict or *, optional. +Specify additional stats to display at the end of the bar. +Calls \f[C]set_postfix(**postfix)\f[] if possible (dict). +.RS +.RE +.TP +.B \-\-unit\-divisor=\f[I]unit_divisor\f[] +float, optional. +[default: 1000], ignored unless \f[C]unit_scale\f[] is True. +.RS +.RE +.TP +.B \-\-write\-bytes +bool, optional. +If (default: None) and \f[C]file\f[] is unspecified, bytes will be +written in Python 2. +If \f[C]True\f[] will also write bytes. +In all other cases will default to unicode. +.RS +.RE +.TP +.B \-\-lock\-args=\f[I]lock_args\f[] +tuple, optional. +Passed to \f[C]refresh\f[] for intermediate output (initialisation, +iterating, and updating). +.RS +.RE +.TP +.B \-\-nrows=\f[I]nrows\f[] +int, optional. +The screen height. +If specified, hides nested bars outside this bound. +If unspecified, attempts to use environment height. +The fallback is 20. +.RS +.RE +.TP +.B \-\-colour=\f[I]colour\f[] +str, optional. +Bar colour (e.g. +\[aq]green\[aq], \[aq]#00ff00\[aq]). +.RS +.RE +.TP +.B \-\-delay=\f[I]delay\f[] +float, optional. +Don\[aq]t display until [default: 0] seconds have elapsed. +.RS +.RE +.TP +.B \-\-delim=\f[I]delim\f[] +chr, optional. +Delimiting character [default: \[aq]\\n\[aq]]. +Use \[aq]\\0\[aq] for null. +N.B.: on Windows systems, Python converts \[aq]\\n\[aq] to +\[aq]\\r\\n\[aq]. +.RS +.RE +.TP +.B \-\-buf\-size=\f[I]buf_size\f[] +int, optional. +String buffer size in bytes [default: 256] used when \f[C]delim\f[] is +specified. +.RS +.RE +.TP +.B \-\-bytes +bool, optional. +If true, will count bytes, ignore \f[C]delim\f[], and default +\f[C]unit_scale\f[] to True, \f[C]unit_divisor\f[] to 1024, and +\f[C]unit\f[] to \[aq]B\[aq]. +.RS +.RE +.TP +.B \-\-tee +bool, optional. +If true, passes \f[C]stdin\f[] to both \f[C]stderr\f[] and +\f[C]stdout\f[]. +.RS +.RE +.TP +.B \-\-update +bool, optional. +If true, will treat input as newly elapsed iterations, i.e. +numbers to pass to \f[C]update()\f[]. +Note that this is slow (~2e5 it/s) since every input must be decoded as +a number. +.RS +.RE +.TP +.B \-\-update\-to +bool, optional. +If true, will treat input as total elapsed iterations, i.e. +numbers to assign to \f[C]self.n\f[]. +Note that this is slow (~2e5 it/s) since every input must be decoded as +a number. +.RS +.RE +.TP +.B \-\-null +bool, optional. +If true, will discard input (no stdout). +.RS +.RE +.TP +.B \-\-manpath=\f[I]manpath\f[] +str, optional. +Directory in which to install tqdm man pages. +.RS +.RE +.TP +.B \-\-comppath=\f[I]comppath\f[] +str, optional. +Directory in which to place tqdm completion. +.RS +.RE +.TP +.B \-\-log=\f[I]log\f[] +str, optional. +CRITICAL|FATAL|ERROR|WARN(ING)|[default: \[aq]INFO\[aq]]|DEBUG|NOTSET. +.RS +.RE +.SH AUTHORS +tqdm developers . 
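The options documented in tqdm.1 above can be exercised from Python as well as the shell; a small sketch piping bytes through the `tqdm` executable (not part of the vendored files; assumes a Unix-like system with `tqdm` on PATH, and the 1 MiB size is arbitrary):

import subprocess

src = subprocess.Popen(["head", "-c", "1048576", "/dev/urandom"], stdout=subprocess.PIPE)
meter = subprocess.Popen(["tqdm", "--bytes", "--total", "1048576", "--desc", "copying"],
                         stdin=src.stdout, stdout=subprocess.DEVNULL)
src.stdout.close()  # let `head` receive SIGPIPE if `tqdm` exits first
meter.wait()

As the `--bytes` entry above notes, this ignores `delim` and defaults `unit`, `unit_scale` and `unit_divisor` to byte-friendly values, so the bar reports progress in KiB/MiB rather than raw line counts; the bar itself goes to stderr, so discarding stdout is safe here.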
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/utils.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0632b8dd0536069417da6cb4e130422217941148 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/utils.py @@ -0,0 +1,354 @@ +""" +General helpers required for `tqdm.std`. +""" +import os +import re +import sys +from functools import wraps +from warnings import warn +from weakref import proxy + +# py2/3 compat +try: + _range = xrange +except NameError: + _range = range + +try: + _unich = unichr +except NameError: + _unich = chr + +try: + _unicode = unicode +except NameError: + _unicode = str + +try: + _basestring = basestring +except NameError: + _basestring = str + +CUR_OS = sys.platform +IS_WIN = any(CUR_OS.startswith(i) for i in ['win32', 'cygwin']) +IS_NIX = any(CUR_OS.startswith(i) for i in ['aix', 'linux', 'darwin']) +RE_ANSI = re.compile(r"\x1b\[[;\d]*[A-Za-z]") + +try: + if IS_WIN: + import colorama + else: + raise ImportError +except ImportError: + colorama = None +else: + try: + colorama.init(strip=False) + except TypeError: + colorama.init() + + +class FormatReplace(object): + """ + >>> a = FormatReplace('something') + >>> "{:5d}".format(a) + 'something' + """ # NOQA: P102 + def __init__(self, replace=''): + self.replace = replace + self.format_called = 0 + + def __format__(self, _): + self.format_called += 1 + return self.replace + + +class Comparable(object): + """Assumes child has self._comparable attr/@property""" + def __lt__(self, other): + return self._comparable < other._comparable + + def __le__(self, other): + return (self < other) or (self == other) + + def __eq__(self, other): + return self._comparable == other._comparable + + def __ne__(self, other): + return not self == other + + def __gt__(self, other): + return not self <= other + + def __ge__(self, other): + return not self < other + + +class ObjectWrapper(object): + def __getattr__(self, name): + return getattr(self._wrapped, name) + + def __setattr__(self, name, value): + return setattr(self._wrapped, name, value) + + def wrapper_getattr(self, name): + """Actual `self.getattr` rather than self._wrapped.getattr""" + try: + return object.__getattr__(self, name) + except AttributeError: # py2 + return getattr(self, name) + + def wrapper_setattr(self, name, value): + """Actual `self.setattr` rather than self._wrapped.setattr""" + return object.__setattr__(self, name, value) + + def __init__(self, wrapped): + """ + Thin wrapper around a given object + """ + self.wrapper_setattr('_wrapped', wrapped) + + +class SimpleTextIOWrapper(ObjectWrapper): + """ + Change only `.write()` of the wrapped object by encoding the passed + value and passing the result to the wrapped object's `.write()` method. + """ + # pylint: disable=too-few-public-methods + def __init__(self, wrapped, encoding): + super(SimpleTextIOWrapper, self).__init__(wrapped) + self.wrapper_setattr('encoding', encoding) + + def write(self, s): + """ + Encode `s` and pass to the wrapped object's `.write()` method. + """ + return self._wrapped.write(s.encode(self.wrapper_getattr('encoding'))) + + def __eq__(self, other): + return self._wrapped == getattr(other, '_wrapped', other) + + +class DisableOnWriteError(ObjectWrapper): + """ + Disable the given `tqdm_instance` upon `write()` or `flush()` errors. 
+ """ + @staticmethod + def disable_on_exception(tqdm_instance, func): + """ + Quietly set `tqdm_instance.miniters=inf` if `func` raises `errno=5`. + """ + tqdm_instance = proxy(tqdm_instance) + + def inner(*args, **kwargs): + try: + return func(*args, **kwargs) + except OSError as e: + if e.errno != 5: + raise + try: + tqdm_instance.miniters = float('inf') + except ReferenceError: + pass + except ValueError as e: + if 'closed' not in str(e): + raise + try: + tqdm_instance.miniters = float('inf') + except ReferenceError: + pass + return inner + + def __init__(self, wrapped, tqdm_instance): + super(DisableOnWriteError, self).__init__(wrapped) + if hasattr(wrapped, 'write'): + self.wrapper_setattr( + 'write', self.disable_on_exception(tqdm_instance, wrapped.write)) + if hasattr(wrapped, 'flush'): + self.wrapper_setattr( + 'flush', self.disable_on_exception(tqdm_instance, wrapped.flush)) + + def __eq__(self, other): + return self._wrapped == getattr(other, '_wrapped', other) + + +class CallbackIOWrapper(ObjectWrapper): + def __init__(self, callback, stream, method="read"): + """ + Wrap a given `file`-like object's `read()` or `write()` to report + lengths to the given `callback` + """ + super(CallbackIOWrapper, self).__init__(stream) + func = getattr(stream, method) + if method == "write": + @wraps(func) + def write(data, *args, **kwargs): + res = func(data, *args, **kwargs) + callback(len(data)) + return res + self.wrapper_setattr('write', write) + elif method == "read": + @wraps(func) + def read(*args, **kwargs): + data = func(*args, **kwargs) + callback(len(data)) + return data + self.wrapper_setattr('read', read) + else: + raise KeyError("Can only wrap read/write methods") + + +def _is_utf(encoding): + try: + u'\u2588\u2589'.encode(encoding) + except UnicodeEncodeError: + return False + except Exception: + try: + return encoding.lower().startswith('utf-') or ('U8' == encoding) + except Exception: + return False + else: + return True + + +def _supports_unicode(fp): + try: + return _is_utf(fp.encoding) + except AttributeError: + return False + + +def _is_ascii(s): + if isinstance(s, str): + for c in s: + if ord(c) > 255: + return False + return True + return _supports_unicode(s) + + +def _screen_shape_wrapper(): # pragma: no cover + """ + Return a function which returns console dimensions (width, height). + Supported: linux, osx, windows, cygwin. 
+ """ + _screen_shape = None + if IS_WIN: + _screen_shape = _screen_shape_windows + if _screen_shape is None: + _screen_shape = _screen_shape_tput + if IS_NIX: + _screen_shape = _screen_shape_linux + return _screen_shape + + +def _screen_shape_windows(fp): # pragma: no cover + try: + import struct + from ctypes import create_string_buffer, windll + from sys import stdin, stdout + + io_handle = -12 # assume stderr + if fp == stdin: + io_handle = -10 + elif fp == stdout: + io_handle = -11 + + h = windll.kernel32.GetStdHandle(io_handle) + csbi = create_string_buffer(22) + res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) + if res: + (_bufx, _bufy, _curx, _cury, _wattr, left, top, right, bottom, + _maxx, _maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw) + return right - left, bottom - top # +1 + except Exception: # nosec + pass + return None, None + + +def _screen_shape_tput(*_): # pragma: no cover + """cygwin xterm (windows)""" + try: + import shlex + from subprocess import check_call # nosec + return [int(check_call(shlex.split('tput ' + i))) - 1 + for i in ('cols', 'lines')] + except Exception: # nosec + pass + return None, None + + +def _screen_shape_linux(fp): # pragma: no cover + + try: + from array import array + from fcntl import ioctl + from termios import TIOCGWINSZ + except ImportError: + return None, None + else: + try: + rows, cols = array('h', ioctl(fp, TIOCGWINSZ, '\0' * 8))[:2] + return cols, rows + except Exception: + try: + return [int(os.environ[i]) - 1 for i in ("COLUMNS", "LINES")] + except (KeyError, ValueError): + return None, None + + +def _environ_cols_wrapper(): # pragma: no cover + """ + Return a function which returns console width. + Supported: linux, osx, windows, cygwin. + """ + warn("Use `_screen_shape_wrapper()(file)[0]` instead of" + " `_environ_cols_wrapper()(file)`", DeprecationWarning, stacklevel=2) + shape = _screen_shape_wrapper() + if not shape: + return None + + @wraps(shape) + def inner(fp): + return shape(fp)[0] + + return inner + + +def _term_move_up(): # pragma: no cover + return '' if (os.name == 'nt') and (colorama is None) else '\x1b[A' + + +try: + # TODO consider using wcswidth third-party package for 0-width characters + from unicodedata import east_asian_width +except ImportError: + _text_width = len +else: + def _text_width(s): + return sum(2 if east_asian_width(ch) in 'FW' else 1 for ch in _unicode(s)) + + +def disp_len(data): + """ + Returns the real on-screen length of a string which may contain + ANSI control codes and wide chars. + """ + return _text_width(RE_ANSI.sub('', data)) + + +def disp_trim(data, length): + """ + Trim a string which may contain ANSI control characters. + """ + if len(data) == disp_len(data): + return data[:length] + + ansi_present = bool(RE_ANSI.search(data)) + while disp_len(data) > length: # carefully delete one char at a time + data = data[:-1] + if ansi_present and bool(RE_ANSI.search(data)): + # assume ANSI reset is required + return data if data.endswith("\033[0m") else data + "\033[0m" + return data diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/version.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/version.py new file mode 100644 index 0000000000000000000000000000000000000000..11cbaea79d1f4f46f9ae4bea542d7c66ded96e34 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/tqdm/version.py @@ -0,0 +1,9 @@ +"""`tqdm` version detector. 
Precedence: installed dist, git, 'UNKNOWN'.""" +try: + from ._dist_ver import __version__ +except ImportError: + try: + from setuptools_scm import get_version + __version__ = get_version(root='..', relative_to=__file__) + except (ImportError, LookupError): + __version__ = "UNKNOWN" diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/__init__.pxd b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/__init__.pxd new file mode 100644 index 0000000000000000000000000000000000000000..a01fc19d9af8551ebbc05dd04c60c9fffca19cc5 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/__init__.pxd @@ -0,0 +1,4 @@ +from zmq.backend.cython.context cimport Context +from zmq.backend.cython.socket cimport Socket +from zmq.backend.cython.message cimport Frame +from zmq.backend.cython cimport libzmq diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/__init__.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8be01d79ace450af7f46616a857931bf45e3044f --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/__init__.py @@ -0,0 +1,73 @@ +"""Python bindings for 0MQ.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +# load bundled libzmq, if there is one: +def _load_libzmq(): + """load bundled libzmq if there is one""" + import sys, platform, os + dlopen = hasattr(sys, 'getdlopenflags') # unix-only + # RTLD flags are added to os in Python 3 + # get values from os because ctypes values are WRONG on pypy + PYPY = platform.python_implementation().lower() == 'pypy' + + if dlopen: + import ctypes + dlflags = sys.getdlopenflags() + # set RTLD_GLOBAL, unset RTLD_LOCAL + flags = ctypes.RTLD_GLOBAL | dlflags + # ctypes.RTLD_LOCAL is 0 on pypy, which is *wrong* + flags &= ~ getattr(os, 'RTLD_LOCAL', 4) + # pypy on darwin needs RTLD_LAZY for some reason + if PYPY and sys.platform == 'darwin': + flags |= getattr(os, 'RTLD_LAZY', 1) + flags &= ~ getattr(os, 'RTLD_NOW', 2) + sys.setdlopenflags(flags) + try: + from . import libzmq + except ImportError: + pass + else: + # store libzmq as zmq._libzmq for backward-compat + globals()['_libzmq'] = libzmq + if PYPY: + # should already have been imported above, so reimporting is as cheap as checking + import ctypes + # some versions of pypy (5.3 < ? < 5.8) needs explicit CDLL load for some reason, + # otherwise symbols won't be globally available + # do this unconditionally because it should be harmless (?) 
+ ctypes.CDLL(libzmq.__file__, ctypes.RTLD_GLOBAL) + finally: + if dlopen: + sys.setdlopenflags(dlflags) + +_load_libzmq() + + +# zmq top-level imports + +from zmq import backend +from zmq.backend import * +from zmq import sugar +from zmq.sugar import * + +def get_includes(): + """Return a list of directories to include for linking against pyzmq with cython.""" + from os.path import join, dirname, abspath, pardir, exists + base = dirname(__file__) + parent = abspath(join(base, pardir)) + includes = [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ] + if exists(join(parent, base, 'include')): + includes.append(join(parent, base, 'include')) + return includes + +def get_library_dirs(): + """Return a list of directories used to link against pyzmq's bundled libzmq.""" + from os.path import join, dirname, abspath, pardir + base = dirname(__file__) + parent = abspath(join(base, pardir)) + return [ join(parent, base) ] + +COPY_THRESHOLD = 65536 +__all__ = ['get_includes', 'COPY_THRESHOLD'] + sugar.__all__ + backend.__all__ diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/_future.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/_future.py new file mode 100644 index 0000000000000000000000000000000000000000..4d415ac19b520a7540736b8a0a12c28c0d7e865b --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/_future.py @@ -0,0 +1,545 @@ +"""Future-returning APIs for coroutines.""" + +# Copyright (c) PyZMQ Developers. +# Distributed under the terms of the Modified BSD License. + +from collections import namedtuple, deque +from itertools import chain + +from zmq import EVENTS, POLLOUT, POLLIN +import zmq as _zmq + +_FutureEvent = namedtuple('_FutureEvent', ('future', 'kind', 'kwargs', 'msg')) + +# These are incomplete classes and need a Mixin for compatibility with an eventloop +# defining the followig attributes: +# +# _Future +# _READ +# _WRITE +# _default_loop() + + +class _AsyncPoller(_zmq.Poller): + """Poller that returns a Future on poll, instead of blocking.""" + + def poll(self, timeout=-1): + """Return a Future for a poll event""" + future = self._Future() + if timeout == 0: + try: + result = super(_AsyncPoller, self).poll(0) + except Exception as e: + future.set_exception(e) + else: + future.set_result(result) + return future + + loop = self._default_loop() + + # register Future to be called as soon as any event is available on any socket + watcher = self._Future() + + # watch raw sockets: + raw_sockets = [] + def wake_raw(*args): + if not watcher.done(): + watcher.set_result(None) + + watcher.add_done_callback(lambda f: self._unwatch_raw_sockets(loop, *raw_sockets)) + + for socket, mask in self.sockets: + if isinstance(socket, _zmq.Socket): + if not isinstance(socket, self._socket_class): + # it's a blocking zmq.Socket, wrap it in async + socket = self._socket_class.from_socket(socket) + if mask & _zmq.POLLIN: + socket._add_recv_event('poll', future=watcher) + if mask & _zmq.POLLOUT: + socket._add_send_event('poll', future=watcher) + else: + raw_sockets.append(socket) + evt = 0 + if mask & _zmq.POLLIN: + evt |= self._READ + if mask & _zmq.POLLOUT: + evt |= self._WRITE + self._watch_raw_socket(loop, socket, evt, wake_raw) + + def on_poll_ready(f): + if future.done(): + return + if watcher.cancelled(): + try: + future.cancel() + except RuntimeError: + # RuntimeError may be called during teardown + pass + return + if watcher.exception(): + 
future.set_exception(watcher.exception()) + else: + try: + result = super(_AsyncPoller, self).poll(0) + except Exception as e: + future.set_exception(e) + else: + future.set_result(result) + watcher.add_done_callback(on_poll_ready) + + if timeout is not None and timeout > 0: + # schedule cancel to fire on poll timeout, if any + def trigger_timeout(): + if not watcher.done(): + watcher.set_result(None) + + timeout_handle = loop.call_later( + 1e-3 * timeout, + trigger_timeout + ) + def cancel_timeout(f): + if hasattr(timeout_handle, 'cancel'): + timeout_handle.cancel() + else: + loop.remove_timeout(timeout_handle) + future.add_done_callback(cancel_timeout) + + def cancel_watcher(f): + if not watcher.done(): + watcher.cancel() + future.add_done_callback(cancel_watcher) + + return future + + +class _AsyncSocket(_zmq.Socket): + + + # Warning : these class variables are only here to allow to call super().__setattr__. + # They be overridden at instance initialization and not shared in the whole class + _recv_futures = None + _send_futures = None + _state = 0 + _shadow_sock = None + _poller_class = _AsyncPoller + io_loop = None + _fd = None + + def __init__(self, context=None, socket_type=-1, io_loop=None, **kwargs): + if isinstance(context, _zmq.Socket): + context, from_socket = (None, context) + else: + from_socket = kwargs.pop('_from_socket', None) + if from_socket is not None: + super(_AsyncSocket, self).__init__(shadow=from_socket.underlying) + self._shadow_sock = from_socket + else: + super(_AsyncSocket, self).__init__(context, socket_type, **kwargs) + self._shadow_sock = _zmq.Socket.shadow(self.underlying) + self.io_loop = io_loop or self._default_loop() + self._recv_futures = deque() + self._send_futures = deque() + self._state = 0 + self._fd = self._shadow_sock.FD + self._init_io_state() + + @classmethod + def from_socket(cls, socket, io_loop=None): + """Create an async socket from an existing Socket""" + return cls(_from_socket=socket, io_loop=io_loop) + + def close(self, linger=None): + if not self.closed: + for event in list(chain(self._recv_futures, self._send_futures)): + if not event.future.done(): + try: + event.future.cancel() + except RuntimeError: + # RuntimeError may be called during teardown + pass + self._clear_io_state() + super(_AsyncSocket, self).close(linger=linger) + close.__doc__ = _zmq.Socket.close.__doc__ + + def get(self, key): + result = super(_AsyncSocket, self).get(key) + if key == EVENTS: + self._schedule_remaining_events(result) + return result + get.__doc__ = _zmq.Socket.get.__doc__ + + def recv_multipart(self, flags=0, copy=True, track=False): + """Receive a complete multipart zmq message. + + Returns a Future whose result will be a multipart message. + """ + return self._add_recv_event('recv_multipart', + dict(flags=flags, copy=copy, track=track) + ) + + def recv(self, flags=0, copy=True, track=False): + """Receive a single zmq frame. + + Returns a Future, whose result will be the received frame. + + Recommend using recv_multipart instead. + """ + return self._add_recv_event('recv', + dict(flags=flags, copy=copy, track=track) + ) + + def send_multipart(self, msg, flags=0, copy=True, track=False, **kwargs): + """Send a complete multipart zmq message. + + Returns a Future that resolves when sending is complete. + """ + kwargs['flags'] = flags + kwargs['copy'] = copy + kwargs['track'] = track + return self._add_send_event('send_multipart', msg=msg, kwargs=kwargs) + + def send(self, msg, flags=0, copy=True, track=False, **kwargs): + """Send a single zmq frame. 
+ + Returns a Future that resolves when sending is complete. + + Recommend using send_multipart instead. + """ + kwargs['flags'] = flags + kwargs['copy'] = copy + kwargs['track'] = track + kwargs.update(dict(flags=flags, copy=copy, track=track)) + return self._add_send_event('send', msg=msg, kwargs=kwargs) + + def _deserialize(self, recvd, load): + """Deserialize with Futures""" + f = self._Future() + def _chain(_): + """Chain result through serialization to recvd""" + if f.done(): + return + if recvd.exception(): + f.set_exception(recvd.exception()) + else: + buf = recvd.result() + try: + loaded = load(buf) + except Exception as e: + f.set_exception(e) + else: + f.set_result(loaded) + recvd.add_done_callback(_chain) + + def _chain_cancel(_): + """Chain cancellation from f to recvd""" + if recvd.done(): + return + if f.cancelled(): + recvd.cancel() + f.add_done_callback(_chain_cancel) + + return f + + def poll(self, timeout=None, flags=_zmq.POLLIN): + """poll the socket for events + + returns a Future for the poll results. + """ + + if self.closed: + raise _zmq.ZMQError(_zmq.ENOTSUP) + + p = self._poller_class() + p.register(self, flags) + f = p.poll(timeout) + + future = self._Future() + def unwrap_result(f): + if future.done(): + return + if f.cancelled(): + try: + future.cancel() + except RuntimeError: + # RuntimeError may be called during teardown + pass + return + if f.exception(): + future.set_exception(f.exception()) + else: + evts = dict(f.result()) + future.set_result(evts.get(self, 0)) + + if f.done(): + # hook up result if + unwrap_result(f) + else: + f.add_done_callback(unwrap_result) + return future + + def _add_timeout(self, future, timeout): + """Add a timeout for a send or recv Future""" + def future_timeout(): + if future.done(): + # future already resolved, do nothing + return + + # raise EAGAIN + future.set_exception(_zmq.Again()) + self._call_later(timeout, future_timeout) + + def _call_later(self, delay, callback): + """Schedule a function to be called later + + Override for different IOLoop implementations + + Tornado and asyncio happen to both have ioloop.call_later + with the same signature. + """ + self.io_loop.call_later(delay, callback) + + @staticmethod + def _remove_finished_future(future, event_list): + """Make sure that futures are removed from the event list when they resolve + + Avoids delaying cleanup until the next send/recv event, + which may never come. + """ + for f_idx, (f, kind, kwargs, _) in enumerate(event_list): + if f is future: + break + else: + return + + # "future" instance is shared between sockets, but each socket has its own event list. 
+ event_list.remove(event_list[f_idx]) + + def _add_recv_event(self, kind, kwargs=None, future=None): + """Add a recv event, returning the corresponding Future""" + f = future or self._Future() + if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT: + # short-circuit non-blocking calls + recv = getattr(self._shadow_sock, kind) + try: + r = recv(**kwargs) + except Exception as e: + f.set_exception(e) + else: + f.set_result(r) + return f + + # we add it to the list of futures before we add the timeout as the + # timeout will remove the future from recv_futures to avoid leaks + self._recv_futures.append( + _FutureEvent(f, kind, kwargs, msg=None) + ) + + # Don't let the Future sit in _recv_events after it's done + f.add_done_callback(lambda f: self._remove_finished_future(f, self._recv_futures)) + + if hasattr(_zmq, 'RCVTIMEO'): + timeout_ms = self._shadow_sock.rcvtimeo + if timeout_ms >= 0: + self._add_timeout(f, timeout_ms * 1e-3) + + if self._shadow_sock.get(EVENTS) & POLLIN: + # recv immediately, if we can + self._handle_recv() + if self._recv_futures: + self._add_io_state(POLLIN) + return f + + def _add_send_event(self, kind, msg=None, kwargs=None, future=None): + """Add a send event, returning the corresponding Future""" + f = future or self._Future() + # attempt send with DONTWAIT if no futures are waiting + # short-circuit for sends that will resolve immediately + # only call if no send Futures are waiting + if ( + kind in ('send', 'send_multipart') + and not self._send_futures + ): + flags = kwargs.get('flags', 0) + nowait_kwargs = kwargs.copy() + nowait_kwargs['flags'] = flags | _zmq.DONTWAIT + + # short-circuit non-blocking calls + send = getattr(self._shadow_sock, kind) + # track if the send resolved or not + # (EAGAIN if DONTWAIT is not set should proceed with) + finish_early = True + try: + r = send(msg, **nowait_kwargs) + except _zmq.Again as e: + if flags & _zmq.DONTWAIT: + f.set_exception(e) + else: + # EAGAIN raised and DONTWAIT not requested, + # proceed with async send + finish_early = False + except Exception as e: + f.set_exception(e) + else: + f.set_result(r) + + if finish_early: + # short-circuit resolved, return finished Future + # schedule wake for recv if there are any receivers waiting + if self._recv_futures: + self._schedule_remaining_events() + return f + + # we add it to the list of futures before we add the timeout as the + # timeout will remove the future from recv_futures to avoid leaks + self._send_futures.append( + _FutureEvent(f, kind, kwargs=kwargs, msg=msg) + ) + # Don't let the Future sit in _send_futures after it's done + f.add_done_callback(lambda f: self._remove_finished_future(f, self._send_futures)) + + if hasattr(_zmq, 'SNDTIMEO'): + timeout_ms = self._shadow_sock.get(_zmq.SNDTIMEO) + if timeout_ms >= 0: + self._add_timeout(f, timeout_ms * 1e-3) + + self._add_io_state(POLLOUT) + return f + + def _handle_recv(self): + """Handle recv events""" + if not self._shadow_sock.get(EVENTS) & POLLIN: + # event triggered, but state may have been changed between trigger and callback + return + f = None + while self._recv_futures: + f, kind, kwargs, _ = self._recv_futures.popleft() + # skip any cancelled futures + if f.done(): + f = None + else: + break + + if not self._recv_futures: + self._drop_io_state(POLLIN) + + if f is None: + return + + if kind == 'poll': + # on poll event, just signal ready, nothing else. 
+ f.set_result(None) + return + elif kind == 'recv_multipart': + recv = self._shadow_sock.recv_multipart + elif kind == 'recv': + recv = self._shadow_sock.recv + else: + raise ValueError("Unhandled recv event type: %r" % kind) + + kwargs['flags'] |= _zmq.DONTWAIT + try: + result = recv(**kwargs) + except Exception as e: + f.set_exception(e) + else: + f.set_result(result) + + def _handle_send(self): + if not self._shadow_sock.get(EVENTS) & POLLOUT: + # event triggered, but state may have been changed between trigger and callback + return + f = None + while self._send_futures: + f, kind, kwargs, msg = self._send_futures.popleft() + # skip any cancelled futures + if f.done(): + f = None + else: + break + + if not self._send_futures: + self._drop_io_state(POLLOUT) + + if f is None: + return + + if kind == 'poll': + # on poll event, just signal ready, nothing else. + f.set_result(None) + return + elif kind == 'send_multipart': + send = self._shadow_sock.send_multipart + elif kind == 'send': + send = self._shadow_sock.send + else: + raise ValueError("Unhandled send event type: %r" % kind) + + kwargs['flags'] |= _zmq.DONTWAIT + try: + result = send(msg, **kwargs) + except Exception as e: + f.set_exception(e) + else: + f.set_result(result) + + # event masking from ZMQStream + def _handle_events(self, fd=0, events=0): + """Dispatch IO events to _handle_recv, etc.""" + zmq_events = self._shadow_sock.get(EVENTS) + if zmq_events & _zmq.POLLIN: + self._handle_recv() + if zmq_events & _zmq.POLLOUT: + self._handle_send() + self._schedule_remaining_events() + + def _schedule_remaining_events(self, events=None): + """Schedule a call to handle_events next loop iteration + + If there are still events to handle. + """ + # edge-triggered handling + # allow passing events in, in case this is triggered by retrieving events, + # so we don't have to retrieve it twice. + if self._state == 0: + # not watching for anything, nothing to schedule + return + if events is None: + events = self._shadow_sock.get(EVENTS) + if events & self._state: + self._call_later(0, self._handle_events) + + def _add_io_state(self, state): + """Add io_state to poller.""" + if self._state != state: + state = self._state = self._state | state + self._update_handler(self._state) + + def _drop_io_state(self, state): + """Stop poller from watching an io_state.""" + if self._state & state: + self._state = self._state & (~state) + self._update_handler(self._state) + + def _update_handler(self, state): + """Update IOLoop handler with state. + + zmq FD is always read-only. + """ + self._schedule_remaining_events() + + def _init_io_state(self): + """initialize the ioloop event handler""" + self.io_loop.add_handler(self._shadow_sock, self._handle_events, self._READ) + self._call_later(0, self._handle_events) + + def _clear_io_state(self): + """unregister the ioloop event handler + + called once during close + """ + fd = self._shadow_sock + if self._shadow_sock.closed: + fd = self._fd + self.io_loop.remove_handler(fd) + + diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/decorators.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..f95c946e12d3c75a17d41af491cbd19612d4d753 --- /dev/null +++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/decorators.py @@ -0,0 +1,188 @@ +"""Decorators for running functions with context/sockets. + +.. 
versionadded:: 15.3 + +Like using Contexts and Sockets as context managers, but with decorator syntax. +Context and sockets are closed at the end of the function. + +For example:: + + from zmq.decorators import context, socket + + @context() + @socket(zmq.PUSH) + def work(ctx, push): + ... +""" + +# Copyright (c) PyZMQ Developers. +# Distributed under the terms of the Modified BSD License. + +__all__ = ( + 'context', + 'socket', +) + +from functools import wraps + +import zmq +from zmq.utils.strtypes import basestring + + +class _Decorator(object): + '''The mini decorator factory''' + + def __init__(self, target=None): + self._target = target + + def __call__(self, *dec_args, **dec_kwargs): + ''' + The main logic of decorator + + Here is how those arguments works:: + + @out_decorator(*dec_args, *dec_kwargs) + def func(*wrap_args, **wrap_kwargs): + ... + + And in the ``wrapper``, we simply create ``self.target`` instance via + ``with``:: + + target = self.get_target(*args, **kwargs) + with target(*dec_args, **dec_kwargs) as obj: + ... + + ''' + kw_name, dec_args, dec_kwargs = self.process_decorator_args(*dec_args, **dec_kwargs) + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + target = self.get_target(*args, **kwargs) + + with target(*dec_args, **dec_kwargs) as obj: + # insert our object into args + if kw_name and kw_name not in kwargs: + kwargs[kw_name] = obj + elif kw_name and kw_name in kwargs: + raise TypeError( + "{0}() got multiple values for" + " argument '{1}'".format( + func.__name__, kw_name)) + else: + args = args + (obj,) + + return func(*args, **kwargs) + + return wrapper + + return decorator + + def get_target(self, *args, **kwargs): + """Return the target function + + Allows modifying args/kwargs to be passed. + """ + return self._target + + def process_decorator_args(self, *args, **kwargs): + """Process args passed to the decorator. + + args not consumed by the decorator will be passed to the target factory + (Context/Socket constructor). + """ + kw_name = None + + if isinstance(kwargs.get('name'), basestring): + kw_name = kwargs.pop('name') + elif len(args) >= 1 and isinstance(args[0], basestring): + kw_name = args[0] + args = args[1:] + + return kw_name, args, kwargs + + +class _ContextDecorator(_Decorator): + """Decorator subclass for Contexts""" + def __init__(self): + super(_ContextDecorator, self).__init__(zmq.Context) + + +class _SocketDecorator(_Decorator): + """Decorator subclass for sockets + + Gets the context from other args. + """ + + def process_decorator_args(self, *args, **kwargs): + """Also grab context_name out of kwargs""" + kw_name, args, kwargs = super(_SocketDecorator, self).process_decorator_args(*args, **kwargs) + self.context_name = kwargs.pop('context_name', 'context') + return kw_name, args, kwargs + + def get_target(self, *args, **kwargs): + """Get context, based on call-time args""" + context = self._get_context(*args, **kwargs) + return context.socket + + def _get_context(self, *args, **kwargs): + ''' + Find the ``zmq.Context`` from ``args`` and ``kwargs`` at call time. + + First, if there is an keyword argument named ``context`` and it is a + ``zmq.Context`` instance , we will take it. + + Second, we check all the ``args``, take the first ``zmq.Context`` + instance. 
diff --git a/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/error.py b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..71c90a38a56b5e3c0caade793a187fa4ac2f18f0
--- /dev/null
+++ b/my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/zmq/error.py
@@ -0,0 +1,196 @@
+"""0MQ Error classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from errno import EINTR
+
+
+class ZMQBaseError(Exception):
+    """Base exception class for 0MQ errors in Python."""
+    pass
+
+class ZMQError(ZMQBaseError):
+    """Wrap an errno-style error.
+
+    Parameters
+    ----------
+    errno : int
+        The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+        used.
+    msg : string
+        Description of the error or None.
+    """
+    errno = None
+
+    def __init__(self, errno=None, msg=None):
+        """Wrap an errno-style error.
+
+        Parameters
+        ----------
+        errno : int
+            The ZMQ errno or None. If None, then ``zmq_errno()`` is called and
+            used.
+        msg : string
+            Description of the error or None.
+        """
+        from zmq.backend import strerror, zmq_errno
+        if errno is None:
+            errno = zmq_errno()
+        if isinstance(errno, int):
+            self.errno = errno
+            if msg is None:
+                self.strerror = strerror(errno)
+            else:
+                self.strerror = msg
+        else:
+            if msg is None:
+                self.strerror = str(errno)
+            else:
+                self.strerror = msg
+        # flush signals, because there could be a SIGINT
+        # waiting to pounce, resulting in uncaught exceptions.
+        # Doing this here means getting SIGINT during a blocking
+        # libzmq call will raise a *catchable* KeyboardInterrupt
+        # PyErr_CheckSignals()
+
+    def __str__(self):
+        return self.strerror
+
+    def __repr__(self):
+        return "%s('%s')" % (self.__class__.__name__, str(self))
+
+
+class ZMQBindError(ZMQBaseError):
+    """An error for ``Socket.bind_to_random_port()``.
+
+    See Also
+    --------
+    .Socket.bind_to_random_port
+    """
+    pass
+
+
+class NotDone(ZMQBaseError):
+    """Raised when a timeout is reached while waiting for 0MQ to finish with a Message
+
+    See Also
+    --------
+    .MessageTracker.wait : object for tracking when ZeroMQ is done
+    """
+    pass
+
+
+class ContextTerminated(ZMQError):
+    """Wrapper for zmq.ETERM
+
+    .. versionadded:: 13.0
+    """
+    def __init__(self, errno='ignored', msg='ignored'):
+        from zmq import ETERM
+        super(ContextTerminated, self).__init__(ETERM)
+
+
+class Again(ZMQError):
+    """Wrapper for zmq.EAGAIN
+
+    .. versionadded:: 13.0
+    """
+
+    def __init__(self, errno='ignored', msg='ignored'):
+        from zmq import EAGAIN
+        super(Again, self).__init__(EAGAIN)
+
+
+try:
+    InterruptedError
+except NameError:
+    InterruptedError = OSError
+
+class InterruptedSystemCall(ZMQError, InterruptedError):
+    """Wrapper for EINTR
+
+    This exception should be caught internally in pyzmq
+    to retry system calls, and not propagate to the user.
+
+    .. versionadded:: 14.7
+    """
+
+    def __init__(self, errno='ignored', msg='ignored'):
+        super(InterruptedSystemCall, self).__init__(EINTR)
+
+    def __str__(self):
+        s = super(InterruptedSystemCall, self).__str__()
+        return s + ": This call should have been retried. Please report this to pyzmq."
+
+
+def _check_rc(rc, errno=None):
+    """internal utility for checking the zmq return condition
+
+    and raising the appropriate Exception class
+    """
+    if rc == -1:
+        if errno is None:
+            from zmq.backend import zmq_errno
+            errno = zmq_errno()
+        from zmq import EAGAIN, ETERM
+        if errno == EINTR:
+            raise InterruptedSystemCall(errno)
+        elif errno == EAGAIN:
+            raise Again(errno)
+        elif errno == ETERM:
+            raise ContextTerminated(errno)
+        else:
+            raise ZMQError(errno)
+
+_zmq_version_info = None
+_zmq_version = None
+
+class ZMQVersionError(NotImplementedError):
+    """Raised when a feature is not provided by the linked version of libzmq.
+
+    .. versionadded:: 14.2
+    """
+    min_version = None
+    def __init__(self, min_version, msg='Feature'):
+        global _zmq_version
+        if _zmq_version is None:
+            from zmq import zmq_version
+            _zmq_version = zmq_version()
+        self.msg = msg
+        self.min_version = min_version
+        self.version = _zmq_version
+
+    def __repr__(self):
+        return "ZMQVersionError('%s')" % str(self)
+
+    def __str__(self):
+        return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version)
+
+
+def _check_version(min_version_info, msg='Feature'):
+    """Check the libzmq version.
+
+    Raises ZMQVersionError if the current zmq version is not at least min_version.
+
+    min_version_info is a tuple of integers, and will be compared against
+    zmq.zmq_version_info().
+    """
+    global _zmq_version_info
+    if _zmq_version_info is None:
+        from zmq import zmq_version_info
+        _zmq_version_info = zmq_version_info()
+    if _zmq_version_info < min_version_info:
+        min_version = '.'.join(str(v) for v in min_version_info)
+        raise ZMQVersionError(min_version, msg)
+
+
+__all__ = [
+    'ZMQBaseError',
+    'ZMQBindError',
+    'ZMQError',
+    'NotDone',
+    'ContextTerminated',
+    'InterruptedSystemCall',
+    'Again',
+    'ZMQVersionError',
+]
diff --git a/tmp_inputs_32_31/case00010.nii.gz b/tmp_inputs_32_31/case00010.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..167cfd313c6c0ca3dbf562caa0759595bb50843e
--- /dev/null
+++ b/tmp_inputs_32_31/case00010.nii.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a7e2a26e4380f226b6322de656b987c8360dd71ded8a3e123b6c74c80b1f7e0
+size 37525324
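
Returning to the ``zmq/error.py`` hunk above: ``_check_rc`` translates libzmq's errno convention into this exception hierarchy, so user code can catch the specific subclasses instead of inspecting errno values. A rough sketch of how they surface (endpoint, port, and flags are chosen only for illustration)::

    import zmq

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PULL)
    sock.bind("tcp://127.0.0.1:5555")  # arbitrary port

    try:
        # a non-blocking recv on an empty queue raises Again (EAGAIN)
        msg = sock.recv(flags=zmq.NOBLOCK)
    except zmq.Again:
        msg = None
    except zmq.ContextTerminated:
        # another thread called ctx.term(); this socket is unusable now
        msg = None
    except zmq.ZMQError as e:
        # any other errno arrives here with errno/strerror populated
        print(e.errno, e.strerror)
        raise
    finally:
        sock.close(linger=0)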