diff --git a/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2f1b8e15e5627d92f0521605c9870bc8e5505cb4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/METADATA b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..db029b770cd87a12086e70b1be9900c93d255f0b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 6.0.2 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Download-URL: https://pypi.org/project/PyYAML/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=3.8 +License-File: LICENSE + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. + +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. 
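The METADATA description above is the only prose the wheel ships. As a minimal sketch of the load/dump round-trip it describes (an editor's illustration, not part of the packaged files; the document and values are invented), using only PyYAML's safe API:

    import yaml

    doc = "server:\n  host: example.com\n  port: 8080\n"

    # safe_load parses only standard YAML tags; the Python-specific tags
    # mentioned in METADATA require yaml.load() with an explicit Loader
    # and are unsafe on untrusted input.
    cfg = yaml.safe_load(doc)
    assert cfg["server"]["port"] == 8080

    # safe_dump serializes plain Python structures back to YAML 1.1 text.
    print(yaml.safe_dump(cfg, default_flow_style=False))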
diff --git a/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/RECORD b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a9fecfefc3b8fc970e6e9a3b8fb3e9711ced557f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/RECORD @@ -0,0 +1,43 @@ +PyYAML-6.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-6.0.2.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-6.0.2.dist-info/METADATA,sha256=9-odFB5seu4pGPcEv7E8iyxNF51_uKnaNGjLAhz2lto,2060 +PyYAML-6.0.2.dist-info/RECORD,, +PyYAML-6.0.2.dist-info/WHEEL,sha256=YWWHkv6sHhBDPNqgSfLklIm4KZnZJH4x2lIHOwCoU7Q,152 +PyYAML-6.0.2.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-311.pyc,, +yaml/__init__.py,sha256=N35S01HMesFTe0aRRMWkPj0Pa8IEbHpE9FK7cr5Bdtw,12311 +yaml/__pycache__/__init__.cpython-311.pyc,, +yaml/__pycache__/composer.cpython-311.pyc,, +yaml/__pycache__/constructor.cpython-311.pyc,, +yaml/__pycache__/cyaml.cpython-311.pyc,, +yaml/__pycache__/dumper.cpython-311.pyc,, +yaml/__pycache__/emitter.cpython-311.pyc,, +yaml/__pycache__/error.cpython-311.pyc,, +yaml/__pycache__/events.cpython-311.pyc,, +yaml/__pycache__/loader.cpython-311.pyc,, +yaml/__pycache__/nodes.cpython-311.pyc,, +yaml/__pycache__/parser.cpython-311.pyc,, +yaml/__pycache__/reader.cpython-311.pyc,, +yaml/__pycache__/representer.cpython-311.pyc,, +yaml/__pycache__/resolver.cpython-311.pyc,, +yaml/__pycache__/scanner.cpython-311.pyc,, +yaml/__pycache__/serializer.cpython-311.pyc,, +yaml/__pycache__/tokens.cpython-311.pyc,, +yaml/_yaml.cpython-311-x86_64-linux-gnu.so,sha256=sZBsAqPs6VM8YzOkHpNL0qKIfR0zNM9gttjzjoVPaiI,2466120 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190 +yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004 +yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..a9a865296bb2a6e89041edea9c427cae8da32ad4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.44.0) +Root-Is-Purelib: false +Tag: cp311-cp311-manylinux_2_17_x86_64 +Tag: cp311-cp311-manylinux2014_x86_64 +
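Each hashed entry in the RECORD file above is path,hash,size: per the wheel specification, the value after sha256= is the URL-safe base64 encoding of the file's SHA-256 digest with the trailing = padding stripped. A small checker for such entries (an editor's sketch; record_hash is an invented name):

    import base64, hashlib

    def record_hash(path):
        # "sha256=" + urlsafe-base64(SHA-256 digest), unpadded, as in RECORD
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

    # e.g. record_hash(".../PyYAML-6.0.2.dist-info/LICENSE") should reproduce
    # the sha256=jTko-... value recorded above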
diff --git a/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6475e911f628412049bc4090d86f23ac403adde --- /dev/null +++ b/.venv/lib/python3.11/site-packages/PyYAML-6.0.2.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/.venv/lib/python3.11/site-packages/__pycache__/example.cpython-311.pyc b/.venv/lib/python3.11/site-packages/__pycache__/example.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd9de576ae431a382355821271f82445d84d727 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/__pycache__/example.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/__pycache__/google_auth_httplib2.cpython-311.pyc b/.venv/lib/python3.11/site-packages/__pycache__/google_auth_httplib2.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bfe38a6884b05db0d413dbdb7eacab2756acc31 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/__pycache__/google_auth_httplib2.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/__pycache__/isympy.cpython-311.pyc b/.venv/lib/python3.11/site-packages/__pycache__/isympy.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1cf9163b42d83a546f87fadcac573ca0daf502e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/__pycache__/isympy.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/__pycache__/nest_asyncio.cpython-311.pyc b/.venv/lib/python3.11/site-packages/__pycache__/nest_asyncio.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30be29e743f48a8ceb34803e4805904d3a15fc02 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/__pycache__/nest_asyncio.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/__pycache__/py.cpython-311.pyc b/.venv/lib/python3.11/site-packages/__pycache__/py.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5085ca67b36cfc9d2d14c75dd4122c72d5cb04fb Binary files /dev/null and b/.venv/lib/python3.11/site-packages/__pycache__/py.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/__pycache__/six.cpython-311.pyc b/.venv/lib/python3.11/site-packages/__pycache__/six.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03fd403335b47ecc8dbbcb71654846166ec7390f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/__pycache__/six.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/cffi/_cffi_errors.h b/.venv/lib/python3.11/site-packages/cffi/_cffi_errors.h new file mode 100644 index 0000000000000000000000000000000000000000..158e0590346a9a8b2ab047ac1bd23bcb3af21398 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/_cffi_errors.h @@ -0,0 +1,149 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. 
+ If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. +*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " try:\n" + " of.write(x)\n" + " except: pass\n" + " self.buf += x\n" + " def flush(self):\n" + " pass\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. */ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. 
*/ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/.venv/lib/python3.11/site-packages/cffi/_embedding.h b/.venv/lib/python3.11/site-packages/cffi/_embedding.h new file mode 100644 index 0000000000000000000000000000000000000000..94d8b30a9e3c93a0ce3766f32dc291e09c8fff92 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/_embedding.h @@ -0,0 +1,550 @@ + +/***** Support code for embedding *****/ + +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) +# define CFFI_DLLEXPORT __declspec(dllexport) +#elif defined(__GNUC__) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) +#else +# define CFFI_DLLEXPORT /* nothing */ +#endif + + +/* There are two global variables of type _cffi_call_python_fnptr: + + * _cffi_call_python, which we declare just below, is the one called + by ``extern "Python"`` implementations. + + * _cffi_call_python_org, which on CPython is actually part of the + _cffi_exports[] array, is the function pointer copied from + _cffi_backend. If _cffi_start_python() fails, then this is set + to NULL; otherwise, it should never be NULL. + + After initialization is complete, both are equal. However, the + first one remains equal to &_cffi_start_and_call_python until the + very end of initialization, when we are (or should be) sure that + concurrent threads also see a completely initialized world, and + only then is it changed. +*/ +#undef _cffi_call_python +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *); +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *); +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python; + + +#ifndef _MSC_VER + /* --- Assuming a GCC not infinitely old --- */ +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif +#else + /* --- Windows threads version --- */ +# include <windows.h> +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 +static volatile LONG _cffi_dummy; +#endif + +#ifdef WITH_THREAD +# ifndef _MSC_VER +# include <pthread.h> + static pthread_mutex_t _cffi_embed_startup_lock; +# else + static CRITICAL_SECTION _cffi_embed_startup_lock; +# endif + static char _cffi_embed_startup_lock_ready = 0; +#endif + +static void _cffi_acquire_reentrant_mutex(void) +{ + static void *volatile lock = NULL; + + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: pthread_mutex_init() should be very fast, and + this is only run at start-up anyway. 
*/ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + +#include "_cffi_errors.h" + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. + */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + PyObject *builtins; + + state = PyGILState_Ensure(); + + /* Call the initxxx() function from the present module. It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "<init code for '" _CFFI_MODULE_NAME "'>", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + builtins = PyEval_GetBuiltins(); + if (builtins == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. 
+ Debugging load-time failures with embedding is not fun + */ + PyObject *ecap; + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.17.1" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + _cffi_stop_error_capture(ecap); + } + result = -1; + goto done; +} + +#if PY_VERSION_HEX < 0x03080000 +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ +#endif + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + (What it really does used to be completely different in Python 2 + and Python 3, with the Python 2 solution avoiding the spin-lock + around the Py_InitializeEx() call. However, after recent changes + to CPython 2.7 (issue #358) it no longer works. So we use the + Python 3 solution everywhere.) + + This initializes Python by calling Py_InitializeEx(). + Important: this must not be called concurrently at all. + So we use a global variable as a simple spin lock. This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. + + In Python < 3.8, we choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + In Python >= 3.8, this string array is no longer writable, so + instead we pick PyCapsuleType.tp_version_tag. We can't change + Python < 3.8 because someone might use a mixture of cffi + embedded modules, some of which were compiled before this file + changed. + + In Python >= 3.12, this stopped working because that particular + tp_version_tag gets modified during interpreter startup. It's + arguably a bad idea before 3.12 too, but again we can't change + that because someone might use a mixture of cffi embedded + modules, and no-one reported a bug so far. In Python >= 3.12 + we go instead for PyCapsuleType.tp_as_buffer, which is supposed + to always be NULL. We write to it temporarily a pointer to + a struct full of NULLs, which is semantically the same. 
+ */ + +#ifdef WITH_THREAD +# if PY_VERSION_HEX < 0x03080000 + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value, *locked_value; + + while (1) { /* spin loop */ + old_value = *lock; + locked_value = old_value + 1; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# else +# if PY_VERSION_HEX < 0x030C0000 + int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag; + int old_value, locked_value = -42; + assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG)); +# else + static struct ebp_s { PyBufferProcs buf; int mark; } empty_buffer_procs; + empty_buffer_procs.mark = -42; + PyBufferProcs *volatile *lock = (PyBufferProcs *volatile *) + &PyCapsule_Type.tp_as_buffer; + PyBufferProcs *old_value, *locked_value = &empty_buffer_procs.buf; +# endif + + while (1) { /* spin loop */ + old_value = *lock; + if (old_value == 0) { + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { +# if PY_VERSION_HEX < 0x030C0000 + assert(old_value == locked_value); +# else + /* The pointer should point to a possibly different + empty_buffer_procs from another C extension module */ + assert(((struct ebp_s *)old_value)->mark == -42); +# endif + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# endif +#endif + + /* call Py_InitializeEx() */ + if (!Py_IsInitialized()) { + _cffi_py_initialize(); +#if PY_VERSION_HEX < 0x03070000 + PyEval_InitThreads(); +#endif + PyEval_SaveThread(); /* release the GIL */ + /* the returned tstate must be the one that has been stored into the + autoTLSkey by _PyGILState_Init() called from Py_Initialize(). */ + } + else { +#if PY_VERSION_HEX < 0x03070000 + /* PyEval_InitThreads() is always a no-op from CPython 3.7 */ + PyGILState_STATE state = PyGILState_Ensure(); + PyEval_InitThreads(); + PyGILState_Release(state); +#endif + } + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, locked_value, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void *func; /* function pointer */ + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + _CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. This function can be + called multiple times concurrently, e.g. 
 when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. + */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + + /* Here the GIL exists, but we don't have it. We're only protected + from concurrency by the reentrant mutex. */ + + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ + if (!called) { + called = 1; /* invoke _cffi_initialize_python() only once, + but don't set '_cffi_call_python' right now, + otherwise concurrent threads won't call + this function at all (we need them to wait) */ + if (_cffi_initialize_python() == 0) { + /* now initialization is finished. Switch to the fast-path. */ + + /* We would like nobody to see the new value of + '_cffi_call_python' without also seeing the rest of the + data initialized. However, this is not possible. But + the new value of '_cffi_call_python' is the function + 'cffi_call_python()' from _cffi_backend. So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. + */ + + assert(_cffi_call_python_org != NULL); + _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; + } + else { + /* initialization failed. Reset this to NULL, even if it was + already set to some other value. Future calls to + _cffi_start_python() are still forced to occur, and will + always return NULL from now on. */ + _cffi_call_python_org = NULL; + } + } + + _cffi_release_reentrant_mutex(); + + return (_cffi_call_python_fnptr)_cffi_call_python_org; +} + +static +void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args) +{ + _cffi_call_python_fnptr fnptr; + int current_err = errno; +#ifdef _MSC_VER + int current_lasterr = GetLastError(); +#endif + fnptr = _cffi_start_python(); + if (fnptr == NULL) { + fprintf(stderr, "function %s() called, but initialization code " + "failed. Returning 0.\n", externpy->name); + memset(args, 0, externpy->size_of_result); + } +#ifdef _MSC_VER + SetLastError(current_lasterr); +#endif + errno = current_err; + + if (fnptr != NULL) + fnptr(externpy, args); +} + + +/* The cffi_start_python() function makes sure Python is initialized + and our cffi module is set up. It can be called manually from the + user C code. The same effect is obtained automatically from any + dll-exported ``extern "Python"`` function. This function returns + -1 if initialization failed, 0 if all is OK. */ +_CFFI_UNUSED_FN +static int cffi_start_python(void) +{ + if (_cffi_call_python == &_cffi_start_and_call_python) { + if (_cffi_start_python() == NULL) + return -1; + } + cffi_read_barrier(); + return 0; +} + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif
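_embedding.h above is the support code that cffi compiles into an embeddable .dll/.so; the Python build script that triggers it looks roughly like the following (a sketch based on cffi's documented embedding API; the module name, exported function, and target are invented for illustration):

    import cffi

    ffibuilder = cffi.FFI()

    # C-level declarations exported from the resulting library.
    ffibuilder.embedding_api("int add(int x, int y);")

    ffibuilder.set_source("_embedding_demo", "")

    # This string becomes _CFFI_PYTHON_STARTUP_CODE: it is what
    # Py_CompileString() compiles in _cffi_initialize_python() above, the
    # first time an extern "Python" function is called from C.
    ffibuilder.embedding_init_code("""
        from _embedding_demo import ffi

        @ffi.def_extern()
        def add(x, y):
            return x + y
    """)

    ffibuilder.compile(target="libembedding_demo.*", verbose=True)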
diff --git a/.venv/lib/python3.11/site-packages/cffi/backend_ctypes.py b/.venv/lib/python3.11/site-packages/cffi/backend_ctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..e7956a79cfb1c3d28a2ad22a40b261ae7dbbbb5f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/backend_ctypes.py @@ -0,0 +1,1121 @@ +import ctypes, ctypes.util, operator, sys +from . import model + +if sys.version_info < (3,): + bytechr = chr +else: + unicode = str + long = int + xrange = range + bytechr = lambda num: bytes([num]) + +class CTypesType(type): + pass + +class CTypesData(object): + __metaclass__ = CTypesType + __slots__ = ['__weakref__'] + __name__ = '<cdata>' + + def __init__(self, *args): + raise TypeError("cannot instantiate %r" % (self.__class__,)) + + @classmethod + def _newp(cls, init): + raise TypeError("expected a pointer or array ctype, got '%s'" + % (cls._get_c_name(),)) + + @staticmethod + def _to_ctypes(value): + raise TypeError + + @classmethod + def _arg_to_ctypes(cls, *value): + try: + ctype = cls._ctype + except AttributeError: + raise TypeError("cannot create an instance of %r" % (cls,)) + if value: + res = cls._to_ctypes(*value) + if not isinstance(res, ctype): + res = cls._ctype(res) + else: + res = cls._ctype() + return res + + @classmethod + def _create_ctype_obj(cls, init): + if init is None: + return cls._arg_to_ctypes() + else: + return cls._arg_to_ctypes(init) + + @staticmethod + def _from_ctypes(ctypes_value): + raise TypeError + + @classmethod + def _get_c_name(cls, replace_with=''): + return cls._reftypename.replace(' &', replace_with) + + @classmethod + def _fix_class(cls): + cls.__name__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),) + cls.__module__ = 'ffi' + + def _get_own_repr(self): + raise NotImplementedError + + def _addr_repr(self, address): + if address == 0: + return 'NULL' + else: + if address < 0: + address += 1 << (8*ctypes.sizeof(ctypes.c_void_p)) + return '0x%x' % address + + def __repr__(self, c_name=None): + own = self._get_own_repr() + return '<cdata %r %s>' % (c_name or self._get_c_name(), own) + + def _convert_to_address(self, BClass): + if BClass is None: + raise TypeError("cannot convert %r to an address" % ( + self._get_c_name(),)) + else: + raise TypeError("cannot convert %r to %r" % ( + self._get_c_name(), BClass._get_c_name())) + + @classmethod + def _get_size(cls): + return ctypes.sizeof(cls._ctype) + + def _get_size_of_instance(self): + return ctypes.sizeof(self._ctype) + + @classmethod + def _cast_from(cls, source): + raise TypeError("cannot cast to %r" % (cls._get_c_name(),)) + + def _cast_to_integer(self): + return self._convert_to_address(None) + + @classmethod + def _alignment(cls): + return ctypes.alignment(cls._ctype) + + def __iter__(self): + raise TypeError("cdata %r does not support iteration" % ( + self._get_c_name()),) + + def _make_cmp(name): + cmpfunc = getattr(operator, name) + def cmp(self, other): + v_is_ptr = not isinstance(self, 
CTypesGenericPrimitive) + w_is_ptr = (isinstance(other, CTypesData) and + not isinstance(other, CTypesGenericPrimitive)) + if v_is_ptr and w_is_ptr: + return cmpfunc(self._convert_to_address(None), + other._convert_to_address(None)) + elif v_is_ptr or w_is_ptr: + return NotImplemented + else: + if isinstance(self, CTypesGenericPrimitive): + self = self._value + if isinstance(other, CTypesGenericPrimitive): + other = other._value + return cmpfunc(self, other) + cmp.func_name = name + return cmp + + __eq__ = _make_cmp('__eq__') + __ne__ = _make_cmp('__ne__') + __lt__ = _make_cmp('__lt__') + __le__ = _make_cmp('__le__') + __gt__ = _make_cmp('__gt__') + __ge__ = _make_cmp('__ge__') + + def __hash__(self): + return hash(self._convert_to_address(None)) + + def _to_string(self, maxlen): + raise TypeError("string(): %r" % (self,)) + + +class CTypesGenericPrimitive(CTypesData): + __slots__ = [] + + def __hash__(self): + return hash(self._value) + + def _get_own_repr(self): + return repr(self._from_ctypes(self._value)) + + +class CTypesGenericArray(CTypesData): + __slots__ = [] + + @classmethod + def _newp(cls, init): + return cls(init) + + def __iter__(self): + for i in xrange(len(self)): + yield self[i] + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + +class CTypesGenericPtr(CTypesData): + __slots__ = ['_address', '_as_ctype_ptr'] + _automatic_casts = False + kind = "pointer" + + @classmethod + def _newp(cls, init): + return cls(init) + + @classmethod + def _cast_from(cls, source): + if source is None: + address = 0 + elif isinstance(source, CTypesData): + address = source._cast_to_integer() + elif isinstance(source, (int, long)): + address = source + else: + raise TypeError("bad type for cast to %r: %r" % + (cls, type(source).__name__)) + return cls._new_pointer_at(address) + + @classmethod + def _new_pointer_at(cls, address): + self = cls.__new__(cls) + self._address = address + self._as_ctype_ptr = ctypes.cast(address, cls._ctype) + return self + + def _get_own_repr(self): + try: + return self._addr_repr(self._address) + except AttributeError: + return '???' 
+ + def _cast_to_integer(self): + return self._address + + def __nonzero__(self): + return bool(self._address) + __bool__ = __nonzero__ + + @classmethod + def _to_ctypes(cls, value): + if not isinstance(value, CTypesData): + raise TypeError("unexpected %s object" % type(value).__name__) + address = value._convert_to_address(cls) + return ctypes.cast(address, cls._ctype) + + @classmethod + def _from_ctypes(cls, ctypes_ptr): + address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 + return cls._new_pointer_at(address) + + @classmethod + def _initialize(cls, ctypes_ptr, value): + if value: + ctypes_ptr.contents = cls._to_ctypes(value).contents + + def _convert_to_address(self, BClass): + if (BClass in (self.__class__, None) or BClass._automatic_casts + or self._automatic_casts): + return self._address + else: + return CTypesData._convert_to_address(self, BClass) + + +class CTypesBaseStructOrUnion(CTypesData): + __slots__ = ['_blob'] + + @classmethod + def _create_ctype_obj(cls, init): + # may be overridden + raise TypeError("cannot instantiate opaque type %s" % (cls,)) + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + @classmethod + def _offsetof(cls, fieldname): + return getattr(cls._ctype, fieldname).offset + + def _convert_to_address(self, BClass): + if getattr(BClass, '_BItem', None) is self.__class__: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @classmethod + def _from_ctypes(cls, ctypes_struct_or_union): + self = cls.__new__(cls) + self._blob = ctypes_struct_or_union + return self + + @classmethod + def _to_ctypes(cls, value): + return value._blob + + def __repr__(self, c_name=None): + return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) + + +class CTypesBackend(object): + + PRIMITIVE_TYPES = { + 'char': ctypes.c_char, + 'short': ctypes.c_short, + 'int': ctypes.c_int, + 'long': ctypes.c_long, + 'long long': ctypes.c_longlong, + 'signed char': ctypes.c_byte, + 'unsigned char': ctypes.c_ubyte, + 'unsigned short': ctypes.c_ushort, + 'unsigned int': ctypes.c_uint, + 'unsigned long': ctypes.c_ulong, + 'unsigned long long': ctypes.c_ulonglong, + 'float': ctypes.c_float, + 'double': ctypes.c_double, + '_Bool': ctypes.c_bool, + } + + for _name in ['unsigned long long', 'unsigned long', + 'unsigned int', 'unsigned short', 'unsigned char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] + + for _name in ['long long', 'long', 'int', 'short', 'signed char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] + PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] + + + def __init__(self): + self.RTLD_LAZY = 0 # not supported anyway by ctypes + self.RTLD_NOW = 0 + self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL + self.RTLD_LOCAL = ctypes.RTLD_LOCAL + + def set_ffi(self, ffi): + self.ffi = ffi + + def _get_types(self): + return CTypesData, CTypesType + + def load_library(self, path, flags=0): + cdll = ctypes.CDLL(path, flags) + return CTypesLibrary(self, cdll) + + def 
new_void_type(self): + class CTypesVoid(CTypesData): + __slots__ = [] + _reftypename = 'void &' + @staticmethod + def _from_ctypes(novalue): + return None + @staticmethod + def _to_ctypes(novalue): + if novalue is not None: + raise TypeError("None expected, got %s object" % + (type(novalue).__name__,)) + return None + CTypesVoid._fix_class() + return CTypesVoid + + def new_primitive_type(self, name): + if name == 'wchar_t': + raise NotImplementedError(name) + ctype = self.PRIMITIVE_TYPES[name] + if name == 'char': + kind = 'char' + elif name in ('float', 'double'): + kind = 'float' + else: + if name in ('signed char', 'unsigned char'): + kind = 'byte' + elif name == '_Bool': + kind = 'bool' + else: + kind = 'int' + is_signed = (ctype(-1).value == -1) + # + def _cast_source_to_int(source): + if isinstance(source, (int, long, float)): + source = int(source) + elif isinstance(source, CTypesData): + source = source._cast_to_integer() + elif isinstance(source, bytes): + source = ord(source) + elif source is None: + source = 0 + else: + raise TypeError("bad type for cast to %r: %r" % + (CTypesPrimitive, type(source).__name__)) + return source + # + kind1 = kind + class CTypesPrimitive(CTypesGenericPrimitive): + __slots__ = ['_value'] + _ctype = ctype + _reftypename = '%s &' % name + kind = kind1 + + def __init__(self, value): + self._value = value + + @staticmethod + def _create_ctype_obj(init): + if init is None: + return ctype() + return ctype(CTypesPrimitive._to_ctypes(init)) + + if kind == 'int' or kind == 'byte': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = ctype(source).value # cast within range + return cls(source) + def __int__(self): + return self._value + + if kind == 'bool': + @classmethod + def _cast_from(cls, source): + if not isinstance(source, (int, long, float)): + source = _cast_source_to_int(source) + return cls(bool(source)) + def __int__(self): + return int(self._value) + + if kind == 'char': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = bytechr(source & 0xFF) + return cls(source) + def __int__(self): + return ord(self._value) + + if kind == 'float': + @classmethod + def _cast_from(cls, source): + if isinstance(source, float): + pass + elif isinstance(source, CTypesGenericPrimitive): + if hasattr(source, '__float__'): + source = float(source) + else: + source = int(source) + else: + source = _cast_source_to_int(source) + source = ctype(source).value # fix precision + return cls(source) + def __int__(self): + return int(self._value) + def __float__(self): + return self._value + + _cast_to_integer = __int__ + + if kind == 'int' or kind == 'byte' or kind == 'bool': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long)): + if isinstance(x, CTypesData): + x = int(x) + else: + raise TypeError("integer expected, got %s" % + type(x).__name__) + if ctype(x).value != x: + if not is_signed and x < 0: + raise OverflowError("%s: negative integer" % name) + else: + raise OverflowError("%s: integer out of bounds" + % name) + return x + + if kind == 'char': + @staticmethod + def _to_ctypes(x): + if isinstance(x, bytes) and len(x) == 1: + return x + if isinstance(x, CTypesPrimitive): # <CData <char>> + return x._value + raise TypeError("character expected, got %s" % + type(x).__name__) + def __nonzero__(self): + return ord(self._value) != 0 + else: + def __nonzero__(self): + return self._value != 0 + __bool__ = __nonzero__ + + if kind == 'float': + @staticmethod + def _to_ctypes(x): + if 
not isinstance(x, (int, long, float, CTypesData)): + raise TypeError("float expected, got %s" % + type(x).__name__) + return ctype(x).value + + @staticmethod + def _from_ctypes(value): + return getattr(value, 'value', value) + + @staticmethod + def _initialize(blob, init): + blob.value = CTypesPrimitive._to_ctypes(init) + + if kind == 'char': + def _to_string(self, maxlen): + return self._value + if kind == 'byte': + def _to_string(self, maxlen): + return chr(self._value & 0xff) + # + CTypesPrimitive._fix_class() + return CTypesPrimitive + + def new_pointer_type(self, BItem): + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'charp' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' + else: + kind = 'generic' + # + class CTypesPtr(CTypesGenericPtr): + __slots__ = ['_own'] + if kind == 'charp': + __slots__ += ['__as_strbuf'] + _BItem = BItem + if hasattr(BItem, '_ctype'): + _ctype = ctypes.POINTER(BItem._ctype) + _bitem_size = ctypes.sizeof(BItem._ctype) + else: + _ctype = ctypes.c_void_p + if issubclass(BItem, CTypesGenericArray): + _reftypename = BItem._get_c_name('(* &)') + else: + _reftypename = BItem._get_c_name(' * &') + + def __init__(self, init): + ctypeobj = BItem._create_ctype_obj(init) + if kind == 'charp': + self.__as_strbuf = ctypes.create_string_buffer( + ctypeobj.value + b'\x00') + self._as_ctype_ptr = ctypes.cast( + self.__as_strbuf, self._ctype) + else: + self._as_ctype_ptr = ctypes.pointer(ctypeobj) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own = True + + def __add__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address + + other * self._bitem_size) + else: + return NotImplemented + + def __sub__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address - + other * self._bitem_size) + elif type(self) is type(other): + return (self._address - other._address) // self._bitem_size + else: + return NotImplemented + + def __getitem__(self, index): + if getattr(self, '_own', False) and index != 0: + raise IndexError + return BItem._from_ctypes(self._as_ctype_ptr[index]) + + def __setitem__(self, index, value): + self._as_ctype_ptr[index] = BItem._to_ctypes(value) + + if kind == 'charp' or kind == 'voidp': + @classmethod + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) + else: + return super(CTypesPtr, cls)._arg_to_ctypes(*value) + + if kind == 'charp' or kind == 'bytep': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = sys.maxsize + p = ctypes.cast(self._as_ctype_ptr, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % ( + ctypes.sizeof(self._as_ctype_ptr.contents),) + return super(CTypesPtr, self)._get_own_repr() + # + if (BItem is self.ffi._get_cached_btype(model.void_type) or + BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): + CTypesPtr._automatic_casts = True + # + CTypesPtr._fix_class() + return CTypesPtr + + def new_array_type(self, CTypesPtr, length): + if length is None: + brackets = ' &[]' + else: + brackets = ' &[%d]' % length + BItem = CTypesPtr._BItem + getbtype = self.ffi._get_cached_btype + if 
BItem is getbtype(model.PrimitiveType('char')): + kind = 'char' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'byte' + else: + kind = 'generic' + # + class CTypesArray(CTypesGenericArray): + __slots__ = ['_blob', '_own'] + if length is not None: + _ctype = BItem._ctype * length + else: + __slots__.append('_ctype') + _reftypename = BItem._get_c_name(brackets) + _declared_length = length + _CTPtr = CTypesPtr + + def __init__(self, init): + if length is None: + if isinstance(init, (int, long)): + len1 = init + init = None + elif kind == 'char' and isinstance(init, bytes): + len1 = len(init) + 1 # extra null + else: + init = tuple(init) + len1 = len(init) + self._ctype = BItem._ctype * len1 + self._blob = self._ctype() + self._own = True + if init is not None: + self._initialize(self._blob, init) + + @staticmethod + def _initialize(blob, init): + if isinstance(init, bytes): + init = [init[i:i+1] for i in range(len(init))] + else: + if isinstance(init, CTypesGenericArray): + if (len(init) != len(blob) or + not isinstance(init, CTypesArray)): + raise TypeError("length/type mismatch: %s" % (init,)) + init = tuple(init) + if len(init) > len(blob): + raise IndexError("too many initializers") + addr = ctypes.cast(blob, ctypes.c_void_p).value + PTR = ctypes.POINTER(BItem._ctype) + itemsize = ctypes.sizeof(BItem._ctype) + for i, value in enumerate(init): + p = ctypes.cast(addr + i * itemsize, PTR) + BItem._initialize(p.contents, value) + + def __len__(self): + return len(self._blob) + + def __getitem__(self, index): + if not (0 <= index < len(self._blob)): + raise IndexError + return BItem._from_ctypes(self._blob[index]) + + def __setitem__(self, index, value): + if not (0 <= index < len(self._blob)): + raise IndexError + self._blob[index] = BItem._to_ctypes(value) + + if kind == 'char' or kind == 'byte': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = len(self._blob) + p = ctypes.cast(self._blob, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % (ctypes.sizeof(self._blob),) + return super(CTypesArray, self)._get_own_repr() + + def _convert_to_address(self, BClass): + if BClass in (CTypesPtr, None) or BClass._automatic_casts: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @staticmethod + def _from_ctypes(ctypes_array): + self = CTypesArray.__new__(CTypesArray) + self._blob = ctypes_array + return self + + @staticmethod + def _arg_to_ctypes(value): + return CTypesPtr._arg_to_ctypes(value) + + def __add__(self, other): + if isinstance(other, (int, long)): + return CTypesPtr._new_pointer_at( + ctypes.addressof(self._blob) + + other * ctypes.sizeof(BItem._ctype)) + else: + return NotImplemented + + @classmethod + def _cast_from(cls, source): + raise NotImplementedError("casting to %r" % ( + cls._get_c_name(),)) + # + CTypesArray._fix_class() + return CTypesArray + + def _new_struct_or_union(self, kind, name, base_ctypes_class): + # + class struct_or_union(base_ctypes_class): + pass + struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind + # + class CTypesStructOrUnion(CTypesBaseStructOrUnion): + __slots__ = ['_blob'] + _ctype = struct_or_union + _reftypename = '%s &' % (name,) + _kind = kind = kind1 + # + CTypesStructOrUnion._fix_class() + return CTypesStructOrUnion + + def 
new_struct_type(self, name): + return self._new_struct_or_union('struct', name, ctypes.Structure) + + def new_union_type(self, name): + return self._new_struct_or_union('union', name, ctypes.Union) + + def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, + totalsize=-1, totalalignment=-1, sflags=0, + pack=0): + if totalsize >= 0 or totalalignment >= 0: + raise NotImplementedError("the ctypes backend of CFFI does not support " + "structures completed by verify(); please " + "compile and install the _cffi_backend module.") + struct_or_union = CTypesStructOrUnion._ctype + fnames = [fname for (fname, BField, bitsize) in fields] + btypes = [BField for (fname, BField, bitsize) in fields] + bitfields = [bitsize for (fname, BField, bitsize) in fields] + # + bfield_types = {} + cfields = [] + for (fname, BField, bitsize) in fields: + if bitsize < 0: + cfields.append((fname, BField._ctype)) + bfield_types[fname] = BField + else: + cfields.append((fname, BField._ctype, bitsize)) + bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 + elif pack: + struct_or_union._pack_ = pack + struct_or_union._fields_ = cfields + CTypesStructOrUnion._bfield_types = bfield_types + # + @staticmethod + def _create_ctype_obj(init): + result = struct_or_union() + if init is not None: + initialize(result, init) + return result + CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj + # + def initialize(blob, init): + if is_union: + if len(init) > 1: + raise ValueError("union initializer: %d items given, but " + "only one supported (use a dict if needed)" + % (len(init),)) + if not isinstance(init, dict): + if isinstance(init, (bytes, unicode)): + raise TypeError("union initializer: got a str") + init = tuple(init) + if len(init) > len(fnames): + raise ValueError("too many values for %s initializer" % + CTypesStructOrUnion._get_c_name()) + init = dict(zip(fnames, init)) + addr = ctypes.addressof(blob) + for fname, value in init.items(): + BField, bitsize = name2fieldtype[fname] + assert bitsize < 0, \ + "not implemented: initializer with bit fields" + offset = CTypesStructOrUnion._offsetof(fname) + PTR = ctypes.POINTER(BField._ctype) + p = ctypes.cast(addr + offset, PTR) + BField._initialize(p.contents, value) + is_union = CTypesStructOrUnion._kind == 'union' + name2fieldtype = dict(zip(fnames, zip(btypes, bitfields))) + # + for fname, BField, bitsize in fields: + if fname == '': + raise NotImplementedError("nested anonymous structs/unions") + if hasattr(CTypesStructOrUnion, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + if bitsize < 0: + def getter(self, fname=fname, BField=BField, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BField._from_ctypes(p.contents) + def setter(self, value, fname=fname, BField=BField): + setattr(self._blob, fname, BField._to_ctypes(value)) + # + if issubclass(BField, CTypesGenericArray): + setter = None + if BField._declared_length == 0: + def getter(self, fname=fname, BFieldPtr=BField._CTPtr, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BFieldPtr._from_ctypes(p) + # + else: + def getter(self, fname=fname, BField=BField): + return BField._from_ctypes(getattr(self._blob, fname)) + def setter(self, value, fname=fname, BField=BField): + # xxx obscure workaround + 
value = BField._to_ctypes(value) + oldvalue = getattr(self._blob, fname) + setattr(self._blob, fname, value) + if value != getattr(self._blob, fname): + setattr(self._blob, fname, oldvalue) + raise OverflowError("value too large for bitfield") + setattr(CTypesStructOrUnion, fname, property(getter, setter)) + # + CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp)) + for fname in fnames: + if hasattr(CTypesPtr, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + def getter(self, fname=fname): + return getattr(self[0], fname) + def setter(self, value, fname=fname): + setattr(self[0], fname, value) + setattr(CTypesPtr, fname, property(getter, setter)) + + def new_function_type(self, BArgs, BResult, has_varargs): + nameargs = [BArg._get_c_name() for BArg in BArgs] + if has_varargs: + nameargs.append('...') + nameargs = ', '.join(nameargs) + # + class CTypesFunctionPtr(CTypesGenericPtr): + __slots__ = ['_own_callback', '_name'] + _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), + *[BArg._ctype for BArg in BArgs], + use_errno=True) + _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) + + def __init__(self, init, error=None): + # create a callback to the Python callable init() + import traceback + assert not has_varargs, "varargs not supported for callbacks" + if getattr(BResult, '_ctype', None) is not None: + error = BResult._from_ctypes( + BResult._create_ctype_obj(error)) + else: + error = None + def callback(*args): + args2 = [] + for arg, BArg in zip(args, BArgs): + args2.append(BArg._from_ctypes(arg)) + try: + res2 = init(*args2) + res2 = BResult._to_ctypes(res2) + except: + traceback.print_exc() + res2 = error + if issubclass(BResult, CTypesGenericPtr): + if res2: + res2 = ctypes.cast(res2, ctypes.c_void_p).value + # .value: http://bugs.python.org/issue1574593 + else: + res2 = None + #print repr(res2) + return res2 + if issubclass(BResult, CTypesGenericPtr): + # The only pointers callbacks can return are void*s: + # http://bugs.python.org/issue5710 + callback_ctype = ctypes.CFUNCTYPE( + ctypes.c_void_p, + *[BArg._ctype for BArg in BArgs], + use_errno=True) + else: + callback_ctype = CTypesFunctionPtr._ctype + self._as_ctype_ptr = callback_ctype(callback) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own_callback = init + + @staticmethod + def _initialize(ctypes_ptr, value): + if value: + raise NotImplementedError("ctypes backend: not supported: " + "initializers for function pointers") + + def __repr__(self): + c_name = getattr(self, '_name', None) + if c_name: + i = self._reftypename.index('(* &)') + if self._reftypename[i-1] not in ' )*': + c_name = ' ' + c_name + c_name = self._reftypename.replace('(* &)', c_name) + return CTypesData.__repr__(self, c_name) + + def _get_own_repr(self): + if getattr(self, '_own_callback', None) is not None: + return 'calling %r' % (self._own_callback,) + return super(CTypesFunctionPtr, self)._get_own_repr() + + def __call__(self, *args): + if has_varargs: + assert len(args) >= len(BArgs) + extraargs = args[len(BArgs):] + args = args[:len(BArgs)] + else: + assert len(args) == len(BArgs) + ctypes_args = [] + for arg, BArg in zip(args, BArgs): + ctypes_args.append(BArg._arg_to_ctypes(arg)) + if has_varargs: + for i, arg in enumerate(extraargs): + if arg is None: + ctypes_args.append(ctypes.c_void_p(0)) # NULL + continue + if not isinstance(arg, CTypesData): + raise TypeError( + "argument %d passed in the variadic part " + "needs to be a cdata object 
(got %s)" % + (1 + len(BArgs) + i, type(arg).__name__)) + ctypes_args.append(arg._arg_to_ctypes(arg)) + result = self._as_ctype_ptr(*ctypes_args) + return BResult._from_ctypes(result) + # + CTypesFunctionPtr._fix_class() + return CTypesFunctionPtr + + def new_enum_type(self, name, enumerators, enumvalues, CTypesInt): + assert isinstance(name, str) + reverse_mapping = dict(zip(reversed(enumvalues), + reversed(enumerators))) + # + class CTypesEnum(CTypesInt): + __slots__ = [] + _reftypename = '%s &' % name + + def _get_own_repr(self): + value = self._value + try: + return '%d: %s' % (value, reverse_mapping[value]) + except KeyError: + return str(value) + + def _to_string(self, maxlen): + value = self._value + try: + return reverse_mapping[value] + except KeyError: + return str(value) + # + CTypesEnum._fix_class() + return CTypesEnum + + def get_errno(self): + return ctypes.get_errno() + + def set_errno(self, value): + ctypes.set_errno(value) + + def string(self, b, maxlen=-1): + return b._to_string(maxlen) + + def buffer(self, bptr, size=-1): + raise NotImplementedError("buffer() with ctypes backend") + + def sizeof(self, cdata_or_BType): + if isinstance(cdata_or_BType, CTypesData): + return cdata_or_BType._get_size_of_instance() + else: + assert issubclass(cdata_or_BType, CTypesData) + return cdata_or_BType._get_size() + + def alignof(self, BType): + assert issubclass(BType, CTypesData) + return BType._alignment() + + def newp(self, BType, source): + if not issubclass(BType, CTypesData): + raise TypeError + return BType._newp(source) + + def cast(self, BType, source): + return BType._cast_from(source) + + def callback(self, BType, source, error, onerror): + assert onerror is None # XXX not implemented + return BType(source, error) + + _weakref_cache_ref = None + + def gcp(self, cdata, destructor, size=0): + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref + + if destructor is None: + try: + del weak_cache[MyRef(cdata)] + except KeyError: + raise TypeError("Can remove destructor only on a object " + "previously returned by ffi.gc()") + return None + + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) + return new_cdata + + typeof = type + + def getcname(self, BType, replace_with): + return BType._get_c_name(replace_with) + + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") + BField = BType._bfield_types[fieldname] + if BField is Ellipsis: + raise TypeError("not supported for bitfields") + return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset 
= BItem._get_size() * fieldname
+            if offset > sys.maxsize:
+                raise OverflowError
+            return (BItem, offset)
+        else:
+            raise TypeError(type(fieldname))
+
+    def rawaddressof(self, BTypePtr, cdata, offset=None):
+        if isinstance(cdata, CTypesBaseStructOrUnion):
+            ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
+        elif isinstance(cdata, CTypesGenericPtr):
+            if offset is None or not issubclass(type(cdata)._BItem,
+                                                CTypesBaseStructOrUnion):
+                raise TypeError("unexpected cdata type")
+            ptr = type(cdata)._to_ctypes(cdata)
+        elif isinstance(cdata, CTypesGenericArray):
+            ptr = type(cdata)._to_ctypes(cdata)
+        else:
+            raise TypeError("expected a <cdata 'struct-or-union'>")
+        if offset:
+            ptr = ctypes.cast(
+                ctypes.c_void_p(
+                    ctypes.cast(ptr, ctypes.c_void_p).value + offset),
+                type(ptr))
+        return BTypePtr._from_ctypes(ptr)
+
+
+class CTypesLibrary(object):
+
+    def __init__(self, backend, cdll):
+        self.backend = backend
+        self.cdll = cdll
+
+    def load_function(self, BType, name):
+        c_func = getattr(self.cdll, name)
+        funcobj = BType._from_ctypes(c_func)
+        funcobj._name = name
+        return funcobj
+
+    def read_variable(self, BType, name):
+        try:
+            ctypes_obj = BType._ctype.in_dll(self.cdll, name)
+        except AttributeError as e:
+            raise NotImplementedError(e)
+        return BType._from_ctypes(ctypes_obj)
+
+    def write_variable(self, BType, name, value):
+        new_ctypes_obj = BType._to_ctypes(value)
+        ctypes_obj = BType._ctype.in_dll(self.cdll, name)
+        ctypes.memmove(ctypes.addressof(ctypes_obj),
+                       ctypes.addressof(new_ctypes_obj),
+                       ctypes.sizeof(BType._ctype))
diff --git a/.venv/lib/python3.11/site-packages/cffi/cffi_opcode.py b/.venv/lib/python3.11/site-packages/cffi/cffi_opcode.py
new file mode 100644
index 0000000000000000000000000000000000000000..6421df62134ce43a10d72b3b404102681574abf3
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/cffi/cffi_opcode.py
@@ -0,0 +1,187 @@
+from .error import VerificationError
+
+class CffiOp(object):
+    def __init__(self, op, arg):
+        self.op = op
+        self.arg = arg
+
+    def as_c_expr(self):
+        if self.op is None:
+            assert isinstance(self.arg, str)
+            return '(_cffi_opcode_t)(%s)' % (self.arg,)
+        classname = CLASS_NAME[self.op]
+        return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
+
+    def as_python_bytes(self):
+        if self.op is None and self.arg.isdigit():
+            value = int(self.arg)     # non-negative: '-' not in self.arg
+            if value >= 2**31:
+                raise OverflowError("cannot emit %r: limited to 2**31-1"
+                                    % (self.arg,))
+            return format_four_bytes(value)
+        if isinstance(self.arg, str):
+            raise VerificationError("cannot emit to Python: %r" % (self.arg,))
+        return format_four_bytes((self.arg << 8) | self.op)
+
+    def __str__(self):
+        classname = CLASS_NAME.get(self.op, self.op)
+        return '(%s %s)' % (classname, self.arg)
+
+def format_four_bytes(num):
+    return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
+        (num >> 24) & 0xFF,
+        (num >> 16) & 0xFF,
+        (num >>  8) & 0xFF,
+        (num      ) & 0xFF)
+
+OP_PRIMITIVE = 1
+OP_POINTER = 3
+OP_ARRAY = 5
+OP_OPEN_ARRAY = 7
+OP_STRUCT_UNION = 9
+OP_ENUM = 11
+OP_FUNCTION = 13
+OP_FUNCTION_END = 15
+OP_NOOP = 17
+OP_BITFIELD = 19
+OP_TYPENAME = 21
+OP_CPYTHON_BLTN_V = 23   # varargs
+OP_CPYTHON_BLTN_N = 25   # noargs
+OP_CPYTHON_BLTN_O = 27   # O  (i.e. 
a single arg) +OP_CONSTANT = 29 +OP_CONSTANT_INT = 31 +OP_GLOBAL_VAR = 33 +OP_DLOPEN_FUNC = 35 +OP_DLOPEN_CONST = 37 +OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 + +PRIM_VOID = 0 +PRIM_BOOL = 1 +PRIM_CHAR = 2 +PRIM_SCHAR = 3 +PRIM_UCHAR = 4 +PRIM_SHORT = 5 +PRIM_USHORT = 6 +PRIM_INT = 7 +PRIM_UINT = 8 +PRIM_LONG = 9 +PRIM_ULONG = 10 +PRIM_LONGLONG = 11 +PRIM_ULONGLONG = 12 +PRIM_FLOAT = 13 +PRIM_DOUBLE = 14 +PRIM_LONGDOUBLE = 15 + +PRIM_WCHAR = 16 +PRIM_INT8 = 17 +PRIM_UINT8 = 18 +PRIM_INT16 = 19 +PRIM_UINT16 = 20 +PRIM_INT32 = 21 +PRIM_UINT32 = 22 +PRIM_INT64 = 23 +PRIM_UINT64 = 24 +PRIM_INTPTR = 25 +PRIM_UINTPTR = 26 +PRIM_PTRDIFF = 27 +PRIM_SIZE = 28 +PRIM_SSIZE = 29 +PRIM_INT_LEAST8 = 30 +PRIM_UINT_LEAST8 = 31 +PRIM_INT_LEAST16 = 32 +PRIM_UINT_LEAST16 = 33 +PRIM_INT_LEAST32 = 34 +PRIM_UINT_LEAST32 = 35 +PRIM_INT_LEAST64 = 36 +PRIM_UINT_LEAST64 = 37 +PRIM_INT_FAST8 = 38 +PRIM_UINT_FAST8 = 39 +PRIM_INT_FAST16 = 40 +PRIM_UINT_FAST16 = 41 +PRIM_INT_FAST32 = 42 +PRIM_UINT_FAST32 = 43 +PRIM_INT_FAST64 = 44 +PRIM_UINT_FAST64 = 45 +PRIM_INTMAX = 46 +PRIM_UINTMAX = 47 +PRIM_FLOATCOMPLEX = 48 +PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 + +_NUM_PRIM = 52 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 + +_IO_FILE_STRUCT = -1 + +PRIMITIVE_TO_INDEX = { + 'char': PRIM_CHAR, + 'short': PRIM_SHORT, + 'int': PRIM_INT, + 'long': PRIM_LONG, + 'long long': PRIM_LONGLONG, + 'signed char': PRIM_SCHAR, + 'unsigned char': PRIM_UCHAR, + 'unsigned short': PRIM_USHORT, + 'unsigned int': PRIM_UINT, + 'unsigned long': PRIM_ULONG, + 'unsigned long long': PRIM_ULONGLONG, + 'float': PRIM_FLOAT, + 'double': PRIM_DOUBLE, + 'long double': PRIM_LONGDOUBLE, + '_cffi_float_complex_t': PRIM_FLOATCOMPLEX, + '_cffi_double_complex_t': PRIM_DOUBLECOMPLEX, + '_Bool': PRIM_BOOL, + 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, + 'int8_t': PRIM_INT8, + 'uint8_t': PRIM_UINT8, + 'int16_t': PRIM_INT16, + 'uint16_t': PRIM_UINT16, + 'int32_t': PRIM_INT32, + 'uint32_t': PRIM_UINT32, + 'int64_t': PRIM_INT64, + 'uint64_t': PRIM_UINT64, + 'intptr_t': PRIM_INTPTR, + 'uintptr_t': PRIM_UINTPTR, + 'ptrdiff_t': PRIM_PTRDIFF, + 'size_t': PRIM_SIZE, + 'ssize_t': PRIM_SSIZE, + 'int_least8_t': PRIM_INT_LEAST8, + 'uint_least8_t': PRIM_UINT_LEAST8, + 'int_least16_t': PRIM_INT_LEAST16, + 'uint_least16_t': PRIM_UINT_LEAST16, + 'int_least32_t': PRIM_INT_LEAST32, + 'uint_least32_t': PRIM_UINT_LEAST32, + 'int_least64_t': PRIM_INT_LEAST64, + 'uint_least64_t': PRIM_UINT_LEAST64, + 'int_fast8_t': PRIM_INT_FAST8, + 'uint_fast8_t': PRIM_UINT_FAST8, + 'int_fast16_t': PRIM_INT_FAST16, + 'uint_fast16_t': PRIM_UINT_FAST16, + 'int_fast32_t': PRIM_INT_FAST32, + 'uint_fast32_t': PRIM_UINT_FAST32, + 'int_fast64_t': PRIM_INT_FAST64, + 'uint_fast64_t': PRIM_UINT_FAST64, + 'intmax_t': PRIM_INTMAX, + 'uintmax_t': PRIM_UINTMAX, + } + +F_UNION = 0x01 +F_CHECK_FIELDS = 0x02 +F_PACKED = 0x04 +F_EXTERNAL = 0x08 +F_OPAQUE = 0x10 + +G_FLAGS = dict([('_CFFI_' + _key, globals()[_key]) + for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED', + 'F_EXTERNAL', 'F_OPAQUE']]) + +CLASS_NAME = {} +for _name, _value in list(globals().items()): + if _name.startswith('OP_') and isinstance(_value, int): + CLASS_NAME[_value] = _name[3:] diff --git a/.venv/lib/python3.11/site-packages/cffi/commontypes.py b/.venv/lib/python3.11/site-packages/cffi/commontypes.py new file mode 100644 index 0000000000000000000000000000000000000000..d4dae3517009fc3f7ccaf01d97d10df098700d06 --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/cffi/commontypes.py @@ -0,0 +1,82 @@ +import sys +from . import model +from .error import FFIError + + +COMMON_TYPES = {} + +try: + # fetch "bool" and all simple Windows types + from _cffi_backend import _get_common_types + _get_common_types(COMMON_TYPES) +except ImportError: + pass + +COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE') +COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above +COMMON_TYPES['float _Complex'] = '_cffi_float_complex_t' +COMMON_TYPES['double _Complex'] = '_cffi_double_complex_t' + +for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + if _type.endswith('_t'): + COMMON_TYPES[_type] = _type +del _type + +_CACHE = {} + +def resolve_common_type(parser, commontype): + try: + return _CACHE[commontype] + except KeyError: + cdecl = COMMON_TYPES.get(commontype, commontype) + if not isinstance(cdecl, str): + result, quals = cdecl, 0 # cdecl is already a BaseType + elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + result, quals = model.PrimitiveType(cdecl), 0 + elif cdecl == 'set-unicode-needed': + raise FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) + else: + if commontype == cdecl: + raise FFIError( + "Unsupported type: %r. Please look at " + "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations " + "and file an issue if you think this type should really " + "be supported." % (commontype,)) + result, quals = parser.parse_type_and_quals(cdecl) # recursive + + assert isinstance(result, model.BaseTypeByIdentity) + _CACHE[commontype] = result, quals + return result, quals + + +# ____________________________________________________________ +# extra types for Windows (most of them are in commontypes.c) + + +def win_common_types(): + return { + "UNICODE_STRING": model.StructType( + "_UNICODE_STRING", + ["Length", + "MaximumLength", + "Buffer"], + [model.PrimitiveType("unsigned short"), + model.PrimitiveType("unsigned short"), + model.PointerType(model.PrimitiveType("wchar_t"))], + [-1, -1, -1]), + "PUNICODE_STRING": "UNICODE_STRING *", + "PCUNICODE_STRING": "const UNICODE_STRING *", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", + } + +if sys.platform == 'win32': + COMMON_TYPES.update(win_common_types()) diff --git a/.venv/lib/python3.11/site-packages/cffi/error.py b/.venv/lib/python3.11/site-packages/cffi/error.py new file mode 100644 index 0000000000000000000000000000000000000000..0a27247c32a381ab7cecedd0f985b781619c1ea5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/error.py @@ -0,0 +1,31 @@ + +class FFIError(Exception): + __module__ = 'cffi' + +class CDefError(Exception): + __module__ = 'cffi' + def __str__(self): + try: + current_decl = self.args[1] + filename = current_decl.coord.file + linenum = current_decl.coord.line + prefix = '%s:%d: ' % (filename, linenum) + except (AttributeError, TypeError, IndexError): + prefix = '' + return '%s%s' % (prefix, self.args[0]) + +class VerificationError(Exception): + """ An error raised when verification fails + """ + __module__ = 'cffi' + +class VerificationMissing(Exception): + """ An error raised when incomplete structures are passed into + cdef, but no verification has been done + """ + __module__ = 'cffi' + +class PkgConfigError(Exception): + """ An error 
raised for missing modules in pkg-config + """ + __module__ = 'cffi' diff --git a/.venv/lib/python3.11/site-packages/cffi/lock.py b/.venv/lib/python3.11/site-packages/cffi/lock.py new file mode 100644 index 0000000000000000000000000000000000000000..db91b7158c4ee9aa653462fe38e79ed1b553db87 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/.venv/lib/python3.11/site-packages/cffi/recompiler.py b/.venv/lib/python3.11/site-packages/cffi/recompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..57781a3cad616a7adf82dc7f0c701167467e3dd2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/recompiler.py @@ -0,0 +1,1598 @@ +import os, sys, io +from . import ffiplatform, model +from .error import VerificationError +from .cffi_opcode import * + +VERSION_BASE = 0x2601 +VERSION_EMBEDDED = 0x2701 +VERSION_CHAR16CHAR32 = 0x2801 + +USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or + sys.version_info >= (3, 5)) + + +class GlobalExpr: + def __init__(self, name, address, type_op, size=0, check_value=0): + self.name = name + self.address = address + self.type_op = type_op + self.size = size + self.check_value = check_value + + def as_c_expr(self): + return ' { "%s", (void *)%s, %s, (void *)%s },' % ( + self.name, self.address, self.type_op.as_c_expr(), self.size) + + def as_python_expr(self): + return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, + self.check_value) + +class FieldExpr: + def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): + self.name = name + self.field_offset = field_offset + self.field_size = field_size + self.fbitsize = fbitsize + self.field_type_op = field_type_op + + def as_c_expr(self): + spaces = " " * len(self.name) + return (' { "%s", %s,\n' % (self.name, self.field_offset) + + ' %s %s,\n' % (spaces, self.field_size) + + ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) + + def as_python_expr(self): + raise NotImplementedError + + def as_field_python_expr(self): + if self.field_type_op.op == OP_NOOP: + size_expr = '' + elif self.field_type_op.op == OP_BITFIELD: + size_expr = format_four_bytes(self.fbitsize) + else: + raise NotImplementedError + return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), + size_expr, + self.name) + +class StructUnionExpr: + def __init__(self, name, type_index, flags, size, alignment, comment, + first_field_index, c_fields): + self.name = name + self.type_index = type_index + self.flags = flags + self.size = size + self.alignment = alignment + self.comment = comment + self.first_field_index = first_field_index + self.c_fields = c_fields + + def as_c_expr(self): + return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) + + '\n %s, %s, ' % (self.size, self.alignment) + + '%d, %d ' % (self.first_field_index, len(self.c_fields)) + + ('/* %s */ ' 
% self.comment if self.comment else '') + + '},') + + def as_python_expr(self): + flags = eval(self.flags, G_FLAGS) + fields_expr = [c_field.as_field_python_expr() + for c_field in self.c_fields] + return "(b'%s%s%s',%s)" % ( + format_four_bytes(self.type_index), + format_four_bytes(flags), + self.name, + ','.join(fields_expr)) + +class EnumExpr: + def __init__(self, name, type_index, size, signed, allenums): + self.name = name + self.type_index = type_index + self.size = size + self.signed = signed + self.allenums = allenums + + def as_c_expr(self): + return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' + ' "%s" },' % (self.name, self.type_index, + self.size, self.signed, self.allenums)) + + def as_python_expr(self): + prim_index = { + (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, + (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, + (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, + (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64, + }[self.size, self.signed] + return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), + format_four_bytes(prim_index), + self.name, self.allenums) + +class TypenameExpr: + def __init__(self, name, type_index): + self.name = name + self.type_index = type_index + + def as_c_expr(self): + return ' { "%s", %d },' % (self.name, self.type_index) + + def as_python_expr(self): + return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) + + +# ____________________________________________________________ + + +class Recompiler: + _num_externpy = 0 + + def __init__(self, ffi, module_name, target_is_python=False): + self.ffi = ffi + self.module_name = module_name + self.target_is_python = target_is_python + self._version = VERSION_BASE + + def needs_version(self, ver): + self._version = max(self._version, ver) + + def collect_type_table(self): + self._typesdict = {} + self._generate("collecttype") + # + all_decls = sorted(self._typesdict, key=str) + # + # prepare all FUNCTION bytecode sequences first + self.cffi_types = [] + for tp in all_decls: + if tp.is_raw_function: + assert self._typesdict[tp] is None + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + for tp1 in tp.args: + assert isinstance(tp1, (model.VoidType, + model.BasePrimitiveType, + model.PointerType, + model.StructOrUnionOrEnum, + model.FunctionPtrType)) + if self._typesdict[tp1] is None: + self._typesdict[tp1] = len(self.cffi_types) + self.cffi_types.append(tp1) # placeholder + self.cffi_types.append('END') # placeholder + # + # prepare all OTHER bytecode sequences + for tp in all_decls: + if not tp.is_raw_function and self._typesdict[tp] is None: + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + if tp.is_array_type and tp.length is not None: + self.cffi_types.append('LEN') # placeholder + assert None not in self._typesdict.values() + # + # collect all structs and unions and enums + self._struct_unions = {} + self._enums = {} + for tp in all_decls: + if isinstance(tp, model.StructOrUnion): + self._struct_unions[tp] = None + elif isinstance(tp, model.EnumType): + self._enums[tp] = None + for i, tp in enumerate(sorted(self._struct_unions, + key=lambda tp: tp.name)): + self._struct_unions[tp] = i + for i, tp in enumerate(sorted(self._enums, + key=lambda tp: tp.name)): + self._enums[tp] = i + # + # emit all bytecode sequences now + for tp in all_decls: + method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) + method(tp, self._typesdict[tp]) + # + # consistency check + for op in self.cffi_types: + assert isinstance(op, CffiOp) + 
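# A minimal standalone sketch, not part of upstream cffi: entries of the
# type table built above are serialized for the ABI-mode (Python) output by
# CffiOp.as_python_bytes(), which packs (arg << 8) | op into four big-endian
# bytes via the format_four_bytes() helper from cffi_opcode.py earlier in
# this diff.
OP_POINTER = 3                      # value taken from cffi_opcode.py above

def format_four_bytes(num):         # same helper as in cffi_opcode.py
    return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
        (num >> 24) & 0xFF, (num >> 16) & 0xFF,
        (num >> 8) & 0xFF, num & 0xFF)

# "pointer to the type stored at index 7 of _cffi_types":
print(format_four_bytes((7 << 8) | OP_POINTER))     # -> \x00\x00\x07\x03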
self.cffi_types = tuple(self.cffi_types) # don't change any more + + def _enum_fields(self, tp): + # When producing C, expand all anonymous struct/union fields. + # That's necessary to have C code checking the offsets of the + # individual fields contained in them. When producing Python, + # don't do it and instead write it like it is, with the + # corresponding fields having an empty name. Empty names are + # recognized at runtime when we import the generated Python + # file. + expand_anonymous_struct_union = not self.target_is_python + return tp.enumfields(expand_anonymous_struct_union) + + def _do_collect_type(self, tp): + if not isinstance(tp, model.BaseTypeByIdentity): + if isinstance(tp, tuple): + for x in tp: + self._do_collect_type(x) + return + if tp not in self._typesdict: + self._typesdict[tp] = None + if isinstance(tp, model.FunctionPtrType): + self._do_collect_type(tp.as_raw_function()) + elif isinstance(tp, model.StructOrUnion): + if tp.fldtypes is not None and ( + tp not in self.ffi._parser._included_declarations): + for name1, tp1, _, _ in self._enum_fields(tp): + self._do_collect_type(self._field_type(tp, name1, tp1)) + else: + for _, x in tp._get_items(): + self._do_collect_type(x) + + def _generate(self, step_name): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in recompile(): %r" % name) + try: + self._current_quals = quals + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + # ---------- + + ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] + + def collect_step_tables(self): + # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. 
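# A tiny standalone model, not upstream code, of the string-based dispatch
# used by _generate() above: declaration keys look like "<kind> <realname>",
# and the kind plus the current step select a '_generate_cpy_*' method by
# name at runtime.
class _DemoDispatch:
    def _generate_cpy_function_decl(self, realname):
        return 'decl for %s' % realname
    def generate(self, declkey, step_name):
        kind, realname = declkey.split(' ', 1)
        method = getattr(self, '_generate_cpy_%s_%s' % (kind, step_name))
        return method(realname)

print(_DemoDispatch().generate('function foo', 'decl'))   # -> decl for foo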
+ self._lsts = {} + for step_name in self.ALL_STEPS: + self._lsts[step_name] = [] + self._seen_struct_unions = set() + self._generate("ctx") + self._add_missing_struct_unions() + # + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if step_name != "field": + lst.sort(key=lambda entry: entry.name) + self._lsts[step_name] = tuple(lst) # don't change any more + # + # check for a possible internal inconsistency: _cffi_struct_unions + # should have been generated with exactly self._struct_unions + lst = self._lsts["struct_union"] + for tp, i in self._struct_unions.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._struct_unions) + # same with enums + lst = self._lsts["enum"] + for tp, i in self._enums.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._enums) + + # ---------- + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self, f, preamble): + if self.target_is_python: + assert preamble is None + self.write_py_source_to_f(f) + else: + assert preamble is not None + self.write_c_source_to_f(f, preamble) + + def _rel_readlines(self, filename): + g = open(os.path.join(os.path.dirname(__file__), filename), 'r') + lines = g.readlines() + g.close() + return lines + + def write_c_source_to_f(self, f, preamble): + self._f = f + prnt = self._prnt + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') + if not USE_LIMITED_API: + prnt('#define _CFFI_NO_LIMITED_API') + # + # first the '#include' (actually done by inlining the file's content) + lines = self._rel_readlines('_cffi_include.h') + i = lines.index('#include "parse_c_type.h"\n') + lines[i:i+1] = self._rel_readlines('parse_c_type.h') + prnt(''.join(lines)) + # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') + self._print_string_literal_in_array(self.ffi._embedding) + prnt('0 };') + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + i = lines.index('#include "_cffi_errors.h"\n') + lines[i:i+1] = self._rel_readlines('_cffi_errors.h') + prnt(''.join(lines)) + self.needs_version(VERSION_EMBEDDED) + # + # then paste the C source given by the user, verbatim. + prnt('/************************************************************/') + prnt() + prnt(preamble) + prnt() + prnt('/************************************************************/') + prnt() + # + # the declaration of '_cffi_types' + prnt('static void *_cffi_types[] = {') + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + for i, op in enumerate(self.cffi_types): + comment = '' + if i in typeindex2type: + comment = ' // ' + typeindex2type[i]._get_c_name() + prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) + if not self.cffi_types: + prnt(' 0') + prnt('};') + prnt() + # + # call generate_cpy_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. 
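# A standalone sketch with hypothetical names, not upstream code, of the
# invariant checked above: after each step table is sorted by entry name,
# position i of the "struct_union" list must describe the struct that was
# assigned index i in self._struct_unions.
names = ['point_s', 'axis_s', 'line_s']
struct_unions = {name: i for i, name in enumerate(sorted(names))}
lst = tuple(sorted(names))            # stands in for _lsts["struct_union"]
for name, i in struct_unions.items():
    assert lst[i] == name
assert len(lst) == len(struct_unions)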
+ self._seen_constants = set() + self._generate("decl") + # + # the declaration of '_cffi_globals' and '_cffi_typenames' + nums = {} + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + nums[step_name] = len(lst) + if nums[step_name] > 0: + prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( + step_name, step_name)) + for entry in lst: + prnt(entry.as_c_expr()) + prnt('};') + prnt() + # + # the declaration of '_cffi_includes' + if self.ffi._included_ffis: + prnt('static const char * const _cffi_includes[] = {') + for ffi_to_include in self.ffi._included_ffis: + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is None: + raise VerificationError( + "not implemented yet: ffi.include() of a Python-based " + "ffi inside a C-based ffi") + prnt(' "%s",' % (included_module_name,)) + prnt(' NULL') + prnt('};') + prnt() + # + # the declaration of '_cffi_type_context' + prnt('static const struct _cffi_type_context_s _cffi_type_context = {') + prnt(' _cffi_types,') + for step_name in self.ALL_STEPS: + if nums[step_name] > 0: + prnt(' _cffi_%ss,' % step_name) + else: + prnt(' NULL, /* no %ss */' % step_name) + for step_name in self.ALL_STEPS: + if step_name != "field": + prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) + if self.ffi._included_ffis: + prnt(' _cffi_includes,') + else: + prnt(' NULL, /* no includes */') + prnt(' %d, /* num_types */' % (len(self.cffi_types),)) + flags = 0 + if self._num_externpy > 0 or self.ffi._embedding is not None: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) + prnt('};') + prnt() + # + # the init function + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') + prnt('#endif') + prnt() + prnt('#ifdef PYPY_VERSION') + prnt('PyMODINIT_FUNC') + prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) + prnt('{') + if flags & 1: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python_org = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') + prnt(' p[0] = (const void *)0x%x;' % self._version) + prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') + prnt('}') + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + prnt('# ifdef _MSC_VER') + prnt(' PyMODINIT_FUNC') + prnt('# if PY_MAJOR_VERSION >= 3') + prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) + prnt('# else') + prnt(' init%s(void) { }' % (base_module_name,)) + prnt('# endif') + prnt('# endif') + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % (base_module_name,)) + prnt('{') + prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#else') + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % (base_module_name,)) + prnt('{') + prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#endif') + prnt() + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility pop') + prnt('#endif') + self._version = None + + def _to_py(self, x): + if isinstance(x, str): + return "b'%s'" % (x,) + if isinstance(x, (list, tuple)): + 
rep = [self._to_py(item) for item in x] + if len(rep) == 1: + rep.append('') + return "(%s)" % (','.join(rep),) + return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. + + def write_py_source_to_f(self, f): + self._f = f + prnt = self._prnt + # + # header + prnt("# auto-generated file") + prnt("import _cffi_backend") + # + # the 'import' of the included ffis + num_includes = len(self.ffi._included_ffis or ()) + for i in range(num_includes): + ffi_to_include = self.ffi._included_ffis[i] + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is not None: + raise VerificationError( + "not implemented yet: ffi.include() of a C-based " + "ffi inside a Python-based ffi") + prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) + prnt() + prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) + prnt(" _version = 0x%x," % (self._version,)) + self._version = None + # + # the '_types' keyword argument + self.cffi_types = tuple(self.cffi_types) # don't change any more + types_lst = [op.as_python_bytes() for op in self.cffi_types] + prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + # + # the keyword arguments from ALL_STEPS + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if len(lst) > 0 and step_name != "field": + prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) + # + # the '_includes' keyword argument + if num_includes > 0: + prnt(' _includes = (%s,),' % ( + ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) + # + # the footer + prnt(')') + + # ---------- + + def _gettypenum(self, type): + # a KeyError here is a bug. please report it! 
:-)
+        return self._typesdict[type]
+
+    def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
+        extraarg = ''
+        if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type():
+            if tp.is_integer_type() and tp.name != '_Bool':
+                converter = '_cffi_to_c_int'
+                extraarg = ', %s' % tp.name
+            elif isinstance(tp, model.UnknownFloatType):
+                # don't check with is_float_type(): it may be a 'long
+                # double' here, and _cffi_to_c_double would lose precision
+                converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
+            else:
+                cname = tp.get_c_name('')
+                converter = '(%s)_cffi_to_c_%s' % (cname,
+                                                   tp.name.replace(' ', '_'))
+                if cname in ('char16_t', 'char32_t'):
+                    self.needs_version(VERSION_CHAR16CHAR32)
+            errvalue = '-1'
+        #
+        elif isinstance(tp, model.PointerType):
+            self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
+                                                    tovar, errcode)
+            return
+        #
+        elif (isinstance(tp, model.StructOrUnionOrEnum) or
+              isinstance(tp, model.BasePrimitiveType)):
+            # a struct (not a struct pointer) as a function argument;
+            # or, a complex (the same code works)
+            self._prnt('  if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
+                       % (tovar, self._gettypenum(tp), fromvar))
+            self._prnt('    %s;' % errcode)
+            return
+        #
+        elif isinstance(tp, model.FunctionPtrType):
+            converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
+            extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
+            errvalue = 'NULL'
+        #
+        else:
+            raise NotImplementedError(tp)
+        #
+        self._prnt('  %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
+        self._prnt('  if (%s == (%s)%s && PyErr_Occurred())' % (
+            tovar, tp.get_c_name(''), errvalue))
+        self._prnt('    %s;' % errcode)
+
+    def _extra_local_variables(self, tp, localvars, freelines):
+        if isinstance(tp, model.PointerType):
+            localvars.add('Py_ssize_t datasize')
+            localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
+            freelines.add('if (large_args_free != NULL)'
+                          ' _cffi_free_array_arguments(large_args_free);')
+
+    def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
+        self._prnt('  datasize = _cffi_prepare_pointer_call_argument(')
+        self._prnt('      _cffi_type(%d), %s, (char **)&%s);' % (
+            self._gettypenum(tp), fromvar, tovar))
+        self._prnt('  if (datasize != 0) {')
+        self._prnt('    %s = ((size_t)datasize) <= 640 ? 
' + '(%s)alloca((size_t)datasize) : NULL;' % ( + tovar, tp.get_c_name(''))) + self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' + '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) + self._prnt(' datasize, &large_args_free) < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.BasePrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif isinstance(tp, model.UnknownFloatType): + return '_cffi_from_c_double(%s)' % (var,) + elif tp.name != 'long double' and not tp.is_complex_type(): + cname = tp.name.replace(' ', '_') + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + return '_cffi_from_c_%s(%s)' % (cname, var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs + + def _typedef_type(self, tp, name): + return self._global_type(tp, "(*(%s *)0)" % (name,)) + + def _generate_cpy_typedef_collecttype(self, tp, name): + self._do_collect_type(self._typedef_type(tp, name)) + + def _generate_cpy_typedef_decl(self, tp, name): + pass + + def _typedef_ctx(self, tp, name): + type_index = self._typesdict[tp] + self._lsts["typename"].append(TypenameExpr(name, type_index)) + + def _generate_cpy_typedef_ctx(self, tp, name): + tp = self._typedef_type(tp, name) + self._typedef_ctx(tp, name) + if getattr(tp, "origin", None) == "unknown_type": + self._struct_ctx(tp, tp.name, approxname=None) + elif isinstance(tp, model.NamedPointerType): + self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, + named_ptr=tp) + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + self._do_collect_type(tp.as_raw_function()) + if tp.ellipsis and not self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_function_decl(self, tp, name): + assert not self.target_is_python + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_constant_decl(tp, name) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + # + # ------------------------------ + # the 'd' version of the function, only for addressof(lib, 'func') + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arguments.append(type.get_c_name(' x%d' % i, context)) + call_arguments.append('x%d' % i) + repr_arguments = ', 
'.join(arguments) + repr_arguments = repr_arguments or 'void' + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) + prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) + prnt('{') + call_arguments = ', '.join(call_arguments) + result_code = 'return ' + if isinstance(tp.result, model.VoidType): + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, call_arguments)) + prnt('}') + # + prnt('#ifndef PYPY_VERSION') # ------------------------------ + # + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' x%d' % i, context) + prnt(' %s;' % arg) + # + localvars = set() + freelines = set() + for type in tp.args: + self._extra_local_variables(type, localvars, freelines) + for decl in sorted(localvars): + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + result_decl = ' %s;' % tp.result.get_c_name(' result', context) + prnt(result_decl) + prnt(' PyObject *pyresult;') + else: + result_decl = None + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( + name, len(rng), len(rng), + ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + call_arguments = ['x%d' % i for i in range(len(tp.args))] + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' pyresult = %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + for freeline in freelines: + prnt(' ' + freeline) + prnt(' return pyresult;') + else: + for freeline in freelines: + prnt(' ' + freeline) + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + # + prnt('#else') # ------------------------------ + # + # the PyPy version: need to replace struct/union arguments with + # pointers, and if the result is a struct/union, insert a first + # arg that is a pointer to the result. We also do that for + # complex args and return type. 
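# A toy model with hypothetical type names, not upstream code, of the
# rewriting described in the comment above: struct-valued parameters are
# passed by pointer, and a struct result becomes a leading 'result'
# out-parameter, so only pointers cross the PyPy boundary.
def _rewrite_signature(args, result):
    new_args = [a + ' *' if a.startswith('struct') else a for a in args]
    if result.startswith('struct'):
        new_args.insert(0, result + ' *result')
        result = 'void'
    return result, new_args

print(_rewrite_signature(['struct pt', 'int'], 'struct pt'))
# -> ('void', ['struct pt *result', 'struct pt *', 'int'])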
+ def need_indirection(type): + return (isinstance(type, model.StructOrUnion) or + (isinstance(type, model.PrimitiveType) and + type.is_complex_type())) + difference = False + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + indirection = '' + if need_indirection(type): + indirection = '*' + difference = True + arg = type.get_c_name(' %sx%d' % (indirection, i), context) + arguments.append(arg) + call_arguments.append('%sx%d' % (indirection, i)) + tp_result = tp.result + if need_indirection(tp_result): + context = 'result of %s' % name + arg = tp_result.get_c_name(' *result', context) + arguments.insert(0, arg) + tp_result = model.void_type + result_decl = None + result_code = '*result = ' + difference = True + if difference: + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) + prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) + prnt('{') + if result_decl: + prnt(result_decl) + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + if result_decl: + prnt(' return result;') + prnt('}') + else: + prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) + # + prnt('#endif') # ------------------------------ + prnt() + + def _generate_cpy_function_ctx(self, tp, name): + if tp.ellipsis and not self.target_is_python: + self._generate_cpy_constant_ctx(tp, name) + return + type_index = self._typesdict[tp.as_raw_function()] + numargs = len(tp.args) + if self.target_is_python: + meth_kind = OP_DLOPEN_FUNC + elif numargs == 0: + meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' + elif numargs == 1: + meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' + else: + meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' + self._lsts["global"].append( + GlobalExpr(name, '_cffi_f_%s' % name, + CffiOp(meth_kind, type_index), + size='_cffi_d_%s' % name)) + + # ---------- + # named structs or unions + + def _field_type(self, tp_struct, field_name, tp_field): + if isinstance(tp_field, model.ArrayType): + actual_length = tp_field.length + if actual_length == '...': + ptr_struct_name = tp_struct.get_c_name('*') + actual_length = '_cffi_array_len(((%s)0)->%s)' % ( + ptr_struct_name, field_name) + tp_item = self._field_type(tp_struct, '%s[0]' % field_name, + tp_field.item) + tp_field = model.ArrayType(tp_item, actual_length) + return tp_field + + def _struct_collecttype(self, tp): + self._do_collect_type(tp) + if self.target_is_python: + # also requires nested anon struct/unions in ABI mode, recursively + for fldtype in tp.anonymous_struct_fields(): + self._struct_collecttype(fldtype) + + def _struct_decl(self, tp, cname, approxname): + if tp.fldtypes is None: + return + prnt = self._prnt + checkfuncname = '_cffi_checkfld_%s' % (approxname,) + prnt('_CFFI_UNUSED_FN') + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in self._enum_fields(tp): + try: + if ftype.is_integer_type() or fbitsize >= 0: + # accept all integers, but complain on float or double + if fname != '': + prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " + "an integer */" % (fname, cname, fname)) + continue + # only accept exactly the type declared, except that '[]' + # is interpreted as a '*' and so will match any array length. + # (It would also match '*', but that's harder to detect...) 
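# For illustration only, with a hypothetical struct that is not part of this
# diff: given "struct pt { int x; double y; };", the checker emitted by
# _struct_decl() here looks like the C below; it is never called, and exists
# only so the C compiler type-checks every declared field at build time.
emitted_checker = '''
_CFFI_UNUSED_FN
static void _cffi_checkfld_struct_pt(struct pt *p)
{
  /* only to generate compile-time warnings or errors */
  (void)p;
  (void)((p->x) | 0);  /* check that 'struct pt.x' is an integer */
  { double *tmp = &p->y; (void)tmp; }
}
struct _cffi_align_struct_pt { char x; struct pt y; };
'''
print(emitted_checker)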
+ while (isinstance(ftype, model.ArrayType) + and (ftype.length is None or ftype.length == '...')): + ftype = ftype.item + fname = fname + '[0]' + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) + prnt() + + def _struct_ctx(self, tp, cname, approxname, named_ptr=None): + type_index = self._typesdict[tp] + reason_for_not_expanding = None + flags = [] + if isinstance(tp, model.UnionType): + flags.append("_CFFI_F_UNION") + if tp.fldtypes is None: + flags.append("_CFFI_F_OPAQUE") + reason_for_not_expanding = "opaque" + if (tp not in self.ffi._parser._included_declarations and + (named_ptr is None or + named_ptr not in self.ffi._parser._included_declarations)): + if tp.fldtypes is None: + pass # opaque + elif tp.partial or any(tp.anonymous_struct_fields()): + pass # field layout obtained silently from the C compiler + else: + flags.append("_CFFI_F_CHECK_FIELDS") + if tp.packed: + if tp.packed > 1: + raise NotImplementedError( + "%r is declared with 'pack=%r'; only 0 or 1 are " + "supported in API mode (try to use \"...;\", which " + "does not require a 'pack' declaration)" % + (tp, tp.packed)) + flags.append("_CFFI_F_PACKED") + else: + flags.append("_CFFI_F_EXTERNAL") + reason_for_not_expanding = "external" + flags = '|'.join(flags) or '0' + c_fields = [] + if reason_for_not_expanding is None: + enumfields = list(self._enum_fields(tp)) + for fldname, fldtype, fbitsize, fqual in enumfields: + fldtype = self._field_type(tp, fldname, fldtype) + self._check_not_opaque(fldtype, + "field '%s.%s'" % (tp.name, fldname)) + # cname is None for _add_missing_struct_unions() only + op = OP_NOOP + if fbitsize >= 0: + op = OP_BITFIELD + size = '%d /* bits */' % fbitsize + elif cname is None or ( + isinstance(fldtype, model.ArrayType) and + fldtype.length is None): + size = '(size_t)-1' + else: + size = 'sizeof(((%s)0)->%s)' % ( + tp.get_c_name('*') if named_ptr is None + else named_ptr.name, + fldname) + if cname is None or fbitsize >= 0: + offset = '(size_t)-1' + elif named_ptr is not None: + offset = '((char *)&((%s)4096)->%s) - (char *)4096' % ( + named_ptr.name, fldname) + else: + offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) + c_fields.append( + FieldExpr(fldname, offset, size, fbitsize, + CffiOp(op, self._typesdict[fldtype]))) + first_field_index = len(self._lsts["field"]) + self._lsts["field"].extend(c_fields) + # + if cname is None: # unknown name, for _add_missing_struct_unions + size = '(size_t)-2' + align = -2 + comment = "unnamed" + else: + if named_ptr is not None: + size = 'sizeof(*(%s)0)' % (named_ptr.name,) + align = '-1 /* unknown alignment */' + else: + size = 'sizeof(%s)' % (cname,) + align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) + comment = None + else: + size = '(size_t)-1' + align = -1 + first_field_index = -1 + comment = reason_for_not_expanding + self._lsts["struct_union"].append( + StructUnionExpr(tp.name, type_index, flags, size, align, comment, + first_field_index, c_fields)) + self._seen_struct_unions.add(tp) + + def _check_not_opaque(self, tp, location): + while isinstance(tp, model.ArrayType): + tp = tp.item + if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: + raise TypeError( + "%s is of an opaque type (not declared in cdef())" % location) + + def _add_missing_struct_unions(self): + # not very nice, but 
some struct declarations might be missing + # because they don't have any known C name. Check that they are + # not partial (we can't complete or verify them!) and emit them + # anonymously. + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: + if tp not in self._seen_struct_unions: + if tp.partial: + raise NotImplementedError("internal inconsistency: %r is " + "partial but was not seen at " + "this point" % (tp,)) + if tp.name.startswith('$') and tp.name[1:].isdigit(): + approxname = tp.name[1:] + elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': + approxname = 'FILE' + self._typedef_ctx(tp, 'FILE') + else: + raise NotImplementedError("internal inconsistency: %r" % + (tp,)) + self._struct_ctx(tp, None, approxname) + + def _generate_cpy_struct_collecttype(self, tp, name): + self._struct_collecttype(tp) + _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype + + def _struct_names(self, tp): + cname = tp.get_c_name('') + if ' ' in cname: + return cname, cname.replace(' ', '_') + else: + return cname, '_' + cname + + def _generate_cpy_struct_decl(self, tp, name): + self._struct_decl(tp, *self._struct_names(tp)) + _generate_cpy_union_decl = _generate_cpy_struct_decl + + def _generate_cpy_struct_ctx(self, tp, name): + self._struct_ctx(tp, *self._struct_names(tp)) + _generate_cpy_union_ctx = _generate_cpy_struct_ctx + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. + + def _generate_cpy_anonymous_collecttype(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_collecttype(tp, name) + else: + self._struct_collecttype(tp) + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp) + else: + self._struct_decl(tp, name, 'typedef_' + name) + + def _generate_cpy_anonymous_ctx(self, tp, name): + if isinstance(tp, model.EnumType): + self._enum_ctx(tp, name) + else: + self._struct_ctx(tp, name, 'typedef_' + name) + + # ---------- + # constants, declared with "static const ..." 
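# For illustration only, with a hypothetical macro that is not part of this
# diff: for an integer constant such as "#define FOO 42",
# _generate_cpy_const() below emits roughly this C helper; bit 0 of the
# result records whether the constant is negative, and 2 is OR'ed in when
# the compile-time value does not match the one declared in the cdef.
emitted_const = '''
static int _cffi_const_FOO(unsigned long long *o)
{
  int n = (FOO) <= 0;
  *o = (unsigned long long)((FOO) | 0);  /* check that FOO is an integer */
  if (!_cffi_check_int(*o, n, 42U))
    n |= 2;
  return n;
}
'''
print(emitted_const)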
+ + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + check_value=None): + if (category, name) in self._seen_constants: + raise VerificationError( + "duplicate declaration of %s '%s'" % (category, name)) + self._seen_constants.add((category, name)) + # + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + if is_int: + prnt('static int %s(unsigned long long *o)' % funcname) + prnt('{') + prnt(' int n = (%s) <= 0;' % (name,)) + prnt(' *o = (unsigned long long)((%s) | 0);' + ' /* check that %s is an integer */' % (name, name)) + if check_value is not None: + if check_value > 0: + check_value = '%dU' % (check_value,) + prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) + prnt(' n |= 2;') + prnt(' return n;') + prnt('}') + else: + assert check_value is None + prnt('static void %s(char *o)' % funcname) + prnt('{') + prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = tp.is_integer_type() + if not is_int or self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + def _generate_cpy_constant_ctx(self, tp, name): + if not self.target_is_python and tp.is_integer_type(): + type_op = CffiOp(OP_CONSTANT_INT, -1) + else: + if self.target_is_python: + const_kind = OP_DLOPEN_CONST + else: + const_kind = OP_CONSTANT + type_index = self._typesdict[tp] + type_op = CffiOp(const_kind, type_index) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op)) + + # ---------- + # enums + + def _generate_cpy_enum_collecttype(self, tp, name): + self._do_collect_type(tp) + + def _generate_cpy_enum_decl(self, tp, name=None): + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator) + + def _enum_ctx(self, tp, cname): + type_index = self._typesdict[tp] + type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._lsts["global"].append( + GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, + check_value=enumvalue)) + # + if cname is not None and '$' not in cname and not self.target_is_python: + size = "sizeof(%s)" % cname + signed = "((%s)-1) <= 0" % cname + else: + basetp = tp.build_baseinttype(self.ffi, []) + size = self.ffi.sizeof(basetp) + signed = int(int(self.ffi.cast(basetp, -1)) < 0) + allenums = ",".join(tp.enumerators) + self._lsts["enum"].append( + EnumExpr(tp.name, type_index, size, signed, allenums)) + + def _generate_cpy_enum_ctx(self, tp, name): + self._enum_ctx(tp, tp._get_c_name()) + + # ---------- + # macros: for now only for integers + + def _generate_cpy_macro_collecttype(self, tp, name): + pass + + def _generate_cpy_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) + + def _generate_cpy_macro_ctx(self, tp, name): + if tp == '...': + if self.target_is_python: + raise VerificationError( + "cannot use the syntax '...' in '#define %s ...' 
when " + "using the ABI mode" % (name,)) + check_value = None + else: + check_value = tp # an integer + type_op = CffiOp(OP_CONSTANT_INT, -1) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op, + check_value=check_value)) + + # ---------- + # global variables + + def _global_type(self, tp, global_name): + if isinstance(tp, model.ArrayType): + actual_length = tp.length + if actual_length == '...': + actual_length = '_cffi_array_len(%s)' % (global_name,) + tp_item = self._global_type(tp.item, '%s[0]' % global_name) + tp = model.ArrayType(tp_item, actual_length) + return tp + + def _generate_cpy_variable_collecttype(self, tp, name): + self._do_collect_type(self._global_type(tp, name)) + + def _generate_cpy_variable_decl(self, tp, name): + prnt = self._prnt + tp = self._global_type(tp, name) + if isinstance(tp, model.ArrayType) and tp.length is None: + tp = tp.item + ampersand = '' + else: + ampersand = '&' + # This code assumes that casts from "tp *" to "void *" is a + # no-op, i.e. a function that returns a "tp *" can be called + # as if it returned a "void *". This should be generally true + # on any modern machine. The only exception to that rule (on + # uncommon architectures, and as far as I can tell) might be + # if 'tp' were a function type, but that is not possible here. + # (If 'tp' is a function _pointer_ type, then casts from "fn_t + # **" to "void *" are again no-ops, as far as I can tell.) + decl = '*_cffi_var_%s(void)' % (name,) + prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) + prnt('{') + prnt(' return %s(%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_cpy_variable_ctx(self, tp, name): + tp = self._global_type(tp, name) + type_index = self._typesdict[tp] + if self.target_is_python: + op = OP_GLOBAL_VAR + else: + op = OP_GLOBAL_VAR_F + self._lsts["global"].append( + GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) + + # ---------- + # extern "Python" + + def _generate_cpy_extern_python_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + self._do_collect_type(tp) + _generate_cpy_dllexport_python_collecttype = \ + _generate_cpy_extern_python_plus_c_collecttype = \ + _generate_cpy_extern_python_collecttype + + def _extern_python_decl(self, tp, name, tag_and_space): + prnt = self._prnt + if isinstance(tp.result, model.VoidType): + size_of_result = '0' + else: + context = 'result of %s' % name + size_of_result = '(int)sizeof(%s)' % ( + tp.result.get_c_name('', context),) + prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) + prnt(' { "%s.%s", %s, 0, 0 };' % ( + self.module_name, name, size_of_result)) + prnt() + # + arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' a%d' % i, context) + arguments.append(arg) + # + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s(%s)' % (name, repr_arguments) + if tp.abi == "__stdcall": + name_and_arguments = '_cffi_stdcall ' + name_and_arguments + # + def may_need_128_bits(tp): + return (isinstance(tp, model.PrimitiveType) and + tp.name == 'long double') + # + size_of_a = max(len(tp.args)*8, 8) + if may_need_128_bits(tp.result): + size_of_a = max(size_of_a, 16) + if isinstance(tp.result, model.StructOrUnion): + size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx + + def _print_string_literal_in_array(self, s): + prnt = self._prnt + prnt('// # NB. this is not a string because of a size limit in MSVC') + if not isinstance(s, bytes): # unicode + s = s.encode('utf-8') # -> bytes + else: + s.decode('utf-8') # got bytes, check for valid utf-8 + try: + s.decode('ascii') + except UnicodeDecodeError: + s = b'# -*- encoding: utf8 -*-\n' + s + for line in s.splitlines(True): + comment = line + if type('//') is bytes: # python2 + line = map(ord, line) # make a list of integers + else: # python3 + # type(line) is bytes, which enumerates like a list of integers + comment = ascii(comment)[1:-1] + prnt(('// ' + comment).rstrip()) + printed_line = '' + for c in line: + if len(printed_line) >= 76: + prnt(printed_line) + printed_line = '' + printed_line += '%d,' % (c,) + prnt(printed_line) + + # ---------- + # emitting the opcodes for individual types + + def _emit_bytecode_VoidType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) + + def _emit_bytecode_PrimitiveType(self, tp, index): + prim_index = PRIMITIVE_TO_INDEX[tp.name] + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) + + def _emit_bytecode_UnknownIntegerType(self, tp, index): + s = ('_cffi_prim_int(sizeof(%s), (\n' + ' ((%s)-1) | 0 /* check that %s is an integer type */\n' + ' ) <= 0)' % (tp.name, tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_UnknownFloatType(self, tp, index): + s = ('_cffi_prim_float(sizeof(%s) *\n' + ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' + ' )' % (tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_RawFunctionType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) + index += 1 + for tp1 in tp.args: + realindex = self._typesdict[tp1] + if index != realindex: + if isinstance(tp1, model.PrimitiveType): + self._emit_bytecode_PrimitiveType(tp1, index) + 
else: + self.cffi_types[index] = CffiOp(OP_NOOP, realindex) + index += 1 + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) + + def _emit_bytecode_PointerType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) + + _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType + _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType + + def _emit_bytecode_FunctionPtrType(self, tp, index): + raw = tp.as_raw_function() + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) + + def _emit_bytecode_ArrayType(self, tp, index): + item_index = self._typesdict[tp.item] + if tp.length is None: + self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) + elif tp.length == '...': + raise VerificationError( + "type %s badly placed: the '...' array length can only be " + "used on global arrays or on fields of structures" % ( + str(tp).replace('/*...*/', '...'),)) + else: + assert self.cffi_types[index + 1] == 'LEN' + self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) + self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) + + def _emit_bytecode_StructType(self, tp, index): + struct_index = self._struct_unions[tp] + self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) + _emit_bytecode_UnionType = _emit_bytecode_StructType + + def _emit_bytecode_EnumType(self, tp, index): + enum_index = self._enums[tp] + self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + +def _is_file_like(maybefile): + # compare to xml.etree.ElementTree._get_writer + return hasattr(maybefile, 'write') + +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) + recompiler = Recompiler(ffi, module_name, + target_is_python=(preamble is None)) + recompiler.collect_type_table() + recompiler.collect_step_tables() + if _is_file_like(target_file): + recompiler.write_source_to_f(target_file, preamble) + return True + f = NativeIO() + recompiler.write_source_to_f(f, preamble) + output = f.getvalue() + try: + with open(target_file, 'r') as f1: + if f1.read(len(output) + 1) != output: + raise IOError + if verbose: + print("(already up-to-date)") + return False # already up-to-date + except IOError: + tmp_file = '%s.~%d' % (target_file, os.getpid()) + with open(tmp_file, 'w') as f1: + f1.write(output) + try: + os.rename(tmp_file, target_file) + except OSError: + os.unlink(target_file) + os.rename(tmp_file, target_file) + return True + +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): + assert preamble is not None + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) + +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) + +def _modname_to_file(outputdir, modname, extension): + parts = modname.split('.') + try: + os.makedirs(os.path.join(outputdir, *parts[:-1])) + except OSError: + pass + parts[-1] += extension + return os.path.join(outputdir, *parts), parts + + +# Aaargh. Distutils is not tested at all for the purpose of compiling +# DLLs that are not extension modules. 
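Before the embedding hacks below: the source-generation helpers above can also be driven directly, without recompile() or the setuptools integration. A minimal sketch, where the module name, preamble, and target file are all illustrative:

from cffi import FFI
from cffi import recompiler

ffi = FFI()
ffi.cdef("int add(int, int);")                          # declarations to expose
preamble = "int add(int a, int b) { return a + b; }"    # user-provided C source
# Returns True if _example.c was (re)written, or False if it was already
# up-to-date, mirroring the comparison logic in _make_c_or_py_source() above.
updated = recompiler.make_c_source(ffi, "_example", preamble, "_example.c")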
Here are some hacks to work +# around that, in the _patch_for_*() functions... + +def _patch_meth(patchlist, cls, name, new_meth): + old = getattr(cls, name) + patchlist.append((cls, name, old)) + setattr(cls, name, new_meth) + return old + +def _unpatch_meths(patchlist): + for cls, name, old_meth in reversed(patchlist): + setattr(cls, name, old_meth) + +def _patch_for_embedding(patchlist): + if sys.platform == 'win32': + # we must not remove the manifest when building for embedding! + # FUTURE: this module was removed in setuptools 74; this is likely dead code and should be removed, + # since the toolchain it supports (VS2005-2008) is also long dead. + from cffi._shimmed_dist_utils import MSVCCompiler + if MSVCCompiler is not None: + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + + if sys.platform == 'darwin': + # we must not make a '-bundle', but a '-dynamiclib' instead + from cffi._shimmed_dist_utils import CCompiler + def my_link_shared_object(self, *args, **kwds): + if '-bundle' in self.linker_so: + self.linker_so = list(self.linker_so) + i = self.linker_so.index('-bundle') + self.linker_so[i] = '-dynamiclib' + return old_link_shared_object(self, *args, **kwds) + old_link_shared_object = _patch_meth(patchlist, CCompiler, + 'link_shared_object', + my_link_shared_object) + +def _patch_for_target(patchlist, target): + from cffi._shimmed_dist_utils import build_ext + # if 'target' is different from '*', we need to patch some internal + # method to just return this 'target' value, instead of having it + # built from module_name + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + elif sys.platform == 'darwin': + target += '.dylib' + else: + target += '.so' + _patch_meth(patchlist, build_ext, 'get_ext_filename', + lambda self, ext_name: target) + + +def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, target=None, debug=None, + uses_ffiplatform=True, **kwds): + if not isinstance(module_name, str): + module_name = module_name.encode('ascii') + if ffi._windows_unicode: + ffi._apply_windows_unicode(kwds) + if preamble is not None: + if call_c_compiler and _is_file_like(c_file): + raise TypeError("Writing to file-like objects is not supported " + "with call_c_compiler=True") + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) + if c_file is None: + c_file, parts = _modname_to_file(tmpdir, module_name, + source_extension) + if extradir: + parts = [extradir] + parts + ext_c_file = os.path.join(*parts) + else: + ext_c_file = c_file + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + # + if uses_ffiplatform: + ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + else: + ext = None + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) + if call_c_compiler: + patchlist = [] + cwd = os.getcwd() + try: + if embedding: + _patch_for_embedding(patchlist) + if target != '*': + _patch_for_target(patchlist, target) + if compiler_verbose: + if tmpdir == '.': + msg = 'the current directory is' + else: + msg = 'setting the current directory to' + print('%s %r' % (msg, os.path.abspath(tmpdir))) + os.chdir(tmpdir) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) + finally: + os.chdir(cwd) + _unpatch_meths(patchlist) + return outputfilename + else: + 
return ext, updated + else: + if c_file is None: + c_file, _ = _modname_to_file(tmpdir, module_name, '.py') + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) + if call_c_compiler: + return c_file + else: + return None, updated + diff --git a/.venv/lib/python3.11/site-packages/cffi/setuptools_ext.py b/.venv/lib/python3.11/site-packages/cffi/setuptools_ext.py new file mode 100644 index 0000000000000000000000000000000000000000..681b49d7ad964d9de4b6b32a24eec6fcebddf7ed --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/setuptools_ext.py @@ -0,0 +1,216 @@ +import os +import sys + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +def error(msg): + from cffi._shimmed_dist_utils import DistutilsSetupError + raise DistutilsSetupError(msg) + + +def execfile(filename, glob): + # We use execfile() (here rewritten for Python 3) instead of + # __import__() to load the build script. The problem with + # a normal import is that in some packages, the intermediate + # __init__.py files may already try to import the file that + # we are generating. + with open(filename) as f: + src = f.read() + src += '\n' # Python 2.6 compatibility + code = compile(src, filename, 'exec') + exec(code, glob, glob) + + +def add_cffi_module(dist, mod_spec): + from cffi.api import FFI + + if not isinstance(mod_spec, basestring): + error("argument to 'cffi_modules=...' must be a str or a list of str," + " not %r" % (type(mod_spec).__name__,)) + mod_spec = str(mod_spec) + try: + build_file_name, ffi_var_name = mod_spec.split(':') + except ValueError: + error("%r must be of the form 'path/build.py:ffi_variable'" % + (mod_spec,)) + if not os.path.exists(build_file_name): + ext = '' + rewritten = build_file_name.replace('.', '/') + '.py' + if os.path.exists(rewritten): + ext = ' (rewrite cffi_modules to [%r])' % ( + rewritten + ':' + ffi_var_name,) + error("%r does not name an existing file%s" % (build_file_name, ext)) + + mod_vars = {'__name__': '__cffi__', '__file__': build_file_name} + execfile(build_file_name, mod_vars) + + try: + ffi = mod_vars[ffi_var_name] + except KeyError: + error("%r: object %r not found in module" % (mod_spec, + ffi_var_name)) + if not isinstance(ffi, FFI): + ffi = ffi() # maybe it's a function instead of directly an ffi + if not isinstance(ffi, FFI): + error("%r is not an FFI instance (got %r)" % (mod_spec, + type(ffi).__name__)) + if not hasattr(ffi, '_assigned_source'): + error("%r: the set_source() method was not called" % (mod_spec,)) + module_name, source, source_extension, kwds = ffi._assigned_source + if ffi._windows_unicode: + kwds = kwds.copy() + ffi._apply_windows_unicode(kwds) + + if source is None: + _add_py_module(dist, ffi, module_name) + else: + _add_c_module(dist, ffi, module_name, source, source_extension, kwds) + +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + + CPython itself should ignore the flag in a debugging version + (by not listing .abi3.so in the extensions it supports), but + it doesn't so far, creating troubles. That's why we check + for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent + of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401) + + On Windows, with CPython <= 3.4, it's better not to use py_limited_api + because virtualenv *still* doesn't copy PYTHON3.DLL on these versions. 
+ Recently (2020) we started shipping only >= 3.5 wheels, though. So + we'll give it another try and set py_limited_api on Windows >= 3.5. + """ + from cffi import recompiler + + if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount') + and recompiler.USE_LIMITED_API): + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + return kwds + +def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): + # We are a setuptools extension. Need this build_ext for py_limited_api. + from setuptools.command.build_ext import build_ext + from cffi._shimmed_dist_utils import Extension, log, mkpath + from cffi import recompiler + + allsources = ['$PLACEHOLDER'] + allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) + ext = Extension(name=module_name, sources=allsources, **kwds) + + def make_mod(tmpdir, pre_run=None): + c_file = os.path.join(tmpdir, module_name + source_extension) + log.info("generating cffi module %r" % c_file) + mkpath(tmpdir) + # a setuptools-only, API-only hook: called with the "ext" and "ffi" + # arguments just before we turn the ffi into C code. To use it, + # subclass the 'distutils.command.build_ext.build_ext' class and + # add a method 'def pre_run(self, ext, ffi)'. + if pre_run is not None: + pre_run(ext, ffi) + updated = recompiler.make_c_source(ffi, module_name, source, c_file) + if not updated: + log.info("already up-to-date") + return c_file + + if dist.ext_modules is None: + dist.ext_modules = [] + dist.ext_modules.append(ext) + + base_class = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class): + def run(self): + if ext.sources[0] == '$PLACEHOLDER': + pre_run = getattr(self, 'pre_run', None) + ext.sources[0] = make_mod(self.build_temp, pre_run) + base_class.run(self) + dist.cmdclass['build_ext'] = build_ext_make_mod + # NB. multiple runs here will create multiple 'build_ext_make_mod' + # classes. Even in this case the 'build_ext' command should be + # run once; but just in case, the logic above does nothing if + # called again. + + +def _add_py_module(dist, ffi, module_name): + from setuptools.command.build_py import build_py + from setuptools.command.build_ext import build_ext + from cffi._shimmed_dist_utils import log, mkpath + from cffi import recompiler + + def generate_mod(py_file): + log.info("generating cffi module %r" % py_file) + mkpath(os.path.dirname(py_file)) + updated = recompiler.make_py_source(ffi, module_name, py_file) + if not updated: + log.info("already up-to-date") + + base_class = dist.cmdclass.get('build_py', build_py) + class build_py_make_mod(base_class): + def run(self): + base_class.run(self) + module_path = module_name.split('.') + module_path[-1] += '.py' + generate_mod(os.path.join(self.build_lib, *module_path)) + def get_source_files(self): + # This is called from 'setup.py sdist' only. Exclude + # the generated .py module in this case. 
+ saved_py_modules = self.py_modules + try: + if saved_py_modules: + self.py_modules = [m for m in saved_py_modules + if m != module_name] + return base_class.get_source_files(self) + finally: + self.py_modules = saved_py_modules + dist.cmdclass['build_py'] = build_py_make_mod + + # distutils and setuptools have no notion (that I could find) of a + # generated python module. If we don't add module_name to + # dist.py_modules, then things mostly work but there are some + # combinations of options (--root and --record) that will miss + # the module. So we add it here, which gives a few apparently + # harmless warnings about not finding the file outside the + # build directory. + # Then we need to hack more in get_source_files(); see above. + if dist.py_modules is None: + dist.py_modules = [] + dist.py_modules.append(module_name) + + # the following is only for "build_ext -i" + base_class_2 = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class_2): + def run(self): + base_class_2.run(self) + if self.inplace: + # from get_ext_fullpath() in distutils/command/build_ext.py + module_path = module_name.split('.') + package = '.'.join(module_path[:-1]) + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + file_name = module_path[-1] + '.py' + generate_mod(os.path.join(package_dir, file_name)) + dist.cmdclass['build_ext'] = build_ext_make_mod + +def cffi_modules(dist, attr, value): + assert attr == 'cffi_modules' + if isinstance(value, basestring): + value = [value] + + for cffi_module in value: + add_cffi_module(dist, cffi_module) diff --git a/.venv/lib/python3.11/site-packages/cffi/vengine_gen.py b/.venv/lib/python3.11/site-packages/cffi/vengine_gen.py new file mode 100644 index 0000000000000000000000000000000000000000..bffc82122c353fbd15a395d77e829a95bf8546b0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/cffi/vengine_gen.py @@ -0,0 +1,679 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, os +import types + +from . import model +from .error import VerificationError + + +class VGenericEngine(object): + _class_key = 'g' + _gen_python_module = False + + def __init__(self, verifier): + self.verifier = verifier + self.ffi = verifier.ffi + self.export_symbols = [] + self._struct_pending_verification = {} + + def patch_extension_kwds(self, kwds): + # add 'export_symbols' to the dictionary. Note that we add the + # list before filling it. When we fill it, it will thus also show + # up in kwds['export_symbols']. + kwds.setdefault('export_symbols', self.export_symbols) + + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + + def collect_types(self): + pass # not needed in the generic engine + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self): + prnt = self._prnt + # first paste a standard set of lines that are mostly '#include' + prnt(cffimod_header) + # then paste the C source given by the user, verbatim. + prnt(self.verifier.preamble) + # + # call generate_gen_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. 
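For orientation, the cffi_modules keyword registered by setuptools_ext.py above is typically wired up as in the following sketch; the file path and variable name are illustrative, and pkg/_build.py is expected to define an FFI object on which set_source() was called:

from setuptools import setup

setup(
    name="example",
    setup_requires=["cffi>=1.0.0"],
    cffi_modules=["pkg/_build.py:ffibuilder"],   # handled by add_cffi_module()
    install_requires=["cffi>=1.0.0"],
)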
+ self._generate('decl') + # + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + if sys.platform == 'win32': + if sys.version_info >= (3,): + prefix = 'PyInit_' + else: + prefix = 'init' + modname = self.verifier.get_module_name() + prnt("void %s%s(void) { }\n" % (prefix, modname)) + + def load_library(self, flags=0): + # import it with the CFFI backend + backend = self.ffi._backend + # needs to make a path that contains '/', on Posix + filename = os.path.join(os.curdir, self.verifier.modulefilename) + module = backend.load_library(filename, flags) + # + # call loading_gen_struct() to get the struct layout inferred by + # the C compiler + self._load(module, 'loading') + + # build the FFILibrary class and instance; this is a module subclass + # because modules are expected to have usually-constant attributes and + # in PyPy this means the JIT is able to treat attributes as constant, + # which we want. + class FFILibrary(types.ModuleType): + _cffi_generic_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + library = FFILibrary("") + # + # finally, call the loaded_gen_xxx() functions. This will set + # up the 'library' object. + self._load(module, 'loaded', library=library) + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_gen_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_gen_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + # typedefs: generates no code so far + + _generate_gen_typedef_decl = _generate_nothing + _loading_gen_typedef = _loaded_noop + _loaded_gen_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_gen_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for the + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no _cffi_f_%s wrapper) + self._generate_gen_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + argnames = [] + for i, type in enumerate(tp.args): + indirection = '' + if isinstance(type, model.StructOrUnion): + indirection = '*' + argnames.append('%sx%d' % (indirection, i)) + context = 'argument of %s' % name + arglist = [type.get_c_name(' %s' % arg, context) + for type, arg in zip(tp.args, argnames)] + tpresult = tp.result + if isinstance(tpresult, model.StructOrUnion): + arglist.insert(0, tpresult.get_c_name(' *r', context)) + tpresult = model.void_type + arglist = ', '.join(arglist) or 'void' + wrappername = '_cffi_f_%s' % name + self.export_symbols.append(wrappername) + if tp.abi: + abi = 
tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) + context = 'result of %s' % name + prnt(tpresult.get_c_name(funcdecl, context)) + prnt('{') + # + if isinstance(tp.result, model.StructOrUnion): + result_code = '*r = ' + elif not isinstance(tp.result, model.VoidType): + result_code = 'return ' + else: + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames))) + prnt('}') + prnt() + + _loading_gen_function = _loaded_noop + + def _loaded_gen_function(self, tp, name, module, library): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + newfunction = self._load_constant(False, tp, name, module) + else: + indirections = [] + base_tp = tp + if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) + or isinstance(tp.result, model.StructOrUnion)): + indirect_args = [] + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) + indirect_result = tp.result + if isinstance(indirect_result, model.StructOrUnion): + if indirect_result.fldtypes is None: + raise TypeError("'%s' is used as result type, " + "but is opaque" % ( + indirect_result._get_c_name(),)) + indirect_result = model.PointerType(indirect_result) + indirect_args.insert(0, indirect_result) + indirections.insert(0, ("result", indirect_result)) + indirect_result = model.void_type + tp = model.FunctionPtrType(tuple(indirect_args), + indirect_result, tp.ellipsis) + BFunc = self.ffi._get_cached_btype(tp) + wrappername = '_cffi_f_%s' % name + newfunction = module.load_function(BFunc, wrappername) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) + setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) + + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): + backend = self.ffi._backend + BType = self.ffi._get_cached_btype(tp) + if i == "result": + ffi = self.ffi + def newfunc(*args): + res = ffi.new(BType) + oldfunc(res, *args) + return res[0] + else: + def newfunc(*args): + args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] + return oldfunc(*args) + newfunc._cffi_base_type = base_tp + return newfunc + + # ---------- + # named structs + + def _generate_gen_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + + def _loading_gen_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + + def _loaded_gen_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_gen_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + + def _loading_gen_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + + def _loaded_gen_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + 
and ftype.is_integer_type()) or fbitsize >= 0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. + try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + self.export_symbols.append(layoutfuncname) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static intptr_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' return nums[i];') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] + function = module.load_function(BFunc, layoutfuncname) + layout = [] + num = 0 + while True: + x = function(num) + if x < 0: break + layout.append(x) + num += 1 + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. 
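To make the "anonymous" case concrete, here is a sketch of the user-side declaration this section handles: the struct itself carries no tag, so the only name available is the typedef's (all names here are illustrative).

from cffi import FFI

ffi = FFI()
ffi.cdef("""
    typedef struct { int x; int y; } point_t;   /* anonymous struct */
""")
p = ffi.new("point_t *")     # reachable only through the typedef name
p.x, p.y = 3, 4
assert p.x == 3 and p.y == 4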
+ + def _generate_gen_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_gen_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _loading_gen_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_gen_enum(tp, name, module, '') + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_gen_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_gen_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + self.export_symbols.append(funcname) + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: + assert category == 'const' + prnt('int %s(long long *out_value)' % funcname) + prnt('{') + prnt(' *out_value = (long long)(%s);' % (name,)) + prnt(' return (%s) <= 0;' % (name,)) + prnt('}') + else: + assert tp is not None + assert check_value is None + if category == 'var': + ampersand = '&' + else: + ampersand = '' + extra = '' + if category == 'const' and isinstance(tp, model.StructOrUnion): + extra = 'const *' + ampersand = '&' + prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) + prnt('{') + prnt(' return (%s%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_gen_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_gen_const(is_int, name, tp) + + _loading_gen_constant = _loaded_noop + + def _load_constant(self, is_int, tp, name, module, check_value=None): + funcname = '_cffi_const_%s' % name + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType) + negative = function(p) + value = int(p[0]) + if value < 0 and not negative: + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) + else: + assert check_value is None + fntypeextra = '(*)(void)' + if isinstance(tp, model.StructOrUnion): + fntypeextra = '*' + fntypeextra + BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] + function = module.load_function(BFunc, funcname) + value = function() + if isinstance(tp, model.StructOrUnion): + value = value[0] + return value + + def _loaded_gen_constant(self, tp, name, module, library): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + value = self._load_constant(is_int, tp, name, module) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # enums + + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", 
(unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise VerificationError(error) + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): + if tp.partial: + for enumerator in tp.enumerators: + self._generate_gen_const(True, enumerator) + return + # + funcname = self._enum_funcname(prefix, name) + self.export_symbols.append(funcname) + prnt = self._prnt + prnt('int %s(char *out_error)' % funcname) + prnt('{') + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._check_int_constant_value(enumerator, enumvalue) + prnt(' return 0;') + prnt('}') + prnt() + + def _loading_gen_enum(self, tp, name, module, prefix='enum'): + if tp.partial: + enumvalues = [self._load_constant(True, tp, enumerator, module) + for enumerator in tp.enumerators] + tp.enumvalues = tuple(enumvalues) + tp.partial_resolved = True + else: + funcname = self._enum_funcname(prefix, name) + self._load_known_int_constant(module, funcname) + + def _loaded_gen_enum(self, tp, name, module, library): + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) + + # ---------- + # macros: for now only for integers + + def _generate_gen_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) + + _loading_gen_macro = _loaded_noop + + def _loaded_gen_macro(self, tp, name, module, library): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # global variables + + def _generate_gen_variable_decl(self, tp, name): + if isinstance(tp, model.ArrayType): + if tp.length_is_unknown(): + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") + tp_ptr = model.PointerType(tp.item) + self._generate_gen_const(False, name, tp_ptr) + else: + tp_ptr = model.PointerType(tp) + self._generate_gen_const(False, name, tp_ptr, category='var') + + _loading_gen_variable = _loaded_noop + + def _loaded_gen_variable(self, tp, name, module, library): + if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the + # sense that "a=..." 
is forbidden + if tp.length_is_unknown(): + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + tp_ptr = model.PointerType(tp.item) + value = self._load_constant(False, tp_ptr, name, module) + # 'value' is a <cdata 'type *'> which we have to replace with + # a <cdata 'type[N]'> if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + return + # remove ptr=<cdata 'int *'> from the library instance, and replace + # it by a property on the class, which reads/writes into ptr[0]. + funcname = '_cffi_var_%s' % name + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] + function = module.load_function(BFunc, funcname) + ptr = function() + def getter(library): + return ptr[0] + def setter(library, value): + ptr[0] = value + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) + +cffimod_header = r''' +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <errno.h> +#include <sys/types.h> /* XXX for ssize_t on some platforms */ + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include <malloc.h> /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include <stdint.h> +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +# define _cffi_float_complex_t _Fcomplex /* include <complex.h> for it */ +# define _cffi_double_complex_t _Dcomplex /* include <complex.h> for it */ +#else +# include <stdint.h> +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include <alloca.h> +# endif +# define _cffi_float_complex_t float _Complex +# define _cffi_double_complex_t double _Complex +#endif +''' diff --git a/.venv/lib/python3.11/site-packages/diskcache/__init__.py b/.venv/lib/python3.11/site-packages/diskcache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7757d6601e278ee8879debe76b971cb3d9f1187a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/diskcache/__init__.py @@ -0,0 +1,68 @@ +""" +DiskCache API Reference +======================= + +The :doc:`tutorial` provides a helpful walkthrough of most methods. 
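A minimal usage sketch (with no directory argument, :class:`Cache` creates a temporary directory, as in its constructor below):

>>> from diskcache import Cache
>>> cache = Cache()
>>> cache['key'] = 'value'
>>> cache['key']
'value'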
+""" + +from .core import ( + DEFAULT_SETTINGS, + ENOVAL, + EVICTION_POLICY, + UNKNOWN, + Cache, + Disk, + EmptyDirWarning, + JSONDisk, + Timeout, + UnknownFileWarning, +) +from .fanout import FanoutCache +from .persistent import Deque, Index +from .recipes import ( + Averager, + BoundedSemaphore, + Lock, + RLock, + barrier, + memoize_stampede, + throttle, +) + +__all__ = [ + 'Averager', + 'BoundedSemaphore', + 'Cache', + 'DEFAULT_SETTINGS', + 'Deque', + 'Disk', + 'ENOVAL', + 'EVICTION_POLICY', + 'EmptyDirWarning', + 'FanoutCache', + 'Index', + 'JSONDisk', + 'Lock', + 'RLock', + 'Timeout', + 'UNKNOWN', + 'UnknownFileWarning', + 'barrier', + 'memoize_stampede', + 'throttle', +] + +try: + from .djangocache import DjangoCache # noqa + + __all__.append('DjangoCache') +except Exception: # pylint: disable=broad-except # pragma: no cover + # Django not installed or not setup so ignore. + pass + +__title__ = 'diskcache' +__version__ = '5.6.3' +__build__ = 0x050603 +__author__ = 'Grant Jenks' +__license__ = 'Apache 2.0' +__copyright__ = 'Copyright 2016-2023 Grant Jenks' diff --git a/.venv/lib/python3.11/site-packages/diskcache/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..735c9191fcccfca72f49dab616f51d02ac9ef622 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/diskcache/__pycache__/cli.cpython-311.pyc b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/cli.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31fca0bdb4f4665fad21f9bacb32807074dc3343 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/cli.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/diskcache/__pycache__/djangocache.cpython-311.pyc b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/djangocache.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc2c4f045e627c851a17b2c49ea3f1ce18ee454e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/djangocache.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/diskcache/__pycache__/fanout.cpython-311.pyc b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/fanout.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74a5f9f1fcf86725107ca2a803fc480c1eb19c71 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/fanout.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/diskcache/__pycache__/persistent.cpython-311.pyc b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/persistent.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5979ac4de198d17af294d3ecbb0edb8c2877c02 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/persistent.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/diskcache/__pycache__/recipes.cpython-311.pyc b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/recipes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fd554fe98c0e92b93cf07b4c41a2d1cea29feb4 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/diskcache/__pycache__/recipes.cpython-311.pyc differ diff --git 
a/.venv/lib/python3.11/site-packages/diskcache/cli.py b/.venv/lib/python3.11/site-packages/diskcache/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..6a39f601fe116f0a1f4e7a1866df2cb18a0ebdb4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/diskcache/cli.py @@ -0,0 +1 @@ +"""Command line interface to disk cache.""" diff --git a/.venv/lib/python3.11/site-packages/diskcache/core.py b/.venv/lib/python3.11/site-packages/diskcache/core.py new file mode 100644 index 0000000000000000000000000000000000000000..c7c84860ea853fe9232e8ee52eaaa053b00a3a87 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/diskcache/core.py @@ -0,0 +1,2452 @@ +"""Core disk and file backed cache API. +""" + +import codecs +import contextlib as cl +import errno +import functools as ft +import io +import json +import os +import os.path as op +import pickle +import pickletools +import sqlite3 +import struct +import tempfile +import threading +import time +import warnings +import zlib + + +def full_name(func): + """Return full name of `func` by adding the module and function name.""" + return func.__module__ + '.' + func.__qualname__ + + +class Constant(tuple): + """Pretty display of immutable constant.""" + + def __new__(cls, name): + return tuple.__new__(cls, (name,)) + + def __repr__(self): + return '%s' % self[0] + + +DBNAME = 'cache.db' +ENOVAL = Constant('ENOVAL') +UNKNOWN = Constant('UNKNOWN') + +MODE_NONE = 0 +MODE_RAW = 1 +MODE_BINARY = 2 +MODE_TEXT = 3 +MODE_PICKLE = 4 + +DEFAULT_SETTINGS = { + 'statistics': 0, # False + 'tag_index': 0, # False + 'eviction_policy': 'least-recently-stored', + 'size_limit': 2**30, # 1gb + 'cull_limit': 10, + 'sqlite_auto_vacuum': 1, # FULL + 'sqlite_cache_size': 2**13, # 8,192 pages + 'sqlite_journal_mode': 'wal', + 'sqlite_mmap_size': 2**26, # 64mb + 'sqlite_synchronous': 1, # NORMAL + 'disk_min_file_size': 2**15, # 32kb + 'disk_pickle_protocol': pickle.HIGHEST_PROTOCOL, +} + +METADATA = { + 'count': 0, + 'size': 0, + 'hits': 0, + 'misses': 0, +} + +EVICTION_POLICY = { + 'none': { + 'init': None, + 'get': None, + 'cull': None, + }, + 'least-recently-stored': { + 'init': ( + 'CREATE INDEX IF NOT EXISTS Cache_store_time ON' + ' Cache (store_time)' + ), + 'get': None, + 'cull': 'SELECT {fields} FROM Cache ORDER BY store_time LIMIT ?', + }, + 'least-recently-used': { + 'init': ( + 'CREATE INDEX IF NOT EXISTS Cache_access_time ON' + ' Cache (access_time)' + ), + 'get': 'access_time = {now}', + 'cull': 'SELECT {fields} FROM Cache ORDER BY access_time LIMIT ?', + }, + 'least-frequently-used': { + 'init': ( + 'CREATE INDEX IF NOT EXISTS Cache_access_count ON' + ' Cache (access_count)' + ), + 'get': 'access_count = access_count + 1', + 'cull': 'SELECT {fields} FROM Cache ORDER BY access_count LIMIT ?', + }, +} + + +class Disk: + """Cache key and value serialization for SQLite database and files.""" + + def __init__(self, directory, min_file_size=0, pickle_protocol=0): + """Initialize disk instance. + + :param str directory: directory path + :param int min_file_size: minimum size for file use + :param int pickle_protocol: pickle protocol for serialization + + """ + self._directory = directory + self.min_file_size = min_file_size + self.pickle_protocol = pickle_protocol + + def hash(self, key): + """Compute portable hash for `key`. 
+ + :param key: key to hash + :return: hash value + + """ + mask = 0xFFFFFFFF + disk_key, _ = self.put(key) + type_disk_key = type(disk_key) + + if type_disk_key is sqlite3.Binary: + return zlib.adler32(disk_key) & mask + elif type_disk_key is str: + return zlib.adler32(disk_key.encode('utf-8')) & mask # noqa + elif type_disk_key is int: + return disk_key % mask + else: + assert type_disk_key is float + return zlib.adler32(struct.pack('!d', disk_key)) & mask + + def put(self, key): + """Convert `key` to fields key and raw for Cache table. + + :param key: key to convert + :return: (database key, raw boolean) pair + + """ + # pylint: disable=unidiomatic-typecheck + type_key = type(key) + + if type_key is bytes: + return sqlite3.Binary(key), True + elif ( + (type_key is str) + or ( + type_key is int + and -9223372036854775808 <= key <= 9223372036854775807 + ) + or (type_key is float) + ): + return key, True + else: + data = pickle.dumps(key, protocol=self.pickle_protocol) + result = pickletools.optimize(data) + return sqlite3.Binary(result), False + + def get(self, key, raw): + """Convert fields `key` and `raw` from Cache table to key. + + :param key: database key to convert + :param bool raw: flag indicating raw database storage + :return: corresponding Python key + + """ + # pylint: disable=unidiomatic-typecheck + if raw: + return bytes(key) if type(key) is sqlite3.Binary else key + else: + return pickle.load(io.BytesIO(key)) + + def store(self, value, read, key=UNKNOWN): + """Convert `value` to fields size, mode, filename, and value for Cache + table. + + :param value: value to convert + :param bool read: True when value is file-like object + :param key: key for item (default UNKNOWN) + :return: (size, mode, filename, value) tuple for Cache table + + """ + # pylint: disable=unidiomatic-typecheck + type_value = type(value) + min_file_size = self.min_file_size + + if ( + (type_value is str and len(value) < min_file_size) + or ( + type_value is int + and -9223372036854775808 <= value <= 9223372036854775807 + ) + or (type_value is float) + ): + return 0, MODE_RAW, None, value + elif type_value is bytes: + if len(value) < min_file_size: + return 0, MODE_RAW, None, sqlite3.Binary(value) + else: + filename, full_path = self.filename(key, value) + self._write(full_path, io.BytesIO(value), 'xb') + return len(value), MODE_BINARY, filename, None + elif type_value is str: + filename, full_path = self.filename(key, value) + self._write(full_path, io.StringIO(value), 'x', 'UTF-8') + size = op.getsize(full_path) + return size, MODE_TEXT, filename, None + elif read: + reader = ft.partial(value.read, 2**22) + filename, full_path = self.filename(key, value) + iterator = iter(reader, b'') + size = self._write(full_path, iterator, 'xb') + return size, MODE_BINARY, filename, None + else: + result = pickle.dumps(value, protocol=self.pickle_protocol) + + if len(result) < min_file_size: + return 0, MODE_PICKLE, None, sqlite3.Binary(result) + else: + filename, full_path = self.filename(key, value) + self._write(full_path, io.BytesIO(result), 'xb') + return len(result), MODE_PICKLE, filename, None + + def _write(self, full_path, iterator, mode, encoding=None): + full_dir, _ = op.split(full_path) + + for count in range(1, 11): + with cl.suppress(OSError): + os.makedirs(full_dir) + + try: + # Another cache may have deleted the directory before + # the file could be opened. + writer = open(full_path, mode, encoding=encoding) + except OSError: + if count == 10: + # Give up after 10 tries to open the file. 
+ raise + continue + + with writer: + size = 0 + for chunk in iterator: + size += len(chunk) + writer.write(chunk) + return size + + def fetch(self, mode, filename, value, read): + """Convert fields `mode`, `filename`, and `value` from Cache table to + value. + + :param int mode: value mode raw, binary, text, or pickle + :param str filename: filename of corresponding value + :param value: database value + :param bool read: when True, return an open file handle + :return: corresponding Python value + :raises: IOError if the value cannot be read + + """ + # pylint: disable=unidiomatic-typecheck,consider-using-with + if mode == MODE_RAW: + return bytes(value) if type(value) is sqlite3.Binary else value + elif mode == MODE_BINARY: + if read: + return open(op.join(self._directory, filename), 'rb') + else: + with open(op.join(self._directory, filename), 'rb') as reader: + return reader.read() + elif mode == MODE_TEXT: + full_path = op.join(self._directory, filename) + with open(full_path, 'r', encoding='UTF-8') as reader: + return reader.read() + elif mode == MODE_PICKLE: + if value is None: + with open(op.join(self._directory, filename), 'rb') as reader: + return pickle.load(reader) + else: + return pickle.load(io.BytesIO(value)) + + def filename(self, key=UNKNOWN, value=UNKNOWN): + """Return filename and full-path tuple for file storage. + + Filename will be a randomly generated 28 character hexadecimal string + with ".val" suffixed. Two levels of sub-directories will be used to + reduce the size of directories. On older filesystems, lookups in + directories with many files may be slow. + + The default implementation ignores the `key` and `value` parameters. + + In some scenarios, for example :meth:`Cache.push + `, the `key` or `value` may not be known when the + item is stored in the cache. + + :param key: key for item (default UNKNOWN) + :param value: value for item (default UNKNOWN) + + """ + # pylint: disable=unused-argument + hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8') + sub_dir = op.join(hex_name[:2], hex_name[2:4]) + name = hex_name[4:] + '.val' + filename = op.join(sub_dir, name) + full_path = op.join(self._directory, filename) + return filename, full_path + + def remove(self, file_path): + """Remove a file given by `file_path`. + + This method is cross-thread and cross-process safe. If an OSError + occurs, it is suppressed. + + :param str file_path: relative path to file + + """ + full_path = op.join(self._directory, file_path) + full_dir, _ = op.split(full_path) + + # Suppress OSError that may occur if two caches attempt to delete the + # same file or directory at the same time. + + with cl.suppress(OSError): + os.remove(full_path) + + with cl.suppress(OSError): + os.removedirs(full_dir) + + +class JSONDisk(Disk): + """Cache key and value using JSON serialization with zlib compression.""" + + def __init__(self, directory, compress_level=1, **kwargs): + """Initialize JSON disk instance. + + Keys and values are compressed using the zlib library. The + `compress_level` is an integer from 0 to 9 controlling the level of + compression; 1 is fastest and produces the least compression, 9 is + slowest and produces the most compression, and 0 is no compression. 
+ + :param str directory: directory path + :param int compress_level: zlib compression level (default 1) + :param kwargs: super class arguments + + """ + self.compress_level = compress_level + super().__init__(directory, **kwargs) + + def put(self, key): + json_bytes = json.dumps(key).encode('utf-8') + data = zlib.compress(json_bytes, self.compress_level) + return super().put(data) + + def get(self, key, raw): + data = super().get(key, raw) + return json.loads(zlib.decompress(data).decode('utf-8')) + + def store(self, value, read, key=UNKNOWN): + if not read: + json_bytes = json.dumps(value).encode('utf-8') + value = zlib.compress(json_bytes, self.compress_level) + return super().store(value, read, key=key) + + def fetch(self, mode, filename, value, read): + data = super().fetch(mode, filename, value, read) + if not read: + data = json.loads(zlib.decompress(data).decode('utf-8')) + return data + + +class Timeout(Exception): + """Database timeout expired.""" + + +class UnknownFileWarning(UserWarning): + """Warning used by Cache.check for unknown files.""" + + +class EmptyDirWarning(UserWarning): + """Warning used by Cache.check for empty directories.""" + + +def args_to_key(base, args, kwargs, typed, ignore): + """Create cache key out of function arguments. + + :param tuple base: base of key + :param tuple args: function arguments + :param dict kwargs: function keyword arguments + :param bool typed: include types in cache key + :param set ignore: positional or keyword args to ignore + :return: cache key tuple + + """ + args = tuple(arg for index, arg in enumerate(args) if index not in ignore) + key = base + args + (None,) + + if kwargs: + kwargs = {key: val for key, val in kwargs.items() if key not in ignore} + sorted_items = sorted(kwargs.items()) + + for item in sorted_items: + key += item + + if typed: + key += tuple(type(arg) for arg in args) + + if kwargs: + key += tuple(type(value) for _, value in sorted_items) + + return key + + +class Cache: + """Disk and file backed cache.""" + + def __init__(self, directory=None, timeout=60, disk=Disk, **settings): + """Initialize cache instance. + + :param str directory: cache directory + :param float timeout: SQLite connection timeout + :param disk: Disk type or subclass for serialization + :param settings: any of DEFAULT_SETTINGS + + """ + try: + assert issubclass(disk, Disk) + except (TypeError, AssertionError): + raise ValueError('disk must subclass diskcache.Disk') from None + + if directory is None: + directory = tempfile.mkdtemp(prefix='diskcache-') + directory = str(directory) + directory = op.expanduser(directory) + directory = op.expandvars(directory) + + self._directory = directory + self._timeout = 0 # Manually handle retries during initialization. + self._local = threading.local() + self._txn_id = None + + if not op.isdir(directory): + try: + os.makedirs(directory, 0o755) + except OSError as error: + if error.errno != errno.EEXIST: + raise EnvironmentError( + error.errno, + 'Cache directory "%s" does not exist' + ' and could not be created' % self._directory, + ) from None + + sql = self._sql_retry + + # Setup Settings table. + + try: + current_settings = dict( + sql('SELECT key, value FROM Settings').fetchall() + ) + except sqlite3.OperationalError: + current_settings = {} + + sets = DEFAULT_SETTINGS.copy() + sets.update(current_settings) + sets.update(settings) + + for key in METADATA: + sets.pop(key, None) + + # Chance to set pragmas before any tables are created. 
+ + for key, value in sorted(sets.items()): + if key.startswith('sqlite_'): + self.reset(key, value, update=False) + + sql( + 'CREATE TABLE IF NOT EXISTS Settings (' + ' key TEXT NOT NULL UNIQUE,' + ' value)' + ) + + # Setup Disk object (must happen after settings initialized). + + kwargs = { + key[5:]: value + for key, value in sets.items() + if key.startswith('disk_') + } + self._disk = disk(directory, **kwargs) + + # Set cached attributes: updates settings and sets pragmas. + + for key, value in sets.items(): + query = 'INSERT OR REPLACE INTO Settings VALUES (?, ?)' + sql(query, (key, value)) + self.reset(key, value) + + for key, value in METADATA.items(): + query = 'INSERT OR IGNORE INTO Settings VALUES (?, ?)' + sql(query, (key, value)) + self.reset(key) + + ((self._page_size,),) = sql('PRAGMA page_size').fetchall() + + # Setup Cache table. + + sql( + 'CREATE TABLE IF NOT EXISTS Cache (' + ' rowid INTEGER PRIMARY KEY,' + ' key BLOB,' + ' raw INTEGER,' + ' store_time REAL,' + ' expire_time REAL,' + ' access_time REAL,' + ' access_count INTEGER DEFAULT 0,' + ' tag BLOB,' + ' size INTEGER DEFAULT 0,' + ' mode INTEGER DEFAULT 0,' + ' filename TEXT,' + ' value BLOB)' + ) + + sql( + 'CREATE UNIQUE INDEX IF NOT EXISTS Cache_key_raw ON' + ' Cache(key, raw)' + ) + + sql( + 'CREATE INDEX IF NOT EXISTS Cache_expire_time ON' + ' Cache (expire_time)' + ) + + query = EVICTION_POLICY[self.eviction_policy]['init'] + + if query is not None: + sql(query) + + # Use triggers to keep Metadata updated. + + sql( + 'CREATE TRIGGER IF NOT EXISTS Settings_count_insert' + ' AFTER INSERT ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value + 1' + ' WHERE key = "count"; END' + ) + + sql( + 'CREATE TRIGGER IF NOT EXISTS Settings_count_delete' + ' AFTER DELETE ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value - 1' + ' WHERE key = "count"; END' + ) + + sql( + 'CREATE TRIGGER IF NOT EXISTS Settings_size_insert' + ' AFTER INSERT ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value + NEW.size' + ' WHERE key = "size"; END' + ) + + sql( + 'CREATE TRIGGER IF NOT EXISTS Settings_size_update' + ' AFTER UPDATE ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings' + ' SET value = value + NEW.size - OLD.size' + ' WHERE key = "size"; END' + ) + + sql( + 'CREATE TRIGGER IF NOT EXISTS Settings_size_delete' + ' AFTER DELETE ON Cache FOR EACH ROW BEGIN' + ' UPDATE Settings SET value = value - OLD.size' + ' WHERE key = "size"; END' + ) + + # Create tag index if requested. + + if self.tag_index: # pylint: disable=no-member + self.create_tag_index() + else: + self.drop_tag_index() + + # Close and re-open database connection with given timeout. + + self.close() + self._timeout = timeout + self._sql # pylint: disable=pointless-statement + + @property + def directory(self): + """Cache directory.""" + return self._directory + + @property + def timeout(self): + """SQLite connection timeout value in seconds.""" + return self._timeout + + @property + def disk(self): + """Disk used for serialization.""" + return self._disk + + @property + def _con(self): + # Check process ID to support process forking. If the process + # ID changes, close the connection and update the process ID. 
+
+        local_pid = getattr(self._local, 'pid', None)
+        pid = os.getpid()
+
+        if local_pid != pid:
+            self.close()
+            self._local.pid = pid
+
+        con = getattr(self._local, 'con', None)
+
+        if con is None:
+            con = self._local.con = sqlite3.connect(
+                op.join(self._directory, DBNAME),
+                timeout=self._timeout,
+                isolation_level=None,
+            )
+
+            # Some SQLite pragmas work on a per-connection basis so
+            # query the Settings table and reset the pragmas. The
+            # Settings table may not exist so catch and ignore the
+            # OperationalError that may occur.
+
+            try:
+                select = 'SELECT key, value FROM Settings'
+                settings = con.execute(select).fetchall()
+            except sqlite3.OperationalError:
+                pass
+            else:
+                for key, value in settings:
+                    if key.startswith('sqlite_'):
+                        self.reset(key, value, update=False)
+
+        return con
+
+    @property
+    def _sql(self):
+        return self._con.execute
+
+    @property
+    def _sql_retry(self):
+        sql = self._sql
+
+        # 2018-11-01 GrantJ - Some SQLite builds/versions handle
+        # the SQLITE_BUSY return value and connection parameter
+        # "timeout" differently. For a more reliable duration,
+        # manually retry the statement for 60 seconds. Only used
+        # by statements which modify the database and do not use
+        # a transaction (like those in ``__init__`` or ``reset``).
+        # See Issue #85 and tests/issue_85.py for more details.
+
+        def _execute_with_retry(statement, *args, **kwargs):
+            start = time.time()
+            while True:
+                try:
+                    return sql(statement, *args, **kwargs)
+                except sqlite3.OperationalError as exc:
+                    if str(exc) != 'database is locked':
+                        raise
+                    diff = time.time() - start
+                    if diff > 60:
+                        raise
+                    time.sleep(0.001)
+
+        return _execute_with_retry
+
+    @cl.contextmanager
+    def transact(self, retry=False):
+        """Context manager to perform a transaction by locking the cache.
+
+        While the cache is locked, no other write operation is permitted.
+        Transactions should therefore be as short as possible. Read and write
+        operations performed in a transaction are atomic. Read operations may
+        occur concurrent to a transaction.
+
+        Transactions may be nested and may not be shared between threads.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        >>> cache = Cache()
+        >>> with cache.transact():  # Atomically increment two keys.
+        ...     _ = cache.incr('total', 123.4)
+        ...     _ = cache.incr('count', 1)
+        >>> with cache.transact():  # Atomically calculate average.
+        ...     
average = cache['total'] / cache['count'] + >>> average + 123.4 + + :param bool retry: retry if database timeout occurs (default False) + :return: context manager for use in `with` statement + :raises Timeout: if database timeout occurs + + """ + with self._transact(retry=retry): + yield + + @cl.contextmanager + def _transact(self, retry=False, filename=None): + sql = self._sql + filenames = [] + _disk_remove = self._disk.remove + tid = threading.get_ident() + txn_id = self._txn_id + + if tid == txn_id: + begin = False + else: + while True: + try: + sql('BEGIN IMMEDIATE') + begin = True + self._txn_id = tid + break + except sqlite3.OperationalError: + if retry: + continue + if filename is not None: + _disk_remove(filename) + raise Timeout from None + + try: + yield sql, filenames.append + except BaseException: + if begin: + assert self._txn_id == tid + self._txn_id = None + sql('ROLLBACK') + raise + else: + if begin: + assert self._txn_id == tid + self._txn_id = None + sql('COMMIT') + for name in filenames: + if name is not None: + _disk_remove(name) + + def set(self, key, value, expire=None, read=False, tag=None, retry=False): + """Set `key` and `value` item in cache. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key for item + :param value: value for item + :param float expire: seconds until item expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default False) + :return: True if item was set + :raises Timeout: if database timeout occurs + + """ + now = time.time() + db_key, raw = self._disk.put(key) + expire_time = None if expire is None else now + expire + size, mode, filename, db_value = self._disk.store(value, read, key=key) + columns = (expire_time, tag, size, mode, filename, db_value) + + # The order of SELECT, UPDATE, and INSERT is important below. + # + # Typical cache usage pattern is: + # + # value = cache.get(key) + # if value is None: + # value = expensive_calculation() + # cache.set(key, value) + # + # Cache.get does not evict expired keys to avoid writes during lookups. + # Commonly used/expired keys will therefore remain in the cache making + # an UPDATE the preferred path. + # + # The alternative is to assume the key is not present by first trying + # to INSERT and then handling the IntegrityError that occurs from + # violating the UNIQUE constraint. This optimistic approach was + # rejected based on the common cache usage pattern. + # + # INSERT OR REPLACE aka UPSERT is not used because the old filename may + # need cleanup. + + with self._transact(retry, filename) as (sql, cleanup): + rows = sql( + 'SELECT rowid, filename FROM Cache' + ' WHERE key = ? AND raw = ?', + (db_key, raw), + ).fetchall() + + if rows: + ((rowid, old_filename),) = rows + cleanup(old_filename) + self._row_update(rowid, now, columns) + else: + self._row_insert(db_key, raw, now, columns) + + self._cull(now, sql, cleanup) + + return True + + def __setitem__(self, key, value): + """Set corresponding `value` for `key` in cache. 
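+
+        Equivalent to ``cache.set(key, value, retry=True)``.
+
+        >>> cache = Cache()
+        >>> cache['key'] = 'value'
+        >>> cache['key']
+        'value'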
+
+        :param key: key for item
+        :param value: value for item
+
+        """
+        self.set(key, value, retry=True)
+
+    def _row_update(self, rowid, now, columns):
+        sql = self._sql
+        expire_time, tag, size, mode, filename, value = columns
+        sql(
+            'UPDATE Cache SET'
+            ' store_time = ?,'
+            ' expire_time = ?,'
+            ' access_time = ?,'
+            ' access_count = ?,'
+            ' tag = ?,'
+            ' size = ?,'
+            ' mode = ?,'
+            ' filename = ?,'
+            ' value = ?'
+            ' WHERE rowid = ?',
+            (
+                now,  # store_time
+                expire_time,
+                now,  # access_time
+                0,  # access_count
+                tag,
+                size,
+                mode,
+                filename,
+                value,
+                rowid,
+            ),
+        )
+
+    def _row_insert(self, key, raw, now, columns):
+        sql = self._sql
+        expire_time, tag, size, mode, filename, value = columns
+        sql(
+            'INSERT INTO Cache('
+            ' key, raw, store_time, expire_time, access_time,'
+            ' access_count, tag, size, mode, filename, value'
+            ') VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
+            (
+                key,
+                raw,
+                now,  # store_time
+                expire_time,
+                now,  # access_time
+                0,  # access_count
+                tag,
+                size,
+                mode,
+                filename,
+                value,
+            ),
+        )
+
+    def _cull(self, now, sql, cleanup, limit=None):
+        cull_limit = self.cull_limit if limit is None else limit
+
+        if cull_limit == 0:
+            return
+
+        # Evict expired keys.
+
+        select_expired_template = (
+            'SELECT %s FROM Cache'
+            ' WHERE expire_time IS NOT NULL AND expire_time < ?'
+            ' ORDER BY expire_time LIMIT ?'
+        )
+
+        select_expired = select_expired_template % 'filename'
+        rows = sql(select_expired, (now, cull_limit)).fetchall()
+
+        if rows:
+            delete_expired = 'DELETE FROM Cache WHERE rowid IN (%s)' % (
+                select_expired_template % 'rowid'
+            )
+            sql(delete_expired, (now, cull_limit))
+
+            for (filename,) in rows:
+                cleanup(filename)
+
+            cull_limit -= len(rows)
+
+            if cull_limit == 0:
+                return
+
+        # Evict keys by policy.
+
+        select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
+
+        if select_policy is None or self.volume() < self.size_limit:
+            return
+
+        select_filename = select_policy.format(fields='filename', now=now)
+        rows = sql(select_filename, (cull_limit,)).fetchall()
+
+        if rows:
+            delete = 'DELETE FROM Cache WHERE rowid IN (%s)' % (
+                select_policy.format(fields='rowid', now=now)
+            )
+            sql(delete, (cull_limit,))
+
+            for (filename,) in rows:
+                cleanup(filename)
+
+    def touch(self, key, expire=None, retry=False):
+        """Touch `key` in cache and update `expire` time.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param key: key for item
+        :param float expire: seconds until item expires
+            (default None, no expiry)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: True if key was touched
+        :raises Timeout: if database timeout occurs
+
+        """
+        now = time.time()
+        db_key, raw = self._disk.put(key)
+        expire_time = None if expire is None else now + expire
+
+        with self._transact(retry) as (sql, _):
+            rows = sql(
+                'SELECT rowid, expire_time FROM Cache'
+                ' WHERE key = ? AND raw = ?',
+                (db_key, raw),
+            ).fetchall()
+
+            if rows:
+                ((rowid, old_expire_time),) = rows
+
+                if old_expire_time is None or old_expire_time > now:
+                    sql(
+                        'UPDATE Cache SET expire_time = ? WHERE rowid = ?',
+                        (expire_time, rowid),
+                    )
+                    return True
+
+        return False
+
+    def add(self, key, value, expire=None, read=False, tag=None, retry=False):
+        """Add `key` and `value` item to cache.
+
+        Similar to `set`, but only add to cache if key not present.
+
+        Operation is atomic. 
Only one concurrent add operation for a given key + will succeed. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key for item + :param value: value for item + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default False) + :return: True if item was added + :raises Timeout: if database timeout occurs + + """ + now = time.time() + db_key, raw = self._disk.put(key) + expire_time = None if expire is None else now + expire + size, mode, filename, db_value = self._disk.store(value, read, key=key) + columns = (expire_time, tag, size, mode, filename, db_value) + + with self._transact(retry, filename) as (sql, cleanup): + rows = sql( + 'SELECT rowid, filename, expire_time FROM Cache' + ' WHERE key = ? AND raw = ?', + (db_key, raw), + ).fetchall() + + if rows: + ((rowid, old_filename, old_expire_time),) = rows + + if old_expire_time is None or old_expire_time > now: + cleanup(filename) + return False + + cleanup(old_filename) + self._row_update(rowid, now, columns) + else: + self._row_insert(db_key, raw, now, columns) + + self._cull(now, sql, cleanup) + + return True + + def incr(self, key, delta=1, default=0, retry=False): + """Increment value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent increment operations will be + counted individually. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key for item + :param int delta: amount to increment (default 1) + :param int default: value if key is missing (default 0) + :param bool retry: retry if database timeout occurs (default False) + :return: new value for item + :raises KeyError: if key is not found and default is None + :raises Timeout: if database timeout occurs + + """ + now = time.time() + db_key, raw = self._disk.put(key) + select = ( + 'SELECT rowid, expire_time, filename, value FROM Cache' + ' WHERE key = ? AND raw = ?' + ) + + with self._transact(retry) as (sql, cleanup): + rows = sql(select, (db_key, raw)).fetchall() + + if not rows: + if default is None: + raise KeyError(key) + + value = default + delta + columns = (None, None) + self._disk.store( + value, False, key=key + ) + self._row_insert(db_key, raw, now, columns) + self._cull(now, sql, cleanup) + return value + + ((rowid, expire_time, filename, value),) = rows + + if expire_time is not None and expire_time < now: + if default is None: + raise KeyError(key) + + value = default + delta + columns = (None, None) + self._disk.store( + value, False, key=key + ) + self._row_update(rowid, now, columns) + self._cull(now, sql, cleanup) + cleanup(filename) + return value + + value += delta + + columns = 'store_time = ?, value = ?' + update_column = EVICTION_POLICY[self.eviction_policy]['get'] + + if update_column is not None: + columns += ', ' + update_column.format(now=now) + + update = 'UPDATE Cache SET %s WHERE rowid = ?' 
% columns + sql(update, (now, value, rowid)) + + return value + + def decr(self, key, delta=1, default=0, retry=False): + """Decrement value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent decrement operations will be + counted individually. + + Unlike Memcached, negative values are supported. Value may be + decremented below zero. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key for item + :param int delta: amount to decrement (default 1) + :param int default: value if key is missing (default 0) + :param bool retry: retry if database timeout occurs (default False) + :return: new value for item + :raises KeyError: if key is not found and default is None + :raises Timeout: if database timeout occurs + + """ + return self.incr(key, -delta, default, retry) + + def get( + self, + key, + default=None, + read=False, + expire_time=False, + tag=False, + retry=False, + ): + """Retrieve value from cache. If `key` is missing, return `default`. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key for item + :param default: value to return if key is missing (default None) + :param bool read: if True, return file handle to value + (default False) + :param bool expire_time: if True, return expire_time in tuple + (default False) + :param bool tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default False) + :return: value for item or default if key not found + :raises Timeout: if database timeout occurs + + """ + db_key, raw = self._disk.put(key) + update_column = EVICTION_POLICY[self.eviction_policy]['get'] + select = ( + 'SELECT rowid, expire_time, tag, mode, filename, value' + ' FROM Cache WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)' + ) + + if expire_time and tag: + default = (default, None, None) + elif expire_time or tag: + default = (default, None) + + if not self.statistics and update_column is None: + # Fast path, no transaction necessary. + + rows = self._sql(select, (db_key, raw, time.time())).fetchall() + + if not rows: + return default + + ((rowid, db_expire_time, db_tag, mode, filename, db_value),) = rows + + try: + value = self._disk.fetch(mode, filename, db_value, read) + except IOError: + # Key was deleted before we could retrieve result. + return default + + else: # Slow path, transaction required. + cache_hit = ( + 'UPDATE Settings SET value = value + 1 WHERE key = "hits"' + ) + cache_miss = ( + 'UPDATE Settings SET value = value + 1 WHERE key = "misses"' + ) + + with self._transact(retry) as (sql, _): + rows = sql(select, (db_key, raw, time.time())).fetchall() + + if not rows: + if self.statistics: + sql(cache_miss) + return default + + ( + (rowid, db_expire_time, db_tag, mode, filename, db_value), + ) = rows # noqa: E127 + + try: + value = self._disk.fetch(mode, filename, db_value, read) + except IOError: + # Key was deleted before we could retrieve result. + if self.statistics: + sql(cache_miss) + return default + + if self.statistics: + sql(cache_hit) + + now = time.time() + update = 'UPDATE Cache SET %s WHERE rowid = ?' 
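+                # For example: under the library's default
+                # "least-recently-stored" policy the 'get' column is None and
+                # no UPDATE runs; under "least-recently-used" it rewrites
+                # access_time for the fetched row.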
+ + if update_column is not None: + sql(update % update_column.format(now=now), (rowid,)) + + if expire_time and tag: + return (value, db_expire_time, db_tag) + elif expire_time: + return (value, db_expire_time) + elif tag: + return (value, db_tag) + else: + return value + + def __getitem__(self, key): + """Return corresponding value for `key` from cache. + + :param key: key matching item + :return: corresponding value + :raises KeyError: if key is not found + + """ + value = self.get(key, default=ENOVAL, retry=True) + if value is ENOVAL: + raise KeyError(key) + return value + + def read(self, key, retry=False): + """Return file handle value corresponding to `key` from cache. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key matching item + :param bool retry: retry if database timeout occurs (default False) + :return: file open for reading in binary mode + :raises KeyError: if key is not found + :raises Timeout: if database timeout occurs + + """ + handle = self.get(key, default=ENOVAL, read=True, retry=retry) + if handle is ENOVAL: + raise KeyError(key) + return handle + + def __contains__(self, key): + """Return `True` if `key` matching item is found in cache. + + :param key: key matching item + :return: True if key matching item + + """ + sql = self._sql + db_key, raw = self._disk.put(key) + select = ( + 'SELECT rowid FROM Cache' + ' WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)' + ) + + rows = sql(select, (db_key, raw, time.time())).fetchall() + + return bool(rows) + + def pop( + self, key, default=None, expire_time=False, tag=False, retry=False + ): # noqa: E501 + """Remove corresponding item for `key` from cache and return value. + + If `key` is missing, return `default`. + + Operation is atomic. Concurrent operations will be serialized. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key for item + :param default: value to return if key is missing (default None) + :param bool expire_time: if True, return expire_time in tuple + (default False) + :param bool tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default False) + :return: value for item or default if key not found + :raises Timeout: if database timeout occurs + + """ + db_key, raw = self._disk.put(key) + select = ( + 'SELECT rowid, expire_time, tag, mode, filename, value' + ' FROM Cache WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)' + ) + + if expire_time and tag: + default = default, None, None + elif expire_time or tag: + default = default, None + + with self._transact(retry) as (sql, _): + rows = sql(select, (db_key, raw, time.time())).fetchall() + + if not rows: + return default + + ((rowid, db_expire_time, db_tag, mode, filename, db_value),) = rows + + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + + try: + value = self._disk.fetch(mode, filename, db_value, False) + except IOError: + # Key was deleted before we could retrieve result. + return default + finally: + if filename is not None: + self._disk.remove(filename) + + if expire_time and tag: + return value, db_expire_time, db_tag + elif expire_time: + return value, db_expire_time + elif tag: + return value, db_tag + else: + return value + + def __delitem__(self, key, retry=True): + """Delete corresponding item for `key` from cache. 
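+
+        For example:
+
+        >>> cache = Cache()
+        >>> cache['settings'] = {'theme': 'dark'}
+        >>> del cache['settings']
+        >>> 'settings' in cache
+        False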
+ + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default `True`). + + :param key: key matching item + :param bool retry: retry if database timeout occurs (default True) + :raises KeyError: if key is not found + :raises Timeout: if database timeout occurs + + """ + db_key, raw = self._disk.put(key) + + with self._transact(retry) as (sql, cleanup): + rows = sql( + 'SELECT rowid, filename FROM Cache' + ' WHERE key = ? AND raw = ?' + ' AND (expire_time IS NULL OR expire_time > ?)', + (db_key, raw, time.time()), + ).fetchall() + + if not rows: + raise KeyError(key) + + ((rowid, filename),) = rows + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + cleanup(filename) + + return True + + def delete(self, key, retry=False): + """Delete corresponding item for `key` from cache. + + Missing keys are ignored. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param key: key matching item + :param bool retry: retry if database timeout occurs (default False) + :return: True if item was deleted + :raises Timeout: if database timeout occurs + + """ + # pylint: disable=unnecessary-dunder-call + try: + return self.__delitem__(key, retry=retry) + except KeyError: + return False + + def push( + self, + value, + prefix=None, + side='back', + expire=None, + read=False, + tag=None, + retry=False, + ): + """Push `value` onto `side` of queue identified by `prefix` in cache. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + Defaults to pushing value on back of queue. Set side to 'front' to push + value on front of queue. Side must be one of 'back' or 'front'. + + Operation is atomic. Concurrent operations will be serialized. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + See also `Cache.pull`. + + >>> cache = Cache() + >>> print(cache.push('first value')) + 500000000000000 + >>> cache.get(500000000000000) + 'first value' + >>> print(cache.push('second value')) + 500000000000001 + >>> print(cache.push('third value', side='front')) + 499999999999999 + >>> cache.push(1234, prefix='userids') + 'userids-500000000000000' + + :param value: value for item + :param str prefix: key prefix (default None, key is integer) + :param str side: either 'back' or 'front' (default 'back') + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default False) + :return: key for item in cache + :raises Timeout: if database timeout occurs + + """ + if prefix is None: + min_key = 0 + max_key = 999999999999999 + else: + min_key = prefix + '-000000000000000' + max_key = prefix + '-999999999999999' + + now = time.time() + raw = True + expire_time = None if expire is None else now + expire + size, mode, filename, db_value = self._disk.store(value, read) + columns = (expire_time, tag, size, mode, filename, db_value) + order = {'back': 'DESC', 'front': 'ASC'} + select = ( + 'SELECT key FROM Cache' + ' WHERE ? < key AND key < ? AND raw = ?' 
+ ' ORDER BY key %s LIMIT 1' + ) % order[side] + + with self._transact(retry, filename) as (sql, cleanup): + rows = sql(select, (min_key, max_key, raw)).fetchall() + + if rows: + ((key,),) = rows + + if prefix is not None: + num = int(key[(key.rfind('-') + 1) :]) + else: + num = key + + if side == 'back': + num += 1 + else: + assert side == 'front' + num -= 1 + else: + num = 500000000000000 + + if prefix is not None: + db_key = '{0}-{1:015d}'.format(prefix, num) + else: + db_key = num + + self._row_insert(db_key, raw, now, columns) + self._cull(now, sql, cleanup) + + return db_key + + def pull( + self, + prefix=None, + default=(None, None), + side='front', + expire_time=False, + tag=False, + retry=False, + ): + """Pull key and value item pair from `side` of queue in cache. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + If queue is empty, return default. + + Defaults to pulling key and value item pairs from front of queue. Set + side to 'back' to pull from back of queue. Side must be one of 'front' + or 'back'. + + Operation is atomic. Concurrent operations will be serialized. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + See also `Cache.push` and `Cache.get`. + + >>> cache = Cache() + >>> cache.pull() + (None, None) + >>> for letter in 'abc': + ... print(cache.push(letter)) + 500000000000000 + 500000000000001 + 500000000000002 + >>> key, value = cache.pull() + >>> print(key) + 500000000000000 + >>> value + 'a' + >>> _, value = cache.pull(side='back') + >>> value + 'c' + >>> cache.push(1234, 'userids') + 'userids-500000000000000' + >>> _, value = cache.pull('userids') + >>> value + 1234 + + :param str prefix: key prefix (default None, key is integer) + :param default: value to return if key is missing + (default (None, None)) + :param str side: either 'front' or 'back' (default 'front') + :param bool expire_time: if True, return expire_time in tuple + (default False) + :param bool tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default False) + :return: key and value item pair or default if queue is empty + :raises Timeout: if database timeout occurs + + """ + # Caution: Nearly identical code exists in Cache.peek + if prefix is None: + min_key = 0 + max_key = 999999999999999 + else: + min_key = prefix + '-000000000000000' + max_key = prefix + '-999999999999999' + + order = {'front': 'ASC', 'back': 'DESC'} + select = ( + 'SELECT rowid, key, expire_time, tag, mode, filename, value' + ' FROM Cache WHERE ? < key AND key < ? AND raw = 1' + ' ORDER BY key %s LIMIT 1' + ) % order[side] + + if expire_time and tag: + default = default, None, None + elif expire_time or tag: + default = default, None + + while True: + while True: + with self._transact(retry) as (sql, cleanup): + rows = sql(select, (min_key, max_key)).fetchall() + + if not rows: + return default + + ( + (rowid, key, db_expire, db_tag, mode, name, db_value), + ) = rows + + sql('DELETE FROM Cache WHERE rowid = ?', (rowid,)) + + if db_expire is not None and db_expire < time.time(): + cleanup(name) + else: + break + + try: + value = self._disk.fetch(mode, name, db_value, False) + except IOError: + # Key was deleted before we could retrieve result. 
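+                # (The finally clause below still removes the on-disk value,
+                # if any, then the outer loop retries with the next item in
+                # the queue.)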
+                continue
+            finally:
+                if name is not None:
+                    self._disk.remove(name)
+            break
+
+        if expire_time and tag:
+            return (key, value), db_expire, db_tag
+        elif expire_time:
+            return (key, value), db_expire
+        elif tag:
+            return (key, value), db_tag
+        else:
+            return key, value
+
+    def peek(
+        self,
+        prefix=None,
+        default=(None, None),
+        side='front',
+        expire_time=False,
+        tag=False,
+        retry=False,
+    ):
+        """Peek at key and value item pair from `side` of queue in cache.
+
+        When prefix is None, integer keys are used. Otherwise, string keys are
+        used in the format "prefix-integer". Integer starts at 500 trillion.
+
+        If queue is empty, return default.
+
+        Defaults to peeking at key and value item pairs from front of queue.
+        Set side to 'back' to peek at back of queue. Side must be one of
+        'front' or 'back'.
+
+        Expired items are deleted from cache. Operation is atomic. Concurrent
+        operations will be serialized.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        See also `Cache.pull` and `Cache.push`.
+
+        >>> cache = Cache()
+        >>> for letter in 'abc':
+        ...     print(cache.push(letter))
+        500000000000000
+        500000000000001
+        500000000000002
+        >>> key, value = cache.peek()
+        >>> print(key)
+        500000000000000
+        >>> value
+        'a'
+        >>> key, value = cache.peek(side='back')
+        >>> print(key)
+        500000000000002
+        >>> value
+        'c'
+
+        :param str prefix: key prefix (default None, key is integer)
+        :param default: value to return if key is missing
+            (default (None, None))
+        :param str side: either 'front' or 'back' (default 'front')
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: key and value item pair or default if queue is empty
+        :raises Timeout: if database timeout occurs
+
+        """
+        # Caution: Nearly identical code exists in Cache.pull
+        if prefix is None:
+            min_key = 0
+            max_key = 999999999999999
+        else:
+            min_key = prefix + '-000000000000000'
+            max_key = prefix + '-999999999999999'
+
+        order = {'front': 'ASC', 'back': 'DESC'}
+        select = (
+            'SELECT rowid, key, expire_time, tag, mode, filename, value'
+            ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
+            ' ORDER BY key %s LIMIT 1'
+        ) % order[side]
+
+        if expire_time and tag:
+            default = default, None, None
+        elif expire_time or tag:
+            default = default, None
+
+        while True:
+            while True:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select, (min_key, max_key)).fetchall()
+
+                    if not rows:
+                        return default
+
+                    (
+                        (rowid, key, db_expire, db_tag, mode, name, db_value),
+                    ) = rows
+
+                    if db_expire is not None and db_expire < time.time():
+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+                        cleanup(name)
+                    else:
+                        break
+
+            try:
+                value = self._disk.fetch(mode, name, db_value, False)
+            except IOError:
+                # Key was deleted before we could retrieve result.
+                continue
+            break
+
+        if expire_time and tag:
+            return (key, value), db_expire, db_tag
+        elif expire_time:
+            return (key, value), db_expire
+        elif tag:
+            return (key, value), db_tag
+        else:
+            return key, value
+
+    def peekitem(self, last=True, expire_time=False, tag=False, retry=False):
+        """Peek at key and value item pair in cache based on iteration order.
+
+        Expired items are deleted from cache. Operation is atomic. Concurrent
+        operations will be serialized.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default). 
+
+        >>> cache = Cache()
+        >>> for num, letter in enumerate('abc'):
+        ...     cache[letter] = num
+        >>> cache.peekitem()
+        ('c', 2)
+        >>> cache.peekitem(last=False)
+        ('a', 0)
+
+        :param bool last: last item in iteration order (default True)
+        :param bool expire_time: if True, return expire_time in tuple
+            (default False)
+        :param bool tag: if True, return tag in tuple (default False)
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: key and value item pair
+        :raises KeyError: if cache is empty
+        :raises Timeout: if database timeout occurs
+
+        """
+        order = ('ASC', 'DESC')
+        select = (
+            'SELECT rowid, key, raw, expire_time, tag, mode, filename, value'
+            ' FROM Cache ORDER BY rowid %s LIMIT 1'
+        ) % order[last]
+
+        while True:
+            while True:
+                with self._transact(retry) as (sql, cleanup):
+                    rows = sql(select).fetchall()
+
+                    if not rows:
+                        raise KeyError('dictionary is empty')
+
+                    (
+                        (
+                            rowid,
+                            db_key,
+                            raw,
+                            db_expire,
+                            db_tag,
+                            mode,
+                            name,
+                            db_value,
+                        ),
+                    ) = rows
+
+                    if db_expire is not None and db_expire < time.time():
+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+                        cleanup(name)
+                    else:
+                        break
+
+            key = self._disk.get(db_key, raw)
+
+            try:
+                value = self._disk.fetch(mode, name, db_value, False)
+            except IOError:
+                # Key was deleted before we could retrieve result.
+                continue
+            break
+
+        if expire_time and tag:
+            return (key, value), db_expire, db_tag
+        elif expire_time:
+            return (key, value), db_expire
+        elif tag:
+            return (key, value), db_tag
+        else:
+            return key, value
+
+    def memoize(
+        self, name=None, typed=False, expire=None, tag=None, ignore=()
+    ):
+        """Memoizing cache decorator.
+
+        Decorator to wrap callable with memoizing function using cache.
+        Repeated calls with the same arguments will lookup result in cache and
+        avoid function evaluation.
+
+        If name is set to None (default), the callable name will be determined
+        automatically.
+
+        When expire is set to zero, function results will not be set in the
+        cache. Cache lookups still occur, however. Read
+        :doc:`case-study-landing-page-caching` for example usage.
+
+        If typed is set to True, function arguments of different types will be
+        cached separately. For example, f(3) and f(3.0) will be treated as
+        distinct calls with distinct results.
+
+        The original underlying function is accessible through the __wrapped__
+        attribute. This is useful for introspection, for bypassing the cache,
+        or for rewrapping the function with a different cache.
+
+        >>> from diskcache import Cache
+        >>> cache = Cache()
+        >>> @cache.memoize(expire=1, tag='fib')
+        ... def fibonacci(number):
+        ...     if number == 0:
+        ...         return 0
+        ...     elif number == 1:
+        ...         return 1
+        ...     else:
+        ...         return fibonacci(number - 1) + fibonacci(number - 2)
+        >>> print(fibonacci(100))
+        354224848179261915075
+
+        An additional `__cache_key__` attribute can be used to generate the
+        cache key used for the given arguments.
+
+        >>> key = fibonacci.__cache_key__(100)
+        >>> print(cache[key])
+        354224848179261915075
+
+        Remember to call memoize when decorating a callable. If you forget,
+        then a TypeError will occur. Note the lack of parentheses after
+        memoize below:
+
+        >>> @cache.memoize
+        ... def test():
+        ...     pass
+        Traceback (most recent call last):
+            ...
+        TypeError: name cannot be callable
+
+        :param str name: name given for callable (default None, automatic)
+        :param bool typed: cache different types separately (default False)
+        :param float expire: seconds until arguments expire
+            (default None, no expiry)
+        :param str tag: text to associate with arguments (default None)
+        :param set ignore: positional or keyword args to ignore (default ())
+        :return: callable decorator
+
+        """
+        # Caution: Nearly identical code exists in DjangoCache.memoize
+        if callable(name):
+            raise TypeError('name cannot be callable')
+
+        def decorator(func):
+            """Decorator created by memoize() for callable `func`."""
+            base = (full_name(func),) if name is None else (name,)
+
+            @ft.wraps(func)
+            def wrapper(*args, **kwargs):
+                """Wrapper for callable to cache arguments and return values."""
+                key = wrapper.__cache_key__(*args, **kwargs)
+                result = self.get(key, default=ENOVAL, retry=True)
+
+                if result is ENOVAL:
+                    result = func(*args, **kwargs)
+                    if expire is None or expire > 0:
+                        self.set(key, result, expire, tag=tag, retry=True)
+
+                return result
+
+            def __cache_key__(*args, **kwargs):
+                """Make key for cache given function arguments."""
+                return args_to_key(base, args, kwargs, typed, ignore)
+
+            wrapper.__cache_key__ = __cache_key__
+            return wrapper
+
+        return decorator
+
+    def check(self, fix=False, retry=False):
+        """Check database and file system consistency.
+
+        Intended for use in testing and post-mortem error analysis.
+
+        While checking the Cache table for consistency, a writer lock is held
+        on the database. The lock blocks other cache clients from writing to
+        the database. For caches with many file references, the lock may be
+        held for a long time. For example, local benchmarking shows that a
+        cache with 1,000 file references takes ~60ms to check.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param bool fix: correct inconsistencies
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: list of warnings
+        :raises Timeout: if database timeout occurs
+
+        """
+        # pylint: disable=access-member-before-definition,W0201
+        with warnings.catch_warnings(record=True) as warns:
+            sql = self._sql
+
+            # Check integrity of database.
+
+            rows = sql('PRAGMA integrity_check').fetchall()
+
+            if len(rows) != 1 or rows[0][0] != 'ok':
+                for (message,) in rows:
+                    warnings.warn(message)
+
+            if fix:
+                sql('VACUUM')
+
+            with self._transact(retry) as (sql, _):
+
+                # Check Cache.filename against file system.
+
+                filenames = set()
+                select = (
+                    'SELECT rowid, size, filename FROM Cache'
+                    ' WHERE filename IS NOT NULL'
+                )
+
+                rows = sql(select).fetchall()
+
+                for rowid, size, filename in rows:
+                    full_path = op.join(self._directory, filename)
+                    filenames.add(full_path)
+
+                    if op.exists(full_path):
+                        real_size = op.getsize(full_path)
+
+                        if size != real_size:
+                            message = 'wrong file size: %s, %d != %d'
+                            args = full_path, real_size, size
+                            warnings.warn(message % args)
+
+                            if fix:
+                                sql(
+                                    'UPDATE Cache SET size = ?'
+                                    ' WHERE rowid = ?',
+                                    (real_size, rowid),
+                                )
+
+                        continue
+
+                    warnings.warn('file not found: %s' % full_path)
+
+                    if fix:
+                        sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+                # Check file system against Cache.filename.
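+                # (Any file under the cache directory that no Cache row
+                # references is reported below as an UnknownFileWarning;
+                # the SQLite database file itself is exempt.)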
+
+                for dirpath, _, files in os.walk(self._directory):
+                    paths = [op.join(dirpath, filename) for filename in files]
+                    error = set(paths) - filenames
+
+                    for full_path in error:
+                        if DBNAME in full_path:
+                            continue
+
+                        message = 'unknown file: %s' % full_path
+                        warnings.warn(message, UnknownFileWarning)
+
+                        if fix:
+                            os.remove(full_path)
+
+                # Check for empty directories.
+
+                for dirpath, dirs, files in os.walk(self._directory):
+                    if not (dirs or files):
+                        message = 'empty directory: %s' % dirpath
+                        warnings.warn(message, EmptyDirWarning)
+
+                        if fix:
+                            os.rmdir(dirpath)
+
+                # Check Settings.count against count of Cache rows.
+
+                self.reset('count')
+                ((count,),) = sql('SELECT COUNT(key) FROM Cache').fetchall()
+
+                if self.count != count:
+                    message = 'Settings.count != COUNT(Cache.key); %d != %d'
+                    warnings.warn(message % (self.count, count))
+
+                    if fix:
+                        sql(
+                            'UPDATE Settings SET value = ? WHERE key = ?',
+                            (count, 'count'),
+                        )
+
+                # Check Settings.size against sum of Cache.size column.
+
+                self.reset('size')
+                select_size = 'SELECT COALESCE(SUM(size), 0) FROM Cache'
+                ((size,),) = sql(select_size).fetchall()
+
+                if self.size != size:
+                    message = 'Settings.size != SUM(Cache.size); %d != %d'
+                    warnings.warn(message % (self.size, size))
+
+                    if fix:
+                        sql(
+                            'UPDATE Settings SET value = ? WHERE key = ?',
+                            (size, 'size'),
+                        )
+
+        return warns
+
+    def create_tag_index(self):
+        """Create tag index on cache database.
+
+        It is better to initialize cache with `tag_index=True` than use this.
+
+        :raises Timeout: if database timeout occurs
+
+        """
+        sql = self._sql
+        sql('CREATE INDEX IF NOT EXISTS Cache_tag_rowid ON Cache(tag, rowid)')
+        self.reset('tag_index', 1)
+
+    def drop_tag_index(self):
+        """Drop tag index on cache database.
+
+        :raises Timeout: if database timeout occurs
+
+        """
+        sql = self._sql
+        sql('DROP INDEX IF EXISTS Cache_tag_rowid')
+        self.reset('tag_index', 0)
+
+    def evict(self, tag, retry=False):
+        """Remove items with matching `tag` from cache.
+
+        Removing items is an iterative process. In each iteration, a subset of
+        items is removed. Concurrent writes may occur between iterations.
+
+        If a :exc:`Timeout` occurs, the first element of the exception's
+        `args` attribute will be the number of items removed before the
+        exception occurred.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
+
+        :param str tag: tag identifying items
+        :param bool retry: retry if database timeout occurs (default False)
+        :return: count of rows removed
+        :raises Timeout: if database timeout occurs
+
+        """
+        select = (
+            'SELECT rowid, filename FROM Cache'
+            ' WHERE tag = ? AND rowid > ?'
+            ' ORDER BY rowid LIMIT ?'
+        )
+        args = [tag, 0, 100]
+        return self._select_delete(select, args, arg_index=1, retry=retry)
+
+    def expire(self, now=None, retry=False):
+        """Remove expired items from cache.
+
+        Removing items is an iterative process. In each iteration, a subset of
+        items is removed. Concurrent writes may occur between iterations.
+
+        If a :exc:`Timeout` occurs, the first element of the exception's
+        `args` attribute will be the number of items removed before the
+        exception occurred.
+
+        Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+        `False` (default).
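+
+        For example (a sketch; timings are illustrative):
+
+        >>> import time
+        >>> cache = Cache()
+        >>> cache.set('report', 'data', expire=0.1)
+        True
+        >>> time.sleep(0.2)
+        >>> cache.expire()
+        1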
+ + :param float now: current time (default None, ``time.time()`` used) + :param bool retry: retry if database timeout occurs (default False) + :return: count of items removed + :raises Timeout: if database timeout occurs + + """ + select = ( + 'SELECT rowid, expire_time, filename FROM Cache' + ' WHERE ? < expire_time AND expire_time < ?' + ' ORDER BY expire_time LIMIT ?' + ) + args = [0, now or time.time(), 100] + return self._select_delete(select, args, row_index=1, retry=retry) + + def cull(self, retry=False): + """Cull items from cache until volume is less than size limit. + + Removing items is an iterative process. In each iteration, a subset of + items is removed. Concurrent writes may occur between iterations. + + If a :exc:`Timeout` occurs, the first element of the exception's + `args` attribute will be the number of items removed before the + exception occurred. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param bool retry: retry if database timeout occurs (default False) + :return: count of items removed + :raises Timeout: if database timeout occurs + + """ + now = time.time() + + # Remove expired items. + + count = self.expire(now) + + # Remove items by policy. + + select_policy = EVICTION_POLICY[self.eviction_policy]['cull'] + + if select_policy is None: + return 0 + + select_filename = select_policy.format(fields='filename', now=now) + + try: + while self.volume() > self.size_limit: + with self._transact(retry) as (sql, cleanup): + rows = sql(select_filename, (10,)).fetchall() + + if not rows: + break + + count += len(rows) + delete = ( + 'DELETE FROM Cache WHERE rowid IN (%s)' + % select_policy.format(fields='rowid', now=now) + ) + sql(delete, (10,)) + + for (filename,) in rows: + cleanup(filename) + except Timeout: + raise Timeout(count) from None + + return count + + def clear(self, retry=False): + """Remove all items from cache. + + Removing items is an iterative process. In each iteration, a subset of + items is removed. Concurrent writes may occur between iterations. + + If a :exc:`Timeout` occurs, the first element of the exception's + `args` attribute will be the number of items removed before the + exception occurred. + + Raises :exc:`Timeout` error when database timeout occurs and `retry` is + `False` (default). + + :param bool retry: retry if database timeout occurs (default False) + :return: count of rows removed + :raises Timeout: if database timeout occurs + + """ + select = ( + 'SELECT rowid, filename FROM Cache' + ' WHERE rowid > ?' + ' ORDER BY rowid LIMIT ?' + ) + args = [0, 100] + return self._select_delete(select, args, retry=retry) + + def _select_delete( + self, select, args, row_index=0, arg_index=0, retry=False + ): + count = 0 + delete = 'DELETE FROM Cache WHERE rowid IN (%s)' + + try: + while True: + with self._transact(retry) as (sql, cleanup): + rows = sql(select, args).fetchall() + + if not rows: + break + + count += len(rows) + sql(delete % ','.join(str(row[0]) for row in rows)) + + for row in rows: + args[arg_index] = row[row_index] + cleanup(row[-1]) + + except Timeout: + raise Timeout(count) from None + + return count + + def iterkeys(self, reverse=False): + """Iterate Cache keys in database sort order. + + >>> cache = Cache() + >>> for key in [4, 1, 3, 0, 2]: + ... 
cache[key] = key + >>> list(cache.iterkeys()) + [0, 1, 2, 3, 4] + >>> list(cache.iterkeys(reverse=True)) + [4, 3, 2, 1, 0] + + :param bool reverse: reverse sort order (default False) + :return: iterator of Cache keys + + """ + sql = self._sql + limit = 100 + _disk_get = self._disk.get + + if reverse: + select = ( + 'SELECT key, raw FROM Cache' + ' ORDER BY key DESC, raw DESC LIMIT 1' + ) + iterate = ( + 'SELECT key, raw FROM Cache' + ' WHERE key = ? AND raw < ? OR key < ?' + ' ORDER BY key DESC, raw DESC LIMIT ?' + ) + else: + select = ( + 'SELECT key, raw FROM Cache' + ' ORDER BY key ASC, raw ASC LIMIT 1' + ) + iterate = ( + 'SELECT key, raw FROM Cache' + ' WHERE key = ? AND raw > ? OR key > ?' + ' ORDER BY key ASC, raw ASC LIMIT ?' + ) + + row = sql(select).fetchall() + + if row: + ((key, raw),) = row + else: + return + + yield _disk_get(key, raw) + + while True: + rows = sql(iterate, (key, raw, key, limit)).fetchall() + + if not rows: + break + + for key, raw in rows: + yield _disk_get(key, raw) + + def _iter(self, ascending=True): + sql = self._sql + rows = sql('SELECT MAX(rowid) FROM Cache').fetchall() + ((max_rowid,),) = rows + yield # Signal ready. + + if max_rowid is None: + return + + bound = max_rowid + 1 + limit = 100 + _disk_get = self._disk.get + rowid = 0 if ascending else bound + select = ( + 'SELECT rowid, key, raw FROM Cache' + ' WHERE ? < rowid AND rowid < ?' + ' ORDER BY rowid %s LIMIT ?' + ) % ('ASC' if ascending else 'DESC') + + while True: + if ascending: + args = (rowid, bound, limit) + else: + args = (0, rowid, limit) + + rows = sql(select, args).fetchall() + + if not rows: + break + + for rowid, key, raw in rows: + yield _disk_get(key, raw) + + def __iter__(self): + """Iterate keys in cache including expired items.""" + iterator = self._iter() + next(iterator) + return iterator + + def __reversed__(self): + """Reverse iterate keys in cache including expired items.""" + iterator = self._iter(ascending=False) + next(iterator) + return iterator + + def stats(self, enable=True, reset=False): + """Return cache statistics hits and misses. + + :param bool enable: enable collecting statistics (default True) + :param bool reset: reset hits and misses to 0 (default False) + :return: (hits, misses) + + """ + # pylint: disable=E0203,W0201 + result = (self.reset('hits'), self.reset('misses')) + + if reset: + self.reset('hits', 0) + self.reset('misses', 0) + + self.reset('statistics', enable) + + return result + + def volume(self): + """Return estimated total size of cache on disk. + + :return: size in bytes + + """ + ((page_count,),) = self._sql('PRAGMA page_count').fetchall() + total_size = self._page_size * page_count + self.reset('size') + return total_size + + def close(self): + """Close database connection.""" + con = getattr(self._local, 'con', None) + + if con is None: + return + + con.close() + + try: + delattr(self._local, 'con') + except AttributeError: + pass + + def __enter__(self): + # Create connection in thread. + # pylint: disable=unused-variable + connection = self._con # noqa + return self + + def __exit__(self, *exception): + self.close() + + def __len__(self): + """Count of items in cache including expired items.""" + return self.reset('count') + + def __getstate__(self): + return (self.directory, self.timeout, type(self.disk)) + + def __setstate__(self, state): + self.__init__(*state) + + def reset(self, key, value=ENOVAL, update=True): + """Reset `key` and `value` item from Settings table. + + Use `reset` to update the value of Cache settings correctly. 
Cache + settings are stored in the Settings table of the SQLite database. If + `update` is ``False`` then no attempt is made to update the database. + + If `value` is not given, it is reloaded from the Settings + table. Otherwise, the Settings table is updated. + + Settings with the ``disk_`` prefix correspond to Disk + attributes. Updating the value will change the unprefixed attribute on + the associated Disk instance. + + Settings with the ``sqlite_`` prefix correspond to SQLite + pragmas. Updating the value will execute the corresponding PRAGMA + statement. + + SQLite PRAGMA statements may be executed before the Settings table + exists in the database by setting `update` to ``False``. + + :param str key: Settings key for item + :param value: value for item (optional) + :param bool update: update database Settings table (default True) + :return: updated value for item + :raises Timeout: if database timeout occurs + + """ + sql = self._sql + sql_retry = self._sql_retry + + if value is ENOVAL: + select = 'SELECT value FROM Settings WHERE key = ?' + ((value,),) = sql_retry(select, (key,)).fetchall() + setattr(self, key, value) + return value + + if update: + statement = 'UPDATE Settings SET value = ? WHERE key = ?' + sql_retry(statement, (value, key)) + + if key.startswith('sqlite_'): + pragma = key[7:] + + # 2016-02-17 GrantJ - PRAGMA and isolation_level=None + # don't always play nicely together. Retry setting the + # PRAGMA. I think some PRAGMA statements expect to + # immediately take an EXCLUSIVE lock on the database. I + # can't find any documentation for this but without the + # retry, stress will intermittently fail with multiple + # processes. + + # 2018-11-05 GrantJ - Avoid setting pragma values that + # are already set. Pragma settings like auto_vacuum and + # journal_mode can take a long time or may not work after + # tables have been created. + + start = time.time() + while True: + try: + try: + ((old_value,),) = sql( + 'PRAGMA %s' % (pragma) + ).fetchall() + update = old_value != value + except ValueError: + update = True + if update: + sql('PRAGMA %s = %s' % (pragma, value)).fetchall() + break + except sqlite3.OperationalError as exc: + if str(exc) != 'database is locked': + raise + diff = time.time() - start + if diff > 60: + raise + time.sleep(0.001) + elif key.startswith('disk_'): + attr = key[5:] + setattr(self._disk, attr, value) + + setattr(self, key, value) + return value diff --git a/.venv/lib/python3.11/site-packages/diskcache/djangocache.py b/.venv/lib/python3.11/site-packages/diskcache/djangocache.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc8ce2c5f98d6cceeff7fee5888b680f1e3b39a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/diskcache/djangocache.py @@ -0,0 +1,456 @@ +"""Django-compatible disk and file backed cache.""" + +from functools import wraps + +from django.core.cache.backends.base import BaseCache + +try: + from django.core.cache.backends.base import DEFAULT_TIMEOUT +except ImportError: # pragma: no cover + # For older versions of Django simply use 300 seconds. + DEFAULT_TIMEOUT = 300 + +from .core import ENOVAL, args_to_key, full_name +from .fanout import FanoutCache + + +class DjangoCache(BaseCache): + """Django-compatible disk and file backed cache.""" + + def __init__(self, directory, params): + """Initialize DjangoCache instance. 
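+
+        Typically configured through the Django ``CACHES`` setting. An
+        illustrative entry (``SHARDS`` and ``DATABASE_TIMEOUT`` shown at
+        their defaults)::
+
+            CACHES = {
+                'default': {
+                    'BACKEND': 'diskcache.DjangoCache',
+                    'LOCATION': '/tmp/django-cache',
+                    'SHARDS': 8,
+                    'DATABASE_TIMEOUT': 0.010,
+                },
+            }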
+ + :param str directory: cache directory + :param dict params: cache parameters + + """ + super().__init__(params) + shards = params.get('SHARDS', 8) + timeout = params.get('DATABASE_TIMEOUT', 0.010) + options = params.get('OPTIONS', {}) + self._cache = FanoutCache(directory, shards, timeout, **options) + + @property + def directory(self): + """Cache directory.""" + return self._cache.directory + + def cache(self, name): + """Return Cache with given `name` in subdirectory. + + :param str name: subdirectory name for Cache + :return: Cache with given name + + """ + return self._cache.cache(name) + + def deque(self, name, maxlen=None): + """Return Deque with given `name` in subdirectory. + + :param str name: subdirectory name for Deque + :param maxlen: max length (default None, no max) + :return: Deque with given name + + """ + return self._cache.deque(name, maxlen=maxlen) + + def index(self, name): + """Return Index with given `name` in subdirectory. + + :param str name: subdirectory name for Index + :return: Index with given name + + """ + return self._cache.index(name) + + def add( + self, + key, + value, + timeout=DEFAULT_TIMEOUT, + version=None, + read=False, + tag=None, + retry=True, + ): + """Set a value in the cache if the key does not already exist. If + timeout is given, that timeout will be used for the key; otherwise the + default cache timeout will be used. + + Return True if the value was stored, False otherwise. + + :param key: key for item + :param value: value for item + :param float timeout: seconds until the item expires + (default 300 seconds) + :param int version: key version number (default None, cache parameter) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default True) + :return: True if item was added + + """ + # pylint: disable=arguments-differ + key = self.make_key(key, version=version) + timeout = self.get_backend_timeout(timeout=timeout) + return self._cache.add(key, value, timeout, read, tag, retry) + + def get( + self, + key, + default=None, + version=None, + read=False, + expire_time=False, + tag=False, + retry=False, + ): + """Fetch a given key from the cache. If the key does not exist, return + default, which itself defaults to None. + + :param key: key for item + :param default: return value if key is missing (default None) + :param int version: key version number (default None, cache parameter) + :param bool read: if True, return file handle to value + (default False) + :param float expire_time: if True, return expire_time in tuple + (default False) + :param tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default False) + :return: value for item if key is found else default + + """ + # pylint: disable=arguments-differ + key = self.make_key(key, version=version) + return self._cache.get(key, default, read, expire_time, tag, retry) + + def read(self, key, version=None): + """Return file handle corresponding to `key` from Cache. + + :param key: Python key to retrieve + :param int version: key version number (default None, cache parameter) + :return: file open for reading in binary mode + :raises KeyError: if key is not found + + """ + key = self.make_key(key, version=version) + return self._cache.read(key) + + def set( + self, + key, + value, + timeout=DEFAULT_TIMEOUT, + version=None, + read=False, + tag=None, + retry=True, + ): + """Set a value in the cache. 
If timeout is given, that timeout will be + used for the key; otherwise the default cache timeout will be used. + + :param key: key for item + :param value: value for item + :param float timeout: seconds until the item expires + (default 300 seconds) + :param int version: key version number (default None, cache parameter) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default True) + :return: True if item was set + + """ + # pylint: disable=arguments-differ + key = self.make_key(key, version=version) + timeout = self.get_backend_timeout(timeout=timeout) + return self._cache.set(key, value, timeout, read, tag, retry) + + def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, retry=True): + """Touch a key in the cache. If timeout is given, that timeout will be + used for the key; otherwise the default cache timeout will be used. + + :param key: key for item + :param float timeout: seconds until the item expires + (default 300 seconds) + :param int version: key version number (default None, cache parameter) + :param bool retry: retry if database timeout occurs (default True) + :return: True if key was touched + + """ + # pylint: disable=arguments-differ + key = self.make_key(key, version=version) + timeout = self.get_backend_timeout(timeout=timeout) + return self._cache.touch(key, timeout, retry) + + def pop( + self, + key, + default=None, + version=None, + expire_time=False, + tag=False, + retry=True, + ): + """Remove corresponding item for `key` from cache and return value. + + If `key` is missing, return `default`. + + Operation is atomic. Concurrent operations will be serialized. + + :param key: key for item + :param default: return value if key is missing (default None) + :param int version: key version number (default None, cache parameter) + :param float expire_time: if True, return expire_time in tuple + (default False) + :param tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default True) + :return: value for item if key is found else default + + """ + key = self.make_key(key, version=version) + return self._cache.pop(key, default, expire_time, tag, retry) + + def delete(self, key, version=None, retry=True): + """Delete a key from the cache, failing silently. + + :param key: key for item + :param int version: key version number (default None, cache parameter) + :param bool retry: retry if database timeout occurs (default True) + :return: True if item was deleted + + """ + # pylint: disable=arguments-differ + key = self.make_key(key, version=version) + return self._cache.delete(key, retry) + + def incr(self, key, delta=1, version=None, default=None, retry=True): + """Increment value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent increment operations will be + counted individually. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. 
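+
+        A sketch of typical use (assumes this backend is the configured
+        Django cache)::
+
+            cache.set('hits', 0)    # True
+            cache.incr('hits')      # 1
+            cache.incr('hits', 10)  # 11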
+ + :param key: key for item + :param int delta: amount to increment (default 1) + :param int version: key version number (default None, cache parameter) + :param int default: value if key is missing (default None) + :param bool retry: retry if database timeout occurs (default True) + :return: new value for item on success else None + :raises ValueError: if key is not found and default is None + + """ + # pylint: disable=arguments-differ + key = self.make_key(key, version=version) + try: + return self._cache.incr(key, delta, default, retry) + except KeyError: + raise ValueError("Key '%s' not found" % key) from None + + def decr(self, key, delta=1, version=None, default=None, retry=True): + """Decrement value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent decrement operations will be + counted individually. + + Unlike Memcached, negative values are supported. Value may be + decremented below zero. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. + + :param key: key for item + :param int delta: amount to decrement (default 1) + :param int version: key version number (default None, cache parameter) + :param int default: value if key is missing (default None) + :param bool retry: retry if database timeout occurs (default True) + :return: new value for item on success else None + :raises ValueError: if key is not found and default is None + + """ + # pylint: disable=arguments-differ + return self.incr(key, -delta, version, default, retry) + + def has_key(self, key, version=None): + """Returns True if the key is in the cache and has not expired. + + :param key: key for item + :param int version: key version number (default None, cache parameter) + :return: True if key is found + + """ + key = self.make_key(key, version=version) + return key in self._cache + + def expire(self): + """Remove expired items from cache. + + :return: count of items removed + + """ + return self._cache.expire() + + def stats(self, enable=True, reset=False): + """Return cache statistics hits and misses. + + :param bool enable: enable collecting statistics (default True) + :param bool reset: reset hits and misses to 0 (default False) + :return: (hits, misses) + + """ + return self._cache.stats(enable=enable, reset=reset) + + def create_tag_index(self): + """Create tag index on cache database. + + Better to initialize cache with `tag_index=True` than use this. + + :raises Timeout: if database timeout occurs + + """ + self._cache.create_tag_index() + + def drop_tag_index(self): + """Drop tag index on cache database. + + :raises Timeout: if database timeout occurs + + """ + self._cache.drop_tag_index() + + def evict(self, tag): + """Remove items with matching `tag` from cache. + + :param str tag: tag identifying items + :return: count of items removed + + """ + return self._cache.evict(tag) + + def cull(self): + """Cull items from cache until volume is less than size limit. + + :return: count of items removed + + """ + return self._cache.cull() + + def clear(self): + """Remove *all* values from the cache at once.""" + return self._cache.clear() + + def close(self, **kwargs): + """Close the cache connection.""" + # pylint: disable=unused-argument + self._cache.close() + + def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT): + """Return seconds to expiration. 
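+
+        The ``DEFAULT_TIMEOUT`` sentinel maps to the configured default
+        timeout; ``0`` maps to ``-1`` so the item is treated as already
+        expired when stored.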
+ + :param float timeout: seconds until the item expires + (default 300 seconds) + + """ + if timeout == DEFAULT_TIMEOUT: + timeout = self.default_timeout + elif timeout == 0: + # ticket 21147 - avoid time.time() related precision issues + timeout = -1 + return None if timeout is None else timeout + + def memoize( + self, + name=None, + timeout=DEFAULT_TIMEOUT, + version=None, + typed=False, + tag=None, + ignore=(), + ): + """Memoizing cache decorator. + + Decorator to wrap callable with memoizing function using cache. + Repeated calls with the same arguments will lookup result in cache and + avoid function evaluation. + + If name is set to None (default), the callable name will be determined + automatically. + + When timeout is set to zero, function results will not be set in the + cache. Cache lookups still occur, however. Read + :doc:`case-study-landing-page-caching` for example usage. + + If typed is set to True, function arguments of different types will be + cached separately. For example, f(3) and f(3.0) will be treated as + distinct calls with distinct results. + + The original underlying function is accessible through the __wrapped__ + attribute. This is useful for introspection, for bypassing the cache, + or for rewrapping the function with a different cache. + + An additional `__cache_key__` attribute can be used to generate the + cache key used for the given arguments. + + Remember to call memoize when decorating a callable. If you forget, + then a TypeError will occur. + + :param str name: name given for callable (default None, automatic) + :param float timeout: seconds until the item expires + (default 300 seconds) + :param int version: key version number (default None, cache parameter) + :param bool typed: cache different types separately (default False) + :param str tag: text to associate with arguments (default None) + :param set ignore: positional or keyword args to ignore (default ()) + :return: callable decorator + + """ + # Caution: Nearly identical code exists in Cache.memoize + if callable(name): + raise TypeError('name cannot be callable') + + def decorator(func): + """Decorator created by memoize() for callable `func`.""" + base = (full_name(func),) if name is None else (name,) + + @wraps(func) + def wrapper(*args, **kwargs): + """Wrapper for callable to cache arguments and return values.""" + key = wrapper.__cache_key__(*args, **kwargs) + result = self.get(key, ENOVAL, version, retry=True) + + if result is ENOVAL: + result = func(*args, **kwargs) + valid_timeout = ( + timeout is None + or timeout == DEFAULT_TIMEOUT + or timeout > 0 + ) + if valid_timeout: + self.set( + key, + result, + timeout, + version, + tag=tag, + retry=True, + ) + + return result + + def __cache_key__(*args, **kwargs): + """Make key for cache given function arguments.""" + return args_to_key(base, args, kwargs, typed, ignore) + + wrapper.__cache_key__ = __cache_key__ + return wrapper + + return decorator diff --git a/.venv/lib/python3.11/site-packages/diskcache/fanout.py b/.venv/lib/python3.11/site-packages/diskcache/fanout.py new file mode 100644 index 0000000000000000000000000000000000000000..9822ee4e576b4d97d5e0209862f0d3a93fde5b87 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/diskcache/fanout.py @@ -0,0 +1,687 @@ +"""Fanout cache automatically shards keys and values.""" + +import contextlib as cl +import functools +import itertools as it +import operator +import os.path as op +import sqlite3 +import tempfile +import time + +from .core import DEFAULT_SETTINGS, ENOVAL, Cache, Disk, 
Timeout +from .persistent import Deque, Index + + +class FanoutCache: + """Cache that shards keys and values.""" + + def __init__( + self, directory=None, shards=8, timeout=0.010, disk=Disk, **settings + ): + """Initialize cache instance. + + :param str directory: cache directory + :param int shards: number of shards to distribute writes + :param float timeout: SQLite connection timeout + :param disk: `Disk` instance for serialization + :param settings: any of `DEFAULT_SETTINGS` + + """ + if directory is None: + directory = tempfile.mkdtemp(prefix='diskcache-') + directory = str(directory) + directory = op.expanduser(directory) + directory = op.expandvars(directory) + + default_size_limit = DEFAULT_SETTINGS['size_limit'] + size_limit = settings.pop('size_limit', default_size_limit) / shards + + self._count = shards + self._directory = directory + self._disk = disk + self._shards = tuple( + Cache( + directory=op.join(directory, '%03d' % num), + timeout=timeout, + disk=disk, + size_limit=size_limit, + **settings, + ) + for num in range(shards) + ) + self._hash = self._shards[0].disk.hash + self._caches = {} + self._deques = {} + self._indexes = {} + + @property + def directory(self): + """Cache directory.""" + return self._directory + + def __getattr__(self, name): + safe_names = {'timeout', 'disk'} + valid_name = name in DEFAULT_SETTINGS or name in safe_names + assert valid_name, 'cannot access {} in cache shard'.format(name) + return getattr(self._shards[0], name) + + @cl.contextmanager + def transact(self, retry=True): + """Context manager to perform a transaction by locking the cache. + + While the cache is locked, no other write operation is permitted. + Transactions should therefore be as short as possible. Read and write + operations performed in a transaction are atomic. Read operations may + occur concurrent to a transaction. + + Transactions may be nested and may not be shared between threads. + + Blocks until transactions are held on all cache shards by retrying as + necessary. + + >>> cache = FanoutCache() + >>> with cache.transact(): # Atomically increment two keys. + ... _ = cache.incr('total', 123.4) + ... _ = cache.incr('count', 1) + >>> with cache.transact(): # Atomically calculate average. + ... average = cache['total'] / cache['count'] + >>> average + 123.4 + + :return: context manager for use in `with` statement + + """ + assert retry, 'retry must be True in FanoutCache' + with cl.ExitStack() as stack: + for shard in self._shards: + shard_transaction = shard.transact(retry=True) + stack.enter_context(shard_transaction) + yield + + def set(self, key, value, expire=None, read=False, tag=None, retry=False): + """Set `key` and `value` item in cache. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param value: value for item + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as raw bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default False) + :return: True if item was set + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.set(key, value, expire, read, tag, retry) + except Timeout: + return False + + def __setitem__(self, key, value): + """Set `key` and `value` item in cache. 
+ + Calls :func:`FanoutCache.set` internally with `retry` set to `True`. + + :param key: key for item + :param value: value for item + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + shard[key] = value + + def touch(self, key, expire=None, retry=False): + """Touch `key` in cache and update `expire` time. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool retry: retry if database timeout occurs (default False) + :return: True if key was touched + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.touch(key, expire, retry) + except Timeout: + return False + + def add(self, key, value, expire=None, read=False, tag=None, retry=False): + """Add `key` and `value` item to cache. + + Similar to `set`, but only add to cache if key not present. + + This operation is atomic. Only one concurrent add operation for given + key from separate threads or processes will succeed. + + When `read` is `True`, `value` should be a file-like object opened + for reading in binary mode. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param value: value for item + :param float expire: seconds until the key expires + (default None, no expiry) + :param bool read: read value as bytes from file (default False) + :param str tag: text to associate with key (default None) + :param bool retry: retry if database timeout occurs (default False) + :return: True if item was added + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.add(key, value, expire, read, tag, retry) + except Timeout: + return False + + def incr(self, key, delta=1, default=0, retry=False): + """Increment value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent increment operations will be + counted individually. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param int delta: amount to increment (default 1) + :param int default: value if key is missing (default 0) + :param bool retry: retry if database timeout occurs (default False) + :return: new value for item on success else None + :raises KeyError: if key is not found and default is None + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.incr(key, delta, default, retry) + except Timeout: + return None + + def decr(self, key, delta=1, default=0, retry=False): + """Decrement value by delta for item with key. + + If key is missing and default is None then raise KeyError. Else if key + is missing and default is not None then use default for value. + + Operation is atomic. All concurrent decrement operations will be + counted individually. + + Unlike Memcached, negative values are supported. Value may be + decremented below zero. + + Assumes value may be stored in a SQLite column. Most builds that target + machines with 64-bit pointer widths will support 64-bit signed + integers. 
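+
+        Illustrative sketch (fresh cache, so the key is missing and the
+        decrement applies to `default`):
+
+        >>> cache = FanoutCache()
+        >>> cache.decr('balance', 5, default=100)
+        95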
+ + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param int delta: amount to decrement (default 1) + :param int default: value if key is missing (default 0) + :param bool retry: retry if database timeout occurs (default False) + :return: new value for item on success else None + :raises KeyError: if key is not found and default is None + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.decr(key, delta, default, retry) + except Timeout: + return None + + def get( + self, + key, + default=None, + read=False, + expire_time=False, + tag=False, + retry=False, + ): + """Retrieve value from cache. If `key` is missing, return `default`. + + If database timeout occurs then returns `default` unless `retry` is set + to `True` (default `False`). + + :param key: key for item + :param default: return value if key is missing (default None) + :param bool read: if True, return file handle to value + (default False) + :param float expire_time: if True, return expire_time in tuple + (default False) + :param tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default False) + :return: value for item if key is found else default + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.get(key, default, read, expire_time, tag, retry) + except (Timeout, sqlite3.OperationalError): + return default + + def __getitem__(self, key): + """Return corresponding value for `key` from cache. + + Calls :func:`FanoutCache.get` internally with `retry` set to `True`. + + :param key: key for item + :return: value for item + :raises KeyError: if key is not found + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + return shard[key] + + def read(self, key): + """Return file handle corresponding to `key` from cache. + + :param key: key for item + :return: file open for reading in binary mode + :raises KeyError: if key is not found + + """ + handle = self.get(key, default=ENOVAL, read=True, retry=True) + if handle is ENOVAL: + raise KeyError(key) + return handle + + def __contains__(self, key): + """Return `True` if `key` matching item is found in cache. + + :param key: key for item + :return: True if key is found + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + return key in shard + + def pop( + self, key, default=None, expire_time=False, tag=False, retry=False + ): # noqa: E501 + """Remove corresponding item for `key` from cache and return value. + + If `key` is missing, return `default`. + + Operation is atomic. Concurrent operations will be serialized. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param default: return value if key is missing (default None) + :param float expire_time: if True, return expire_time in tuple + (default False) + :param tag: if True, return tag in tuple (default False) + :param bool retry: retry if database timeout occurs (default False) + :return: value for item if key is found else default + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.pop(key, default, expire_time, tag, retry) + except Timeout: + return default + + def delete(self, key, retry=False): + """Delete corresponding item for `key` from cache. + + Missing keys are ignored. 
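+
+        Illustrative sketch:
+
+        >>> cache = FanoutCache()
+        >>> cache.set('tmp', 1)
+        True
+        >>> cache.delete('tmp')
+        True
+        >>> cache.delete('tmp')  # missing key, fails silently
+        False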
+ + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param key: key for item + :param bool retry: retry if database timeout occurs (default False) + :return: True if item was deleted + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + try: + return shard.delete(key, retry) + except Timeout: + return False + + def __delitem__(self, key): + """Delete corresponding item for `key` from cache. + + Calls :func:`FanoutCache.delete` internally with `retry` set to `True`. + + :param key: key for item + :raises KeyError: if key is not found + + """ + index = self._hash(key) % self._count + shard = self._shards[index] + del shard[key] + + def check(self, fix=False, retry=False): + """Check database and file system consistency. + + Intended for use in testing and post-mortem error analysis. + + While checking the cache table for consistency, a writer lock is held + on the database. The lock blocks other cache clients from writing to + the database. For caches with many file references, the lock may be + held for a long time. For example, local benchmarking shows that a + cache with 1,000 file references takes ~60ms to check. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param bool fix: correct inconsistencies + :param bool retry: retry if database timeout occurs (default False) + :return: list of warnings + :raises Timeout: if database timeout occurs + + """ + warnings = (shard.check(fix, retry) for shard in self._shards) + return functools.reduce(operator.iadd, warnings, []) + + def expire(self, retry=False): + """Remove expired items from cache. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param bool retry: retry if database timeout occurs (default False) + :return: count of items removed + + """ + return self._remove('expire', args=(time.time(),), retry=retry) + + def create_tag_index(self): + """Create tag index on cache database. + + Better to initialize cache with `tag_index=True` than use this. + + :raises Timeout: if database timeout occurs + + """ + for shard in self._shards: + shard.create_tag_index() + + def drop_tag_index(self): + """Drop tag index on cache database. + + :raises Timeout: if database timeout occurs + + """ + for shard in self._shards: + shard.drop_tag_index() + + def evict(self, tag, retry=False): + """Remove items with matching `tag` from cache. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param str tag: tag identifying items + :param bool retry: retry if database timeout occurs (default False) + :return: count of items removed + + """ + return self._remove('evict', args=(tag,), retry=retry) + + def cull(self, retry=False): + """Cull items from cache until volume is less than size limit. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). + + :param bool retry: retry if database timeout occurs (default False) + :return: count of items removed + + """ + return self._remove('cull', retry=retry) + + def clear(self, retry=False): + """Remove all items from cache. + + If database timeout occurs then fails silently unless `retry` is set to + `True` (default `False`). 
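+
+        Illustrative sketch (the return value counts removed items):
+
+        >>> cache = FanoutCache()
+        >>> for num in range(10):
+        ...     _ = cache.set(num, num)
+        >>> cache.clear()
+        10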
+ + :param bool retry: retry if database timeout occurs (default False) + :return: count of items removed + + """ + return self._remove('clear', retry=retry) + + def _remove(self, name, args=(), retry=False): + total = 0 + for shard in self._shards: + method = getattr(shard, name) + while True: + try: + count = method(*args, retry=retry) + total += count + except Timeout as timeout: + total += timeout.args[0] + else: + break + return total + + def stats(self, enable=True, reset=False): + """Return cache statistics hits and misses. + + :param bool enable: enable collecting statistics (default True) + :param bool reset: reset hits and misses to 0 (default False) + :return: (hits, misses) + + """ + results = [shard.stats(enable, reset) for shard in self._shards] + total_hits = sum(hits for hits, _ in results) + total_misses = sum(misses for _, misses in results) + return total_hits, total_misses + + def volume(self): + """Return estimated total size of cache on disk. + + :return: size in bytes + + """ + return sum(shard.volume() for shard in self._shards) + + def close(self): + """Close database connection.""" + for shard in self._shards: + shard.close() + self._caches.clear() + self._deques.clear() + self._indexes.clear() + + def __enter__(self): + return self + + def __exit__(self, *exception): + self.close() + + def __getstate__(self): + return (self._directory, self._count, self.timeout, type(self.disk)) + + def __setstate__(self, state): + self.__init__(*state) + + def __iter__(self): + """Iterate keys in cache including expired items.""" + iterators = (iter(shard) for shard in self._shards) + return it.chain.from_iterable(iterators) + + def __reversed__(self): + """Reverse iterate keys in cache including expired items.""" + iterators = (reversed(shard) for shard in reversed(self._shards)) + return it.chain.from_iterable(iterators) + + def __len__(self): + """Count of items in cache including expired items.""" + return sum(len(shard) for shard in self._shards) + + def reset(self, key, value=ENOVAL): + """Reset `key` and `value` item from Settings table. + + If `value` is not given, it is reloaded from the Settings + table. Otherwise, the Settings table is updated. + + Settings attributes on cache objects are lazy-loaded and + read-only. Use `reset` to update the value. + + Settings with the ``sqlite_`` prefix correspond to SQLite + pragmas. Updating the value will execute the corresponding PRAGMA + statement. + + :param str key: Settings key for item + :param value: value for item (optional) + :return: updated value for item + + """ + for shard in self._shards: + while True: + try: + result = shard.reset(key, value) + except Timeout: + pass + else: + break + return result + + def cache(self, name, timeout=60, disk=None, **settings): + """Return Cache with given `name` in subdirectory. + + If disk is none (default), uses the fanout cache disk. 
+
+        >>> fanout_cache = FanoutCache()
+        >>> cache = fanout_cache.cache('test')
+        >>> cache.set('abc', 123)
+        True
+        >>> cache.get('abc')
+        123
+        >>> len(cache)
+        1
+        >>> cache.delete('abc')
+        True
+
+        :param str name: subdirectory name for Cache
+        :param float timeout: SQLite connection timeout
+        :param disk: Disk type or subclass for serialization
+        :param settings: any of DEFAULT_SETTINGS
+        :return: Cache with given name
+
+        """
+        _caches = self._caches
+
+        try:
+            return _caches[name]
+        except KeyError:
+            parts = name.split('/')
+            directory = op.join(self._directory, 'cache', *parts)
+            temp = Cache(
+                directory=directory,
+                timeout=timeout,
+                disk=self._disk if disk is None else disk,
+                **settings,
+            )
+            _caches[name] = temp
+            return temp
+
+    def deque(self, name, maxlen=None):
+        """Return Deque with given `name` in subdirectory.
+
+        >>> cache = FanoutCache()
+        >>> deque = cache.deque('test')
+        >>> deque.extend('abc')
+        >>> deque.popleft()
+        'a'
+        >>> deque.pop()
+        'c'
+        >>> len(deque)
+        1
+
+        :param str name: subdirectory name for Deque
+        :param maxlen: max length (default None, no max)
+        :return: Deque with given name
+
+        """
+        _deques = self._deques
+
+        try:
+            return _deques[name]
+        except KeyError:
+            parts = name.split('/')
+            directory = op.join(self._directory, 'deque', *parts)
+            cache = Cache(
+                directory=directory,
+                disk=self._disk,
+                eviction_policy='none',
+            )
+            deque = Deque.fromcache(cache, maxlen=maxlen)
+            _deques[name] = deque
+            return deque
+
+    def index(self, name):
+        """Return Index with given `name` in subdirectory.
+
+        >>> cache = FanoutCache()
+        >>> index = cache.index('test')
+        >>> index['abc'] = 123
+        >>> index['def'] = 456
+        >>> index['ghi'] = 789
+        >>> index.popitem()
+        ('ghi', 789)
+        >>> del index['abc']
+        >>> len(index)
+        1
+        >>> index['def']
+        456
+
+        :param str name: subdirectory name for Index
+        :return: Index with given name
+
+        """
+        _indexes = self._indexes
+
+        try:
+            return _indexes[name]
+        except KeyError:
+            parts = name.split('/')
+            directory = op.join(self._directory, 'index', *parts)
+            cache = Cache(
+                directory=directory,
+                disk=self._disk,
+                eviction_policy='none',
+            )
+            index = Index.fromcache(cache)
+            _indexes[name] = index
+            return index
+
+
+FanoutCache.memoize = Cache.memoize  # type: ignore
diff --git a/.venv/lib/python3.11/site-packages/diskcache/persistent.py b/.venv/lib/python3.11/site-packages/diskcache/persistent.py
new file mode 100644
index 0000000000000000000000000000000000000000..522bb74c5b1efc5b270df384f2c80f616d5eb2f0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/diskcache/persistent.py
@@ -0,0 +1,1245 @@
+"""Persistent Data Types
+"""
+
+import operator as op
+from collections import OrderedDict
+from collections.abc import (
+    ItemsView,
+    KeysView,
+    MutableMapping,
+    Sequence,
+    ValuesView,
+)
+from contextlib import contextmanager
+from shutil import rmtree
+
+from .core import ENOVAL, Cache
+
+
+def _make_compare(seq_op, doc):
+    """Make compare method with Sequence semantics."""
+
+    def compare(self, that):
+        """Compare method for deque and sequence."""
+        if not isinstance(that, Sequence):
+            return NotImplemented
+
+        len_self = len(self)
+        len_that = len(that)
+
+        if len_self != len_that:
+            if seq_op is op.eq:
+                return False
+            if seq_op is op.ne:
+                return True
+
+        for alpha, beta in zip(self, that):
+            if alpha != beta:
+                return seq_op(alpha, beta)
+
+        return seq_op(len_self, len_that)
+
+    compare.__name__ = '__{0}__'.format(seq_op.__name__)
+    doc_str = 'Return True if and only if deque is {0} `that`.'
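+    # Give the generated method a proper dunder name and docstring so it
+    # introspects like a hand-written comparison operator.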
+    compare.__doc__ = doc_str.format(doc)
+
+    return compare
+
+
+class Deque(Sequence):
+    """Persistent sequence with double-ended queue semantics.
+
+    Double-ended queue is an ordered collection with optimized access at its
+    endpoints.
+
+    Items are serialized to disk. Deque may be initialized from directory path
+    where items are stored.
+
+    >>> deque = Deque()
+    >>> deque += range(5)
+    >>> list(deque)
+    [0, 1, 2, 3, 4]
+    >>> for value in range(5):
+    ...     deque.appendleft(-value)
+    >>> len(deque)
+    10
+    >>> list(deque)
+    [-4, -3, -2, -1, 0, 0, 1, 2, 3, 4]
+    >>> deque.pop()
+    4
+    >>> deque.popleft()
+    -4
+    >>> deque.reverse()
+    >>> list(deque)
+    [3, 2, 1, 0, 0, -1, -2, -3]
+
+    """
+
+    def __init__(self, iterable=(), directory=None, maxlen=None):
+        """Initialize deque instance.
+
+        If directory is None then a temporary directory is created. The
+        directory will *not* be automatically removed.
+
+        :param iterable: iterable of items to append to deque
+        :param directory: deque directory (default None)
+        :param maxlen: max length (default None, no max)
+
+        """
+        self._cache = Cache(directory, eviction_policy='none')
+        self._maxlen = float('inf') if maxlen is None else maxlen
+        self._extend(iterable)
+
+    @classmethod
+    def fromcache(cls, cache, iterable=(), maxlen=None):
+        """Initialize deque using `cache`.
+
+        >>> cache = Cache()
+        >>> deque = Deque.fromcache(cache, [5, 6, 7, 8])
+        >>> deque.cache is cache
+        True
+        >>> len(deque)
+        4
+        >>> 7 in deque
+        True
+        >>> deque.popleft()
+        5
+
+        :param Cache cache: cache to use
+        :param iterable: iterable of items
+        :param maxlen: max length (default None, no max)
+        :return: initialized Deque
+
+        """
+        # pylint: disable=no-member,protected-access
+        self = cls.__new__(cls)
+        self._cache = cache
+        self._maxlen = float('inf') if maxlen is None else maxlen
+        self._extend(iterable)
+        return self
+
+    @property
+    def cache(self):
+        """Cache used by deque."""
+        return self._cache
+
+    @property
+    def directory(self):
+        """Directory path where deque is stored."""
+        return self._cache.directory
+
+    @property
+    def maxlen(self):
+        """Max length of the deque."""
+        return self._maxlen
+
+    @maxlen.setter
+    def maxlen(self, value):
+        """Set max length of the deque.
+
+        Pops items from left while length greater than max.
+
+        >>> deque = Deque()
+        >>> deque.extendleft('abcde')
+        >>> deque.maxlen = 3
+        >>> list(deque)
+        ['c', 'd', 'e']
+
+        :param value: max length
+
+        """
+        self._maxlen = value
+        with self._cache.transact(retry=True):
+            while len(self._cache) > self._maxlen:
+                self._popleft()
+
+    def _index(self, index, func):
+        len_self = len(self)
+
+        if index >= 0:
+            if index >= len_self:
+                raise IndexError('deque index out of range')
+
+            for key in self._cache.iterkeys():
+                if index == 0:
+                    try:
+                        return func(key)
+                    except KeyError:
+                        continue
+                index -= 1
+        else:
+            if index < -len_self:
+                raise IndexError('deque index out of range')
+
+            index += 1
+
+            for key in self._cache.iterkeys(reverse=True):
+                if index == 0:
+                    try:
+                        return func(key)
+                    except KeyError:
+                        continue
+                index += 1
+
+        raise IndexError('deque index out of range')
+
+    def __getitem__(self, index):
+        """deque.__getitem__(index) <==> deque[index]
+
+        Return corresponding item for `index` in deque.
+
+        See also `Deque.peekleft` and `Deque.peek` for indexing deque at index
+        ``0`` or ``-1``.
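+
+        Lookup scans keys from the nearer end, so indexing cost grows
+        linearly with the distance from that endpoint (see ``_index`` above).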
+ + >>> deque = Deque() + >>> deque.extend('abcde') + >>> deque[1] + 'b' + >>> deque[-2] + 'd' + + :param int index: index of item + :return: corresponding item + :raises IndexError: if index out of range + + """ + return self._index(index, self._cache.__getitem__) + + def __setitem__(self, index, value): + """deque.__setitem__(index, value) <==> deque[index] = value + + Store `value` in deque at `index`. + + >>> deque = Deque() + >>> deque.extend([None] * 3) + >>> deque[0] = 'a' + >>> deque[1] = 'b' + >>> deque[-1] = 'c' + >>> ''.join(deque) + 'abc' + + :param int index: index of value + :param value: value to store + :raises IndexError: if index out of range + + """ + + def _set_value(key): + return self._cache.__setitem__(key, value) + + self._index(index, _set_value) + + def __delitem__(self, index): + """deque.__delitem__(index) <==> del deque[index] + + Delete item in deque at `index`. + + >>> deque = Deque() + >>> deque.extend([None] * 3) + >>> del deque[0] + >>> del deque[1] + >>> del deque[-1] + >>> len(deque) + 0 + + :param int index: index of item + :raises IndexError: if index out of range + + """ + self._index(index, self._cache.__delitem__) + + def __repr__(self): + """deque.__repr__() <==> repr(deque) + + Return string with printable representation of deque. + + """ + name = type(self).__name__ + return '{0}(directory={1!r})'.format(name, self.directory) + + __eq__ = _make_compare(op.eq, 'equal to') + __ne__ = _make_compare(op.ne, 'not equal to') + __lt__ = _make_compare(op.lt, 'less than') + __gt__ = _make_compare(op.gt, 'greater than') + __le__ = _make_compare(op.le, 'less than or equal to') + __ge__ = _make_compare(op.ge, 'greater than or equal to') + + def __iadd__(self, iterable): + """deque.__iadd__(iterable) <==> deque += iterable + + Extend back side of deque with items from iterable. + + :param iterable: iterable of items to append to deque + :return: deque with added items + + """ + self._extend(iterable) + return self + + def __iter__(self): + """deque.__iter__() <==> iter(deque) + + Return iterator of deque from front to back. + + """ + _cache = self._cache + + for key in _cache.iterkeys(): + try: + yield _cache[key] + except KeyError: + pass + + def __len__(self): + """deque.__len__() <==> len(deque) + + Return length of deque. + + """ + return len(self._cache) + + def __reversed__(self): + """deque.__reversed__() <==> reversed(deque) + + Return iterator of deque from back to front. + + >>> deque = Deque() + >>> deque.extend('abcd') + >>> iterator = reversed(deque) + >>> next(iterator) + 'd' + >>> list(iterator) + ['c', 'b', 'a'] + + """ + _cache = self._cache + + for key in _cache.iterkeys(reverse=True): + try: + yield _cache[key] + except KeyError: + pass + + def __getstate__(self): + return self.directory, self.maxlen + + def __setstate__(self, state): + directory, maxlen = state + self.__init__(directory=directory, maxlen=maxlen) + + def append(self, value): + """Add `value` to back of deque. + + >>> deque = Deque() + >>> deque.append('a') + >>> deque.append('b') + >>> deque.append('c') + >>> list(deque) + ['a', 'b', 'c'] + + :param value: value to add to back of deque + + """ + with self._cache.transact(retry=True): + self._cache.push(value, retry=True) + if len(self._cache) > self._maxlen: + self._popleft() + + _append = append + + def appendleft(self, value): + """Add `value` to front of deque. 
+ + >>> deque = Deque() + >>> deque.appendleft('a') + >>> deque.appendleft('b') + >>> deque.appendleft('c') + >>> list(deque) + ['c', 'b', 'a'] + + :param value: value to add to front of deque + + """ + with self._cache.transact(retry=True): + self._cache.push(value, side='front', retry=True) + if len(self._cache) > self._maxlen: + self._pop() + + _appendleft = appendleft + + def clear(self): + """Remove all elements from deque. + + >>> deque = Deque('abc') + >>> len(deque) + 3 + >>> deque.clear() + >>> list(deque) + [] + + """ + self._cache.clear(retry=True) + + _clear = clear + + def copy(self): + """Copy deque with same directory and max length.""" + TypeSelf = type(self) + return TypeSelf(directory=self.directory, maxlen=self.maxlen) + + def count(self, value): + """Return number of occurrences of `value` in deque. + + >>> deque = Deque() + >>> deque += [num for num in range(1, 5) for _ in range(num)] + >>> deque.count(0) + 0 + >>> deque.count(1) + 1 + >>> deque.count(4) + 4 + + :param value: value to count in deque + :return: count of items equal to value in deque + + """ + return sum(1 for item in self if value == item) + + def extend(self, iterable): + """Extend back side of deque with values from `iterable`. + + :param iterable: iterable of values + + """ + for value in iterable: + self._append(value) + + _extend = extend + + def extendleft(self, iterable): + """Extend front side of deque with value from `iterable`. + + >>> deque = Deque() + >>> deque.extendleft('abc') + >>> list(deque) + ['c', 'b', 'a'] + + :param iterable: iterable of values + + """ + for value in iterable: + self._appendleft(value) + + def peek(self): + """Peek at value at back of deque. + + Faster than indexing deque at -1. + + If deque is empty then raise IndexError. + + >>> deque = Deque() + >>> deque.peek() + Traceback (most recent call last): + ... + IndexError: peek from an empty deque + >>> deque += 'abc' + >>> deque.peek() + 'c' + + :return: value at back of deque + :raises IndexError: if deque is empty + + """ + default = None, ENOVAL + _, value = self._cache.peek(default=default, side='back', retry=True) + if value is ENOVAL: + raise IndexError('peek from an empty deque') + return value + + def peekleft(self): + """Peek at value at front of deque. + + Faster than indexing deque at 0. + + If deque is empty then raise IndexError. + + >>> deque = Deque() + >>> deque.peekleft() + Traceback (most recent call last): + ... + IndexError: peek from an empty deque + >>> deque += 'abc' + >>> deque.peekleft() + 'a' + + :return: value at front of deque + :raises IndexError: if deque is empty + + """ + default = None, ENOVAL + _, value = self._cache.peek(default=default, side='front', retry=True) + if value is ENOVAL: + raise IndexError('peek from an empty deque') + return value + + def pop(self): + """Remove and return value at back of deque. + + If deque is empty then raise IndexError. + + >>> deque = Deque() + >>> deque += 'ab' + >>> deque.pop() + 'b' + >>> deque.pop() + 'a' + >>> deque.pop() + Traceback (most recent call last): + ... + IndexError: pop from an empty deque + + :return: value at back of deque + :raises IndexError: if deque is empty + + """ + default = None, ENOVAL + _, value = self._cache.pull(default=default, side='back', retry=True) + if value is ENOVAL: + raise IndexError('pop from an empty deque') + return value + + _pop = pop + + def popleft(self): + """Remove and return value at front of deque. 
+ + >>> deque = Deque() + >>> deque += 'ab' + >>> deque.popleft() + 'a' + >>> deque.popleft() + 'b' + >>> deque.popleft() + Traceback (most recent call last): + ... + IndexError: pop from an empty deque + + :return: value at front of deque + :raises IndexError: if deque is empty + + """ + default = None, ENOVAL + _, value = self._cache.pull(default=default, retry=True) + if value is ENOVAL: + raise IndexError('pop from an empty deque') + return value + + _popleft = popleft + + def remove(self, value): + """Remove first occurrence of `value` in deque. + + >>> deque = Deque() + >>> deque += 'aab' + >>> deque.remove('a') + >>> list(deque) + ['a', 'b'] + >>> deque.remove('b') + >>> list(deque) + ['a'] + >>> deque.remove('c') + Traceback (most recent call last): + ... + ValueError: deque.remove(value): value not in deque + + :param value: value to remove + :raises ValueError: if value not in deque + + """ + _cache = self._cache + + for key in _cache.iterkeys(): + try: + item = _cache[key] + except KeyError: + continue + else: + if value == item: + try: + del _cache[key] + except KeyError: + continue + return + + raise ValueError('deque.remove(value): value not in deque') + + def reverse(self): + """Reverse deque in place. + + >>> deque = Deque() + >>> deque += 'abc' + >>> deque.reverse() + >>> list(deque) + ['c', 'b', 'a'] + + """ + # pylint: disable=protected-access + # GrantJ 2019-03-22 Consider using an algorithm that swaps the values + # at two keys. Like self._cache.swap(key1, key2, retry=True) The swap + # method would exchange the values at two given keys. Then, using a + # forward iterator and a reverse iterator, the reverse method could + # avoid making copies of the values. + temp = Deque(iterable=reversed(self)) + self._clear() + self._extend(temp) + directory = temp.directory + temp._cache.close() + del temp + rmtree(directory) + + def rotate(self, steps=1): + """Rotate deque right by `steps`. + + If steps is negative then rotate left. + + >>> deque = Deque() + >>> deque += range(5) + >>> deque.rotate(2) + >>> list(deque) + [3, 4, 0, 1, 2] + >>> deque.rotate(-1) + >>> list(deque) + [4, 0, 1, 2, 3] + + :param int steps: number of steps to rotate (default 1) + + """ + if not isinstance(steps, int): + type_name = type(steps).__name__ + raise TypeError('integer argument expected, got %s' % type_name) + + len_self = len(self) + + if not len_self: + return + + if steps >= 0: + steps %= len_self + + for _ in range(steps): + try: + value = self._pop() + except IndexError: + return + else: + self._appendleft(value) + else: + steps *= -1 + steps %= len_self + + for _ in range(steps): + try: + value = self._popleft() + except IndexError: + return + else: + self._append(value) + + __hash__ = None # type: ignore + + @contextmanager + def transact(self): + """Context manager to perform a transaction by locking the deque. + + While the deque is locked, no other write operation is permitted. + Transactions should therefore be as short as possible. Read and write + operations performed in a transaction are atomic. Read operations may + occur concurrent to a transaction. + + Transactions may be nested and may not be shared between threads. + + >>> from diskcache import Deque + >>> deque = Deque() + >>> deque += range(5) + >>> with deque.transact(): # Atomically rotate elements. + ... value = deque.pop() + ... 
deque.appendleft(value) + >>> list(deque) + [4, 0, 1, 2, 3] + + :return: context manager for use in `with` statement + + """ + with self._cache.transact(retry=True): + yield + + +class Index(MutableMapping): + """Persistent mutable mapping with insertion order iteration. + + Items are serialized to disk. Index may be initialized from directory path + where items are stored. + + Hashing protocol is not used. Keys are looked up by their serialized + format. See ``diskcache.Disk`` for details. + + >>> index = Index() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index['a'] + 1 + >>> list(index) + ['a', 'b', 'c'] + >>> len(index) + 3 + >>> del index['b'] + >>> index.popitem() + ('c', 3) + + """ + + def __init__(self, *args, **kwargs): + """Initialize index in directory and update items. + + Optional first argument may be string specifying directory where items + are stored. When None or not given, temporary directory is created. + + >>> index = Index({'a': 1, 'b': 2, 'c': 3}) + >>> len(index) + 3 + >>> directory = index.directory + >>> inventory = Index(directory, d=4) + >>> inventory['b'] + 2 + >>> len(inventory) + 4 + + """ + if args and isinstance(args[0], (bytes, str)): + directory = args[0] + args = args[1:] + else: + if args and args[0] is None: + args = args[1:] + directory = None + self._cache = Cache(directory, eviction_policy='none') + self._update(*args, **kwargs) + + _update = MutableMapping.update + + @classmethod + def fromcache(cls, cache, *args, **kwargs): + """Initialize index using `cache` and update items. + + >>> cache = Cache() + >>> index = Index.fromcache(cache, {'a': 1, 'b': 2, 'c': 3}) + >>> index.cache is cache + True + >>> len(index) + 3 + >>> 'b' in index + True + >>> index['c'] + 3 + + :param Cache cache: cache to use + :param args: mapping or sequence of items + :param kwargs: mapping of items + :return: initialized Index + + """ + # pylint: disable=no-member,protected-access + self = cls.__new__(cls) + self._cache = cache + self._update(*args, **kwargs) + return self + + @property + def cache(self): + """Cache used by index.""" + return self._cache + + @property + def directory(self): + """Directory path where items are stored.""" + return self._cache.directory + + def __getitem__(self, key): + """index.__getitem__(key) <==> index[key] + + Return corresponding value for `key` in index. + + >>> index = Index() + >>> index.update({'a': 1, 'b': 2}) + >>> index['a'] + 1 + >>> index['b'] + 2 + >>> index['c'] + Traceback (most recent call last): + ... + KeyError: 'c' + + :param key: key for item + :return: value for item in index with given key + :raises KeyError: if key is not found + + """ + return self._cache[key] + + def __setitem__(self, key, value): + """index.__setitem__(key, value) <==> index[key] = value + + Set `key` and `value` item in index. + + >>> index = Index() + >>> index['a'] = 1 + >>> index[0] = None + >>> len(index) + 2 + + :param key: key for item + :param value: value for item + + """ + self._cache[key] = value + + def __delitem__(self, key): + """index.__delitem__(key) <==> del index[key] + + Delete corresponding item for `key` from index. + + >>> index = Index() + >>> index.update({'a': 1, 'b': 2}) + >>> del index['a'] + >>> del index['b'] + >>> len(index) + 0 + >>> del index['c'] + Traceback (most recent call last): + ... 
+ KeyError: 'c' + + :param key: key for item + :raises KeyError: if key is not found + + """ + del self._cache[key] + + def setdefault(self, key, default=None): + """Set and get value for `key` in index using `default`. + + If `key` is not in index then set corresponding value to `default`. If + `key` is in index then ignore `default` and return existing value. + + >>> index = Index() + >>> index.setdefault('a', 0) + 0 + >>> index.setdefault('a', 1) + 0 + + :param key: key for item + :param default: value if key is missing (default None) + :return: value for item in index with given key + + """ + _cache = self._cache + while True: + try: + return _cache[key] + except KeyError: + _cache.add(key, default, retry=True) + + def peekitem(self, last=True): + """Peek at key and value item pair in index based on iteration order. + + >>> index = Index() + >>> for num, letter in enumerate('xyz'): + ... index[letter] = num + >>> index.peekitem() + ('z', 2) + >>> index.peekitem(last=False) + ('x', 0) + + :param bool last: last item in iteration order (default True) + :return: key and value item pair + :raises KeyError: if cache is empty + + """ + return self._cache.peekitem(last, retry=True) + + def pop(self, key, default=ENOVAL): + """Remove corresponding item for `key` from index and return value. + + If `key` is missing then return `default`. If `default` is `ENOVAL` + then raise KeyError. + + >>> index = Index({'a': 1, 'b': 2}) + >>> index.pop('a') + 1 + >>> index.pop('b') + 2 + >>> index.pop('c', default=3) + 3 + >>> index.pop('d') + Traceback (most recent call last): + ... + KeyError: 'd' + + :param key: key for item + :param default: return value if key is missing (default ENOVAL) + :return: value for item if key is found else default + :raises KeyError: if key is not found and default is ENOVAL + + """ + _cache = self._cache + value = _cache.pop(key, default=default, retry=True) + if value is ENOVAL: + raise KeyError(key) + return value + + def popitem(self, last=True): + """Remove and return item pair. + + Item pairs are returned in last-in-first-out (LIFO) order if last is + True else first-in-first-out (FIFO) order. LIFO order imitates a stack + and FIFO order imitates a queue. + + >>> index = Index() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> index.popitem() + ('c', 3) + >>> index.popitem(last=False) + ('a', 1) + >>> index.popitem() + ('b', 2) + >>> index.popitem() + Traceback (most recent call last): + ... + KeyError: 'dictionary is empty' + + :param bool last: pop last item pair (default True) + :return: key and value item pair + :raises KeyError: if index is empty + + """ + # pylint: disable=arguments-differ,unbalanced-tuple-unpacking + _cache = self._cache + + with _cache.transact(retry=True): + key, value = _cache.peekitem(last=last) + del _cache[key] + + return key, value + + def push(self, value, prefix=None, side='back'): + """Push `value` onto `side` of queue in index identified by `prefix`. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + Defaults to pushing value on back of queue. Set side to 'front' to push + value on front of queue. Side must be one of 'back' or 'front'. + + See also `Index.pull`. 
+ + >>> index = Index() + >>> print(index.push('apples')) + 500000000000000 + >>> print(index.push('beans')) + 500000000000001 + >>> print(index.push('cherries', side='front')) + 499999999999999 + >>> index[500000000000001] + 'beans' + >>> index.push('dates', prefix='fruit') + 'fruit-500000000000000' + + :param value: value for item + :param str prefix: key prefix (default None, key is integer) + :param str side: either 'back' or 'front' (default 'back') + :return: key for item in cache + + """ + return self._cache.push(value, prefix, side, retry=True) + + def pull(self, prefix=None, default=(None, None), side='front'): + """Pull key and value item pair from `side` of queue in index. + + When prefix is None, integer keys are used. Otherwise, string keys are + used in the format "prefix-integer". Integer starts at 500 trillion. + + If queue is empty, return default. + + Defaults to pulling key and value item pairs from front of queue. Set + side to 'back' to pull from back of queue. Side must be one of 'front' + or 'back'. + + See also `Index.push`. + + >>> index = Index() + >>> for letter in 'abc': + ... print(index.push(letter)) + 500000000000000 + 500000000000001 + 500000000000002 + >>> key, value = index.pull() + >>> print(key) + 500000000000000 + >>> value + 'a' + >>> _, value = index.pull(side='back') + >>> value + 'c' + >>> index.pull(prefix='fruit') + (None, None) + + :param str prefix: key prefix (default None, key is integer) + :param default: value to return if key is missing + (default (None, None)) + :param str side: either 'front' or 'back' (default 'front') + :return: key and value item pair or default if queue is empty + + """ + return self._cache.pull(prefix, default, side, retry=True) + + def clear(self): + """Remove all items from index. + + >>> index = Index({'a': 0, 'b': 1, 'c': 2}) + >>> len(index) + 3 + >>> index.clear() + >>> dict(index) + {} + + """ + self._cache.clear(retry=True) + + def __iter__(self): + """index.__iter__() <==> iter(index) + + Return iterator of index keys in insertion order. + + """ + return iter(self._cache) + + def __reversed__(self): + """index.__reversed__() <==> reversed(index) + + Return iterator of index keys in reversed insertion order. + + >>> index = Index() + >>> index.update([('a', 1), ('b', 2), ('c', 3)]) + >>> iterator = reversed(index) + >>> next(iterator) + 'c' + >>> list(iterator) + ['b', 'a'] + + """ + return reversed(self._cache) + + def __len__(self): + """index.__len__() <==> len(index) + + Return length of index. + + """ + return len(self._cache) + + def keys(self): + """Set-like object providing a view of index keys. + + >>> index = Index() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> keys_view = index.keys() + >>> 'b' in keys_view + True + + :return: keys view + + """ + return KeysView(self) + + def values(self): + """Set-like object providing a view of index values. + + >>> index = Index() + >>> index.update({'a': 1, 'b': 2, 'c': 3}) + >>> values_view = index.values() + >>> 2 in values_view + True + + :return: values view + + """ + return ValuesView(self) + + def items(self): + """Set-like object providing a view of index items. 
+
+        >>> index = Index()
+        >>> index.update({'a': 1, 'b': 2, 'c': 3})
+        >>> items_view = index.items()
+        >>> ('b', 2) in items_view
+        True
+
+        :return: items view
+
+        """
+        return ItemsView(self)
+
+    __hash__ = None  # type: ignore
+
+    def __getstate__(self):
+        return self.directory
+
+    def __setstate__(self, state):
+        self.__init__(state)
+
+    def __eq__(self, other):
+        """index.__eq__(other) <==> index == other
+
+        Compare equality for index and `other`.
+
+        Comparison to another index or ordered dictionary is
+        order-sensitive. Comparison to all other mappings is order-insensitive.
+
+        >>> index = Index()
+        >>> pairs = [('a', 1), ('b', 2), ('c', 3)]
+        >>> index.update(pairs)
+        >>> from collections import OrderedDict
+        >>> od = OrderedDict(pairs)
+        >>> index == od
+        True
+        >>> index == {'c': 3, 'b': 2, 'a': 1}
+        True
+
+        :param other: other mapping in equality comparison
+        :return: True if index equals other
+
+        """
+        if len(self) != len(other):
+            return False
+
+        if isinstance(other, (Index, OrderedDict)):
+            alpha = ((key, self[key]) for key in self)
+            beta = ((key, other[key]) for key in other)
+            pairs = zip(alpha, beta)
+            return not any(a != x or b != y for (a, b), (x, y) in pairs)
+        else:
+            return all(self[key] == other.get(key, ENOVAL) for key in self)
+
+    def __ne__(self, other):
+        """index.__ne__(other) <==> index != other
+
+        Compare inequality for index and `other`.
+
+        Comparison to another index or ordered dictionary is
+        order-sensitive. Comparison to all other mappings is order-insensitive.
+
+        >>> index = Index()
+        >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+        >>> from collections import OrderedDict
+        >>> od = OrderedDict([('c', 3), ('b', 2), ('a', 1)])
+        >>> index != od
+        True
+        >>> index != {'a': 1, 'b': 2}
+        True
+
+        :param other: other mapping in inequality comparison
+        :return: True if index does not equal other
+
+        """
+        return not self == other
+
+    def memoize(self, name=None, typed=False, ignore=()):
+        """Memoizing cache decorator.
+
+        Decorator to wrap callable with memoizing function using cache.
+        Repeated calls with the same arguments will lookup result in cache and
+        avoid function evaluation.
+
+        If name is set to None (default), the callable name will be determined
+        automatically.
+
+        If typed is set to True, function arguments of different types will be
+        cached separately. For example, f(3) and f(3.0) will be treated as
+        distinct calls with distinct results.
+
+        The original underlying function is accessible through the __wrapped__
+        attribute. This is useful for introspection, for bypassing the cache,
+        or for rewrapping the function with a different cache.
+
+        >>> from diskcache import Index
+        >>> mapping = Index()
+        >>> @mapping.memoize()
+        ... def fibonacci(number):
+        ...     if number == 0:
+        ...         return 0
+        ...     elif number == 1:
+        ...         return 1
+        ...     else:
+        ...         return fibonacci(number - 1) + fibonacci(number - 2)
+        >>> print(fibonacci(100))
+        354224848179261915075
+
+        An additional `__cache_key__` attribute can be used to generate the
+        cache key used for the given arguments.
+
+        >>> key = fibonacci.__cache_key__(100)
+        >>> print(mapping[key])
+        354224848179261915075
+
+        Remember to call memoize when decorating a callable. If you forget,
+        then a TypeError will occur. Note the lack of parentheses after
+        memoize below:
+
+        >>> @mapping.memoize
+        ... def test():
+        ...     pass
+        Traceback (most recent call last):
+            ...
+ TypeError: name cannot be callable + + :param str name: name given for callable (default None, automatic) + :param bool typed: cache different types separately (default False) + :param set ignore: positional or keyword args to ignore (default ()) + :return: callable decorator + + """ + return self._cache.memoize(name, typed, ignore=ignore) + + @contextmanager + def transact(self): + """Context manager to perform a transaction by locking the index. + + While the index is locked, no other write operation is permitted. + Transactions should therefore be as short as possible. Read and write + operations performed in a transaction are atomic. Read operations may + occur concurrent to a transaction. + + Transactions may be nested and may not be shared between threads. + + >>> from diskcache import Index + >>> mapping = Index() + >>> with mapping.transact(): # Atomically increment two keys. + ... mapping['total'] = mapping.get('total', 0) + 123.4 + ... mapping['count'] = mapping.get('count', 0) + 1 + >>> with mapping.transact(): # Atomically calculate average. + ... average = mapping['total'] / mapping['count'] + >>> average + 123.4 + + :return: context manager for use in `with` statement + + """ + with self._cache.transact(retry=True): + yield + + def __repr__(self): + """index.__repr__() <==> repr(index) + + Return string with printable representation of index. + + """ + name = type(self).__name__ + return '{0}({1!r})'.format(name, self.directory) diff --git a/.venv/lib/python3.11/site-packages/diskcache/recipes.py b/.venv/lib/python3.11/site-packages/diskcache/recipes.py new file mode 100644 index 0000000000000000000000000000000000000000..babb68f67bd38f40b7fa31fbeaf4ab13ac762c65 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/diskcache/recipes.py @@ -0,0 +1,488 @@ +"""Disk Cache Recipes +""" + +import functools +import math +import os +import random +import threading +import time + +from .core import ENOVAL, args_to_key, full_name + + +class Averager: + """Recipe for calculating a running average. + + Sometimes known as "online statistics," the running average maintains the + total and count. The average can then be calculated at any time. + + Assumes the key will not be evicted. Set the eviction policy to 'none' on + the cache to guarantee the key is not evicted. + + >>> import diskcache + >>> cache = diskcache.FanoutCache() + >>> ave = Averager(cache, 'latency') + >>> ave.add(0.080) + >>> ave.add(0.120) + >>> ave.get() + 0.1 + >>> ave.add(0.160) + >>> ave.pop() + 0.12 + >>> print(ave.get()) + None + + """ + + def __init__(self, cache, key, expire=None, tag=None): + self._cache = cache + self._key = key + self._expire = expire + self._tag = tag + + def add(self, value): + """Add `value` to average.""" + with self._cache.transact(retry=True): + total, count = self._cache.get(self._key, default=(0.0, 0)) + total += value + count += 1 + self._cache.set( + self._key, + (total, count), + expire=self._expire, + tag=self._tag, + ) + + def get(self): + """Get current average or return `None` if count equals zero.""" + total, count = self._cache.get(self._key, default=(0.0, 0), retry=True) + return None if count == 0 else total / count + + def pop(self): + """Return current average and delete key.""" + total, count = self._cache.pop(self._key, default=(0.0, 0), retry=True) + return None if count == 0 else total / count + + +class Lock: + """Recipe for cross-process and cross-thread lock. + + Assumes the key will not be evicted. 
Set the eviction policy to 'none' on + the cache to guarantee the key is not evicted. + + >>> import diskcache + >>> cache = diskcache.Cache() + >>> lock = Lock(cache, 'report-123') + >>> lock.acquire() + >>> lock.release() + >>> with lock: + ... pass + + """ + + def __init__(self, cache, key, expire=None, tag=None): + self._cache = cache + self._key = key + self._expire = expire + self._tag = tag + + def acquire(self): + """Acquire lock using spin-lock algorithm.""" + while True: + added = self._cache.add( + self._key, + None, + expire=self._expire, + tag=self._tag, + retry=True, + ) + if added: + break + time.sleep(0.001) + + def release(self): + """Release lock by deleting key.""" + self._cache.delete(self._key, retry=True) + + def locked(self): + """Return true if the lock is acquired.""" + return self._key in self._cache + + def __enter__(self): + self.acquire() + + def __exit__(self, *exc_info): + self.release() + + +class RLock: + """Recipe for cross-process and cross-thread re-entrant lock. + + Assumes the key will not be evicted. Set the eviction policy to 'none' on + the cache to guarantee the key is not evicted. + + >>> import diskcache + >>> cache = diskcache.Cache() + >>> rlock = RLock(cache, 'user-123') + >>> rlock.acquire() + >>> rlock.acquire() + >>> rlock.release() + >>> with rlock: + ... pass + >>> rlock.release() + >>> rlock.release() + Traceback (most recent call last): + ... + AssertionError: cannot release un-acquired lock + + """ + + def __init__(self, cache, key, expire=None, tag=None): + self._cache = cache + self._key = key + self._expire = expire + self._tag = tag + + def acquire(self): + """Acquire lock by incrementing count using spin-lock algorithm.""" + pid = os.getpid() + tid = threading.get_ident() + pid_tid = '{}-{}'.format(pid, tid) + + while True: + with self._cache.transact(retry=True): + value, count = self._cache.get(self._key, default=(None, 0)) + if pid_tid == value or count == 0: + self._cache.set( + self._key, + (pid_tid, count + 1), + expire=self._expire, + tag=self._tag, + ) + return + time.sleep(0.001) + + def release(self): + """Release lock by decrementing count.""" + pid = os.getpid() + tid = threading.get_ident() + pid_tid = '{}-{}'.format(pid, tid) + + with self._cache.transact(retry=True): + value, count = self._cache.get(self._key, default=(None, 0)) + is_owned = pid_tid == value and count > 0 + assert is_owned, 'cannot release un-acquired lock' + self._cache.set( + self._key, + (value, count - 1), + expire=self._expire, + tag=self._tag, + ) + + def __enter__(self): + self.acquire() + + def __exit__(self, *exc_info): + self.release() + + +class BoundedSemaphore: + """Recipe for cross-process and cross-thread bounded semaphore. + + Assumes the key will not be evicted. Set the eviction policy to 'none' on + the cache to guarantee the key is not evicted. + + >>> import diskcache + >>> cache = diskcache.Cache() + >>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2) + >>> semaphore.acquire() + >>> semaphore.acquire() + >>> semaphore.release() + >>> with semaphore: + ... pass + >>> semaphore.release() + >>> semaphore.release() + Traceback (most recent call last): + ... 
+ AssertionError: cannot release un-acquired semaphore + + """ + + def __init__(self, cache, key, value=1, expire=None, tag=None): + self._cache = cache + self._key = key + self._value = value + self._expire = expire + self._tag = tag + + def acquire(self): + """Acquire semaphore by decrementing value using spin-lock algorithm.""" + while True: + with self._cache.transact(retry=True): + value = self._cache.get(self._key, default=self._value) + if value > 0: + self._cache.set( + self._key, + value - 1, + expire=self._expire, + tag=self._tag, + ) + return + time.sleep(0.001) + + def release(self): + """Release semaphore by incrementing value.""" + with self._cache.transact(retry=True): + value = self._cache.get(self._key, default=self._value) + assert self._value > value, 'cannot release un-acquired semaphore' + value += 1 + self._cache.set( + self._key, + value, + expire=self._expire, + tag=self._tag, + ) + + def __enter__(self): + self.acquire() + + def __exit__(self, *exc_info): + self.release() + + +def throttle( + cache, + count, + seconds, + name=None, + expire=None, + tag=None, + time_func=time.time, + sleep_func=time.sleep, +): + """Decorator to throttle calls to function. + + Assumes keys will not be evicted. Set the eviction policy to 'none' on the + cache to guarantee the keys are not evicted. + + >>> import diskcache, time + >>> cache = diskcache.Cache() + >>> count = 0 + >>> @throttle(cache, 2, 1) # 2 calls per 1 second + ... def increment(): + ... global count + ... count += 1 + >>> start = time.time() + >>> while (time.time() - start) <= 2: + ... increment() + >>> count in (6, 7) # 6 or 7 calls depending on CPU load + True + + """ + + def decorator(func): + rate = count / float(seconds) + key = full_name(func) if name is None else name + now = time_func() + cache.set(key, (now, count), expire=expire, tag=tag, retry=True) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + while True: + with cache.transact(retry=True): + last, tally = cache.get(key) + now = time_func() + tally += (now - last) * rate + delay = 0 + + if tally > count: + cache.set(key, (now, count - 1), expire) + elif tally >= 1: + cache.set(key, (now, tally - 1), expire) + else: + delay = (1 - tally) / rate + + if delay: + sleep_func(delay) + else: + break + + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def barrier(cache, lock_factory, name=None, expire=None, tag=None): + """Barrier to calling decorated function. + + Supports different kinds of locks: Lock, RLock, BoundedSemaphore. + + Assumes keys will not be evicted. Set the eviction policy to 'none' on the + cache to guarantee the keys are not evicted. + + >>> import diskcache, time + >>> cache = diskcache.Cache() + >>> @barrier(cache, Lock) + ... def work(num): + ... print('worker started') + ... time.sleep(1) + ... print('worker finished') + >>> import multiprocessing.pool + >>> pool = multiprocessing.pool.ThreadPool(2) + >>> _ = pool.map(work, range(2)) + worker started + worker finished + worker started + worker finished + >>> pool.terminate() + + """ + + def decorator(func): + key = full_name(func) if name is None else name + lock = lock_factory(cache, key, expire=expire, tag=tag) + + @functools.wraps(func) + def wrapper(*args, **kwargs): + with lock: + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def memoize_stampede( + cache, expire, name=None, typed=False, tag=None, beta=1, ignore=() +): + """Memoizing cache decorator with cache stampede protection. 
+
+    Cache stampedes are a type of system overload that can occur when
+    parallel computing systems using memoization come under heavy load.
+    This behaviour is sometimes also called dog-piling, cache miss storm,
+    cache choking, or the thundering herd problem.
+
+    The memoization decorator implements cache stampede protection through
+    early recomputation. Early recomputation of function results will occur
+    probabilistically before expiration in a background thread of
+    execution. Early probabilistic recomputation is based on research by
+    Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal
+    Probabilistic Cache Stampede Prevention, VLDB, pp. 886-897, ISSN
+    2150-8097.
+
+    If name is set to None (default), the callable name will be determined
+    automatically.
+
+    If typed is set to True, function arguments of different types will be
+    cached separately. For example, f(3) and f(3.0) will be treated as
+    distinct calls with distinct results.
+
+    The original underlying function is accessible through the
+    `__wrapped__` attribute. This is useful for introspection, for
+    bypassing the cache, or for rewrapping the function with a different
+    cache.
+
+    >>> from diskcache import Cache
+    >>> cache = Cache()
+    >>> @memoize_stampede(cache, expire=1)
+    ... def fib(number):
+    ...     if number == 0:
+    ...         return 0
+    ...     elif number == 1:
+    ...         return 1
+    ...     else:
+    ...         return fib(number - 1) + fib(number - 2)
+    >>> print(fib(100))
+    354224848179261915075
+
+    An additional `__cache_key__` attribute can be used to generate the
+    cache key used for the given arguments.
+
+    >>> key = fib.__cache_key__(100)
+    >>> del cache[key]
+
+    Remember to call memoize when decorating a callable. If you forget,
+    then a TypeError will occur.
+
+    :param cache: cache to store callable arguments and return values
+    :param float expire: seconds until arguments expire
+    :param str name: name given for callable (default None, automatic)
+    :param bool typed: cache different types separately (default False)
+    :param str tag: text to associate with arguments (default None)
+    :param float beta: scale factor for early recomputation; values above 1
+        favour recomputing earlier (default 1)
+    :param set ignore: positional or keyword args to ignore (default ())
+    :return: callable decorator
+
+    """
+    # Caution: Nearly identical code exists in Cache.memoize
+    def decorator(func):
+        """Decorator created by memoize call for callable."""
+        base = (full_name(func),) if name is None else (name,)
+
+        def timer(*args, **kwargs):
+            """Time execution of `func` and return result and time delta."""
+            start = time.time()
+            result = func(*args, **kwargs)
+            delta = time.time() - start
+            return result, delta
+
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            """Wrapper for callable to cache arguments and return values."""
+            key = wrapper.__cache_key__(*args, **kwargs)
+            pair, expire_time = cache.get(
+                key,
+                default=ENOVAL,
+                expire_time=True,
+                retry=True,
+            )
+
+            if pair is not ENOVAL:
+                result, delta = pair
+                now = time.time()
+                ttl = expire_time - now
+
+                if (-delta * beta * math.log(random.random())) < ttl:
+                    return result  # Cache hit.
+
+                # Check whether a thread has started for early recomputation.
+
+                thread_key = key + (ENOVAL,)
+                thread_added = cache.add(
+                    thread_key,
+                    None,
+                    expire=delta,
+                    retry=True,
+                )
+
+                if thread_added:
+                    # Start thread for early recomputation.
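+                    # `cache.add` is atomic, so `thread_key` doubles as a
+                    # mutex stored in the cache itself: only the first
+                    # caller's `add` succeeds, which keeps at most one
+                    # background recomputation running per key for roughly
+                    # `delta` seconds (the marker's expiry).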
+ def recompute(): + with cache: + pair = timer(*args, **kwargs) + cache.set( + key, + pair, + expire=expire, + tag=tag, + retry=True, + ) + + thread = threading.Thread(target=recompute) + thread.daemon = True + thread.start() + + return result + + pair = timer(*args, **kwargs) + cache.set(key, pair, expire=expire, tag=tag, retry=True) + return pair[0] + + def __cache_key__(*args, **kwargs): + """Make key for cache given function arguments.""" + return args_to_key(base, args, kwargs, typed, ignore) + + wrapper.__cache_key__ = __cache_key__ + return wrapper + + return decorator diff --git a/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/METADATA b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b0d2b196385e98259971519793447c1fd7a9a643 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/METADATA @@ -0,0 +1,203 @@ +Metadata-Version: 2.3 +Name: httpx +Version: 0.28.1 +Summary: The next generation HTTP client. +Project-URL: Changelog, https://github.com/encode/httpx/blob/master/CHANGELOG.md +Project-URL: Documentation, https://www.python-httpx.org +Project-URL: Homepage, https://github.com/encode/httpx +Project-URL: Source, https://github.com/encode/httpx +Author-email: Tom Christie +License: BSD-3-Clause +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Web Environment +Classifier: Framework :: AsyncIO +Classifier: Framework :: Trio +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Internet :: WWW/HTTP +Requires-Python: >=3.8 +Requires-Dist: anyio +Requires-Dist: certifi +Requires-Dist: httpcore==1.* +Requires-Dist: idna +Provides-Extra: brotli +Requires-Dist: brotli; (platform_python_implementation == 'CPython') and extra == 'brotli' +Requires-Dist: brotlicffi; (platform_python_implementation != 'CPython') and extra == 'brotli' +Provides-Extra: cli +Requires-Dist: click==8.*; extra == 'cli' +Requires-Dist: pygments==2.*; extra == 'cli' +Requires-Dist: rich<14,>=10; extra == 'cli' +Provides-Extra: http2 +Requires-Dist: h2<5,>=3; extra == 'http2' +Provides-Extra: socks +Requires-Dist: socksio==1.*; extra == 'socks' +Provides-Extra: zstd +Requires-Dist: zstandard>=0.18.0; extra == 'zstd' +Description-Content-Type: text/markdown + +

+<!-- HTTPX logo -->
+
+HTTPX - A next-generation HTTP client for Python.
+
+<!-- badges: Test Suite | Package version -->

+
+HTTPX is a fully featured HTTP client library for Python 3. It includes **an integrated command line client**, has support for both **HTTP/1.1 and HTTP/2**, and provides both **sync and async APIs**.
+
+---
+
+Install HTTPX using pip:
+
+```shell
+$ pip install httpx
+```
+
+Now, let's get started:
+
+```pycon
+>>> import httpx
+>>> r = httpx.get('https://www.example.org/')
+>>> r
+<Response [200 OK]>
+>>> r.status_code
+200
+>>> r.headers['content-type']
+'text/html; charset=UTF-8'
+>>> r.text
+'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>...'
+```
+
+Or, using the command-line client.
+
+```shell
+$ pip install 'httpx[cli]' # The command line client is an optional dependency.
+```
+
+Which now allows us to use HTTPX directly from the command-line...

+<!-- terminal screenshot: `httpx --help` -->
+
+Sending a request...
+
+<!-- terminal screenshot: `httpx http://httpbin.org/json` -->

+
+## Features
+
+HTTPX builds on the well-established usability of `requests`, and gives you:
+
+* A broadly [requests-compatible API](https://www.python-httpx.org/compatibility/).
+* An integrated command-line client.
+* HTTP/1.1 [and HTTP/2 support](https://www.python-httpx.org/http2/).
+* Standard synchronous interface, but with [async support if you need it](https://www.python-httpx.org/async/) (see the sketch below).
+* Ability to make requests directly to [WSGI applications](https://www.python-httpx.org/advanced/transports/#wsgi-transport) or [ASGI applications](https://www.python-httpx.org/advanced/transports/#asgi-transport).
+* Strict timeouts everywhere.
+* Fully type annotated.
+* 100% test coverage.
+
+Plus all the standard features of `requests`...
+
+* International Domains and URLs
+* Keep-Alive & Connection Pooling
+* Sessions with Cookie Persistence
+* Browser-style SSL Verification
+* Basic/Digest Authentication
+* Elegant Key/Value Cookies
+* Automatic Decompression
+* Automatic Content Decoding
+* Unicode Response Bodies
+* Multipart File Uploads
+* HTTP(S) Proxy Support
+* Connection Timeouts
+* Streaming Downloads
+* .netrc Support
+* Chunked Requests
+
+## Installation
+
+Install with pip:
+
+```shell
+$ pip install httpx
+```
+
+Or, to include the optional HTTP/2 support, use:
+
+```shell
+$ pip install httpx[http2]
+```
+
+HTTPX requires Python 3.8+.
+
+## Documentation
+
+Project documentation is available at [https://www.python-httpx.org/](https://www.python-httpx.org/).
+
+For a run-through of all the basics, head over to the [QuickStart](https://www.python-httpx.org/quickstart/).
+
+For more advanced topics, see the [Advanced Usage](https://www.python-httpx.org/advanced/) section, the [async support](https://www.python-httpx.org/async/) section, or the [HTTP/2](https://www.python-httpx.org/http2/) section.
+
+The [Developer Interface](https://www.python-httpx.org/api/) provides a comprehensive API reference.
+
+To find out about tools that integrate with HTTPX, see [Third Party Packages](https://www.python-httpx.org/third_party_packages/).
+
+## Contribute
+
+If you want to contribute to HTTPX, check out the [Contributing Guide](https://www.python-httpx.org/contributing/) to learn how to start.
+
+## Dependencies
+
+The HTTPX project relies on these excellent libraries:
+
+* `httpcore` - The underlying transport implementation for `httpx`.
+  * `h11` - HTTP/1.1 support.
+* `certifi` - SSL certificates.
+* `idna` - Internationalized domain name support.
+* `sniffio` - Async library autodetection.
+
+As well as these optional installs:
+
+* `h2` - HTTP/2 support. *(Optional, with `httpx[http2]`)*
+* `socksio` - SOCKS proxy support. *(Optional, with `httpx[socks]`)*
+* `rich` - Rich terminal support. *(Optional, with `httpx[cli]`)*
+* `click` - Command line client support. *(Optional, with `httpx[cli]`)*
+* `brotli` or `brotlicffi` - Decoding for "brotli" compressed responses. *(Optional, with `httpx[brotli]`)*
+* `zstandard` - Decoding for "zstd" compressed responses. *(Optional, with `httpx[zstd]`)*
+
+A huge amount of credit is due to `requests` for the API layout that
+much of this work follows, as well as to `urllib3` for plenty of design
+inspiration around the lower-level networking details.
+
+---
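+
+As a minimal sketch of the async support mentioned in the feature list above
+(using the public `httpx.AsyncClient` API; the URL is just an example):
+
+```python
+import asyncio
+
+import httpx
+
+
+async def main() -> None:
+    # AsyncClient pools connections; using it as an async context manager
+    # ensures the pool is closed cleanly on exit.
+    async with httpx.AsyncClient() as client:
+        r = await client.get("https://www.example.org/")
+        print(r.status_code)
+
+
+asyncio.run(main())
+```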

HTTPX is BSD licensed code.
Designed & crafted with care.

— 🦋 —

+ +## Release Information + +### Fixed + +* Reintroduced supposedly-private `URLTypes` shortcut. (#2673) + + +--- + +[Full changelog](https://github.com/encode/httpx/blob/master/CHANGELOG.md) diff --git a/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/RECORD b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..726690ade100cb7e73f5311219dd47caa10907ab --- /dev/null +++ b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/RECORD @@ -0,0 +1,54 @@ +../../../bin/httpx,sha256=-O1w-cp46ZdDYeVBnP1ONQmtxcUS42Cdb-Qadrs4fbs,222 +httpx-0.28.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +httpx-0.28.1.dist-info/METADATA,sha256=_rubD48-gNV8gZnDBPNcQzboWB0dGNeYPJJ2a4J5OyU,7052 +httpx-0.28.1.dist-info/RECORD,, +httpx-0.28.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87 +httpx-0.28.1.dist-info/entry_points.txt,sha256=2lVkdQmxLA1pNMgSN2eV89o90HCZezhmNwsy6ryKDSA,37 +httpx-0.28.1.dist-info/licenses/LICENSE.md,sha256=TsWdVE8StfU5o6cW_TIaxYzNgDC0ZSIfLIgCAM3yjY0,1508 +httpx/__init__.py,sha256=CsaZe6yZj0rHg6322AWKWHGTMVr9txgEfD5P3_Rrz60,2171 +httpx/__pycache__/__init__.cpython-311.pyc,, +httpx/__pycache__/__version__.cpython-311.pyc,, +httpx/__pycache__/_api.cpython-311.pyc,, +httpx/__pycache__/_auth.cpython-311.pyc,, +httpx/__pycache__/_client.cpython-311.pyc,, +httpx/__pycache__/_config.cpython-311.pyc,, +httpx/__pycache__/_content.cpython-311.pyc,, +httpx/__pycache__/_decoders.cpython-311.pyc,, +httpx/__pycache__/_exceptions.cpython-311.pyc,, +httpx/__pycache__/_main.cpython-311.pyc,, +httpx/__pycache__/_models.cpython-311.pyc,, +httpx/__pycache__/_multipart.cpython-311.pyc,, +httpx/__pycache__/_status_codes.cpython-311.pyc,, +httpx/__pycache__/_types.cpython-311.pyc,, +httpx/__pycache__/_urlparse.cpython-311.pyc,, +httpx/__pycache__/_urls.cpython-311.pyc,, +httpx/__pycache__/_utils.cpython-311.pyc,, +httpx/__version__.py,sha256=LoUyYeOXTieGzuP_64UL0wxdtxjuu_QbOvE7NOg-IqU,108 +httpx/_api.py,sha256=r_Zgs4jIpcPJLqK5dbbSayqo_iVMKFaxZCd-oOHxLEs,11743 +httpx/_auth.py,sha256=Yr3QwaUSK17rGYx-7j-FdicFIzz4Y9FFV-1F4-7RXX4,11891 +httpx/_client.py,sha256=xD-UG67-WMkeltAAOeGGj-cZ2RRTAm19sWRxlFY7_40,65714 +httpx/_config.py,sha256=pPp2U-wicfcKsF-KYRE1LYdt3e6ERGeIoXZ8Gjo3LWc,8547 +httpx/_content.py,sha256=LGGzrJTR3OvN4Mb1GVVNLXkXJH-6oKlwAttO9p5w_yg,8161 +httpx/_decoders.py,sha256=p0dX8I0NEHexs3UGp4SsZutiMhsXrrWl6-GnqVb0iKM,12041 +httpx/_exceptions.py,sha256=bxW7fxzgVMAdNTbwT0Vnq04gJDW1_gI_GFiQPuMyjL0,8527 +httpx/_main.py,sha256=Cg9GMabiTT_swaDfUgIRitSwxLRMSwUDOm7LdSGqlA4,15626 +httpx/_models.py,sha256=4__Guyv1gLxuZChwim8kfQNiIOcJ9acreFOSurvZfms,44700 +httpx/_multipart.py,sha256=KOHEZZl6oohg9mPaKyyu345qq1rJLg35TUG3YAzXB3Y,9843 +httpx/_status_codes.py,sha256=DYn-2ufBgMeXy5s8x3_TB7wjAuAAMewTakPrm5rXEsc,5639 +httpx/_transports/__init__.py,sha256=GbUoBSAOp7z-l-9j5YhMhR3DMIcn6FVLhj072O3Nnno,275 +httpx/_transports/__pycache__/__init__.cpython-311.pyc,, +httpx/_transports/__pycache__/asgi.cpython-311.pyc,, +httpx/_transports/__pycache__/base.cpython-311.pyc,, +httpx/_transports/__pycache__/default.cpython-311.pyc,, +httpx/_transports/__pycache__/mock.cpython-311.pyc,, +httpx/_transports/__pycache__/wsgi.cpython-311.pyc,, +httpx/_transports/asgi.py,sha256=HRfiDYMPt4wQH2gFgHZg4c-i3sblo6bL5GTqcET-xz8,5501 +httpx/_transports/base.py,sha256=kZS_VMbViYfF570pogUCJ1bulz-ybfL51Pqs9yktebU,2523 +httpx/_transports/default.py,sha256=AzeaRUyVwCccTyyNJexDf0n1dFfzzydpdIQgvw7PLnk,13983 
+httpx/_transports/mock.py,sha256=PTo0d567RITXxGrki6kN7_67wwAxfwiMDcuXJiZCjEo,1232 +httpx/_transports/wsgi.py,sha256=NcPX3Xap_EwCFZWO_OaSyQNuInCYx1QMNbO8GAei6jY,4825 +httpx/_types.py,sha256=Jyh41GQq7AOev8IOWKDAg7zCbvHAfufmW5g_PiTtErY,2965 +httpx/_urlparse.py,sha256=ZAmH47ONfkxrrj-PPYhGeiHjb6AjKCS-ANWIN4OL_KY,18546 +httpx/_urls.py,sha256=dX99VR1DSOHpgo9Aq7PzYO4FKdxqKjwyNp8grf8dHN0,21550 +httpx/_utils.py,sha256=_TVeqAKvxJkKHdz7dFeb4s0LZqQXgeFkXSgfiHBK_1o,8285 +httpx/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..21aaa72961a8af71c17d2cb3b76d5f7f567100e4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.26.3 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/entry_points.txt b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..8ae96007f7d725813fd02dc1d06d3834ee1939e4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +httpx = httpx:main diff --git a/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/licenses/LICENSE.md b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/licenses/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..ab79d16a3f4c6c894c028d1f7431811e8711b42b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/httpx-0.28.1.dist-info/licenses/LICENSE.md @@ -0,0 +1,12 @@ +Copyright © 2019, [Encode OSS Ltd](https://www.encode.io/). +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cac50cdfa9f73d7329854bd5528c6c8c3c0eb5d4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023- The Outlines developers + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/METADATA b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..da6d846fc4d7fa70f450086b539ead0b70588dfe --- /dev/null +++ b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/METADATA @@ -0,0 +1,503 @@ +Metadata-Version: 2.1 +Name: outlines +Version: 0.1.11 +Summary: Probabilistic Generative Model Programming +Author: Outlines Developers +License: Apache-2.0 +Project-URL: homepage, https://github.com/dottxt-ai/outlines +Project-URL: documentation, https://dottxt-ai.github.io/outlines/ +Project-URL: repository, https://github.com/dottxt-ai/outlines +Keywords: machine learning,deep learning,language models,structured generation +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: Intended Audience :: Science/Research +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: interegular +Requires-Dist: jinja2 +Requires-Dist: lark +Requires-Dist: nest_asyncio +Requires-Dist: numpy +Requires-Dist: cloudpickle +Requires-Dist: diskcache +Requires-Dist: pydantic>=2.0 +Requires-Dist: referencing +Requires-Dist: jsonschema +Requires-Dist: requests +Requires-Dist: tqdm +Requires-Dist: typing_extensions +Requires-Dist: pycountry +Requires-Dist: airportsdata +Requires-Dist: torch +Requires-Dist: outlines_core==0.1.26 +Provides-Extra: vllm +Requires-Dist: vllm; extra == "vllm" +Requires-Dist: transformers; extra == "vllm" +Requires-Dist: numpy<2; extra == "vllm" +Provides-Extra: transformers +Requires-Dist: transformers; extra == "transformers" +Requires-Dist: accelerate; extra == "transformers" +Requires-Dist: datasets; extra == "transformers" +Requires-Dist: numpy<2; extra == "transformers" +Provides-Extra: mlxlm +Requires-Dist: mlx-lm; extra == "mlxlm" +Requires-Dist: datasets; extra == "mlxlm" +Provides-Extra: openai +Requires-Dist: openai; extra == "openai" +Provides-Extra: llamacpp +Requires-Dist: llama-cpp-python; extra == "llamacpp" +Requires-Dist: 
transformers; extra == "llamacpp" +Requires-Dist: datasets; extra == "llamacpp" +Requires-Dist: numpy<2; extra == "llamacpp" +Provides-Extra: exllamav2 +Requires-Dist: exllamav2; extra == "exllamav2" +Provides-Extra: test +Requires-Dist: pre-commit; extra == "test" +Requires-Dist: pytest; extra == "test" +Requires-Dist: pytest-benchmark; extra == "test" +Requires-Dist: pytest-cov; extra == "test" +Requires-Dist: pytest-mock; extra == "test" +Requires-Dist: coverage[toml]>=5.1; extra == "test" +Requires-Dist: diff-cover; extra == "test" +Requires-Dist: accelerate; extra == "test" +Requires-Dist: beartype<0.16.0; extra == "test" +Requires-Dist: responses; extra == "test" +Requires-Dist: llama-cpp-python; extra == "test" +Requires-Dist: mlx-lm>=0.19.2; (platform_machine == "arm64" and sys_platform == "darwin") and extra == "test" +Requires-Dist: huggingface_hub; extra == "test" +Requires-Dist: openai>=1.0.0; extra == "test" +Requires-Dist: datasets; extra == "test" +Requires-Dist: vllm; sys_platform != "darwin" and extra == "test" +Requires-Dist: transformers; extra == "test" +Requires-Dist: pillow; extra == "test" +Requires-Dist: exllamav2; extra == "test" +Requires-Dist: jax; extra == "test" +Provides-Extra: serve +Requires-Dist: vllm>=0.3.0; extra == "serve" +Requires-Dist: uvicorn; extra == "serve" +Requires-Dist: fastapi; extra == "serve" +Requires-Dist: pydantic>=2.0; extra == "serve" + +
+
+<!-- Outlines logo -->
+
+🗒️ *Make LLMs speak the language of every application.* 🗒️
+
+Made with ❤👷️ by the team at [.txt](https://dottxt.co).
+
+[![Documentation][documentation-badge]][documentation]
+[![Contributors][contributors-badge]][contributors]
+[![Downloads][downloads-badge]][pypistats]
+[![Discord][discord-badge]][discord]
+
+[Youtube channel][youtube-dottxt] | [.txt blog][blog-dottxt] | [Twitter][dottxt-twitter]
+
+
+
+``` bash
+pip install outlines
+```
+
+First time here? Go to our [setup guide](https://dottxt-ai.github.io/outlines/latest/welcome/)
+
+## Features
+
+- [x] 🤖 [Multiple model integrations](https://dottxt-ai.github.io/outlines/latest/installation): OpenAI, transformers, llama.cpp, exllama2, mamba
+- [x] 🖍️ Simple and powerful prompting primitives based on the [Jinja templating engine](https://jinja.palletsprojects.com/)
+- [x] 🚄 [Multiple choices](#multiple-choices), [type constraints](#type-constraint) and dynamic stopping
+- [x] ⚡ Fast [regex-structured generation](#efficient-regex-structured-generation)
+- [x] 🔥 Fast [JSON generation](#efficient-json-generation-following-a-pydantic-model) following a JSON schema or a Pydantic model
+- [x] 📝 [Grammar-structured generation](#using-context-free-grammars-to-guide-generation)
+- [x] 🐍 Interleave completions with loops, conditionals, and custom Python functions
+- [x] 💾 Caching of generations
+- [x] 🗂️ Batch inference
+- [x] 🎲 Sample with the greedy, multinomial and beam search algorithms (and more to come!)
+- [x] 🚀 [Serve with vLLM](https://dottxt-ai.github.io/outlines/latest/reference/serve/vllm), with official Docker image, [`outlinesdev/outlines`](https://hub.docker.com/r/outlinesdev/outlines)!
+
+
+Outlines has new releases and features coming every week. Make sure to ⭐ star and 👀 watch this repository, and follow [@dottxtai][dottxt-twitter] to stay up to date!
+
+## Why should I use structured generation?
+
+* It doesn't add any overhead during inference (cost-free)
+* It allows Open Source models to beat closed source models ([Mistral](https://x.com/dottxtai/status/1797692104023363765), [GPT-4](https://x.com/dottxtai/status/1798443290913853770))
+* [It speeds up inference](http://blog.dottxt.co/coalescence.html)
+* [It improves the performance of base models (GSM8K)](http://blog.dottxt.co/performance-gsm8k.html)
+* [It improves the performance of finetuned models (CoNNL)](https://predibase.com/blog/lorax-outlines-better-json-extraction-with-structured-generation-and-lora)
+* [It improves model efficiency (fewer examples needed)](https://huggingface.co/blog/evaluation-structured-outputs)
+
+## .txt company
+
+We started a company to keep pushing the boundaries of structured generation. Learn more about [.txt](https://twitter.com/dottxtai), and [give our .json API a try](https://h1xbpbfsf0w.typeform.com/to/ZgBCvJHF) if you need a hosted solution ✨
+
+## Structured generation
+
+The first step towards reliability of systems that include large language models
+is to ensure that there is a well-defined interface between their output and
+user-defined code. **Outlines** provides ways to control the generation of
+language models to make their output more predictable.
+
+### Multiple choices
+
+You can reduce the completion to a choice between multiple possibilities:
+
+``` python
+import outlines
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+
+prompt = """You are a sentiment-labelling assistant.
+Is the following review positive or negative?
+
+Review: This restaurant is just awesome!
+"""
+
+generator = outlines.generate.choice(model, ["Positive", "Negative"])
+answer = generator(prompt)
+```
+
+You can also pass these choices through an enum:
+
+````python
+from enum import Enum
+
+import outlines
+
+class Sentiment(str, Enum):
+    positive = "Positive"
+    negative = "Negative"
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+
+prompt = """You are a sentiment-labelling assistant.
+Is the following review positive or negative?
+
+Review: This restaurant is just awesome!
+"""
+
+generator = outlines.generate.choice(model, Sentiment)
+answer = generator(prompt)
+````
+
+### Type constraint
+
+You can instruct the model to only return integers or floats:
+
+
+``` python
+import outlines
+
+model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
+
+prompt = "<s>result of 9 + 9 = 18</s><s>result of 1 + 2 = "
+answer = outlines.generate.format(model, int)(prompt)
+print(answer)
+# 3
+
+prompt = "sqrt(2)="
+generator = outlines.generate.format(model, float)
+answer = generator(prompt, max_tokens=10)
+print(answer)
+# 1.41421356
+```
+
+### Efficient regex-structured generation
+
+Outlines also comes with fast regex-structured generation. In fact, the `choice` and
+`format` functions above all use regex-structured generation under the
+hood:
+
+``` python
+import outlines
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+
+prompt = "What is the IP address of the Google DNS servers? "
+
+generator = outlines.generate.text(model)
+unstructured = generator(prompt, max_tokens=30)
+
+generator = outlines.generate.regex(
+    model,
+    r"((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)",
+)
+structured = generator(prompt, max_tokens=30)
+
+print(unstructured)
+# What is the IP address of the Google DNS servers?
+#
+# Passive DNS servers are at DNS servers that are private.
+# In other words, both IP servers are private. The database
+# does not contain Chelsea Manning
+
+print(structured)
+# What is the IP address of the Google DNS servers?
+# 2.2.6.1
+```
+
+Unlike other libraries, regex-structured generation in Outlines is almost as fast
+as non-structured generation.
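+
+For another minimal sketch with the same `outlines.generate.regex` API (the
+date pattern and prompt here are illustrative assumptions; the model name
+simply mirrors the examples above):
+
+``` python
+import outlines
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+
+# Constrain the completion to an ISO-8601 calendar date such as 1969-07-20.
+generator = outlines.generate.regex(model, r"\d{4}-\d{2}-\d{2}")
+date = generator("When did the first moon landing happen? Date: ")
+```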
+
+### Efficient JSON generation following a Pydantic model
+
+Outlines allows you to guide the generation process so the output is *guaranteed* to follow a [JSON schema](https://json-schema.org/) or [Pydantic model](https://docs.pydantic.dev/latest/):
+
+```python
+from enum import Enum
+from pydantic import BaseModel, constr
+
+import outlines
+import torch
+
+
+class Weapon(str, Enum):
+    sword = "sword"
+    axe = "axe"
+    mace = "mace"
+    spear = "spear"
+    bow = "bow"
+    crossbow = "crossbow"
+
+
+class Armor(str, Enum):
+    leather = "leather"
+    chainmail = "chainmail"
+    plate = "plate"
+
+
+class Character(BaseModel):
+    name: constr(max_length=10)
+    age: int
+    armor: Armor
+    weapon: Weapon
+    strength: int
+
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+
+# Construct structured sequence generator
+generator = outlines.generate.json(model, Character)
+
+# Draw a sample
+seed = 789001
+
+character = generator("Give me a character description", seed=seed)
+
+print(repr(character))
+# Character(name='Anderson', age=28, armor=<Armor...>, weapon=<Weapon...>, strength=8)
+
+character = generator("Give me an interesting character description")
+
+print(repr(character))
+# Character(name='Vivian Thr', age=44, armor=<Armor...>, weapon=<Weapon...>, strength=125)
+```
+
+The method works with union types, optional types, arrays, nested schemas, etc. Some field constraints are [not supported yet](https://github.com/dottxt-ai/outlines/issues/215), but everything else should work.
+
+### Efficient JSON generation following a JSON Schema
+
+Sometimes you just want to be able to pass a JSON Schema instead of a Pydantic model. We've got you covered:
+
+``` python
+import outlines
+
+schema = '''{
+    "title": "Character",
+    "type": "object",
+    "properties": {
+        "name": {
+            "title": "Name",
+            "maxLength": 10,
+            "type": "string"
+        },
+        "age": {
+            "title": "Age",
+            "type": "integer"
+        },
+        "armor": {"$ref": "#/definitions/Armor"},
+        "weapon": {"$ref": "#/definitions/Weapon"},
+        "strength": {
+            "title": "Strength",
+            "type": "integer"
+        }
+    },
+    "required": ["name", "age", "armor", "weapon", "strength"],
+    "definitions": {
+        "Armor": {
+            "title": "Armor",
+            "description": "An enumeration.",
+            "enum": ["leather", "chainmail", "plate"],
+            "type": "string"
+        },
+        "Weapon": {
+            "title": "Weapon",
+            "description": "An enumeration.",
+            "enum": ["sword", "axe", "mace", "spear", "bow", "crossbow"],
+            "type": "string"
+        }
+    }
+}'''
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+generator = outlines.generate.json(model, schema)
+character = generator("Give me a character description")
+```
+
+### Using context-free grammars to guide generation
+
+Formal grammars rule the world, and Outlines makes them rule LLMs too. You can pass any context-free grammar in the EBNF format and Outlines will generate an output that is valid according to this grammar:
+
+``` python
+import outlines
+
+arithmetic_grammar = """
+    ?start: expression
+
+    ?expression: term (("+" | "-") term)*
+
+    ?term: factor (("*" | "/") factor)*
+
+    ?factor: NUMBER
+           | "-" factor
+           | "(" expression ")"
+
+    %import common.NUMBER
+"""
+
+model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
+generator = outlines.generate.cfg(model, arithmetic_grammar)
+sequence = generator("Alice had 4 apples and Bob ate 2. Write an expression for Alice's apples:")
+
+print(sequence)
+# (8-2)
+```
+
+This was a very simple grammar, and you can use `outlines.generate.cfg` to generate syntactically valid Python, SQL, and much more than this.
+Any kind of structured text, really. All you have to do is search for "X EBNF grammar" on the web, and take a look at the [Outlines `grammars` module](https://github.com/dottxt-ai/outlines/tree/main/outlines/grammars).
+
+### Open functions
+
+Outlines can infer the structure of the output from the signature of a function. The result is a dictionary, and can be passed directly to the function using the usual dictionary expansion syntax `**`:
+
+```python
+import outlines
+
+
+def add(a: int, b: int):
+    return a + b
+
+model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
+generator = outlines.generate.json(model, add)
+result = generator("Return json with two integers named a and b respectively. a is odd and b even.")
+
+print(add(**result))
+# 3
+```
+
+A great advantage of passing functions directly to specify the structure is that the structure expected from the LLM will change with the function's definition. No need to change the code at several places!
+
+You can also embed various functions into an enum to generate params:
+
+```python
+from enum import Enum
+from functools import partial
+
+import outlines
+
+
+def add(a: int, b: int) -> int:
+    return a + b
+
+def mul(c: float, d: float) -> float:
+    return c * d
+
+class Operation(Enum):
+    add = partial(add)
+    mul = partial(mul)
+
+model = outlines.models.transformers("WizardLM/WizardMath-7B-V1.1")
+generator = outlines.generate.json(model, Operation)
+result = generator("Return json with two float named c and d respectively. c is negative and d greater than 1.0.")
+
+print(result)
+# {'c': -3.14, 'd': 1.5}
+```
+
+## Prompting
+
+Building prompts can get messy. **Outlines** makes it easier to write and manage
+prompts by encapsulating templates inside "template functions".
+
+These functions make it possible to neatly separate the prompt logic from the
+general program logic; they can be imported from other modules and libraries.
+
+Template functions require no superfluous abstraction; they use the Jinja2
+templating engine to help build complex prompts in a concise manner:
+
+``` python
+import outlines
+
+examples = [
+    ("The food was disgusting", "Negative"),
+    ("We had a fantastic night", "Positive"),
+    ("Recommended", "Positive"),
+    ("The waiter was rude", "Negative")
+]
+
+@outlines.prompt
+def labelling(to_label, examples):
+    """You are a sentiment-labelling assistant.
+
+    {% for example in examples %}
+    {{ example[0] }} // {{ example[1] }}
+    {% endfor %}
+    {{ to_label }} //
+    """
+
+model = outlines.models.transformers("microsoft/Phi-3-mini-4k-instruct")
+prompt = labelling("Just awesome", examples)
+answer = outlines.generate.text(model)(prompt, max_tokens=100)
+```
+
+## Join us
+
+- 💡 **Have an idea?** Come chat with us on [Discord][discord]
+- 🔨 **Want to contribute?** Consult our [contribution guide](https://dottxt-ai.github.io/outlines/latest/community/contribute/).
+- 🐞 **Found a bug?** Open an [issue](https://github.com/dottxt-ai/outlines/issues) + + +## Cite Outlines + +``` +@article{willard2023efficient, + title={Efficient Guided Generation for LLMs}, + author={Willard, Brandon T and Louf, R{\'e}mi}, + journal={arXiv preprint arXiv:2307.09702}, + year={2023} +} +``` + +[documentation]: https://dottxt-ai.github.io/outlines/latest/welcome/ +[documentation-badge]: https://img.shields.io/readthedocs/outlines +[contributors]: https://github.com/dottxt-ai/outlines/graphs/contributors +[contributors-badge]: https://img.shields.io/github/contributors/dottxt-ai/outlines?style=flat-square&logo=github&logoColor=white&color=ECEFF4 +[dottxt-twitter]: https://twitter.com/dottxtai +[discord]: https://discord.gg/R9DSu34mGd +[discord-badge]: https://img.shields.io/discord/1182316225284554793?color=81A1C1&logo=discord&logoColor=white&style=flat-square +[downloads-badge]: https://img.shields.io/pypi/dm/outlines?color=89AC6B&logo=python&logoColor=white&style=flat-square +[pypistats]: https://pypistats.org/packages/outlines +[dottxt-twitter-badge]: https://img.shields.io/twitter/follow/dottxtai?style=social +[youtube-dottxt]: https://www.youtube.com/@dottxt-ai +[blog-dottxt]: https://blog.dottxt.co/ diff --git a/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/RECORD b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..0bd3a312a18bf1d492dcd7702884877ef81e650c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/RECORD @@ -0,0 +1,100 @@ +outlines-0.1.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +outlines-0.1.11.dist-info/LICENSE,sha256=9xB47oqqPVZwSIdW8Zk7neOuZMlUagIy67vdWVxTddc,11354 +outlines-0.1.11.dist-info/METADATA,sha256=90I6ySed9yjWM_A0cZZ7kYaG6CSh1DiTnGq-Q1s_jeM,17137 +outlines-0.1.11.dist-info/RECORD,, +outlines-0.1.11.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91 +outlines-0.1.11.dist-info/top_level.txt,sha256=DRbCwvEBUKClPATvDaHzpX6gD7LgECM9WVYkEq0NHpY,9 +outlines/__init__.py,sha256=sYuMGn7xxyuPhwq-M3M2WKjwGqFwEXG0xyJw6lw31Ng,495 +outlines/__pycache__/__init__.cpython-311.pyc,, +outlines/__pycache__/_version.cpython-311.pyc,, +outlines/__pycache__/base.cpython-311.pyc,, +outlines/__pycache__/caching.cpython-311.pyc,, +outlines/__pycache__/function.cpython-311.pyc,, +outlines/__pycache__/grammars.cpython-311.pyc,, +outlines/__pycache__/prompts.cpython-311.pyc,, +outlines/__pycache__/samplers.cpython-311.pyc,, +outlines/_version.py,sha256=HreDwlLXV189L3kiBj3huM_kqWD1usijlC8LN1YXcCM,413 +outlines/base.py,sha256=InRqZU2VeNPjpkb3wfCDnYZ5xW1wxSYeCNXCHTLz_Vg,10501 +outlines/caching.py,sha256=WxfFldbINw0MBtsHhHI51nugsgH7dDpYyPf07A6Yv2E,5337 +outlines/fsm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +outlines/fsm/__pycache__/__init__.cpython-311.pyc,, +outlines/fsm/__pycache__/guide.cpython-311.pyc,, +outlines/fsm/__pycache__/json_schema.cpython-311.pyc,, +outlines/fsm/__pycache__/parsing.cpython-311.pyc,, +outlines/fsm/__pycache__/types.cpython-311.pyc,, +outlines/fsm/guide.py,sha256=0DZwVei2g-3kA9Cn5NECwDalWB2ufKTwxJVvdXOVGQ0,8953 +outlines/fsm/json_schema.py,sha256=eB0fMz3UKI-pHOsuYdVQZmsm2Jr1QIw_6DzkC83mB6Y,2535 +outlines/fsm/parsing.py,sha256=ypJ52to1umo2wItiUqhxXDGW4fQf731mq5cGLrQAOeI,39516 +outlines/fsm/types.py,sha256=XEhFaGaM6rrFKsXNXnGmvk1_5Jfht8nkqCcKBk2piDQ,2493 +outlines/function.py,sha256=kN22C9c5IBoQ3KR5GwCFR0gyPzG2Ke5k6ZAPb6pF55U,3707 
+outlines/generate/__init__.py,sha256=aQs6Ga6r0n_KMzAY-d1NQhnGkQSWGdQXNCdJzMcbeGo,202 +outlines/generate/__pycache__/__init__.cpython-311.pyc,, +outlines/generate/__pycache__/api.cpython-311.pyc,, +outlines/generate/__pycache__/cfg.cpython-311.pyc,, +outlines/generate/__pycache__/choice.cpython-311.pyc,, +outlines/generate/__pycache__/format.cpython-311.pyc,, +outlines/generate/__pycache__/fsm.cpython-311.pyc,, +outlines/generate/__pycache__/generator.cpython-311.pyc,, +outlines/generate/__pycache__/json.cpython-311.pyc,, +outlines/generate/__pycache__/regex.cpython-311.pyc,, +outlines/generate/__pycache__/text.cpython-311.pyc,, +outlines/generate/api.py,sha256=54ww0C759h2A6COktBcJeLPDXPH1Nn4l0Iv2i-gLH84,20666 +outlines/generate/cfg.py,sha256=giAHsT-TAi4OnO_d3U15JJX1X194SKQrBqYgdxnFEw4,1686 +outlines/generate/choice.py,sha256=MNJZ0Ig-ZvW_Ci1IazrMqJNkuqnYU7H0R7cvic9YbPc,1752 +outlines/generate/format.py,sha256=d0tEbpdImunihJorf4cYc3KK3aeFrjuWI6G3KoO8Dqg,1435 +outlines/generate/fsm.py,sha256=N7M6BUmEoN02gcVijV3kPUa3Bk9S_sGfFGt1I-lvCeY,1111 +outlines/generate/generator.py,sha256=-EnFq8pb7fbfLPmqRFvMeXN-kA1l_mhwrGvDoRxKWx0,8811 +outlines/generate/json.py,sha256=cFHVogIC_ltTjoPURCP2WaQjuqslRuzcR7GLy3dlgjA,4309 +outlines/generate/regex.py,sha256=3PhYSiR2tpDLj3ty_fvjv7vMcU28Y9dgYiGsfRFOe8Q,1715 +outlines/generate/text.py,sha256=8-DcHDtV4imaqKfG_f4hhYQ_wbPwhhCdjuPmHG_HVo4,1409 +outlines/grammars.py,sha256=OXxQyKvthoQCfrwQuCHSSi4VYcb3GMAOYudC2DmvquU,396 +outlines/grammars/arithmetic.lark,sha256=4aWsZ_IkS9nP7NGihdgPf0wWaP2tn0xb_jhFNF5ws50,293 +outlines/grammars/common.lark,sha256=h6mPVV0vitrbCSVDUnL_GvQriCfwrN8EtWLFiss3K9Q,2243 +outlines/grammars/json.lark,sha256=6d6owpAzgVkAOUSsINg6MLu81VV_HQknRsMsSXHYB-k,373 +outlines/models/__init__.py,sha256=8vIXGlkrjOIeBYx21Uo0-3U6A4UyOBOMf9iK4Wswvcw,701 +outlines/models/__pycache__/__init__.cpython-311.pyc,, +outlines/models/__pycache__/exllamav2.cpython-311.pyc,, +outlines/models/__pycache__/llamacpp.cpython-311.pyc,, +outlines/models/__pycache__/mlxlm.cpython-311.pyc,, +outlines/models/__pycache__/openai.cpython-311.pyc,, +outlines/models/__pycache__/tokenizer.cpython-311.pyc,, +outlines/models/__pycache__/transformers.cpython-311.pyc,, +outlines/models/__pycache__/transformers_vision.cpython-311.pyc,, +outlines/models/__pycache__/vllm.cpython-311.pyc,, +outlines/models/exllamav2.py,sha256=Mo8gpuQI7KQe77T-BZHXHOV3Kkucgvkqo7-TjJcpzV0,13295 +outlines/models/llamacpp.py,sha256=mI_xD-DqfcADl9asF554qOKxpusekx65GEl1Ja-C-xY,14662 +outlines/models/mlxlm.py,sha256=ieim5QadwNQXM6311RBXOoYh52EnRcJZSvPiEfLpxbU,8588 +outlines/models/openai.py,sha256=Oa-HiCUf5tk8HL_UCMI9FJ4tz4F0gAnQgggE1EB28QU,9009 +outlines/models/tokenizer.py,sha256=x6228TFhbcGe-XssA4SAAjaOBEZoAvFciQUpK22Y28U,996 +outlines/models/transformers.py,sha256=xJblsZB8FoXfDxrhvJ7pW0Hj8HSLT9FndURPrZ7kO2M,15337 +outlines/models/transformers_vision.py,sha256=t77kgdRa5DIRiPis126AOfTnKl3PswL3klouUlFR9Jk,5069 +outlines/models/vllm.py,sha256=BRvkrYAC2gTMZ3vhcETXJYf_mlO1U49m3bMArGymyDU,7769 +outlines/processors/__init__.py,sha256=fDMQ-pyBPaDB7Eb8pgwJ16eTUbPAm-w2Wf-Vn8BuCGY,158 +outlines/processors/__pycache__/__init__.cpython-311.pyc,, +outlines/processors/__pycache__/base_logits_processor.cpython-311.pyc,, +outlines/processors/__pycache__/structured.cpython-311.pyc,, +outlines/processors/base_logits_processor.py,sha256=vFM2p65Mstk4YkO2ZC1xOON3YGj4KgWgjj_iFnROSQQ,5354 +outlines/processors/structured.py,sha256=XOZ3hq_B9BbD6nRuOjdZYQvXYRIYY1s6PJFYzdwtV-c,8240 
+outlines/prompts.py,sha256=By6LodDBBDeh9xhCXqkxQqnD1pGNStK7JNJDmMylBMg,10071 +outlines/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +outlines/samplers.py,sha256=aQqVwEqgCoAVjr2qDkSk28hJXf4CQ8DT0LEJv73vQC4,10646 +outlines/serve/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +outlines/serve/__pycache__/__init__.cpython-311.pyc,, +outlines/serve/__pycache__/serve.cpython-311.pyc,, +outlines/serve/serve.py,sha256=xZnXnos-mB7xurY_y2zQIRkUi9508QNxZERZTfbxosw,4940 +outlines/types/__init__.py,sha256=0ZVfLELb_CZ6P9RTete561Uja8bgoGZ4S2shDy-iNhg,110 +outlines/types/__pycache__/__init__.cpython-311.pyc,, +outlines/types/__pycache__/airports.cpython-311.pyc,, +outlines/types/__pycache__/countries.cpython-311.pyc,, +outlines/types/__pycache__/email.cpython-311.pyc,, +outlines/types/__pycache__/isbn.cpython-311.pyc,, +outlines/types/__pycache__/locales.cpython-311.pyc,, +outlines/types/__pycache__/phone_numbers.cpython-311.pyc,, +outlines/types/__pycache__/zip_codes.cpython-311.pyc,, +outlines/types/airports.py,sha256=L2rBblU02mkiXrQfm35XS-r4h0L8OySZ-rEpJJvw75s,241 +outlines/types/countries.py,sha256=XWjvIEXkKNwHSdG4TILxfpSU3xHNJnTeMhvVLp1n_S4,748 +outlines/types/email.py,sha256=aOc004pbeIY4p_Ssj5kWBYXfwAukHxVVY10lTj77byY,739 +outlines/types/isbn.py,sha256=2HtRGX-eoOvGImOI0WL2LUAa7IuvJmGgr1Xb7JZOwi8,761 +outlines/types/locales.py,sha256=rKj2OfDIgY4akyjMWOCWF7jB93kv3NzdQcihM4ojh-s,530 +outlines/types/phone_numbers.py,sha256=l8MSwbzsQ2qjGzKN0vVH546IdaHTuT9OD9XzZE4zAp8,435 +outlines/types/zip_codes.py,sha256=lGj2OBwX3LwLk7agw396WK17Aky4a5fZpLeZsNPkjAg,300 diff --git a/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..ae527e7d64811439e61b93aa375defb30e06edfe --- /dev/null +++ b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (75.6.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..768b9f9d0fc65abf8ac73ed084432c7ad32c7183 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/outlines-0.1.11.dist-info/top_level.txt @@ -0,0 +1 @@ +outlines diff --git a/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cb2612132d04c5523b58434e424506e4af28478 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/options.cpython-311.pyc b/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/options.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2136064518d56656761600dba14799e1bd07a4a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/options.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/playground.cpython-311.pyc b/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/playground.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9b57d5e6887916fc3c8ff961f7059084eff1179f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/partial_json_parser/__pycache__/playground.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/METADATA b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..14fb3efa3a204b66464960cd6b28277fe2366e81 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/METADATA @@ -0,0 +1,142 @@ +Metadata-Version: 2.1 +Name: pyzmq +Version: 26.2.1 +Summary: Python bindings for 0MQ +Author: Brian E. Granger, Min Ragan-Kelley +Author-Email: PyZMQ Contributors +License: BSD 3-Clause License + + Copyright (c) 2009-2012, Brian Granger, Min Ragan-Kelley + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Topic :: System :: Networking
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Project-URL: Homepage, https://pyzmq.readthedocs.org
+Project-URL: Documentation, https://pyzmq.readthedocs.org
+Project-URL: Source, https://github.com/zeromq/pyzmq
+Project-URL: Tracker, https://github.com/zeromq/pyzmq/issues
+Requires-Python: >=3.7
+Requires-Dist: cffi; implementation_name == "pypy"
+Description-Content-Type: text/markdown
+
+# PyZMQ: Python bindings for ØMQ
+
+This package contains Python bindings for [ZeroMQ](https://zeromq.org).
+ØMQ is a lightweight and fast messaging implementation.
+
+PyZMQ should work with any reasonable version of Python (≥ 3.7), as well as PyPy.
+The Cython backend used by CPython supports libzmq ≥ 2.1.4 (including 3.2.x and 4.x),
+but the CFFI backend used by PyPy only supports libzmq ≥ 3.2.2 (including 4.x).
+
+For a summary of changes to pyzmq, see our
+[changelog](https://pyzmq.readthedocs.io/en/latest/changelog.html).
+
+### ØMQ 3.x, 4.x
+
+PyZMQ fully supports the 3.x and 4.x APIs of libzmq,
+developed at [zeromq/libzmq](https://github.com/zeromq/libzmq).
+There is no code to change and no flags to pass:
+just build pyzmq against the latest libzmq and it should work.
+
+PyZMQ does not support the old libzmq 2 API on PyPy.
+
+## Documentation
+
+See PyZMQ's Sphinx-generated
+documentation [on Read the Docs](https://pyzmq.readthedocs.io) for API
+details, and some notes on Python and Cython development. If you want to
+learn about using ØMQ in general, the excellent [ØMQ
+Guide](http://zguide.zeromq.org/py:all) is the place to start; it has a
+Python version of every example. We also have some information on our
+[wiki](https://github.com/zeromq/pyzmq/wiki).
+
+## Downloading
+
+Unless you specifically want to develop PyZMQ, we recommend downloading
+the PyZMQ source code or wheels from
+[PyPI](https://pypi.io/project/pyzmq/),
+or installing with conda.
+
+You can also get the latest source code from our GitHub repository, but
+building from the repository will require that you install a recent version of Cython.
+
+## Building and installation
+
+For more detail on building pyzmq, see [our docs](https://pyzmq.readthedocs.io/en/latest/howto/build.html).
+
+We build wheels for macOS, Windows, and Linux, so you can get a binary on those platforms with:
+
+```
+pip install pyzmq
+```
+
+On other platforms, the same command compiles pyzmq from source, which should work in most environments.
+Make sure you are using the latest pip, or it may not find the right wheels.
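+Once installed, you can sanity-check the binding from Python (a minimal
+sketch using pyzmq's top-level `zmq.pyzmq_version()` and `zmq.zmq_version()`
+helpers):
+
+```
+import zmq
+
+# Version of the pyzmq binding itself
+print("pyzmq:", zmq.pyzmq_version())
+
+# Version of the libzmq library the binding loaded at runtime
+print("libzmq:", zmq.zmq_version())
+```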
+
+If the wheel doesn't work for some reason, or you want to force pyzmq to be compiled
+(this is often preferable if you already have libzmq installed and configured the way you want it),
+you can force installation from source with:
+
+```
+pip install --no-binary=pyzmq pyzmq
+```
+
+## Old versions
+
+pyzmq 16 drops support for Python 2.6 and 3.2.
+If you need to use one of those Python versions, you can pin your pyzmq version to before 16:
+
+```
+pip install 'pyzmq<16'
+```
+
+For libzmq 2.0.x, use 'pyzmq<2.1'.
+
+pyzmq-2.1.11 was the last version of pyzmq to support Python 2.5,
+and pyzmq ≥ 2.2.0 requires Python ≥ 2.6.
+pyzmq-13.0.0 introduced PyPy support via CFFI, which only supports libzmq-3.2.2 and newer.
+
+PyZMQ releases ≤ 2.2.0 matched libzmq versioning, but this is no longer the case,
+starting with PyZMQ 13.0.0 (it was the thirteenth release, so why not?).
+PyZMQ ≥ 13.0 follows semantic versioning conventions, accounting only for PyZMQ itself. diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/RECORD b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..9944999484c3c849cc7320cce130a603d6199ac7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/RECORD @@ -0,0 +1,165 @@ +pyzmq-26.2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pyzmq-26.2.1.dist-info/METADATA,sha256=i_kLKl899ZNg8SPQ32M1qoZ_ShKwnfE8d0HxyGGwTZY,6175
+pyzmq-26.2.1.dist-info/RECORD,,
+pyzmq-26.2.1.dist-info/WHEEL,sha256=KY0YyooNfJVVrYiOQHtH0PeYsMO3J_yHl9pyO75QPIs,118
+pyzmq-26.2.1.dist-info/licenses/LICENSE.md,sha256=wM9fXAP41ncveicd8ctnEFRXi9PXlSfHL8Hyj4zHKno,1545
+pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.libsodium.txt,sha256=Q5ZNl2pts_uYavaJ0F-MoOmXGHi8yucJdQ2sj9xKmc8,823
+pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.tornado.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.zeromq.txt,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+pyzmq.libs/libsodium-3b70a246.so.26.2.0,sha256=-0Hpsz0Kl1EIO2aNPqOuuwpF99kRLtU_gmVOP4Q_QcY,1246761
+pyzmq.libs/libzmq-a430b4ce.so.5.2.5,sha256=FwrlGGWNx97IlSsnwoh12Gr2f1Bb3Fp5Lrg-lHsDuDI,970897
+zmq/__init__.pxd,sha256=P2y5B_9nDB_0RD7hxbXpVm4Jia1rKclTnnUVrSbF4lE,63
+zmq/__init__.py,sha256=0zUxdN9mC6mBJAOkBfsI4em8roHbvH0afHQTYsMFjXA,2232
+zmq/__init__.pyi,sha256=4JJGGKu9IVsRVES3jNr2_MrHXA9CW6rjAiZqLGaKmq8,960
+zmq/__pycache__/__init__.cpython-311.pyc,,
+zmq/__pycache__/_future.cpython-311.pyc,,
+zmq/__pycache__/_typing.cpython-311.pyc,,
+zmq/__pycache__/asyncio.cpython-311.pyc,,
+zmq/__pycache__/constants.cpython-311.pyc,,
+zmq/__pycache__/decorators.cpython-311.pyc,,
+zmq/__pycache__/error.cpython-311.pyc,,
+zmq/_future.py,sha256=AVA5RzZrpvJaS5aAcGOGZbh-mK7vsR-X9q3fKygfNXo,23470
+zmq/_future.pyi,sha256=wJSz6Vcks4lAI325c5bkS6ZfOvwsLHcX3Dr3xwp7sj4,3193
+zmq/_typing.py,sha256=iTmWMo_xE7PQ9FRA4I_5gpj2YSL0C29vTz_2H-Tvtio,720
+zmq/asyncio.py,sha256=NjDmuZakTa2_4tvEH6Kn6j8s-bJ6JO0N7QscPdx_bfo,6518
+zmq/auth/__init__.py,sha256=D0XJjPJgN0ZqSBLDrbLm3l3N5pMQt75pv8LyearhsM8,346
+zmq/auth/__pycache__/__init__.cpython-311.pyc,,
+zmq/auth/__pycache__/asyncio.cpython-311.pyc,,
+zmq/auth/__pycache__/base.cpython-311.pyc,,
+zmq/auth/__pycache__/certs.cpython-311.pyc,,
+zmq/auth/__pycache__/ioloop.cpython-311.pyc,,
+zmq/auth/__pycache__/thread.cpython-311.pyc,,
+zmq/auth/asyncio.py,sha256=KLD0Kwev61dnImVhcLmEKr-PwTCqIyurWjs4SuH442A,1799
+zmq/auth/base.py,sha256=OPTB58nTeYJL-bu9bHa4lXUCCMwzo7cwlhxRuHBjd0Y,16337 +zmq/auth/certs.py,sha256=0lyPqG3o-ucI_UvCVpihxT10V9-hKJcyi5Us4trVGR0,4329 +zmq/auth/ioloop.py,sha256=xXF6P8A-HZlXfIYMVv8BW8EI_H2PjrJFF3a6Ajvd1rI,1298 +zmq/auth/thread.py,sha256=mv1NfTxJIydLhIkKdExPVDdWpcbczUDshIsYLUzhSkI,4103 +zmq/backend/__init__.py,sha256=5PfcIfpIzxGIegOaHbO3dBilBzby3l0yKC0o_Qf1m3A,940 +zmq/backend/__init__.pyi,sha256=NDw7Ahc8qjjQgW-MIqn_o4rWusK6slCCTpeyBpbXoZc,3379 +zmq/backend/__pycache__/__init__.cpython-311.pyc,, +zmq/backend/__pycache__/select.cpython-311.pyc,, +zmq/backend/cffi/README.md,sha256=u7zNkS3dJALRPzpgPv5s4Q1tIkevm0BMzVpcwZD0PoM,95 +zmq/backend/cffi/__init__.py,sha256=emCfRo-DxTG3mafsEBEpIX-GMFUUpajHpvSSpyB11w0,833 +zmq/backend/cffi/__pycache__/__init__.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/_poll.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/context.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/devices.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/error.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/message.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/socket.cpython-311.pyc,, +zmq/backend/cffi/__pycache__/utils.cpython-311.pyc,, +zmq/backend/cffi/_cdefs.h,sha256=Ajw-Sxw5wv9zko0yBkqEi7TZc3nuESv1z1buwy1ppQc,2639 +zmq/backend/cffi/_cffi_src.c,sha256=ADxHwdYLRjsRf8vuyOK8lbHiXTjgbfoiIwOjarFQ8ds,1314 +zmq/backend/cffi/_poll.py,sha256=niEuO85_fLbRntELWu0k89qLicJPgnirX13fTnw0Irc,2884 +zmq/backend/cffi/context.py,sha256=dKoVS0VJa0A1N3l9LYGRoYlRLmmHZigmnMhZyHsP-jA,1899 +zmq/backend/cffi/devices.py,sha256=Fc19maZ2HA0qlcP7na26fBb--V9bibIJPoXvYmKnxvk,1572 +zmq/backend/cffi/error.py,sha256=AO-QaesceSlKTWILjgzhr6PrffE8hQsPZFLbxQ3tIqE,380 +zmq/backend/cffi/message.py,sha256=r6ob45NS_ZQh9oWo_n9ihBlO9ine-Odmt2BT2lXhPRc,6488 +zmq/backend/cffi/socket.py,sha256=fD9JTFJRxW8l0mO8btJ-MHLXTm8juxIeH74X6vFL2Hc,11423 +zmq/backend/cffi/utils.py,sha256=xePrYKHy0P7vr3s49t4nxz7vD9zCcfe_ALQ-kL3DZ_U,2086 +zmq/backend/cython/__init__.pxd,sha256=iRgsrNY8-yEX3UL83jFHziSPaVibZx-qltTXcYVUM9Y,60 +zmq/backend/cython/__init__.py,sha256=SMuE8GAtsmEMWNGrv-ZlQZcuTD3EVU838E1ViRHLDNI,322 +zmq/backend/cython/__pycache__/__init__.cpython-311.pyc,, +zmq/backend/cython/__pycache__/_zmq.cpython-311.pyc,, +zmq/backend/cython/_externs.pxd,sha256=0EM00v73_7Bp_9Z4qcCuwe0IIgoPgatYgHX9wrjhbJE,339 +zmq/backend/cython/_zmq.cpython-311-x86_64-linux-gnu.so,sha256=HdUzJaAMfeYY3r3JMAHZPwkJS505fchR3tfSWuvwb-I,307649 +zmq/backend/cython/_zmq.pxd,sha256=fv1mQ6DxnJghW5XgD45dOnokVVH1UDTV0Us5KYuBo28,2186 +zmq/backend/cython/_zmq.py,sha256=PgojSlJb81xbwcetkdqi0l_d8jVeedxOyLTsGtbcLxk,58306 +zmq/backend/cython/constant_enums.pxi,sha256=LNVbov9C6GBuJvWHnfpqUjmNT0x8alTeub885-o_mI0,7562 +zmq/backend/cython/libzmq.pxd,sha256=ofccd3ZlZvJL7_Ud1gVPHTxl1PDO69UivxliA8QcD-w,4564 +zmq/backend/select.py,sha256=GbXUnUC4fdbrz7GIxraLyXH8A9Mv0_2cFLURezv5yps,902 +zmq/constants.py,sha256=xEyRW8hA1lLjDAnMjOWvmKAFQqZYzaTWO62dA7atnbM,28341 +zmq/decorators.py,sha256=sLeTjxsNcnjKYCsyUsx5RyC0X2Sfqi355nvBDzLDxGY,5099 +zmq/devices/__init__.py,sha256=ODgbZUVGiWBqsNYxKO-E4s3Q8ElZIHtqGhpqgDErDmw,730 +zmq/devices/__pycache__/__init__.cpython-311.pyc,, +zmq/devices/__pycache__/basedevice.cpython-311.pyc,, +zmq/devices/__pycache__/monitoredqueue.cpython-311.pyc,, +zmq/devices/__pycache__/monitoredqueuedevice.cpython-311.pyc,, +zmq/devices/__pycache__/proxydevice.cpython-311.pyc,, +zmq/devices/__pycache__/proxysteerabledevice.cpython-311.pyc,, +zmq/devices/basedevice.py,sha256=dQ8VMBy48wobP6QNM0KlIK9Bslj-BMfQ_xy9_MJxyUQ,9562 
+zmq/devices/monitoredqueue.py,sha256=au2EN-fFLXDvVRFClAYS3q2Lb-KxW3keT9l82W5BKRo,1294 +zmq/devices/monitoredqueuedevice.py,sha256=onjY-L74VC_YjppiC4C_voBgbuEtRhstdTaytxQe14I,1929 +zmq/devices/proxydevice.py,sha256=MCB4j-65vyjSLytrzEWfB2q6YZLn_035Sxfns9j4yyQ,2843 +zmq/devices/proxysteerabledevice.py,sha256=atHF4HRd7A_lQQV8q6eHzQ_BaT-Gv6D3EabnN1p67vQ,3206 +zmq/error.py,sha256=fkENA-HNylo18IcEUcZTZqeSEienKFNLKck8_l7QeCE,5343 +zmq/eventloop/__init__.py,sha256=j5PpZdLAwLtwChrGCEZHJYJ6ZJoEzNBMlzY9r5K5iUw,103 +zmq/eventloop/__pycache__/__init__.cpython-311.pyc,, +zmq/eventloop/__pycache__/_deprecated.cpython-311.pyc,, +zmq/eventloop/__pycache__/future.cpython-311.pyc,, +zmq/eventloop/__pycache__/ioloop.cpython-311.pyc,, +zmq/eventloop/__pycache__/zmqstream.cpython-311.pyc,, +zmq/eventloop/_deprecated.py,sha256=nTNbRXtCZ9PZXdR3m1YPlMqg01FB85RT4EeT4vNdu1A,6437 +zmq/eventloop/future.py,sha256=lueaaPliVxJkvTaksnmAJkd09XZUfi4o0YnAQiFsciI,2612 +zmq/eventloop/ioloop.py,sha256=pmFSoqjZUy40wbibneTYwyDfVy43a1Ffvzus3pdelU4,766 +zmq/eventloop/zmqstream.py,sha256=E0nTkPIUZWx40CkprUMvN5d2Si01WTbFoAcHmMoWQEs,23064 +zmq/green/__init__.py,sha256=Vmg7Zv4rXt9dUbgy7pGx1a8igWFMqcIWRnRrzZq3Jx4,1367 +zmq/green/__pycache__/__init__.cpython-311.pyc,, +zmq/green/__pycache__/core.cpython-311.pyc,, +zmq/green/__pycache__/device.cpython-311.pyc,, +zmq/green/__pycache__/poll.cpython-311.pyc,, +zmq/green/core.py,sha256=PsvDz2VEG-UsxfJPU4LocYs9_mja5nvD4jTPNCMD_sw,10808 +zmq/green/device.py,sha256=HTtdyyENo8aOtpklkoDnfSVJKbM5Xh_-KMJZbImZgSQ,978 +zmq/green/eventloop/__init__.py,sha256=N13sRnQlJDo2gD70qPNZP7uc_EEMAjE6hDa-SLhKj0s,68 +zmq/green/eventloop/__pycache__/__init__.cpython-311.pyc,, +zmq/green/eventloop/__pycache__/ioloop.cpython-311.pyc,, +zmq/green/eventloop/__pycache__/zmqstream.cpython-311.pyc,, +zmq/green/eventloop/ioloop.py,sha256=rNJvPZsF-SZpXFEk7T8DXUE5yMFxltF5HE9qZkCmufc,43 +zmq/green/eventloop/zmqstream.py,sha256=3LGGOp9Lx0OxrsiNNxt4jdzNAJvXZNMLdlOYcsrDz8c,291 +zmq/green/poll.py,sha256=77Jpd7h-TJ0ZLE7vm1J7hfJfRCm3BI41cL5CYmUzH1A,2996 +zmq/log/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +zmq/log/__main__.py,sha256=FsHekF9qqnfhDkGvl8zVWxUuckLxTqubxMr6GjuCyTA,4005 +zmq/log/__pycache__/__init__.cpython-311.pyc,, +zmq/log/__pycache__/__main__.cpython-311.pyc,, +zmq/log/__pycache__/handlers.cpython-311.pyc,, +zmq/log/handlers.py,sha256=PLdJvzN3J6pg1Z4mied6RxwoKoW6VF8vz1mivTMl74Y,7228 +zmq/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +zmq/ssh/__init__.py,sha256=2Wcr18a8hS5Qjwhn1p6SYb6NMDIi7Y48JRXg56iU0fI,29 +zmq/ssh/__pycache__/__init__.cpython-311.pyc,, +zmq/ssh/__pycache__/forward.cpython-311.pyc,, +zmq/ssh/__pycache__/tunnel.cpython-311.pyc,, +zmq/ssh/forward.py,sha256=m2p4E7GVUYOSZdGbNkGNjoq6LG-ZzLKlhE0sZVa4e1U,3318 +zmq/ssh/tunnel.py,sha256=SCurCVb4WkKFZluqDKQzMkTsmlM0SHRNno5fUNRkrUk,13357 +zmq/sugar/__init__.py,sha256=51CnYi_GR7SlZQq_mP6D29JZpi8cD76SxO-IB7Tjo6I,721 +zmq/sugar/__init__.pyi,sha256=F_JYIucugCUuXik_FSVfzWXICyuH1yDzlshcZRb8bDU,219 +zmq/sugar/__pycache__/__init__.cpython-311.pyc,, +zmq/sugar/__pycache__/attrsettr.cpython-311.pyc,, +zmq/sugar/__pycache__/context.cpython-311.pyc,, +zmq/sugar/__pycache__/frame.cpython-311.pyc,, +zmq/sugar/__pycache__/poll.cpython-311.pyc,, +zmq/sugar/__pycache__/socket.cpython-311.pyc,, +zmq/sugar/__pycache__/stopwatch.cpython-311.pyc,, +zmq/sugar/__pycache__/tracker.cpython-311.pyc,, +zmq/sugar/__pycache__/version.cpython-311.pyc,, +zmq/sugar/attrsettr.py,sha256=LL3MjUFm2TW4VYTsmv1FwEATM3Qw_MGT6s9fB1mBVek,2638 
+zmq/sugar/context.py,sha256=ZXC0GPAKIGMTQ0ueMuoeQy2bdS4EofrjyvdJ6Yq5d5c,14644 +zmq/sugar/frame.py,sha256=2WBuRkwy7E1qa8AIQX4AUrcKbTVWbBWDFgj1k6d_y3Q,4257 +zmq/sugar/poll.py,sha256=7qQnUTtQJL8IN5S2VeB0jheoqUKp37pM20yjt4Fv6P4,5752 +zmq/sugar/socket.py,sha256=z-Xyk-GUbratQfH4HP7zcwnpu1k1IFR7n4u7ez2aAmY,35694 +zmq/sugar/stopwatch.py,sha256=i1Cg96aPzsiHmUTAZEgSsiZ5qQJ7rw-pFgiIYJoJU1g,935 +zmq/sugar/tracker.py,sha256=raZKyJc3SYxlY17uAQpIPkHUaNk7bt9cwtdYugLd8QQ,3603 +zmq/sugar/version.py,sha256=jFVOijWwZ-KpQ3GaBLw6TUtYQbJkxAppGmo_yVhC2Gk,1620 +zmq/tests/__init__.py,sha256=VRi-RCwApzUasI3ruVLmdRDTWF-iRE3lYxsnrDFrN6k,8004 +zmq/tests/__pycache__/__init__.cpython-311.pyc,, +zmq/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +zmq/utils/__pycache__/__init__.cpython-311.pyc,, +zmq/utils/__pycache__/garbage.cpython-311.pyc,, +zmq/utils/__pycache__/interop.cpython-311.pyc,, +zmq/utils/__pycache__/jsonapi.cpython-311.pyc,, +zmq/utils/__pycache__/monitor.cpython-311.pyc,, +zmq/utils/__pycache__/strtypes.cpython-311.pyc,, +zmq/utils/__pycache__/win32.cpython-311.pyc,, +zmq/utils/__pycache__/z85.cpython-311.pyc,, +zmq/utils/buffers.pxd,sha256=rV7zDQ9ESlMH104whm83r01s8al4_AGFEQkMqiULShg,7031 +zmq/utils/garbage.py,sha256=hfBcYNhJum7TcW4ktHJWa7CoNnDvSRFc4K5IK7IguWY,6124 +zmq/utils/getpid_compat.h,sha256=emvckPfSlYeCoUNgfYTkAWC4ie-LXLRnXDNLlXxXaPI,116 +zmq/utils/interop.py,sha256=l4AsLmDz3UHmuHjwc5EEZ61P_56HGVIVg88Lj4wnjPk,685 +zmq/utils/ipcmaxlen.h,sha256=q-YGX5BECL_QpOzOx3oC_I8mcNCWbJJ6FnUrdKlG1fU,522 +zmq/utils/jsonapi.py,sha256=2G1kMc3EW_Y4jSH_DwZzSti8lxzpRTx02kLrDokVDSA,1025 +zmq/utils/monitor.py,sha256=ritTkUE8qGlWZjUZ3eflKOiI7WOJZFvmKNwG3d3YCzs,3310 +zmq/utils/mutex.h,sha256=tX_0NUDDv9s91JDDFW7UQh2wvqqaKzL9EX7dJUnQfi4,1625 +zmq/utils/pyversion_compat.h,sha256=4FkQ95UVmA_as9lBrIO7-wM5D0tEinVAlYmZls_SRT0,284 +zmq/utils/strtypes.py,sha256=sd0-cJGuDntYAcBMO_uqWAwvsOJGp8WXudMz3nJRUUA,1376 +zmq/utils/win32.py,sha256=aBQFhfZuNXvkHEtiYx5cqCAl3Mkl0qjsRzloIwR2K90,4940 +zmq/utils/z85.py,sha256=AM_l4fxgzA823RETaKj1QR8ci3gto-FoqvRm4qzxDXI,1838 +zmq/utils/zmq_compat.h,sha256=gsqk4EVjdWsatLrhxFAu2QHgUiQemuhqM-ZtVU4FSVE,3184 diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d7c4af24a2ad4b5720c2b49737cd98d2ace18988 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: scikit-build-core 0.10.7 +Root-Is-Purelib: false +Tag: cp311-cp311-manylinux_2_28_x86_64 + diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/LICENSE.md b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..f7072d1c994fbb6eb8a48365be0d583cb02a03e9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/LICENSE.md @@ -0,0 +1,30 @@ +BSD 3-Clause License + +Copyright (c) 2009-2012, Brian Granger, Min Ragan-Kelley + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.libsodium.txt b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.libsodium.txt new file mode 100644 index 0000000000000000000000000000000000000000..6bce7f0581c80aa6d05d33dd5267e1e047c738a9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.libsodium.txt @@ -0,0 +1,18 @@ +/* + * ISC License + * + * Copyright (c) 2013-2024 + * Frank Denis + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.tornado.txt b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.tornado.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.tornado.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.zeromq.txt b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.zeromq.txt new file mode 100644 index 0000000000000000000000000000000000000000..a612ad9813b006ce81d1ee438dd784da99a54007 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pyzmq-26.2.1.dist-info/licenses/licenses/LICENSE.zeromq.txt @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. 
License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/METADATA b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5a6b65899ee0dc7c3a6cd1e75b7d9ed9555bc680 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/METADATA @@ -0,0 +1,34 @@ +Metadata-Version: 2.1 +Name: triton +Version: 3.1.0 +Summary: A language and compiler for custom Deep Learning operations +Home-page: https://github.com/triton-lang/triton/ +Author: Philippe Tillet +Author-email: phil@openai.com +Keywords: Compiler,Deep Learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Build Tools +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Requires-Dist: filelock +Provides-Extra: build +Requires-Dist: cmake>=3.20; extra == "build" +Requires-Dist: lit; extra == "build" +Provides-Extra: tests +Requires-Dist: autopep8; extra == "tests" +Requires-Dist: flake8; extra == "tests" +Requires-Dist: isort; extra == "tests" +Requires-Dist: numpy; extra == "tests" +Requires-Dist: pytest; extra == "tests" +Requires-Dist: scipy>=1.7.1; extra == "tests" +Requires-Dist: llnl-hatchet; extra == "tests" +Provides-Extra: tutorials +Requires-Dist: matplotlib; extra == "tutorials" +Requires-Dist: pandas; extra == "tutorials" +Requires-Dist: tabulate; extra == "tutorials" + diff --git a/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/RECORD b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..4f1197a1c0a3c9e46d171fa0e5cd88ce895cac60 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/RECORD @@ -0,0 +1,376 @@ +../../../bin/proton,sha256=uEnXYDxjY3_G71CBrWKEk4c0Y5NMKhZp6vPtIhEx0B0,239 +../../../bin/proton-viewer,sha256=Kq3jKOOiaTwRUVfwAaQhC32tf2T_U9BXA09PgElQiS4,239 +triton-3.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +triton-3.1.0.dist-info/METADATA,sha256=jezNzPzOw7Q3Zi8460_-OpAq2Q7mwSCViAdoS7hbtR4,1311 +triton-3.1.0.dist-info/RECORD,, +triton-3.1.0.dist-info/WHEEL,sha256=YWWHkv6sHhBDPNqgSfLklIm4KZnZJH4x2lIHOwCoU7Q,152 +triton-3.1.0.dist-info/entry_points.txt,sha256=SAiHYj5xxm1U5d8569PbMXmtWkKGNtiyy7LeTlUHalM,99 +triton-3.1.0.dist-info/top_level.txt,sha256=Hb_kDzJ7TsGI6NCIladkPkdkXovbkIWxpHevKN759pc,261 +triton/_C/libproton.so,sha256=GozleVcqitlBcK-EQ1Q9XGWrLV97f_f0aQxlYEmx2Yw,15539808 +triton/_C/libtriton.so,sha256=oiJUpGk1zZHZMTnzsw1MLhHPq93Zu9ejhUCgfVzU2_4,472869472 +triton/__init__.py,sha256=-CXWFRYq7hHR_PAMxnIghXzleGLbAFRzCTO5MiBUP_0,1347 +triton/__pycache__/__init__.cpython-311.pyc,, +triton/__pycache__/errors.cpython-311.pyc,, +triton/__pycache__/testing.cpython-311.pyc,, +triton/backends/__init__.py,sha256=IqprKlbbTM6n-JCl6VeE5VU8kVOiaIP7UZSQPz8w4dw,1600 
+triton/backends/__pycache__/__init__.cpython-311.pyc,, +triton/backends/__pycache__/compiler.cpython-311.pyc,, +triton/backends/__pycache__/driver.cpython-311.pyc,, +triton/backends/amd/__pycache__/compiler.cpython-311.pyc,, +triton/backends/amd/__pycache__/driver.cpython-311.pyc,, +triton/backends/amd/compiler.py,sha256=eVT-rZmK822lJwxbOW7tRdNrSb7FqfaL3PnoXDC5Qxw,11084 +triton/backends/amd/driver.c,sha256=-r1CUNk1RrMaUTIqO6iFjOW9MwLlZ_YRGR9LjIep93g,8397 +triton/backends/amd/driver.py,sha256=BOi1i_zovruvEfBpQeyj5znzYG2UjgrhuT-FsZf519g,18174 +triton/backends/amd/include/hip/amd_detail/amd_channel_descriptor.h,sha256=_2myGIdBTE0plFbGKOSx8HUqGZd0UBHo-YvKe2xkpbU,11708 +triton/backends/amd/include/hip/amd_detail/amd_device_functions.h,sha256=zfYTHJE_M_y2Y2ssP8ZH_EOczMBg4Iq2guglaKcI5js,31425 +triton/backends/amd/include/hip/amd_detail/amd_hip_atomic.h,sha256=PJRRTp83M0jIEBA_iWzfWwHZelSbL3TBrSDqlO3SQtk,49919 +triton/backends/amd/include/hip/amd_detail/amd_hip_bf16.h,sha256=fucv1_06JHVm82T0TmvERBbmtZTDQK6WJi_58oGQOXg,40634 +triton/backends/amd/include/hip/amd_detail/amd_hip_bfloat16.h,sha256=cFJlQEELGau_9geACeuiiFHyuAWCD6-VuSqcTnqajX0,9484 +triton/backends/amd/include/hip/amd_detail/amd_hip_common.h,sha256=dzkuIzuklqTRaNJjKLqfFEm6Fh4tK_FkTjYHFsZkmCI,1370 +triton/backends/amd/include/hip/amd_detail/amd_hip_complex.h,sha256=SEygl8X_MCXDVXxNIBm5Ds0eWwa-ojVXUUW48SIgsX8,5855 +triton/backends/amd/include/hip/amd_detail/amd_hip_cooperative_groups.h,sha256=SvrkniHiDGt-ztZRBvbkyajfUxTbGQzpZC1gnd4T-i8,31624 +triton/backends/amd/include/hip/amd_detail/amd_hip_fp16.h,sha256=86Nw97iaiC4QV5xBv8d3Bwc4FioMh5DQuCHj3sh_Yrw,57854 +triton/backends/amd/include/hip/amd_detail/amd_hip_gl_interop.h,sha256=9vxiV6rYRMGx12TPnrAVRvrfLyoRp74XRgKSPBPa2hk,3860 +triton/backends/amd/include/hip/amd_detail/amd_hip_math_constants.h,sha256=u1fIaf-AiWF70ZA1zxVkUIbRqoJLu5lrfYbgt_usySk,5890 +triton/backends/amd/include/hip/amd_detail/amd_hip_runtime.h,sha256=ZvDsQ0AiZnJ178NuAsA7AuHrySXbN3aFs5Z9m2tsIDg,13954 +triton/backends/amd/include/hip/amd_detail/amd_hip_runtime_pt_api.h,sha256=fc4mtHBkWmiSRh8m-dxIxvu9zsweLTwEgohkntYcgJw,9997 +triton/backends/amd/include/hip/amd_detail/amd_hip_unsafe_atomics.h,sha256=w9nJ1S32GRl_ejDiGacteM6Zf84iovIifAzWX8Bze0Q,24202 +triton/backends/amd/include/hip/amd_detail/amd_hip_vector_types.h,sha256=qPdmRJnzlgtjVshkafoHxdHoMLkoYS9U-ZD-TjLznr0,57088 +triton/backends/amd/include/hip/amd_detail/amd_math_functions.h,sha256=46wiaEMStCczEsHtccgHlATfw_0O5j6Z8rlFkC7bmUA,3171 +triton/backends/amd/include/hip/amd_detail/amd_surface_functions.h,sha256=rsQuylNqmNhLb7PZjBz7WbruD_6YIXtOptY2BNJDxVU,11062 +triton/backends/amd/include/hip/amd_detail/amd_warp_functions.h,sha256=p8DdtuxqlgGHzKdVPMHDnZOD8zA5f6GjLHYMr0_FKjQ,18966 +triton/backends/amd/include/hip/amd_detail/concepts.hpp,sha256=7EOkpr2w2-jclUQ115yxtFCkBWJ7btUzhBOe-mR0N0M,1252 +triton/backends/amd/include/hip/amd_detail/device_library_decls.h,sha256=4clSpgf898UVjfZFVnDkcYi75A27crPsuFtLcs1s4KU,7457 +triton/backends/amd/include/hip/amd_detail/functional_grid_launch.hpp,sha256=u7hRB9kQXX575a5C7cV3gKow55DSBUCwO0dTjIswlag,8129 +triton/backends/amd/include/hip/amd_detail/grid_launch.h,sha256=tNS7CQw9gy-z930CElH3n6c5iMvpsQ_WFZK024mNzEo,1830 +triton/backends/amd/include/hip/amd_detail/grid_launch.hpp,sha256=EuAlM3olyrArebqwW5eSxo4gfjvWCGOAGAuLLmFttgw,1370 +triton/backends/amd/include/hip/amd_detail/grid_launch_GGL.hpp,sha256=KpQAuyy1Dyt45WcPaR_x-Ex-onPGEHA01DBbla7TT-k,1219 
+triton/backends/amd/include/hip/amd_detail/helpers.hpp,sha256=hi2pW1mXQnbIwvmwWt_nG6A38sqLOd-QP5S9sETTs60,5707 +triton/backends/amd/include/hip/amd_detail/hip_api_trace.hpp,sha256=d01j4SFQP_6ALwUHByxznZV8SrQHbuujRYon8rxFw-I,94612 +triton/backends/amd/include/hip/amd_detail/hip_assert.h,sha256=fNsG23KISuY-k5JFoX-5hZ7qGQScisXuHcdEwYlXOqw,3978 +triton/backends/amd/include/hip/amd_detail/hip_cooperative_groups_helper.h,sha256=tQ_XIvGKhvrj1h7gY-IVLmKvIPhsQa0YsBflxdhUHP8,7957 +triton/backends/amd/include/hip/amd_detail/hip_fp16_gcc.h,sha256=BtFsKmTptN4TOHocEicfNbBl2JCdZWKm_bd5mc5OzYY,6660 +triton/backends/amd/include/hip/amd_detail/hip_fp16_math_fwd.h,sha256=63tKWMPdW56qWlH_HbCaF_isVXufm514ol_SxL4YjTQ,5134 +triton/backends/amd/include/hip/amd_detail/hip_ldg.h,sha256=KAEZb9H4z4DDrkaloMOeWzahiDfI2V6c68vWT3jb5fU,3652 +triton/backends/amd/include/hip/amd_detail/hip_prof_str.h,sha256=s1T2IrCwYzZQOuCs5ppuegFQbjXSF2JA1eUSCmZg9AA,621355 +triton/backends/amd/include/hip/amd_detail/hip_runtime_prof.h,sha256=6GVfh1la0wtBVwdKX5y0C32dPD9shJp1o8wZdHsjZHA,2715 +triton/backends/amd/include/hip/amd_detail/host_defines.h,sha256=h_ZpFE4Clm2iyRyJevDb57Y-gC-6RVPjhnZ5rzPxiUo,7038 +triton/backends/amd/include/hip/amd_detail/hsa_helpers.hpp,sha256=Os-sJQOFI_0Abh8Ql05s0Rtfruk4NsSMfg7BtugxMgg,3232 +triton/backends/amd/include/hip/amd_detail/macro_based_grid_launch.hpp,sha256=6ocsArNa9_R6D6XCuNy8Zq23KG-j2uYsjqNCtnMrJws,67925 +triton/backends/amd/include/hip/amd_detail/math_fwd.h,sha256=nup5YhceJnngoLJCESI8qX08dNpbZci0i78WKu-wfdI,17000 +triton/backends/amd/include/hip/amd_detail/ockl_image.h,sha256=LzRPGMb515_iIAIIcbb2uQB-bTvT4xOjY51VdARD7lc,10538 +triton/backends/amd/include/hip/amd_detail/program_state.hpp,sha256=8QE9OmB8OKTy7rBr3EYEizJI2s-_1tgXpgU7zCA2Ky0,3154 +triton/backends/amd/include/hip/amd_detail/texture_fetch_functions.h,sha256=Ex1lF2gBWJxtC3yP9pXRSFywMp3gbEmyl0Sw8iL91yM,17787 +triton/backends/amd/include/hip/amd_detail/texture_indirect_functions.h,sha256=KkW5o5gMpoVMTRwzfXHA7-kZ9ynI8OaIw6jJ1EB1s98,18447 +triton/backends/amd/include/hip/channel_descriptor.h,sha256=gTYe7SzIg-m3ThOQY2vr5Rh6-uWvUP_d37v8F4T2Q14,1773 +triton/backends/amd/include/hip/device_functions.h,sha256=vkybrdk6wyZP-T1I5PRjtfcMqGYXDeBpB5jhYj358GU,1589 +triton/backends/amd/include/hip/driver_types.h,sha256=m1HI80HC80qkTeco2Jd07woL_jTy48lz9JiDCV_8zsg,18985 +triton/backends/amd/include/hip/hip_bf16.h,sha256=lLw6K5ltb6AqSuINYTq8flxxsDkBP8Y2zbqmUjBcG9c,1571 +triton/backends/amd/include/hip/hip_bfloat16.h,sha256=Nqoy9VjfjglVx2_NJcp8hyT1sJUukXRWj8XMlidv1yA,1755 +triton/backends/amd/include/hip/hip_common.h,sha256=q5aPhG3DHW0iUJ7ayS5lfM_ZnZQNbMmLmfdHlOwbPdA,3450 +triton/backends/amd/include/hip/hip_complex.h,sha256=TmdzQP5oVPfhBVARJYcR5eyv9HInmKMFuFoQ_1ECk_I,1594 +triton/backends/amd/include/hip/hip_cooperative_groups.h,sha256=gMLvaYQ3b-f1vcoMtEwtkN0hO5__zNfP5p5oBKmv_SE,1878 +triton/backends/amd/include/hip/hip_deprecated.h,sha256=gFLuCuKn7R_xCfum_i_Q-vi3Lg8NWHKphKZKze8DwEo,6340 +triton/backends/amd/include/hip/hip_ext.h,sha256=jK1Qc-SXgUyRTj8bBa9ZP__95Qgd2-W1mwnJo6Qpnoo,8560 +triton/backends/amd/include/hip/hip_fp16.h,sha256=vKJh-zgDWUW7NyXxtv2ho6aVLXX8BIPfzCigEQ5d6I4,1523 +triton/backends/amd/include/hip/hip_gl_interop.h,sha256=-GwkSFMBneM8akFE7pqlhi0k-Ft2uz5674wGoiaU43Q,1438 +triton/backends/amd/include/hip/hip_hcc.h,sha256=RYrArDlnTEP89xKbzIpW17_bsBY5moCitq00PL-4oWI,1307 +triton/backends/amd/include/hip/hip_math_constants.h,sha256=8bSfve5E7cDuvNAUkFUeQwSLg3iJJHuqhuD4FmHNxEM,1588 
+triton/backends/amd/include/hip/hip_profile.h,sha256=sjsNuduu5Jd6s7sJndZvZLlE0RZ0wN1rTVwv5nR7If0,1304 +triton/backends/amd/include/hip/hip_runtime.h,sha256=uy90l8Nep6xNUzeGcHMoDv84BT3hMpieTV-5ijkpL5A,3058 +triton/backends/amd/include/hip/hip_runtime_api.h,sha256=fzb_xktisCVcp2pWG-ZKhIG-YVQzDjGyPt4wvA4iayM,386498 +triton/backends/amd/include/hip/hip_texture_types.h,sha256=AhkvjG4cDjf_ZFLg5SsSTfBnXG614PBK1XVPa7irZbk,1237 +triton/backends/amd/include/hip/hip_vector_types.h,sha256=6FcBMBkP3ZN1Enalpa9hV0VopxdBJvbUCuaxISgzbTY,1630 +triton/backends/amd/include/hip/hip_version.h,sha256=J3vgzfZH0UkK8RYvyHVj1PbUNSZH1JPtlcmXxLBgwVk,407 +triton/backends/amd/include/hip/hiprtc.h,sha256=npK6f2ZkYIe5blJIGuofuTG0PrSMS2mkFBUqrdOp0A0,15631 +triton/backends/amd/include/hip/library_types.h,sha256=tPOJTQedPH5qC9meawLgKpnbFrQC2WKlfo6s0rhKoZc,2370 +triton/backends/amd/include/hip/math_functions.h,sha256=frzdJ4veBG8n9ALO4EmRrdOiDguR6FP6ygLnvOnVVSM,1815 +triton/backends/amd/include/hip/surface_types.h,sha256=uQHjITphDM7k4pnuEoDEupMUxBobzvhJpSy0unpegh4,1959 +triton/backends/amd/include/hip/texture_types.h,sha256=CtmdykZfDikhnrVfdJk3w2VK5X3Af_6rEKzU-VgLu24,6687 +triton/backends/amd/include/hsa/Brig.h,sha256=5H-btCHq40qgjjpwVAoRWf3E0ccf-J6UCPEcKx_hGKw,32705 +triton/backends/amd/include/hsa/amd_hsa_common.h,sha256=q_zN0eq-dwR7FnQ84PcpV3yZyvjHsouIAjJgKltGoX8,3912 +triton/backends/amd/include/hsa/amd_hsa_elf.h,sha256=_9Zp3EWioseu3ljShNbwNe84AmRWNfjDxRZuj0jJUSY,16305 +triton/backends/amd/include/hsa/amd_hsa_kernel_code.h,sha256=C55F8a480QsW16-iwN9TIT3cKnGh6GoeoEaEv3aVh4g,12659 +triton/backends/amd/include/hsa/amd_hsa_queue.h,sha256=ZJ-k5wY30heLmQnGB0VUz36XCiVHRmspg5FRNMGIk_U,4766 +triton/backends/amd/include/hsa/amd_hsa_signal.h,sha256=FDegZnWQC04GtnqHjXOBsB-AoVSaqdhNY6Mwbua5FGA,2947 +triton/backends/amd/include/hsa/hsa.h,sha256=Jft1K5uFAcasOD9IYW6wD5GsGQcPQTrmbpjie-0Wh00,190916 +triton/backends/amd/include/hsa/hsa_amd_tool.h,sha256=pyZSyIVl-UA5AOhte78jvn4V3hCd0dxJAIv7KeADsPs,2843 +triton/backends/amd/include/hsa/hsa_api_trace.h,sha256=2iuwHcpyW9wvr-WPKCgatQzYBaA8rTa3w1BRMXBGcSI,28982 +triton/backends/amd/include/hsa/hsa_ext_amd.h,sha256=Riw3Ii-AYts1w_yjVD96ZXuY6-BBpnlx_bnnltThK1s,116016 +triton/backends/amd/include/hsa/hsa_ext_finalize.h,sha256=sv0AZbDM-B1wIdQ3cHTMlpUtNacQN2PkOgX90IZol_o,20227 +triton/backends/amd/include/hsa/hsa_ext_image.h,sha256=t5YJm_aw9EePCeFL1hoIfQ8ubIjBte-ptfReq6Ts-8Y,54232 +triton/backends/amd/include/hsa/hsa_ven_amd_aqlprofile.h,sha256=9uev2nT29MCdu7-HMkg9iItHop6QMOBMQL5DAFnftSg,19777 +triton/backends/amd/include/hsa/hsa_ven_amd_loader.h,sha256=c6cxPAzAox7u6IbFzEkQZfCuRl-Kr39WhY2_w23X1R4,26146 +triton/backends/amd/include/roctracer/ext/prof_protocol.h,sha256=6FAcvVD-dNM7uulFs2B-aTxw5xOAWGy6evdD4yUaebA,3849 +triton/backends/amd/include/roctracer/hip_ostream_ops.h,sha256=WNXFZxawBXHmFGMDFIOZqXkCw6VzyDexwGPkGJre4w0,184840 +triton/backends/amd/include/roctracer/hsa_ostream_ops.h,sha256=AYwF-IT9Dhl2FX-GuvCJZX6fSmHK0xkKLORx9QxuSK8,57857 +triton/backends/amd/include/roctracer/hsa_prof_str.h,sha256=ctT-KKsIGayp7RUGUsFNR-dE65VydyXla_Qgvf-efTU,122884 +triton/backends/amd/include/roctracer/roctracer.h,sha256=B8sHz2DMNprP7EqNWIGwVLY1KQMpxmhfVy4UoR8dzzY,23849 +triton/backends/amd/include/roctracer/roctracer_ext.h,sha256=vLaZ8peAxSy0cwrdEalKnUApkKspfa04iw1Mr_Zcio0,2940 +triton/backends/amd/include/roctracer/roctracer_hcc.h,sha256=NlF3R8JQ9oX9lGpm0b2n-EWJ0r3y9sP9wbwnoucaCuY,1303 +triton/backends/amd/include/roctracer/roctracer_hip.h,sha256=RCzYuNw1vLR7xK4rb06TtM9TU546UYKHJ83IMHmZEm8,1432 
+triton/backends/amd/include/roctracer/roctracer_hsa.h,sha256=M8APM64XNAWSslxQisM-pcmKoUQaUdTMaKvSACyt0Ag,4108 +triton/backends/amd/include/roctracer/roctracer_plugin.h,sha256=8GGE1zDbdPCVJtbmwOCYq7X0mwFjfWRtzDYKLD4cKys,4786 +triton/backends/amd/include/roctracer/roctracer_roctx.h,sha256=gBjBk5vb0l3PbBSQ7V9iFtaM_RzkIDJEW1A_PXBihBM,2014 +triton/backends/amd/include/roctracer/roctx.h,sha256=RhJXUXRhSJ5LRE_1gm7E6-bjEMrfcFBLDLuf3UxAIh8,6717 +triton/backends/amd/lib/ockl.bc,sha256=wQKCzkKukIHbu0lyjKUYlhndc7S27xto6L54J0Bn-C0,246124 +triton/backends/amd/lib/ocml.bc,sha256=UPNTXW0gCXUNB-c6orSYwb-mz9_mjUc7zny_vfFza44,205964 +triton/backends/compiler.py,sha256=ILAX6cTYWKsF54P3ffULhsbW7uXXnz9LFYVitKVPhEM,2720 +triton/backends/driver.py,sha256=9EM4ox4FNCkLCGUwUIBMP6u95AOm0wBK4E8MKElfCAI,977 +triton/backends/nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton/backends/nvidia/__pycache__/__init__.cpython-311.pyc,, +triton/backends/nvidia/__pycache__/compiler.cpython-311.pyc,, +triton/backends/nvidia/__pycache__/driver.cpython-311.pyc,, +triton/backends/nvidia/bin/cuobjdump,sha256=FLKFErTLe_YgWmaukj-B8lkDrW6il4BbWWX2S0X_b1s,663040 +triton/backends/nvidia/bin/nvdisasm,sha256=rwo7W-VxMOzwUKMQdn01SkxzCzCjvzuIwQDcPJvL6-o,50683112 +triton/backends/nvidia/bin/ptxas,sha256=lN2lShZzlA1W0wcsZO96rLEloeZDlFhuEPd6el_w_4c,30314080 +triton/backends/nvidia/compiler.py,sha256=qXmJa5wKz9Qd2VO5C9xLEJaPfAtFoobq4_gpqt2yu1U,13246 +triton/backends/nvidia/driver.c,sha256=Qc4fWOCoqphl5muQ1YszB7tkxaAGgw5JVDRE-vViAvw,17309 +triton/backends/nvidia/driver.py,sha256=2ncSmPe8RdJKYfNe9RdCMEQh3xq0nVC_H8UQ3VpG6ks,13289 +triton/backends/nvidia/include/Openacc/cupti_openacc.h,sha256=Z0OM5e_hbd3cxdXyn3SCHqBBQawLg4QORnlm57Cr2-M,3513 +triton/backends/nvidia/include/Openmp/cupti_openmp.h,sha256=E1WNmeb_7HaUSmBegtUNe4IV1i7pXeNxgzIlyKn1zrM,3491 +triton/backends/nvidia/include/Openmp/omp-tools.h,sha256=AmuC_xPC7VPu3B-W4PmXuCNufFawhY8PjNXePaQFAOg,37403 +triton/backends/nvidia/include/builtin_types.h,sha256=JxT9Vf2q2snxTBOL9ACzNmYzTWACO2VOVUu1KdFt7_g,3150 +triton/backends/nvidia/include/channel_descriptor.h,sha256=no_vNky02LeMLI0CF8GDVGHaPm_uRUGcVUMYdt_Xn4U,21482 +triton/backends/nvidia/include/common_functions.h,sha256=22LTZRVcPZzEH6MJda7nNMCvMgIjSTe0OKR7sEQj6kc,3410 +triton/backends/nvidia/include/cooperative_groups.h,sha256=JUBW-C1x_7WWuNOaoorTKQab0qzrykkG8oAw1mEHZ2s,60332 +triton/backends/nvidia/include/cooperative_groups/details/async.h,sha256=xsEHCZP3nuEY3l2p8SU2d1226XiXumUvDP_Gyh8PdVY,19122 +triton/backends/nvidia/include/cooperative_groups/details/coalesced_reduce.h,sha256=pBQgFY7i64V87XNATg1UEIQHVNYOItQtHjS5B4yn8pc,4257 +triton/backends/nvidia/include/cooperative_groups/details/coalesced_scan.h,sha256=DfZv5d5W0XJv-tZVhgrIdjLjs6aCx_u0oy1lDIpjo1Q,7314 +triton/backends/nvidia/include/cooperative_groups/details/driver_abi.h,sha256=v-ZUb4UgGKJk6NR2WCWHD3x_42y-togI1urFn70Gi-g,3964 +triton/backends/nvidia/include/cooperative_groups/details/functional.h,sha256=2BV8i8Bidz0kgxuYkJCAbwFxOIZRyzHgG-c_rVKhRzc,8905 +triton/backends/nvidia/include/cooperative_groups/details/helpers.h,sha256=K9jvxnXc5-6Fum1KG4EQKJJrVZ4BhHOSAJbZR4uDL0c,26476 +triton/backends/nvidia/include/cooperative_groups/details/info.h,sha256=Ij_cqIrcXCcwlaQqCL7AHzMD4H89y0tJeQXCbjTGsFo,12578 +triton/backends/nvidia/include/cooperative_groups/details/invoke.h,sha256=Osq3K-tZuXHVCMQJ708PjPo-BwMhjhjApO4b0TYLFJg,8616 +triton/backends/nvidia/include/cooperative_groups/details/memory.h,sha256=WU28eUcYLA1z131VYGulR4eVCSN9xK9KSxbV656YPs0,5484 
+triton/backends/nvidia/include/cooperative_groups/details/partitioning.h,sha256=4UXuvUmZvGANy0hd4erdBNllpgnn4K4qFWWlfzAsHO8,7125 +triton/backends/nvidia/include/cooperative_groups/details/reduce.h,sha256=UfMezM5pqRIotJjmuFgOmiMvbu49sYgjraHutmVVr0w,22111 +triton/backends/nvidia/include/cooperative_groups/details/scan.h,sha256=-Ttwb2AfEEY_tsmqJjR2dojkPpoRx387SoqxgvfdBtQ,17166 +triton/backends/nvidia/include/cooperative_groups/details/sync.h,sha256=zoiBicvB7rlXa_r_VSNuvHVwrLIM7EjF_KdmhvPj1LM,10638 +triton/backends/nvidia/include/cooperative_groups/memcpy_async.h,sha256=erOIHuObdfxRhBWfrXE3wsZF4B2GUuqwzQrsPwKPpbg,2960 +triton/backends/nvidia/include/cooperative_groups/reduce.h,sha256=B0hgDkqM-6ueqTTgb3b34A0RH4vGz8mBf5e2jT1dJ1o,2949 +triton/backends/nvidia/include/cooperative_groups/scan.h,sha256=2EU6T5cWNwftm2B7FicV31PojoI61yo5fHXGRYkGk40,2940 +triton/backends/nvidia/include/crt/common_functions.h,sha256=-U44f4yUGmwDPwd7Q_3Cz5if05xHGPSlAzz5zMylLSQ,13559 +triton/backends/nvidia/include/crt/cudacc_ext.h,sha256=KW6n0ImOZKS0VqVmBHWTXtHI816hh88YeEgUg2aYdVU,3224 +triton/backends/nvidia/include/crt/device_double_functions.h,sha256=A1vB3g0qwnNEfcpT1d9RiGDaxqPXXgYr-Vxe2oMHyxY,39938 +triton/backends/nvidia/include/crt/device_double_functions.hpp,sha256=YYIbqYhb5Qmf8c4YfcC_jytg4FRwcXPjv3TFTwhb24E,8568 +triton/backends/nvidia/include/crt/device_functions.h,sha256=txuWyo2qoqRZTomi3BSjwUbFvtD9Ea0WKamRgMFQzjQ,136370 +triton/backends/nvidia/include/crt/device_functions.hpp,sha256=9BxQiHjRuETOIntxXAlmTPKp8wlXrBKTPcBaSUQmwfQ,38985 +triton/backends/nvidia/include/crt/func_macro.h,sha256=EOpDlaM917bh9cwBiFBPF689DCMBw5hFarxLxFt-i74,1755 +triton/backends/nvidia/include/crt/host_config.h,sha256=ZnNRtvunIV0ctARy5qbTC1fa5-JpSK5eZ5u5SCcu_BM,12169 +triton/backends/nvidia/include/crt/host_defines.h,sha256=agpWQb4K25fhOP_RsrIuz1L_vPeC2AkbmJY12QgpXKc,9950 +triton/backends/nvidia/include/crt/host_runtime.h,sha256=lOpmkxFZVkEp8dcMAGEZRITsh-19o9jy39kdSNLc3Ng,10284 +triton/backends/nvidia/include/crt/math_functions.h,sha256=iYVBIFDocDsPxqaeKHeeTxAsY-zf04-zfkmETyeahuc,396266 +triton/backends/nvidia/include/crt/math_functions.hpp,sha256=u-CGbd0R2FZWdKG-6bdmGSor9KT_wnmISj63lPQKASM,100207 +triton/backends/nvidia/include/crt/mma.h,sha256=BgSSvJ_IR75W-3uLlC2yE6B7rHeWtamaNn6-XzYU73U,62564 +triton/backends/nvidia/include/crt/mma.hpp,sha256=spo0LX71tUCipxK517Bssj0nc-ZHf8oMWzvHoYYB_6I,66599 +triton/backends/nvidia/include/crt/nvfunctional,sha256=FDM0zqWO6bl9jpJKz9U8CMbjt6iTKh18tQalxAvRsag,16900 +triton/backends/nvidia/include/crt/sm_70_rt.h,sha256=Kf830xymA-zmF7LsunFHLSNyhhT5UiJMocgoHBQeNns,6837 +triton/backends/nvidia/include/crt/sm_70_rt.hpp,sha256=3a_rU-Y0MSB4htBDFY4PCQ_jXiWFTe7WT1ZyhMuCJOA,7837 +triton/backends/nvidia/include/crt/sm_80_rt.h,sha256=MdJHWCRzLM__nDDf1go61rDsl9ydOW3oi6SZBfjUyc8,7743 +triton/backends/nvidia/include/crt/sm_80_rt.hpp,sha256=o-rJu-jpehCeyABGgv-8dYRB7oJTCwuNdvSCq0VURdE,6705 +triton/backends/nvidia/include/crt/sm_90_rt.h,sha256=an47m0XFBaJ3pUX9MlE4-nktP1jb3eJUXhQ3ntZtzc8,11445 +triton/backends/nvidia/include/crt/sm_90_rt.hpp,sha256=YuqVygGV6rgtWtx1J9cPpEI3BXKQBII-Ez6oZFP3wrE,9228 +triton/backends/nvidia/include/crt/storage_class.h,sha256=dzcOZ16pLaN8ejqHaXw4iHbBJ6fXWxfaU-sj2QjYzzg,4791 +triton/backends/nvidia/include/cuComplex.h,sha256=WpcgpaiPhU_o9sTPMcNTEZuyXDIc8x3sz4dUWSztL2g,12186 +triton/backends/nvidia/include/cuda.h,sha256=29OuNnfs8Hb2sqCXHUKy3VudXxzN8050d0oW_C33ysE,1048458 +triton/backends/nvidia/include/cudaEGL.h,sha256=_CwaQ4cEP1vfNyBSSd5qFxznPCYOovF6Cpj-QWSIBq4,39544 
+triton/backends/nvidia/include/cudaEGLTypedefs.h,sha256=xF_FAN1Kar9oyHJ3cCU7jztTpxX8WylpiuYyYpGGHek,5645 +triton/backends/nvidia/include/cudaGL.h,sha256=gMT1HPGa-siuji0gAsKYr4X45Lc29HKglC_ttNSGyUM,22501 +triton/backends/nvidia/include/cudaGLTypedefs.h,sha256=dClpQI-LuXgF9rPSBsj7OkIg8g_fXDjT0hLZS8TGpOg,6576 +triton/backends/nvidia/include/cudaProfilerTypedefs.h,sha256=F2aWLIKv_AhNbxNOaZVcRsxIh0kuscnV8UMWWxkBAlY,3297 +triton/backends/nvidia/include/cudaTypedefs.h,sha256=0hWYyV-KM7R5Qjagz9UP1ldhAZDHGIcJmYtYvB_nwNc,110387 +triton/backends/nvidia/include/cudaVDPAU.h,sha256=Np7Nc2Wjaz--hkpbhW6f9aapr-NbcPDAgkot0sJerco,12694 +triton/backends/nvidia/include/cudaVDPAUTypedefs.h,sha256=wz8nyOUdwM9mH9JO3QZW-A9dyxt-IufSX7nggSXpCNs,4144 +triton/backends/nvidia/include/cuda_awbarrier.h,sha256=3ZH-ZlXODhSiwSY9rqSni_EQwi25QMHP6Tm-zOdxBwE,9340 +triton/backends/nvidia/include/cuda_awbarrier_helpers.h,sha256=OCskCts5bCKl_RKBe9M74zKSIsVpePn44S_aJp1tFXE,12489 +triton/backends/nvidia/include/cuda_awbarrier_primitives.h,sha256=n5__E1jYYDhlgH-f3u8MQjtz57UZ7v5VshhMye1eicM,4699 +triton/backends/nvidia/include/cuda_bf16.h,sha256=2BKEN_8pbieiBHShSfIawa-Oy_3jJzQAl74TqoLQ3MQ,185707 +triton/backends/nvidia/include/cuda_bf16.hpp,sha256=ZJlZSkQJ65G0yhMPDAq3m-oMaEJ3ia9FOsbgnzCtPS0,137924 +triton/backends/nvidia/include/cuda_device_runtime_api.h,sha256=bIhfusirXe5-osOTPAILDh6pY8MW1hefyZvTD_IzgqM,46249 +triton/backends/nvidia/include/cuda_egl_interop.h,sha256=PNWYns30MIytJQHSOh7UbZYlaTX5e0bavzK14tde_C8,37109 +triton/backends/nvidia/include/cuda_fp16.h,sha256=1J7SldpmJk8SNDGD3SO0yVrsLoHkpN1VnMtRZr2Gbcs,175974 +triton/backends/nvidia/include/cuda_fp16.hpp,sha256=JyedVIUALPBiR_Ci3Rxef_sUs9VvDiP4MDc97Yk_Ys8,123259 +triton/backends/nvidia/include/cuda_fp8.h,sha256=Q3OP5o_3rSYbKtVIlcXVr_CncU3SPM-09j605e2Zegw,13833 +triton/backends/nvidia/include/cuda_fp8.hpp,sha256=b-PcyZgei5MmIp6op0QQ40BgNupO_ei648hG_dUS-FQ,64246 +triton/backends/nvidia/include/cuda_gl_interop.h,sha256=VQEswFeOBF6JN6Q0pdlkvc5WT7bD1FnTfKewvANulCc,19150 +triton/backends/nvidia/include/cuda_occupancy.h,sha256=Kr9HyOe-hlRjBAzbINwUYkNgbbIgIjuvKs09UZhMYQo,67179 +triton/backends/nvidia/include/cuda_pipeline.h,sha256=0enXG49wN4JajlQi3ahbp2ei_ufTY_Mznic7zfWmKHM,8130 +triton/backends/nvidia/include/cuda_pipeline_helpers.h,sha256=bo1L7e6vCuM-K3Il8K1z4wJUja5DyXQKdo_hSWUME-E,13852 +triton/backends/nvidia/include/cuda_pipeline_primitives.h,sha256=FnJJtuV6rHr6LgL56XDwilcSbFr6W1Hj6mf1AJaMI20,8675 +triton/backends/nvidia/include/cuda_runtime.h,sha256=a-OXWPsmKSPst7mRCCxHNZV7m-uRLCAY8oGRi-dJzPA,90683 +triton/backends/nvidia/include/cuda_runtime_api.h,sha256=7Ys9yv_2trFEVybtbh-UJKnDKG8fHWvUjSX4cgZGCck,608580 +triton/backends/nvidia/include/cuda_stdint.h,sha256=XbFOk9CtJjKqk7PpYNqbSVsDxAsVM8avA4rWpPi0BjQ,4093 +triton/backends/nvidia/include/cuda_surface_types.h,sha256=Mw5Lo4b8Q-f9mogOvATGyHhu9d2t2K6XOxuqtZrSh3A,3688 +triton/backends/nvidia/include/cuda_texture_types.h,sha256=ITbX-JNnP7Rm-JSgNVdJ9pq6k8FVor8RbnruDsKq6sk,3688 +triton/backends/nvidia/include/cuda_vdpau_interop.h,sha256=bXQanWc2IFXZAKWNGl2xAz9nLvFmQpWyGrsDvfeS9FA,7727 +triton/backends/nvidia/include/cudart_platform.h,sha256=YN6sKhB0b9w5tGX1IYL7ulJVPrWAiX9A44qLv4EtW5Q,2717 +triton/backends/nvidia/include/cupti.h,sha256=JkVyAGTIMYzwm62dfVqas3nMcILhgP_Wdz6fh4_NED0,4697 +triton/backends/nvidia/include/cupti_activity.h,sha256=1aNI_zmQnjAguMBU0UqqMR_heE77FiafQkZl9or_1Ww,210387 +triton/backends/nvidia/include/cupti_activity_deprecated.h,sha256=rYJsoAJxA2BTT50-olN8EYcSzdlXBpRbR1ATLG3rVIM,121526 
+triton/backends/nvidia/include/cupti_callbacks.h,sha256=zrEVRb0hubSfD69QUmHsJiL8oAfvqyuKGcTVRihQrnc,29729 +triton/backends/nvidia/include/cupti_checkpoint.h,sha256=rTz8JoWxqESBXyZWUhZJGm4xeYcx4OJOtJ7Ld13T_b0,5264 +triton/backends/nvidia/include/cupti_common.h,sha256=85m74bxUgXp3tEaPQpezeazmpsNMw41PsjNSYmQdT20,3514 +triton/backends/nvidia/include/cupti_driver_cbid.h,sha256=dHKyQYZbBbdlxixzFkIoNHg5IfGXdgriyjN1Bu1i6g4,74462 +triton/backends/nvidia/include/cupti_events.h,sha256=f7lLGmD2e8FzvMhRgnn0-v7U0vTpUkiQHIpQxgARGb0,51896 +triton/backends/nvidia/include/cupti_metrics.h,sha256=iLAOlDrcbHEsIIUmgq0Tp1ZOY9O3Ot3wj2-bI8iYbSs,32148 +triton/backends/nvidia/include/cupti_nvtx_cbid.h,sha256=_azPtR1g4qivvX7qbvHRUg0RHCWF7iEOJyHMN9qZe9E,5912 +triton/backends/nvidia/include/cupti_pcsampling.h,sha256=ycJHT36DmPIaVzHsB3xxjXkhFyEfMCJOl3LbCsHFgyA,32144 +triton/backends/nvidia/include/cupti_pcsampling_util.h,sha256=lx8CaNXowJe5Zvc06LE-u_Zry_jODs1mM6j9Q5WIX9E,12430 +triton/backends/nvidia/include/cupti_profiler_target.h,sha256=JsceoDuhllWNEzaO0xxT81dJ55NrbF0UtRJJgit0P_E,32131 +triton/backends/nvidia/include/cupti_result.h,sha256=a-C4Y7LAYCiCT1ngOfoDuTi2stEG1YTafwwn6UfL-LU,12603 +triton/backends/nvidia/include/cupti_runtime_cbid.h,sha256=11pXl0MdmTtxUngel-ru4JdqWvF_gEIG14aQExRyfzI,46436 +triton/backends/nvidia/include/cupti_sass_metrics.h,sha256=3RW9snJuFQdOhrEn3wDJOru05q0V_zssWrqD7tvVJKw,19674 +triton/backends/nvidia/include/cupti_target.h,sha256=x4Vz1Upb6m9ixmVpmGaKQldDWYQI3OZ-ocEXGzNK0EE,1263 +triton/backends/nvidia/include/cupti_version.h,sha256=sjd-aUoTGkEWyvA2VUWIpZwXyXAaclqC8gbwNnuK5D0,4425 +triton/backends/nvidia/include/device_atomic_functions.h,sha256=OR2jNSfSKzaFri74zh4Vtz5M0z9UDBU3rKeC1rYaVQs,9500 +triton/backends/nvidia/include/device_atomic_functions.hpp,sha256=0e7MOiNNUnnloXpB_r9WT5YOws5cxgzQQAzRCYvgaFA,10486 +triton/backends/nvidia/include/device_double_functions.h,sha256=KUxId5Z1fx8SWfLRTxPD7RB-zN7zslzb4n7JaJLfL3I,3452 +triton/backends/nvidia/include/device_functions.h,sha256=bWSrhTYE9NQlss7xMSMEVusvto9j2fgUDXWVH2W_cOA,3410 +triton/backends/nvidia/include/device_launch_parameters.h,sha256=H1_CC-vvAaS26ys4XsTFkMgTxUTciAjdjswjizkisvQ,3846 +triton/backends/nvidia/include/device_types.h,sha256=2LFxoZBJPoA5V0H1EbKTEaXDi3GDJPtzOPdRHDaucIQ,3588 +triton/backends/nvidia/include/driver_functions.h,sha256=cN3IjRAz2Mj2Pj35SyxJIkZNDDusnJqaqzBdMzpQKbA,4625 +triton/backends/nvidia/include/driver_types.h,sha256=4eBQ10Nzgfs2BlxGaGHVMWLvnJfKrEnMml9zfFi0DyA,177782 +triton/backends/nvidia/include/fatbinary_section.h,sha256=NnuUfy358yGJx4enq0pBnetjv17UWa-nOlgYToUitrw,1809 +triton/backends/nvidia/include/generated_cudaGL_meta.h,sha256=dfd2QuaRdEjbStOKvaQLi1Md_qrpRQh8PfyZznJ8bWY,3115 +triton/backends/nvidia/include/generated_cudaVDPAU_meta.h,sha256=fAedsoQxaU3hIAApAWDOKsa9kgcuQw4tdyf8klLm-3k,1453 +triton/backends/nvidia/include/generated_cuda_gl_interop_meta.h,sha256=LXOqvQCej0sCgAT1LUKKYZ466EFxN4hIwf9oIhXOLF0,2250 +triton/backends/nvidia/include/generated_cuda_meta.h,sha256=hawYpDe0xpaDFDnClXI91JjwCRxWb-AS0FS8ydUMgxc,94639 +triton/backends/nvidia/include/generated_cuda_runtime_api_meta.h,sha256=D8CbAN3-jLuF2KGfsBHXEELSgL92KrUAiDvugWE8B8M,69706 +triton/backends/nvidia/include/generated_cuda_vdpau_interop_meta.h,sha256=8OLqWN26aEYpTWUXtbHJvA5GYhVv3ybYVOTW7yK37z8,1367 +triton/backends/nvidia/include/generated_cudart_removed_meta.h,sha256=X3I5WXmhtsJNNlgY7coJ5vg4t11G5FRR6Xo7MboIeck,5172 +triton/backends/nvidia/include/generated_nvtx_meta.h,sha256=YHb_RD8g3s4m8PJn7Z0wnxvUHarl7BOAX5ADr-BL3HI,7513 
+triton/backends/nvidia/include/host_config.h,sha256=BscH_GazAZbbotddVzL5RmafbQ-QjRx8f-I1O01IBW8,3380 +triton/backends/nvidia/include/host_defines.h,sha256=bBQwQF5C1N1c2qpLV56g1c-weu9Ysgz-gIf2Kn3uz_A,3386 +triton/backends/nvidia/include/library_types.h,sha256=p6746aCd_A_1VlgKRhLJChzeZ4tN7e4HBH2Hm7hDjbU,4836 +triton/backends/nvidia/include/math_constants.h,sha256=cV6hAyQe8X7f7MBtaKjjIJq3BycOUDp6I5cizJX5HLw,7608 +triton/backends/nvidia/include/math_functions.h,sha256=5XcC6j-fJKttvhwc4hZNoLHNw808a2ZYIOtZ7ry7yd0,3398 +triton/backends/nvidia/include/mma.h,sha256=IY_VenxuEncwGq92MhrWUb-Xswh0ekAXLy9Rbxhxa2Y,2932 +triton/backends/nvidia/include/nvPTXCompiler.h,sha256=z_v0P6Sj0KfDQBmAKIdgFoPOylhsO4B221w3KDUqbM0,12076 +triton/backends/nvidia/include/nvfunctional,sha256=IkFoCi_Q4OhP9nEuBI-5jWwFlR_PfG05hJH7lSMsfWc,2975 +triton/backends/nvidia/include/nvperf_common.h,sha256=BqPml9AxyN10-ptWT3hQzh2JUWqQX57Q5BjQ3ZuaKNs,17255 +triton/backends/nvidia/include/nvperf_cuda_host.h,sha256=aBnyIr_hexPDGBkP6WSujN1mI_DYP25sEIXWYY1O7VI,8298 +triton/backends/nvidia/include/nvperf_host.h,sha256=afdHG6eraeo4ltlF9ihskqhU7IccxcRCaZDZ6_ikjkg,68506 +triton/backends/nvidia/include/nvperf_target.h,sha256=ZDA-JI459tLBW4iLLCQjYYRAMeHwfqDIgXbVqVLDYZ4,22539 +triton/backends/nvidia/include/sm_20_atomic_functions.h,sha256=x4ycINVq__l9B4SQPD-I48jQbKxxdBmgp8Vf2GO0Qfg,4478 +triton/backends/nvidia/include/sm_20_atomic_functions.hpp,sha256=1l5NLM8DhDbqYZ_E51LoqElQJXObkbwo57d3r-4uEbE,4107 +triton/backends/nvidia/include/sm_20_intrinsics.h,sha256=a4jDSp_DUW0d09g5wgEm_I7bGTAe73HKRinkhBKQBis,51048 +triton/backends/nvidia/include/sm_20_intrinsics.hpp,sha256=BhEBuXSKBsNGJDBJDtYL0cGRI3wX_w_OIgA5D-YxIWk,7694 +triton/backends/nvidia/include/sm_30_intrinsics.h,sha256=b6W8Vxp9vD9OCJI6lZuGyZYXEdQ3Ei8PTAloHNkwCcQ,16978 +triton/backends/nvidia/include/sm_30_intrinsics.hpp,sha256=yX0ebd265tJ-BDhvluP2BhadPuWXpRZPI2eeQFFt5ys,24567 +triton/backends/nvidia/include/sm_32_atomic_functions.h,sha256=HGnZgQHACE2AAb6zabGUURc53IsVZelc2BSJqvs9OgY,5703 +triton/backends/nvidia/include/sm_32_atomic_functions.hpp,sha256=CQTTvOEYp-s5hqAgLvAon11vLYDrDp8cTHdel-XRzBQ,6592 +triton/backends/nvidia/include/sm_32_intrinsics.h,sha256=Xdkogdsjy1vh8u3eGu0i5xTmHxBGAjj6_vVGR-spdOE,33539 +triton/backends/nvidia/include/sm_32_intrinsics.hpp,sha256=Gl8aSLDLcit4W3pKQS19GsDG8RYcwD65HwYB_CeZe8M,70616 +triton/backends/nvidia/include/sm_35_atomic_functions.h,sha256=a3XoEsKRCEOf0Q_5Y__rMfmC4pScv4VkUggVgVJVn44,2909 +triton/backends/nvidia/include/sm_35_intrinsics.h,sha256=0mS5-LCgvZiTvL7-MG_4YwI-zWGvM-s4xyRuMkunMC8,2664 +triton/backends/nvidia/include/sm_60_atomic_functions.h,sha256=_anfNaJsvQpDEorYeUKIkbizYkwrinBcG_ZCiECtLqI,13178 +triton/backends/nvidia/include/sm_60_atomic_functions.hpp,sha256=cgIKddDn2B3QzYlzeBILAP1IRys74QCCxsH0QqaVGls,22903 +triton/backends/nvidia/include/sm_61_intrinsics.h,sha256=h_MBL1UUDxQX_qOddSImzqyFjcrhhm_63G97pGDyreU,10902 +triton/backends/nvidia/include/sm_61_intrinsics.hpp,sha256=N-nQvcBsPMT2Umy5zR69c9K1q366W-Jqe7NpoLTqTmg,6787 +triton/backends/nvidia/include/surface_functions.h,sha256=b1O82SAvEgWWxA9uZTWQcGimzZUoem2QbAET3wh3fZc,6782 +triton/backends/nvidia/include/surface_indirect_functions.h,sha256=vy9QuFVV-ezZP-x2RT9RLp2qIUgdngACOCmalSfVFPA,10877 +triton/backends/nvidia/include/surface_types.h,sha256=XkFXD1nHbeSMgajR-UJE9uQ7TByzJnjdnUL4-yGiufk,4530 +triton/backends/nvidia/include/texture_fetch_functions.h,sha256=KLCmUxf5aY5_UalX8tSFB6e4TrjA8hyUPxLOkMFltAo,12468 
+triton/backends/nvidia/include/texture_indirect_functions.h,sha256=lH_y3Ni-hq4RZ0_PMFbBM0th5-OmTn3TtqtpkHHhA8w,21163 +triton/backends/nvidia/include/texture_types.h,sha256=73ntVyg8r8fzKy5VIk6yuvC45GDeWepaLIqIk-M3Ri8,6360 +triton/backends/nvidia/include/vector_functions.h,sha256=WypGkL-IDbGOlay7g_G0p3HO7OLGRE0Do__JtiFoWxY,8003 +triton/backends/nvidia/include/vector_functions.hpp,sha256=afXhNSd3LFTZo96EPtesTLfvxd4nTmLVzgkj967rTRg,10060 +triton/backends/nvidia/include/vector_types.h,sha256=6CJ4yt3KD7zQVfm1NhrgqNYYEDEIZWwaivlFx12nhNg,13396 +triton/backends/nvidia/lib/libdevice.10.bc,sha256=XC-uN8huaMOjhgWpX1EtfRLV89uYYxC-R_VzBKpype4,473728 +triton/compiler/__init__.py,sha256=PD2VOiqnb3qUrti77C_E83lX8Rch-jLVwN98I7XiMRA,256 +triton/compiler/__pycache__/__init__.cpython-311.pyc,, +triton/compiler/__pycache__/code_generator.cpython-311.pyc,, +triton/compiler/__pycache__/compiler.cpython-311.pyc,, +triton/compiler/__pycache__/errors.cpython-311.pyc,, +triton/compiler/__pycache__/make_launcher.cpython-311.pyc,, +triton/compiler/code_generator.py,sha256=g11PwzUA4azK_FxeAPT3PYdDLBFIYMBLaISb9vlCuOU,57757 +triton/compiler/compiler.py,sha256=3HZ8j63PI-qC154lfDfUQHfq5_aBz5Z1Zf1D6cAXk3s,16605 +triton/compiler/errors.py,sha256=I9Y15pDWcL9heY4SWWdLeMDtW6Iiq2pFXzKfJ6dY_C0,1732 +triton/compiler/make_launcher.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton/errors.py,sha256=8WfnuRKLG578mgY6cBA3ECruVMf9ULEKFNgRcJ6IhWM,89 +triton/language/__init__.py,sha256=j2x4eORgWxCDtbAMIj67qPpl4DzeJiffxhqkCLvIBNU,4716 +triton/language/__pycache__/__init__.cpython-311.pyc,, +triton/language/__pycache__/core.cpython-311.pyc,, +triton/language/__pycache__/math.cpython-311.pyc,, +triton/language/__pycache__/random.cpython-311.pyc,, +triton/language/__pycache__/semantic.cpython-311.pyc,, +triton/language/__pycache__/standard.cpython-311.pyc,, +triton/language/core.py,sha256=cTMPOU5YSw3ylZXUn2rIrAxVA9uRRwkNxYrYiM66x74,89244 +triton/language/extra/__init__.py,sha256=8krf1SOD94ZnkeuqXUWl0syCblbuL9KF9DjoXSAf5kM,64 +triton/language/extra/__pycache__/__init__.cpython-311.pyc,, +triton/language/extra/__pycache__/libdevice.cpython-311.pyc,, +triton/language/extra/cuda/__init__.py,sha256=L-L0lztxn9O06wGzVyhmQRFQ_EI-6gyB65iEzO7oEB4,290 +triton/language/extra/cuda/__pycache__/__init__.cpython-311.pyc,, +triton/language/extra/cuda/__pycache__/libdevice.cpython-311.pyc,, +triton/language/extra/cuda/__pycache__/utils.cpython-311.pyc,, +triton/language/extra/cuda/libdevice.py,sha256=WiYuVmetUT8F74Q8auQtDnkSQU_-rkyF0dOgujDhuJA,56033 +triton/language/extra/cuda/utils.py,sha256=e1BslV7lZGhi2uVIlo5lI9dcN61HUMIU2asPaRjsyIo,4379 +triton/language/extra/hip/__init__.py,sha256=ieSER4LeX9_0horChGUUVwpuKAprkuka8uGAkEBDyDM,49 +triton/language/extra/hip/__pycache__/__init__.cpython-311.pyc,, +triton/language/extra/hip/__pycache__/libdevice.cpython-311.pyc,, +triton/language/extra/hip/libdevice.py,sha256=NaAqjBuLcc2e9XOxZi4eYM1wc8El3iCrsYCfWgWp-28,16551 +triton/language/extra/libdevice.py,sha256=wNGqO71EcHrrnN9ArQQ6znwSpywXu-OaUwVMBoVPjKI,14729 +triton/language/math.py,sha256=o0vg065LOsmu3hlc_aQvigoJeyvJZC-1lnPOaztgfxA,7332 +triton/language/random.py,sha256=NhMKN68bGaDexWmqCNMLAMdMjwK2tjKZnYbeoWkUZ5I,6736 +triton/language/semantic.py,sha256=NtQvBCmq4CfLmFtTuavGFvrk2tng6gOVPh6ftG0PuaA,73108 +triton/language/standard.py,sha256=5dLLckwI0O9BMPO6hY0izyH4NL_ZcKU4iqatKmurkfk,13132 +triton/ops/__init__.py,sha256=Yo_IfcP54HxucFaQNc4aOtfOGryUcQZUDA4aONg6sHk,324 +triton/ops/__pycache__/__init__.cpython-311.pyc,, 
+triton/ops/__pycache__/cross_entropy.cpython-311.pyc,, +triton/ops/__pycache__/flash_attention.cpython-311.pyc,, +triton/ops/__pycache__/matmul.cpython-311.pyc,, +triton/ops/__pycache__/matmul_perf_model.cpython-311.pyc,, +triton/ops/blocksparse/__init__.py,sha256=6YEVQNzipgQCpoO_7B8H7ckaSW2Idt1244s7IyLWAwc,100 +triton/ops/blocksparse/__pycache__/__init__.cpython-311.pyc,, +triton/ops/blocksparse/__pycache__/matmul.cpython-311.pyc,, +triton/ops/blocksparse/__pycache__/softmax.cpython-311.pyc,, +triton/ops/blocksparse/matmul.py,sha256=S29Wv0X47AUoCMfSw7A7-Lt6lUyGPzy63Q8pcD41O1w,15920 +triton/ops/blocksparse/softmax.py,sha256=2jfmu1Bn9XsM4PyBsSRaSi3-XK0bJABxwQ-XsTwo7fg,8243 +triton/ops/cross_entropy.py,sha256=Jr-iQ6oZQir8gh4WRmlPoh_CY4fM8x9c9dDsuavyFyQ,3451 +triton/ops/flash_attention.py,sha256=1W8-D9OFJWAYmNhsFipKufHb1ZNEOIuz4ZMq_3HEq3s,18030 +triton/ops/matmul.py,sha256=kKVeZG7t31g_iS9Sk2Y-XJc3GzP5DTwmcv11OUAE4-4,9257 +triton/ops/matmul_perf_model.py,sha256=E8LuqIrb-u_NCqSDD0r9hHNPkPKCTMTKJNAVOuZomaU,6697 +triton/profiler/__init__.py,sha256=8MMGWMNsHxvgFva8l6o9lzUcAdGjpxiQouuTwJ4qkdQ,184 +triton/profiler/__pycache__/__init__.cpython-311.pyc,, +triton/profiler/__pycache__/flags.cpython-311.pyc,, +triton/profiler/__pycache__/hook.cpython-311.pyc,, +triton/profiler/__pycache__/profile.cpython-311.pyc,, +triton/profiler/__pycache__/proton.cpython-311.pyc,, +triton/profiler/__pycache__/scope.cpython-311.pyc,, +triton/profiler/__pycache__/viewer.cpython-311.pyc,, +triton/profiler/flags.py,sha256=BFBKQnozRN9Jp18_S5MuIeu5CJMW7_I38pM55qOg2oQ,604 +triton/profiler/hook.py,sha256=1FqwAGrdmmzWIyy3qqPH3-3OHtQtdN64FRwEnizXCx8,1100 +triton/profiler/profile.py,sha256=RXz6bej6-Z33i1CLH9aGSgegQb1LMKWwbnwdIyLSlt4,5832 +triton/profiler/proton.py,sha256=f1cokCi2wYzCOnl8ztPb-_xc-uKSBMW3h3uJajvYuX8,2624 +triton/profiler/scope.py,sha256=gwsjiwrXH16_SMHEooGM3KLLe7XIowjFvd__L5t4WSg,3125 +triton/profiler/viewer.py,sha256=0cHhg6gOe2t4_JA9GXp8wBthFySO6Hw8kYW2PWwjwMM,9635 +triton/runtime/__init__.py,sha256=mKL5cqIBDUw2WO80NRCh4s1G8KYaqgM59TTAbTkPPjQ,621 +triton/runtime/__pycache__/__init__.cpython-311.pyc,, +triton/runtime/__pycache__/autotuner.cpython-311.pyc,, +triton/runtime/__pycache__/build.cpython-311.pyc,, +triton/runtime/__pycache__/cache.cpython-311.pyc,, +triton/runtime/__pycache__/driver.cpython-311.pyc,, +triton/runtime/__pycache__/errors.cpython-311.pyc,, +triton/runtime/__pycache__/interpreter.cpython-311.pyc,, +triton/runtime/__pycache__/jit.cpython-311.pyc,, +triton/runtime/autotuner.py,sha256=ndJ_wuOVaXCBVsjmIzFX1IRPHTAH6FKUm-1CHpOhNQs,14974 +triton/runtime/build.py,sha256=7PqCGjCdwjakAJq6FRxnJ8CQtUmWBNeYqgqYdfks1G0,2594 +triton/runtime/cache.py,sha256=Fmr6AgDubT7XPe9Lan6WE_czbSS7ZFZ__8qkdvaWkSE,9759 +triton/runtime/driver.py,sha256=VZ-883Xri71R72lHB6usIpLo3gGLbZJkAlLP3ewWSpc,1509 +triton/runtime/errors.py,sha256=oj73dn34qJbLhOjakakAuZPSv-laZyIYylJiJwREA8Y,787 +triton/runtime/interpreter.py,sha256=NWnlemxOKQGQPnYUbIhiQJQkRKgGo2yuUnc2dnAnTdc,49061 +triton/runtime/jit.py,sha256=dUK_klRkYzHtkM4W_hv7LK-H1310jYQ5aqtEyAJ9UIs,34722 +triton/testing.py,sha256=CIyD69haM99jj-eFDPesNV2pXzmy_U5tIZm2l_gGWaI,18413 +triton/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +triton/tools/__pycache__/__init__.cpython-311.pyc,, +triton/tools/__pycache__/build_extern.cpython-311.pyc,, +triton/tools/__pycache__/compile.cpython-311.pyc,, +triton/tools/__pycache__/disasm.cpython-311.pyc,, +triton/tools/__pycache__/link.cpython-311.pyc,, 
+triton/tools/build_extern.py,sha256=jCr-2hu3nLGBIJhCGUQ1jAyzLttughjkiPGEwRFjLR0,13673 +triton/tools/compile.c,sha256=rjuAQ8b-2DTtbj29SgK1NxJI5BSU2P9ccp9wa5p8Iyc,2090 +triton/tools/compile.h,sha256=n9QKIFZTL4RSsiXtAxBP9XGSnxjyaevQQ9bBpwDsvAg,332 +triton/tools/compile.py,sha256=CR1_-TBz77rMeN9lN2pc6EtErQwfQBdBMQvpWKr-ezs,6468 +triton/tools/disasm.py,sha256=U58GRL7v14hu4-B_kWkciHaY9jVIkTKg7DtioH4LTHo,5080 +triton/tools/link.py,sha256=u7qtfZRLriZkAMEGNvj8YF-k1cthmLL7BwHYqBgT63E,11871 diff --git a/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..a9a865296bb2a6e89041edea9c427cae8da32ad4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.44.0) +Root-Is-Purelib: false +Tag: cp311-cp311-manylinux_2_17_x86_64 +Tag: cp311-cp311-manylinux2014_x86_64 + diff --git a/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/entry_points.txt b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..fec7e033ca5aee50e0b944b9c14f2987c668d505 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +proton = triton.profiler.proton:main +proton-viewer = triton.profiler.viewer:main diff --git a/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..1016c8445b5539e982b3a35cd969e4b758233df1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/triton-3.1.0.dist-info/top_level.txt @@ -0,0 +1,15 @@ +triton +triton/_C +triton/backends +triton/backends/amd +triton/backends/nvidia +triton/compiler +triton/language +triton/language/extra +triton/language/extra/cuda +triton/language/extra/hip +triton/ops +triton/ops/blocksparse +triton/profiler +triton/runtime +triton/tools diff --git a/.venv/lib/python3.11/site-packages/wrapt/weakrefs.py b/.venv/lib/python3.11/site-packages/wrapt/weakrefs.py new file mode 100644 index 0000000000000000000000000000000000000000..f931b60d5f27b9e802a31cdfc6834a809062c14f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/wrapt/weakrefs.py @@ -0,0 +1,98 @@ +import functools +import weakref + +from .__wrapt__ import ObjectProxy, _FunctionWrapperBase + +# A weak function proxy. This will work on instance methods, class +# methods, static methods and regular functions. Special treatment is +# needed for the method types because the bound method is effectively a +# transient object and applying a weak reference to one will immediately +# result in it being destroyed and the weakref callback called. The weak +# reference is therefore applied to the instance the method is bound to +# and the original function. The function is then rebound at the point +# of a call via the weak function proxy. + +def _weak_function_proxy_callback(ref, proxy, callback): + if proxy._self_expired: + return + + proxy._self_expired = True + + # This could raise an exception. We let it propagate back and let + # the weakref.proxy() deal with it, at which point it generally + # prints out a short error message direct to stderr and keeps going. 
+ + if callback is not None: + callback(proxy) + +class WeakFunctionProxy(ObjectProxy): + + __slots__ = ('_self_expired', '_self_instance') + + def __init__(self, wrapped, callback=None): + # We need to determine if the wrapped function is actually a + # bound method. In the case of a bound method, we need to keep a + # reference to the original unbound function and the instance. + # This is necessary because if we hold a reference to the bound + # function, it will be the only reference and given it is a + # temporary object, it will almost immediately expire and + # the weakref callback triggered. So what is done is that we + # hold a reference to the instance and unbound function and + # when called bind the function to the instance once again and + # then call it. Note that we avoid using a nested function for + # the callback here so as not to cause any odd reference cycles. + + _callback = callback and functools.partial( + _weak_function_proxy_callback, proxy=self, + callback=callback) + + self._self_expired = False + + if isinstance(wrapped, _FunctionWrapperBase): + self._self_instance = weakref.ref(wrapped._self_instance, + _callback) + + if wrapped._self_parent is not None: + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped._self_parent, _callback)) + + else: + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped, _callback)) + + return + + try: + self._self_instance = weakref.ref(wrapped.__self__, _callback) + + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped.__func__, _callback)) + + except AttributeError: + self._self_instance = None + + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped, _callback)) + + def __call__(*args, **kwargs): + def _unpack_self(self, *args): + return self, args + + self, args = _unpack_self(*args) + + # We perform a boolean check here on the instance and wrapped + # function as that will trigger the reference error prior to + # calling if the reference had expired. + + instance = self._self_instance and self._self_instance() + function = self.__wrapped__ and self.__wrapped__ + + # If the wrapped function was originally a bound function, for + # which we retained a reference to the instance and the unbound + # function we need to rebind the function and then call it. If + # not just called the wrapped function. + + if instance is None: + return self.__wrapped__(*args, **kwargs) + + return function.__get__(instance, type(instance))(*args, **kwargs) diff --git a/.venv/lib/python3.11/site-packages/zipp/__init__.py b/.venv/lib/python3.11/site-packages/zipp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..031d9d4f21aa9353b4adea8ce42038571bbd72db --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zipp/__init__.py @@ -0,0 +1,455 @@ +""" +A Path-like interface for zipfiles. + +This codebase is shared between zipfile.Path in the stdlib +and zipp in PyPI. See +https://github.com/python/importlib_metadata/wiki/Development-Methodology +for more detail. +""" + +import functools +import io +import itertools +import pathlib +import posixpath +import re +import stat +import sys +import zipfile + +from .compat.py310 import text_encoding +from .glob import Translator + +from ._functools import save_method_args + + +__all__ = ['Path'] + + +def _parents(path): + """ + Given a path with elements separated by + posixpath.sep, generate all parents of that path. 
+ + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + """ + return itertools.islice(_ancestry(path), 1, None) + + +def _ancestry(path): + """ + Given a path with elements separated by + posixpath.sep, generate all elements of that path. + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + + Multiple separators are treated like a single. + + >>> list(_ancestry('//b//d///f//')) + ['//b//d///f', '//b//d', '//b'] + """ + path = path.rstrip(posixpath.sep) + while path.rstrip(posixpath.sep): + yield path + path, tail = posixpath.split(path) + + +_dedupe = dict.fromkeys +"""Deduplicate an iterable in original order""" + + +def _difference(minuend, subtrahend): + """ + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + """ + return itertools.filterfalse(set(subtrahend).__contains__, minuend) + + +class InitializedState: + """ + Mix-in to save the initialization state for pickling. + """ + + @save_method_args + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def __getstate__(self): + return self._saved___init__.args, self._saved___init__.kwargs + + def __setstate__(self, state): + args, kwargs = state + super().__init__(*args, **kwargs) + + +class CompleteDirs(InitializedState, zipfile.ZipFile): + """ + A ZipFile subclass that ensures that implied directories + are always included in the namelist. + + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt'])) + ['foo/', 'foo/bar/'] + >>> list(CompleteDirs._implied_dirs(['foo/bar.txt', 'foo/bar/baz.txt', 'foo/bar/'])) + ['foo/'] + """ + + @staticmethod + def _implied_dirs(names): + parents = itertools.chain.from_iterable(map(_parents, names)) + as_dirs = (p + posixpath.sep for p in parents) + return _dedupe(_difference(as_dirs, names)) + + def namelist(self): + names = super().namelist() + return names + list(self._implied_dirs(names)) + + def _name_set(self): + return set(self.namelist()) + + def resolve_dir(self, name): + """ + If the name represents a directory, return that name + as a directory (with the trailing slash). + """ + names = self._name_set() + dirname = name + '/' + dir_match = name not in names and dirname in names + return dirname if dir_match else name + + def getinfo(self, name): + """ + Supplement getinfo for implied dirs. + """ + try: + return super().getinfo(name) + except KeyError: + if not name.endswith('/') or name not in self._name_set(): + raise + return zipfile.ZipInfo(filename=name) + + @classmethod + def make(cls, source): + """ + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + """ + if isinstance(source, CompleteDirs): + return source + + if not isinstance(source, zipfile.ZipFile): + return cls(source) + + # Only allow for FastLookup when supplied zipfile is read-only + if 'r' not in source.mode: + cls = CompleteDirs + + source.__class__ = cls + return source + + @classmethod + def inject(cls, zf: zipfile.ZipFile) -> zipfile.ZipFile: + """ + Given a writable zip file zf, inject directory entries for + any directories implied by the presence of children. 
+ """ + for name in cls._implied_dirs(zf.namelist()): + zf.writestr(name, b"") + return zf + + +class FastLookup(CompleteDirs): + """ + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + """ + + def namelist(self): + return self._namelist + + @functools.cached_property + def _namelist(self): + return super().namelist() + + def _name_set(self): + return self._name_set_prop + + @functools.cached_property + def _name_set_prop(self): + return super()._name_set() + + +def _extract_text_encoding(encoding=None, *args, **kwargs): + # compute stack level so that the caller of the caller sees any warning. + is_pypy = sys.implementation.name == 'pypy' + stack_level = 3 + is_pypy + return text_encoding(encoding, stack_level), args, kwargs + + +class Path: + """ + A :class:`importlib.resources.abc.Traversable` interface for zip files. + + Implements many of the features users enjoy from + :class:`pathlib.Path`. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = zipfile.ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'mem/abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> path = Path(zf) + + From there, several path operations are available. + + Directory iteration (including the zip file itself): + + >>> a, b = path.iterdir() + >>> a + Path('mem/abcde.zip', 'a.txt') + >>> b + Path('mem/abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('mem/abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text(encoding='utf-8') + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> import os + >>> str(c).replace(os.sep, posixpath.sep) + 'mem/abcde.zip/b/c.txt' + + At the root, ``name``, ``filename``, and ``parent`` + resolve to the zipfile. + + >>> str(path) + 'mem/abcde.zip/' + >>> path.name + 'abcde.zip' + >>> path.filename == pathlib.Path('mem/abcde.zip') + True + >>> str(path.parent) + 'mem' + + If the zipfile has no filename, such attributes are not + valid and accessing them will raise an Exception. + + >>> zf.filename = None + >>> path.name + Traceback (most recent call last): + ... + TypeError: ... + + >>> path.filename + Traceback (most recent call last): + ... + TypeError: ... + + >>> path.parent + Traceback (most recent call last): + ... + TypeError: ... + + # workaround python/cpython#106763 + >>> pass + """ + + __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" + + def __init__(self, root, at=""): + """ + Construct a Path from a ZipFile or filename. + + Note: When the source is an existing ZipFile object, + its type (__class__) will be mutated to a + specialized type. If the caller wishes to retain the + original type, the caller should either create a + separate ZipFile object or pass a filename. 
+ """ + self.root = FastLookup.make(root) + self.at = at + + def __eq__(self, other): + """ + >>> Path(zipfile.ZipFile(io.BytesIO(), 'w')) == 'foo' + False + """ + if self.__class__ is not other.__class__: + return NotImplemented + return (self.root, self.at) == (other.root, other.at) + + def __hash__(self): + return hash((self.root, self.at)) + + def open(self, mode='r', *args, pwd=None, **kwargs): + """ + Open this entry as text or binary following the semantics + of ``pathlib.Path.open()`` by passing arguments through + to io.TextIOWrapper(). + """ + if self.is_dir(): + raise IsADirectoryError(self) + zip_mode = mode[0] + if zip_mode == 'r' and not self.exists(): + raise FileNotFoundError(self) + stream = self.root.open(self.at, zip_mode, pwd=pwd) + if 'b' in mode: + if args or kwargs: + raise ValueError("encoding args invalid for binary operation") + return stream + # Text mode: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + return io.TextIOWrapper(stream, encoding, *args, **kwargs) + + def _base(self): + return pathlib.PurePosixPath(self.at or self.root.filename) + + @property + def name(self): + return self._base().name + + @property + def suffix(self): + return self._base().suffix + + @property + def suffixes(self): + return self._base().suffixes + + @property + def stem(self): + return self._base().stem + + @property + def filename(self): + return pathlib.Path(self.root.filename).joinpath(self.at) + + def read_text(self, *args, **kwargs): + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + with self.open('r', encoding, *args, **kwargs) as strm: + return strm.read() + + def read_bytes(self): + with self.open('rb') as strm: + return strm.read() + + def _is_child(self, path): + return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") + + def _next(self, at): + return self.__class__(self.root, at) + + def is_dir(self): + return not self.at or self.at.endswith("/") + + def is_file(self): + return self.exists() and not self.is_dir() + + def exists(self): + return self.at in self.root._name_set() + + def iterdir(self): + if not self.is_dir(): + raise ValueError("Can't listdir a file") + subs = map(self._next, self.root.namelist()) + return filter(self._is_child, subs) + + def match(self, path_pattern): + return pathlib.PurePosixPath(self.at).match(path_pattern) + + def is_symlink(self): + """ + Return whether this path is a symlink. 
+ """ + info = self.root.getinfo(self.at) + mode = info.external_attr >> 16 + return stat.S_ISLNK(mode) + + def glob(self, pattern): + if not pattern: + raise ValueError(f"Unacceptable pattern: {pattern!r}") + + prefix = re.escape(self.at) + tr = Translator(seps='/') + matches = re.compile(prefix + tr.translate(pattern)).fullmatch + return map(self._next, filter(matches, self.root.namelist())) + + def rglob(self, pattern): + return self.glob(f'**/{pattern}') + + def relative_to(self, other, *extra): + return posixpath.relpath(str(self), str(other.joinpath(*extra))) + + def __str__(self): + return posixpath.join(self.root.filename, self.at) + + def __repr__(self): + return self.__repr.format(self=self) + + def joinpath(self, *other): + next = posixpath.join(self.at, *other) + return self._next(self.root.resolve_dir(next)) + + __truediv__ = joinpath + + @property + def parent(self): + if not self.at: + return self.filename.parent + parent_at = posixpath.dirname(self.at.rstrip('/')) + if parent_at: + parent_at += '/' + return self._next(parent_at) diff --git a/.venv/lib/python3.11/site-packages/zipp/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zipp/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3343f4258b60680697a89c868a79d4839734db7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zipp/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zipp/__pycache__/_functools.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zipp/__pycache__/_functools.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b01f020a10d007e2f1df3274f1a57c1159f65691 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zipp/__pycache__/_functools.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zipp/__pycache__/glob.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zipp/__pycache__/glob.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05db6fb5278ce822b89b3e9d338268a70f5d44ac Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zipp/__pycache__/glob.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zipp/compat/__init__.py b/.venv/lib/python3.11/site-packages/zipp/compat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6927c14f75ad16829bbfee15a799860bb3fa554e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/overlay.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/overlay.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94b8a20c053f17e76d156917dc1140d27832e0e1 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/overlay.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/py310.cpython-311.pyc b/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/py310.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a16999be58881c2626c8d4b18227ea11c9c4b35b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/zipp/compat/__pycache__/py310.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/zipp/compat/overlay.py b/.venv/lib/python3.11/site-packages/zipp/compat/overlay.py new file mode 100644 index 0000000000000000000000000000000000000000..5a97ee7cd8b98f3a5487c0a0b0a80ffde5ff4dfd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zipp/compat/overlay.py @@ -0,0 +1,37 @@ +""" +Expose zipp.Path as .zipfile.Path. + +Includes everything else in ``zipfile`` to match future usage. Just +use: + +>>> from zipp.compat.overlay import zipfile + +in place of ``import zipfile``. + +Relative imports are supported too. + +>>> from zipp.compat.overlay.zipfile import ZipInfo + +The ``zipfile`` object added to ``sys.modules`` needs to be +hashable (#126). + +>>> _ = hash(sys.modules['zipp.compat.overlay.zipfile']) +""" + +import importlib +import sys +import types + +import zipp + + +class HashableNamespace(types.SimpleNamespace): + def __hash__(self): + return hash(tuple(vars(self))) + + +zipfile = HashableNamespace(**vars(importlib.import_module('zipfile'))) +zipfile.Path = zipp.Path +zipfile._path = zipp + +sys.modules[__name__ + '.zipfile'] = zipfile # type: ignore[assignment] diff --git a/.venv/lib/python3.11/site-packages/zipp/compat/py310.py b/.venv/lib/python3.11/site-packages/zipp/compat/py310.py new file mode 100644 index 0000000000000000000000000000000000000000..e1e7ec229062b8556cdd85f530d1ff301b2e6845 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/zipp/compat/py310.py @@ -0,0 +1,13 @@ +import io +import sys + + +def _text_encoding(encoding, stacklevel=2, /): # pragma: no cover + return encoding + + +text_encoding = ( + io.text_encoding # type: ignore[unused-ignore, attr-defined] + if sys.version_info > (3, 10) + else _text_encoding +)
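For context, two short usage sketches of the modules vendored above. Both are illustrative, not taken from this diff; names such as task, buf, and pkg/data.txt are invented.

First, wrapt's WeakFunctionProxy (weakrefs.py above): the proxy holds only weak references, so once the target function is garbage-collected, the boolean check described in the __call__ comments trips on the dead weakref.proxy and raises ReferenceError. A minimal sketch, assuming wrapt is importable from this environment:

import wrapt

def task():
    return 'done'

proxy = wrapt.WeakFunctionProxy(task)
print(proxy())          # 'done' -- forwards the call to the live function

del task                # drop the only strong reference to the function
try:
    proxy()             # truth test on the expired weakref.proxy fires here
except ReferenceError:
    print('expired')

Second, the zipp.Path interface (zipp/__init__.py above), mirroring its own doctests: build an in-memory archive, then navigate it with the divide operator. Note that str() on an entry requires the underlying ZipFile to have a filename, so the sketch uses .name instead:

import io
import zipfile

import zipp

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('pkg/data.txt', 'hello')

root = zipp.Path(zipfile.ZipFile(buf))     # accepts a ZipFile or a filename
entry = root / 'pkg' / 'data.txt'          # joinpath via the divide operator
print(entry.name)                          # data.txt
print(entry.read_text(encoding='utf-8'))   # hello
print([c.name for c in (root / 'pkg').iterdir()])   # ['data.txt']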