Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/INSTALLER +1 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/LICENSE +20 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/METADATA +105 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/RECORD +83 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/REQUESTED +0 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/WHEEL +5 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/entry_points.txt +2 -0
- llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/top_level.txt +1 -0
- llava_next/lib/python3.10/site-packages/bitsandbytes/__init__.py +24 -0
- llava_next/lib/python3.10/site-packages/bitsandbytes/__main__.py +4 -0
- llava_next/lib/python3.10/site-packages/bitsandbytes/consts.py +12 -0
- llava_next/lib/python3.10/site-packages/bitsandbytes/cuda_specs.py +41 -0
- llava_next/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so +0 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/AUTHORS +4 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/INSTALLER +1 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/LICENSE +42 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/METADATA +31 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/RECORD +58 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/REQUESTED +0 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/WHEEL +5 -0
- llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/top_level.txt +1 -0
- llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/INSTALLER +1 -0
- llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/METADATA +542 -0
- llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/RECORD +70 -0
- llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/top_level.txt +4 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER +1 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE +27 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA +233 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD +181 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/REQUESTED +0 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL +5 -0
- llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt +1 -0
- llava_next/lib/python3.10/site-packages/ninja/__init__.py +55 -0
- llava_next/lib/python3.10/site-packages/ninja/__main__.py +5 -0
- llava_next/lib/python3.10/site-packages/ninja/__pycache__/__init__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/ninja/__pycache__/__main__.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/ninja/__pycache__/_version.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/ninja/__pycache__/ninja_syntax.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/ninja/_version.py +16 -0
- llava_next/lib/python3.10/site-packages/ninja/ninja_syntax.py +199 -0
- llava_next/lib/python3.10/site-packages/ninja/py.typed +0 -0
- llava_next/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc +0 -0
- llava_next/lib/python3.10/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc +3 -0
- parrot/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py +215 -0
- parrot/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py +1099 -0
- parrot/lib/python3.10/site-packages/transformers/utils/__init__.py +261 -0
- parrot/lib/python3.10/site-packages/transformers/utils/backbone_utils.py +350 -0
- parrot/lib/python3.10/site-packages/transformers/utils/bitsandbytes.py +28 -0
- parrot/lib/python3.10/site-packages/transformers/utils/constants.py +6 -0
.gitattributes
CHANGED
|
@@ -328,3 +328,4 @@ llava_next/lib/python3.10/site-packages/pandas/io/__pycache__/pytables.cpython-3
|
|
| 328 |
llava_next/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
|
| 329 |
parrot/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 330 |
parrot/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 328 |
llava_next/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
|
| 329 |
parrot/lib/python3.10/site-packages/transformers/models/perceiver/__pycache__/modeling_perceiver.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 330 |
parrot/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=lfs -text
|
| 331 |
+
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
The MIT License (MIT)
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2018 Alex Grönholm
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
| 6 |
+
this software and associated documentation files (the "Software"), to deal in
|
| 7 |
+
the Software without restriction, including without limitation the rights to
|
| 8 |
+
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
| 9 |
+
the Software, and to permit persons to whom the Software is furnished to do so,
|
| 10 |
+
subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
| 17 |
+
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
| 18 |
+
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
| 19 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
| 20 |
+
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/METADATA
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: anyio
|
| 3 |
+
Version: 4.6.2.post1
|
| 4 |
+
Summary: High level compatibility layer for multiple asynchronous event loop implementations
|
| 5 |
+
Author-email: Alex Grönholm <alex.gronholm@nextday.fi>
|
| 6 |
+
License: MIT
|
| 7 |
+
Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/
|
| 8 |
+
Project-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html
|
| 9 |
+
Project-URL: Source code, https://github.com/agronholm/anyio
|
| 10 |
+
Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues
|
| 11 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 12 |
+
Classifier: Intended Audience :: Developers
|
| 13 |
+
Classifier: License :: OSI Approved :: MIT License
|
| 14 |
+
Classifier: Framework :: AnyIO
|
| 15 |
+
Classifier: Typing :: Typed
|
| 16 |
+
Classifier: Programming Language :: Python
|
| 17 |
+
Classifier: Programming Language :: Python :: 3
|
| 18 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.13
|
| 23 |
+
Requires-Python: >=3.9
|
| 24 |
+
Description-Content-Type: text/x-rst
|
| 25 |
+
License-File: LICENSE
|
| 26 |
+
Requires-Dist: idna >=2.8
|
| 27 |
+
Requires-Dist: sniffio >=1.1
|
| 28 |
+
Requires-Dist: exceptiongroup >=1.0.2 ; python_version < "3.11"
|
| 29 |
+
Requires-Dist: typing-extensions >=4.1 ; python_version < "3.11"
|
| 30 |
+
Provides-Extra: doc
|
| 31 |
+
Requires-Dist: packaging ; extra == 'doc'
|
| 32 |
+
Requires-Dist: Sphinx ~=7.4 ; extra == 'doc'
|
| 33 |
+
Requires-Dist: sphinx-rtd-theme ; extra == 'doc'
|
| 34 |
+
Requires-Dist: sphinx-autodoc-typehints >=1.2.0 ; extra == 'doc'
|
| 35 |
+
Provides-Extra: test
|
| 36 |
+
Requires-Dist: anyio[trio] ; extra == 'test'
|
| 37 |
+
Requires-Dist: coverage[toml] >=7 ; extra == 'test'
|
| 38 |
+
Requires-Dist: exceptiongroup >=1.2.0 ; extra == 'test'
|
| 39 |
+
Requires-Dist: hypothesis >=4.0 ; extra == 'test'
|
| 40 |
+
Requires-Dist: psutil >=5.9 ; extra == 'test'
|
| 41 |
+
Requires-Dist: pytest >=7.0 ; extra == 'test'
|
| 42 |
+
Requires-Dist: pytest-mock >=3.6.1 ; extra == 'test'
|
| 43 |
+
Requires-Dist: trustme ; extra == 'test'
|
| 44 |
+
Requires-Dist: uvloop >=0.21.0b1 ; (platform_python_implementation == "CPython" and platform_system != "Windows") and extra == 'test'
|
| 45 |
+
Requires-Dist: truststore >=0.9.1 ; (python_version >= "3.10") and extra == 'test'
|
| 46 |
+
Provides-Extra: trio
|
| 47 |
+
Requires-Dist: trio >=0.26.1 ; extra == 'trio'
|
| 48 |
+
|
| 49 |
+
.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
|
| 50 |
+
:target: https://github.com/agronholm/anyio/actions/workflows/test.yml
|
| 51 |
+
:alt: Build Status
|
| 52 |
+
.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
|
| 53 |
+
:target: https://coveralls.io/github/agronholm/anyio?branch=master
|
| 54 |
+
:alt: Code Coverage
|
| 55 |
+
.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
|
| 56 |
+
:target: https://anyio.readthedocs.io/en/latest/?badge=latest
|
| 57 |
+
:alt: Documentation
|
| 58 |
+
.. image:: https://badges.gitter.im/gitterHQ/gitter.svg
|
| 59 |
+
:target: https://gitter.im/python-trio/AnyIO
|
| 60 |
+
:alt: Gitter chat
|
| 61 |
+
|
| 62 |
+
AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or
|
| 63 |
+
trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony
|
| 64 |
+
with the native SC of trio itself.
|
| 65 |
+
|
| 66 |
+
Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
|
| 67 |
+
trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full
|
| 68 |
+
refactoring necessary. It will blend in with the native libraries of your chosen backend.
|
| 69 |
+
|
| 70 |
+
Documentation
|
| 71 |
+
-------------
|
| 72 |
+
|
| 73 |
+
View full documentation at: https://anyio.readthedocs.io/
|
| 74 |
+
|
| 75 |
+
Features
|
| 76 |
+
--------
|
| 77 |
+
|
| 78 |
+
AnyIO offers the following functionality:
|
| 79 |
+
|
| 80 |
+
* Task groups (nurseries_ in trio terminology)
|
| 81 |
+
* High-level networking (TCP, UDP and UNIX sockets)
|
| 82 |
+
|
| 83 |
+
* `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python
|
| 84 |
+
3.8)
|
| 85 |
+
* async/await style UDP sockets (unlike asyncio where you still have to use Transports and
|
| 86 |
+
Protocols)
|
| 87 |
+
|
| 88 |
+
* A versatile API for byte streams and object streams
|
| 89 |
+
* Inter-task synchronization and communication (locks, conditions, events, semaphores, object
|
| 90 |
+
streams)
|
| 91 |
+
* Worker threads
|
| 92 |
+
* Subprocesses
|
| 93 |
+
* Asynchronous file I/O (using worker threads)
|
| 94 |
+
* Signal handling
|
| 95 |
+
|
| 96 |
+
AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures.
|
| 97 |
+
It even works with the popular Hypothesis_ library.
|
| 98 |
+
|
| 99 |
+
.. _asyncio: https://docs.python.org/3/library/asyncio.html
|
| 100 |
+
.. _trio: https://github.com/python-trio/trio
|
| 101 |
+
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
|
| 102 |
+
.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
|
| 103 |
+
.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
|
| 104 |
+
.. _pytest: https://docs.pytest.org/en/latest/
|
| 105 |
+
.. _Hypothesis: https://hypothesis.works/
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/RECORD
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
anyio-4.6.2.post1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
anyio-4.6.2.post1.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081
|
| 3 |
+
anyio-4.6.2.post1.dist-info/METADATA,sha256=-tUagL58CG66oT2eLY1593L_yXsIb6xW0oouVCQsE5c,4697
|
| 4 |
+
anyio-4.6.2.post1.dist-info/RECORD,,
|
| 5 |
+
anyio-4.6.2.post1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
anyio-4.6.2.post1.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
|
| 7 |
+
anyio-4.6.2.post1.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39
|
| 8 |
+
anyio-4.6.2.post1.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6
|
| 9 |
+
anyio/__init__.py,sha256=myTIdg75VPwA-9L7BpislRQplJUPMeleUBHa4MyIruw,4315
|
| 10 |
+
anyio/__pycache__/__init__.cpython-310.pyc,,
|
| 11 |
+
anyio/__pycache__/from_thread.cpython-310.pyc,,
|
| 12 |
+
anyio/__pycache__/lowlevel.cpython-310.pyc,,
|
| 13 |
+
anyio/__pycache__/pytest_plugin.cpython-310.pyc,,
|
| 14 |
+
anyio/__pycache__/to_process.cpython-310.pyc,,
|
| 15 |
+
anyio/__pycache__/to_thread.cpython-310.pyc,,
|
| 16 |
+
anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 17 |
+
anyio/_backends/__pycache__/__init__.cpython-310.pyc,,
|
| 18 |
+
anyio/_backends/__pycache__/_asyncio.cpython-310.pyc,,
|
| 19 |
+
anyio/_backends/__pycache__/_trio.cpython-310.pyc,,
|
| 20 |
+
anyio/_backends/_asyncio.py,sha256=H3rMz2wquSxPnV4KIXpXGtBFWXk3jkFljrzvk7KWk4E,91497
|
| 21 |
+
anyio/_backends/_trio.py,sha256=wfgvQ2ut2CAxOjcuDLAdrucfEgc02XXRN9aC3IEBHdY,40311
|
| 22 |
+
anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 23 |
+
anyio/_core/__pycache__/__init__.cpython-310.pyc,,
|
| 24 |
+
anyio/_core/__pycache__/_eventloop.cpython-310.pyc,,
|
| 25 |
+
anyio/_core/__pycache__/_exceptions.cpython-310.pyc,,
|
| 26 |
+
anyio/_core/__pycache__/_fileio.cpython-310.pyc,,
|
| 27 |
+
anyio/_core/__pycache__/_resources.cpython-310.pyc,,
|
| 28 |
+
anyio/_core/__pycache__/_signals.cpython-310.pyc,,
|
| 29 |
+
anyio/_core/__pycache__/_sockets.cpython-310.pyc,,
|
| 30 |
+
anyio/_core/__pycache__/_streams.cpython-310.pyc,,
|
| 31 |
+
anyio/_core/__pycache__/_subprocesses.cpython-310.pyc,,
|
| 32 |
+
anyio/_core/__pycache__/_synchronization.cpython-310.pyc,,
|
| 33 |
+
anyio/_core/__pycache__/_tasks.cpython-310.pyc,,
|
| 34 |
+
anyio/_core/__pycache__/_testing.cpython-310.pyc,,
|
| 35 |
+
anyio/_core/__pycache__/_typedattr.cpython-310.pyc,,
|
| 36 |
+
anyio/_core/_eventloop.py,sha256=t_tAwBFPjF8jrZGjlJ6bbYy6KA3bjsbZxV9mvh9t1i0,4695
|
| 37 |
+
anyio/_core/_exceptions.py,sha256=NPxECdXkG4nk3NOCUeFmBEAgPhmj7Bzs4vFAKaW_vqw,2481
|
| 38 |
+
anyio/_core/_fileio.py,sha256=lbGk3xq_6DhvbEI8ykdFf2NjYnhuyc8hjXKZTLYkW4k,20961
|
| 39 |
+
anyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435
|
| 40 |
+
anyio/_core/_signals.py,sha256=vulT1M1xdLYtAR-eY5TamIgaf1WTlOwOrMGwswlTTr8,905
|
| 41 |
+
anyio/_core/_sockets.py,sha256=iM3UeMU68n0PlQjl2U9HyiOpV26rnjqV4KBr_Fo2z1I,24293
|
| 42 |
+
anyio/_core/_streams.py,sha256=OnaKgoDD-FcMSwLvkoAUGP51sG2ZdRvMpxt9q2w1gYA,1804
|
| 43 |
+
anyio/_core/_subprocesses.py,sha256=WquR6sHrnaZofaeqnL8U4Yv___msVW_WqivleLHK4zI,7760
|
| 44 |
+
anyio/_core/_synchronization.py,sha256=UDsbG5f8jWsWkRxYUOKp_WOBWCI9-vBO6wBrsR6WNjA,20121
|
| 45 |
+
anyio/_core/_tasks.py,sha256=pvVEX2Fw159sf0ypAPerukKsZgRRwvFFedVW52nR2Vk,4764
|
| 46 |
+
anyio/_core/_testing.py,sha256=YUGwA5cgFFbUTv4WFd7cv_BSVr4ryTtPp8owQA3JdWE,2118
|
| 47 |
+
anyio/_core/_typedattr.py,sha256=P4ozZikn3-DbpoYcvyghS_FOYAgbmUxeoU8-L_07pZM,2508
|
| 48 |
+
anyio/abc/__init__.py,sha256=U44_s3BglL8BojWQiq0KuokvCqkunIp-ySH3GyRXxAc,2681
|
| 49 |
+
anyio/abc/__pycache__/__init__.cpython-310.pyc,,
|
| 50 |
+
anyio/abc/__pycache__/_eventloop.cpython-310.pyc,,
|
| 51 |
+
anyio/abc/__pycache__/_resources.cpython-310.pyc,,
|
| 52 |
+
anyio/abc/__pycache__/_sockets.cpython-310.pyc,,
|
| 53 |
+
anyio/abc/__pycache__/_streams.cpython-310.pyc,,
|
| 54 |
+
anyio/abc/__pycache__/_subprocesses.cpython-310.pyc,,
|
| 55 |
+
anyio/abc/__pycache__/_tasks.cpython-310.pyc,,
|
| 56 |
+
anyio/abc/__pycache__/_testing.cpython-310.pyc,,
|
| 57 |
+
anyio/abc/_eventloop.py,sha256=kdkLSnizMk3tPq61K109iPUQ6uXpvp1uNsj5aP1s0N8,9619
|
| 58 |
+
anyio/abc/_resources.py,sha256=DrYvkNN1hH6Uvv5_5uKySvDsnknGVDe8FCKfko0VtN8,783
|
| 59 |
+
anyio/abc/_sockets.py,sha256=KhWtJxan8jpBXKwPaFeQzI4iRXdFaOIn0HXtDZnaO7U,6262
|
| 60 |
+
anyio/abc/_streams.py,sha256=GzST5Q2zQmxVzdrAqtbSyHNxkPlIC9AzeZJg_YyPAXw,6598
|
| 61 |
+
anyio/abc/_subprocesses.py,sha256=cumAPJTktOQtw63IqG0lDpyZqu_l1EElvQHMiwJgL08,2067
|
| 62 |
+
anyio/abc/_tasks.py,sha256=0Jc6oIwUjMIVReehF6knOZyAqlgwDt4TP1NQkx4IQGw,2731
|
| 63 |
+
anyio/abc/_testing.py,sha256=tBJUzkSfOXJw23fe8qSJ03kJlShOYjjaEyFB6k6MYT8,1821
|
| 64 |
+
anyio/from_thread.py,sha256=dbi5TUH45_Sg_jZ8Vv1NJWVohe0WeQ_OaCvXIKveAGg,17478
|
| 65 |
+
anyio/lowlevel.py,sha256=nkgmW--SdxGVp0cmLUYazjkigveRm5HY7-gW8Bpp9oY,4169
|
| 66 |
+
anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 67 |
+
anyio/pytest_plugin.py,sha256=vjGhGRHD31OyMgJRFQrMvExhx3Ea8KbyDqYKmiSDdXA,6712
|
| 68 |
+
anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 69 |
+
anyio/streams/__pycache__/__init__.cpython-310.pyc,,
|
| 70 |
+
anyio/streams/__pycache__/buffered.cpython-310.pyc,,
|
| 71 |
+
anyio/streams/__pycache__/file.cpython-310.pyc,,
|
| 72 |
+
anyio/streams/__pycache__/memory.cpython-310.pyc,,
|
| 73 |
+
anyio/streams/__pycache__/stapled.cpython-310.pyc,,
|
| 74 |
+
anyio/streams/__pycache__/text.cpython-310.pyc,,
|
| 75 |
+
anyio/streams/__pycache__/tls.cpython-310.pyc,,
|
| 76 |
+
anyio/streams/buffered.py,sha256=UCldKC168YuLvT7n3HtNPnQ2iWAMSTYQWbZvzLwMwkM,4500
|
| 77 |
+
anyio/streams/file.py,sha256=6uoTNb5KbMoj-6gS3_xrrL8uZN8Q4iIvOS1WtGyFfKw,4383
|
| 78 |
+
anyio/streams/memory.py,sha256=j8AyOExK4-UPaon_Xbhwax25Vqs0DwFg3ZXc-EIiHjY,10550
|
| 79 |
+
anyio/streams/stapled.py,sha256=U09pCrmOw9kkNhe6tKopsm1QIMT1lFTFvtb-A7SIe4k,4302
|
| 80 |
+
anyio/streams/text.py,sha256=6x8w8xlfCZKTUWQoJiMPoMhSSJFUBRKgoBNSBtbd9yg,5094
|
| 81 |
+
anyio/streams/tls.py,sha256=m3AE2LVSpoRHSIwSoSCupiOVL54EvOFoY3CcwTxcZfg,12742
|
| 82 |
+
anyio/to_process.py,sha256=cR4n7TssbbJowE_9cWme49zaeuoBuMzqgZ6cBIs0YIs,9571
|
| 83 |
+
anyio/to_thread.py,sha256=WM2JQ2MbVsd5D5CM08bQiTwzZIvpsGjfH1Fy247KoDQ,2396
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/REQUESTED
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: setuptools (75.1.0)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/entry_points.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[pytest11]
|
| 2 |
+
anyio = anyio.pytest_plugin
|
llava_next/lib/python3.10/site-packages/anyio-4.6.2.post1.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
anyio
|
llava_next/lib/python3.10/site-packages/bitsandbytes/__init__.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
#
|
| 3 |
+
# This source code is licensed under the MIT license found in the
|
| 4 |
+
# LICENSE file in the root directory of this source tree.
|
| 5 |
+
|
| 6 |
+
from . import research, utils
|
| 7 |
+
from .autograd._functions import (
|
| 8 |
+
MatmulLtState,
|
| 9 |
+
bmm_cublas,
|
| 10 |
+
matmul,
|
| 11 |
+
matmul_4bit,
|
| 12 |
+
matmul_cublas,
|
| 13 |
+
mm_cublas,
|
| 14 |
+
)
|
| 15 |
+
from .nn import modules
|
| 16 |
+
from .optim import adam
|
| 17 |
+
|
| 18 |
+
__pdoc__ = {
|
| 19 |
+
"libbitsandbytes": False,
|
| 20 |
+
"optim.optimizer.Optimizer8bit": False,
|
| 21 |
+
"optim.optimizer.MockArgs": False,
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
__version__ = "0.44.1"
|
llava_next/lib/python3.10/site-packages/bitsandbytes/__main__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
if __name__ == "__main__":
|
| 2 |
+
from bitsandbytes.diagnostics.main import main
|
| 3 |
+
|
| 4 |
+
main()
|
llava_next/lib/python3.10/site-packages/bitsandbytes/consts.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pathlib import Path
|
| 2 |
+
import platform
|
| 3 |
+
|
| 4 |
+
DYNAMIC_LIBRARY_SUFFIX = {
|
| 5 |
+
"Darwin": ".dylib",
|
| 6 |
+
"Linux": ".so",
|
| 7 |
+
"Windows": ".dll",
|
| 8 |
+
}.get(platform.system(), ".so")
|
| 9 |
+
|
| 10 |
+
PACKAGE_DIR = Path(__file__).parent
|
| 11 |
+
PACKAGE_GITHUB_URL = "https://github.com/TimDettmers/bitsandbytes"
|
| 12 |
+
NONPYTORCH_DOC_URL = "https://github.com/TimDettmers/bitsandbytes/blob/main/docs/source/nonpytorchcuda.mdx"
|
llava_next/lib/python3.10/site-packages/bitsandbytes/cuda_specs.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import dataclasses
|
| 2 |
+
from typing import List, Optional, Tuple
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@dataclasses.dataclass(frozen=True)
|
| 8 |
+
class CUDASpecs:
|
| 9 |
+
highest_compute_capability: Tuple[int, int]
|
| 10 |
+
cuda_version_string: str
|
| 11 |
+
cuda_version_tuple: Tuple[int, int]
|
| 12 |
+
|
| 13 |
+
@property
|
| 14 |
+
def has_cublaslt(self) -> bool:
|
| 15 |
+
return self.highest_compute_capability >= (7, 5)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_compute_capabilities() -> List[Tuple[int, int]]:
|
| 19 |
+
return sorted(torch.cuda.get_device_capability(torch.cuda.device(i)) for i in range(torch.cuda.device_count()))
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_cuda_version_tuple() -> Tuple[int, int]:
|
| 23 |
+
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
|
| 24 |
+
major, minor = map(int, torch.version.cuda.split("."))
|
| 25 |
+
return major, minor
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def get_cuda_version_string() -> str:
|
| 29 |
+
major, minor = get_cuda_version_tuple()
|
| 30 |
+
return f"{major}{minor}"
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def get_cuda_specs() -> Optional[CUDASpecs]:
|
| 34 |
+
if not torch.cuda.is_available():
|
| 35 |
+
return None
|
| 36 |
+
|
| 37 |
+
return CUDASpecs(
|
| 38 |
+
highest_compute_capability=(get_compute_capabilities()[-1]),
|
| 39 |
+
cuda_version_string=(get_cuda_version_string()),
|
| 40 |
+
cuda_version_tuple=get_cuda_version_tuple(),
|
| 41 |
+
)
|
llava_next/lib/python3.10/site-packages/bitsandbytes/libbitsandbytes_cpu.so
ADDED
|
Binary file (32.8 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/AUTHORS
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Creator: Sebastian Thiel
|
| 2 |
+
|
| 3 |
+
Contributors:
|
| 4 |
+
- Ram Rachum (@cool-RR)
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (C) 2010, 2011 Sebastian Thiel and contributors
|
| 2 |
+
All rights reserved.
|
| 3 |
+
|
| 4 |
+
Redistribution and use in source and binary forms, with or without
|
| 5 |
+
modification, are permitted provided that the following conditions
|
| 6 |
+
are met:
|
| 7 |
+
|
| 8 |
+
* Redistributions of source code must retain the above copyright
|
| 9 |
+
notice, this list of conditions and the following disclaimer.
|
| 10 |
+
|
| 11 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 12 |
+
notice, this list of conditions and the following disclaimer in the
|
| 13 |
+
documentation and/or other materials provided with the distribution.
|
| 14 |
+
|
| 15 |
+
* Neither the name of the GitDB project nor the names of
|
| 16 |
+
its contributors may be used to endorse or promote products derived
|
| 17 |
+
from this software without specific prior written permission.
|
| 18 |
+
|
| 19 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 20 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 21 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 22 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 23 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 24 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
| 25 |
+
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
| 26 |
+
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
| 27 |
+
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
| 28 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 29 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
Additional Licenses
|
| 33 |
+
-------------------
|
| 34 |
+
The files at
|
| 35 |
+
gitdb/test/fixtures/packs/pack-11fdfa9e156ab73caae3b6da867192221f2089c2.idx
|
| 36 |
+
and
|
| 37 |
+
gitdb/test/fixtures/packs/pack-11fdfa9e156ab73caae3b6da867192221f2089c2.pack
|
| 38 |
+
are licensed under GNU GPL as part of the git source repository,
|
| 39 |
+
see http://en.wikipedia.org/wiki/Git_%28software%29 for more information.
|
| 40 |
+
|
| 41 |
+
They are not required for the actual operation, which is why they are not found
|
| 42 |
+
in the distribution package.
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/METADATA
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: gitdb
|
| 3 |
+
Version: 4.0.11
|
| 4 |
+
Summary: Git Object Database
|
| 5 |
+
Home-page: https://github.com/gitpython-developers/gitdb
|
| 6 |
+
Author: Sebastian Thiel
|
| 7 |
+
Author-email: byronimo@gmail.com
|
| 8 |
+
License: BSD License
|
| 9 |
+
Classifier: Development Status :: 5 - Production/Stable
|
| 10 |
+
Classifier: Environment :: Console
|
| 11 |
+
Classifier: Intended Audience :: Developers
|
| 12 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 13 |
+
Classifier: Operating System :: OS Independent
|
| 14 |
+
Classifier: Operating System :: POSIX
|
| 15 |
+
Classifier: Operating System :: Microsoft :: Windows
|
| 16 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
| 17 |
+
Classifier: Programming Language :: Python
|
| 18 |
+
Classifier: Programming Language :: Python :: 3
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.12
|
| 25 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 26 |
+
Requires-Python: >=3.7
|
| 27 |
+
License-File: LICENSE
|
| 28 |
+
License-File: AUTHORS
|
| 29 |
+
Requires-Dist: smmap <6,>=3.0.1
|
| 30 |
+
|
| 31 |
+
GitDB is a pure-Python git object database
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/RECORD
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gitdb-4.0.11.dist-info/AUTHORS,sha256=aUmmuuKGJrGDzN5i-dDIbj00R1IOPcFTZDWznhEwZuM,66
|
| 2 |
+
gitdb-4.0.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 3 |
+
gitdb-4.0.11.dist-info/LICENSE,sha256=79KfWWoI6IV-aOdpSlC82nKDl5LafD8EG8v_XxgAkjk,1984
|
| 4 |
+
gitdb-4.0.11.dist-info/METADATA,sha256=7X3m4N9rVUD2-HtGkVm3olIWk3Ld5m_lkXlDyzuPv14,1180
|
| 5 |
+
gitdb-4.0.11.dist-info/RECORD,,
|
| 6 |
+
gitdb-4.0.11.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 7 |
+
gitdb-4.0.11.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
|
| 8 |
+
gitdb-4.0.11.dist-info/top_level.txt,sha256=ss6atT8cG4mQuAYXO6PokJ0r4Mm5cBiDbKsu2e3YHfs,6
|
| 9 |
+
gitdb/__init__.py,sha256=DMI-8DaXN1M41QkJAFg7KP_pnXGi6XbjxM9J_B1s29I,966
|
| 10 |
+
gitdb/__pycache__/__init__.cpython-310.pyc,,
|
| 11 |
+
gitdb/__pycache__/base.cpython-310.pyc,,
|
| 12 |
+
gitdb/__pycache__/const.cpython-310.pyc,,
|
| 13 |
+
gitdb/__pycache__/exc.cpython-310.pyc,,
|
| 14 |
+
gitdb/__pycache__/fun.cpython-310.pyc,,
|
| 15 |
+
gitdb/__pycache__/pack.cpython-310.pyc,,
|
| 16 |
+
gitdb/__pycache__/stream.cpython-310.pyc,,
|
| 17 |
+
gitdb/__pycache__/typ.cpython-310.pyc,,
|
| 18 |
+
gitdb/__pycache__/util.cpython-310.pyc,,
|
| 19 |
+
gitdb/base.py,sha256=krg61c_FKEvn4KAcX6pjH-hlpFfRCcM7mrki4Hnmxew,8023
|
| 20 |
+
gitdb/const.py,sha256=WWmEYKNDdm3J9fxYTFT_B6-QLDSMBClbz0LSBa1D1S8,90
|
| 21 |
+
gitdb/db/__init__.py,sha256=aQTZnxpfk6c76X1ubZzO9pBq4STnL6hCoGxZzXurEZ4,371
|
| 22 |
+
gitdb/db/__pycache__/__init__.cpython-310.pyc,,
|
| 23 |
+
gitdb/db/__pycache__/base.cpython-310.pyc,,
|
| 24 |
+
gitdb/db/__pycache__/git.cpython-310.pyc,,
|
| 25 |
+
gitdb/db/__pycache__/loose.cpython-310.pyc,,
|
| 26 |
+
gitdb/db/__pycache__/mem.cpython-310.pyc,,
|
| 27 |
+
gitdb/db/__pycache__/pack.cpython-310.pyc,,
|
| 28 |
+
gitdb/db/__pycache__/ref.cpython-310.pyc,,
|
| 29 |
+
gitdb/db/base.py,sha256=_qpZ1VzwpoTcU3_-IIXmwIW0p63HXrtJ5jWNgjrLjRY,9061
|
| 30 |
+
gitdb/db/git.py,sha256=b5O01eZsnzldruzEENvqiw8Q7Daz2iETrlAqH0aggdE,2666
|
| 31 |
+
gitdb/db/loose.py,sha256=UPsy-VHJOPBe7ND0224c6ai1ykutzrGxFqK5ZTPRKnk,8036
|
| 32 |
+
gitdb/db/mem.py,sha256=VDscunT4EtRo2GBE52lup1ki3si8enImUSKmSVq8kwc,3343
|
| 33 |
+
gitdb/db/pack.py,sha256=iZPDF6rQ-kCrgbdsZRA83CL-zWsoQaZxo5rcTarjAoQ,7285
|
| 34 |
+
gitdb/db/ref.py,sha256=rGOzF3cAOBW9i0Owz_Iw6c4K5BgbYFwk5btMaXGvCoc,2591
|
| 35 |
+
gitdb/exc.py,sha256=B0N6I2rctCwd0Upoq2laTigbVZNOZ38nfobRcKDBtV4,1496
|
| 36 |
+
gitdb/fun.py,sha256=jw3paQMgxexUn2NENFavLXfABfNvDBRfRCvv2VHkvms,23243
|
| 37 |
+
gitdb/pack.py,sha256=buboXaTzXKEedNX5j1zIUIPhnLmHkDRl7Rg9ftclt14,39228
|
| 38 |
+
gitdb/stream.py,sha256=0-BWcggWWZniMXGGaAe3MR6iH9DBt5ir9YtgE2_rKKw,27541
|
| 39 |
+
gitdb/test/__init__.py,sha256=4cJQwIMepzCxK0hdxDegJGxNVK4dungjbPzCUJc1ZIU,204
|
| 40 |
+
gitdb/test/__pycache__/__init__.cpython-310.pyc,,
|
| 41 |
+
gitdb/test/__pycache__/lib.cpython-310.pyc,,
|
| 42 |
+
gitdb/test/__pycache__/test_base.cpython-310.pyc,,
|
| 43 |
+
gitdb/test/__pycache__/test_example.cpython-310.pyc,,
|
| 44 |
+
gitdb/test/__pycache__/test_pack.cpython-310.pyc,,
|
| 45 |
+
gitdb/test/__pycache__/test_stream.cpython-310.pyc,,
|
| 46 |
+
gitdb/test/__pycache__/test_util.cpython-310.pyc,,
|
| 47 |
+
gitdb/test/lib.py,sha256=iXBcJ3RDAN0uj9NbyEzt90TuCsDFt3SjHA61Ly6Xyxs,5489
|
| 48 |
+
gitdb/test/test_base.py,sha256=tIS3b_YnV5_rPjkneSNzrME-fQDfGKL3sr_4nvujVho,2822
|
| 49 |
+
gitdb/test/test_example.py,sha256=In7WgwvrGCbxPcuLCmkVIs4kdhCJ8rtcEvp115fHeU4,1350
|
| 50 |
+
gitdb/test/test_pack.py,sha256=bbkupZPBFlpjBcPZTZ0OmRrTA9I3-fVdfwLtNa14_3s,9228
|
| 51 |
+
gitdb/test/test_stream.py,sha256=RwSB49q6JHM7EFW413CZQeGRfkK92aQ0IVfTocIa3bo,5727
|
| 52 |
+
gitdb/test/test_util.py,sha256=WQKQBP2uPF4wwNJFlUd9-YE2Q3CmlgpoY4MRk5G0r54,3243
|
| 53 |
+
gitdb/typ.py,sha256=dZlbzfy5RFNHiZHEOwVy-6T-aZ3xLv0mGaJVyxKBd0M,373
|
| 54 |
+
gitdb/util.py,sha256=F3bE24b2QDihz4tqc3Km7wbC6N65EjwFoao-yzrRJSY,12302
|
| 55 |
+
gitdb/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 56 |
+
gitdb/utils/__pycache__/__init__.cpython-310.pyc,,
|
| 57 |
+
gitdb/utils/__pycache__/encoding.cpython-310.pyc,,
|
| 58 |
+
gitdb/utils/encoding.py,sha256=ceZZFb86LGJ71cwW6qkq_BFquAlNE7jaafNbwxYRSXk,372
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/REQUESTED
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.41.2)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
llava_next/lib/python3.10/site-packages/gitdb-4.0.11.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
gitdb
|
llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/METADATA
ADDED
|
@@ -0,0 +1,542 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: httpcore
|
| 3 |
+
Version: 0.17.3
|
| 4 |
+
Summary: A minimal low-level HTTP client.
|
| 5 |
+
Home-page: https://github.com/encode/httpcore
|
| 6 |
+
Author: Tom Christie
|
| 7 |
+
Author-email: tom@tomchristie.com
|
| 8 |
+
License: BSD
|
| 9 |
+
Project-URL: Documentation, https://www.encode.io/httpcore
|
| 10 |
+
Project-URL: Source, https://github.com/encode/httpcore
|
| 11 |
+
Classifier: Development Status :: 3 - Alpha
|
| 12 |
+
Classifier: Environment :: Web Environment
|
| 13 |
+
Classifier: Intended Audience :: Developers
|
| 14 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 15 |
+
Classifier: Operating System :: OS Independent
|
| 16 |
+
Classifier: Topic :: Internet :: WWW/HTTP
|
| 17 |
+
Classifier: Framework :: AsyncIO
|
| 18 |
+
Classifier: Framework :: Trio
|
| 19 |
+
Classifier: Programming Language :: Python :: 3
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.10
|
| 24 |
+
Classifier: Programming Language :: Python :: 3.11
|
| 25 |
+
Classifier: Programming Language :: Python :: 3 :: Only
|
| 26 |
+
Requires-Python: >=3.7
|
| 27 |
+
Description-Content-Type: text/markdown
|
| 28 |
+
License-File: LICENSE.md
|
| 29 |
+
Requires-Dist: h11 (<0.15,>=0.13)
|
| 30 |
+
Requires-Dist: sniffio (==1.*)
|
| 31 |
+
Requires-Dist: anyio (<5.0,>=3.0)
|
| 32 |
+
Requires-Dist: certifi
|
| 33 |
+
Provides-Extra: http2
|
| 34 |
+
Requires-Dist: h2 (<5,>=3) ; extra == 'http2'
|
| 35 |
+
Provides-Extra: socks
|
| 36 |
+
Requires-Dist: socksio (==1.*) ; extra == 'socks'
|
| 37 |
+
|
| 38 |
+
# HTTP Core
|
| 39 |
+
|
| 40 |
+
[](https://github.com/encode/httpcore/actions)
|
| 41 |
+
[](https://pypi.org/project/httpcore/)
|
| 42 |
+
|
| 43 |
+
> *Do one thing, and do it well.*
|
| 44 |
+
|
| 45 |
+
The HTTP Core package provides a minimal low-level HTTP client, which does
|
| 46 |
+
one thing only. Sending HTTP requests.
|
| 47 |
+
|
| 48 |
+
It does not provide any high level model abstractions over the API,
|
| 49 |
+
does not handle redirects, multipart uploads, building authentication headers,
|
| 50 |
+
transparent HTTP caching, URL parsing, session cookie handling,
|
| 51 |
+
content or charset decoding, handling JSON, environment based configuration
|
| 52 |
+
defaults, or any of that Jazz.
|
| 53 |
+
|
| 54 |
+
Some things HTTP Core does do:
|
| 55 |
+
|
| 56 |
+
* Sending HTTP requests.
|
| 57 |
+
* Thread-safe / task-safe connection pooling.
|
| 58 |
+
* HTTP(S) proxy & SOCKS proxy support.
|
| 59 |
+
* Supports HTTP/1.1 and HTTP/2.
|
| 60 |
+
* Provides both sync and async interfaces.
|
| 61 |
+
* Async backend support for `asyncio` and `trio`.
|
| 62 |
+
|
| 63 |
+
## Requirements
|
| 64 |
+
|
| 65 |
+
Python 3.7+
|
| 66 |
+
|
| 67 |
+
## Installation
|
| 68 |
+
|
| 69 |
+
For HTTP/1.1 only support, install with:
|
| 70 |
+
|
| 71 |
+
```shell
|
| 72 |
+
$ pip install httpcore
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
For HTTP/1.1 and HTTP/2 support, install with:
|
| 76 |
+
|
| 77 |
+
```shell
|
| 78 |
+
$ pip install httpcore[http2]
|
| 79 |
+
```
|
| 80 |
+
|
| 81 |
+
For SOCKS proxy support, install with:
|
| 82 |
+
|
| 83 |
+
```shell
|
| 84 |
+
$ pip install httpcore[socks]
|
| 85 |
+
```
|
| 86 |
+
|
| 87 |
+
# Sending requests
|
| 88 |
+
|
| 89 |
+
Send an HTTP request:
|
| 90 |
+
|
| 91 |
+
```python
|
| 92 |
+
import httpcore
|
| 93 |
+
|
| 94 |
+
response = httpcore.request("GET", "https://www.example.com/")
|
| 95 |
+
|
| 96 |
+
print(response)
|
| 97 |
+
# <Response [200]>
|
| 98 |
+
print(response.status)
|
| 99 |
+
# 200
|
| 100 |
+
print(response.headers)
|
| 101 |
+
# [(b'Accept-Ranges', b'bytes'), (b'Age', b'557328'), (b'Cache-Control', b'max-age=604800'), ...]
|
| 102 |
+
print(response.content)
|
| 103 |
+
# b'<!doctype html>\n<html>\n<head>\n<title>Example Domain</title>\n\n<meta charset="utf-8"/>\n ...'
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
The top-level `httpcore.request()` function is provided for convenience. In practice whenever you're working with `httpcore` you'll want to use the connection pooling functionality that it provides.
|
| 107 |
+
|
| 108 |
+
```python
|
| 109 |
+
import httpcore
|
| 110 |
+
|
| 111 |
+
http = httpcore.ConnectionPool()
|
| 112 |
+
response = http.request("GET", "https://www.example.com/")
|
| 113 |
+
```
|
| 114 |
+
|
| 115 |
+
Once you're ready to get going, [head over to the documentation](https://www.encode.io/httpcore/).
|
| 116 |
+
|
| 117 |
+
## Motivation
|
| 118 |
+
|
| 119 |
+
You *probably* don't want to be using HTTP Core directly. It might make sense if
|
| 120 |
+
you're writing something like a proxy service in Python, and you just want
|
| 121 |
+
something at the lowest possible level, but more typically you'll want to use
|
| 122 |
+
a higher level client library, such as `httpx`.
|
| 123 |
+
|
| 124 |
+
The motivation for `httpcore` is:
|
| 125 |
+
|
| 126 |
+
* To provide a reusable low-level client library, that other packages can then build on top of.
|
| 127 |
+
* To provide a *really clear interface split* between the networking code and client logic,
|
| 128 |
+
so that each is easier to understand and reason about in isolation.
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# Changelog
|
| 132 |
+
|
| 133 |
+
All notable changes to this project will be documented in this file.
|
| 134 |
+
|
| 135 |
+
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
|
| 136 |
+
|
| 137 |
+
## 0.17.3 (5th July 2023)
|
| 138 |
+
|
| 139 |
+
- Support async cancellations, ensuring that the connection pool is left in a clean state when cancellations occur. (#726)
|
| 140 |
+
- The networking backend interface has [been added to the public API](https://www.encode.io/httpcore/network-backends). Some classes which were previously private implementation detail are now part of the top-level public API. (#699)
|
| 141 |
+
- Graceful handling of HTTP/2 GoAway frames, with requests being transparently retried on a new connection. (#730)
|
| 142 |
+
- Add exceptions when a synchronous `trace callback` is passed to an asynchronous request or an asynchronous `trace callback` is passed to a synchronous request. (#717)
|
| 143 |
+
|
| 144 |
+
## 0.17.2 (May 23th, 2023)
|
| 145 |
+
|
| 146 |
+
- Add `socket_options` argument to `ConnectionPool` and `HTTProxy` classes. (#668)
|
| 147 |
+
- Improve logging with per-module logger names. (#690)
|
| 148 |
+
- Add `sni_hostname` request extension. (#696)
|
| 149 |
+
- Resolve race condition during import of `anyio` package. (#692)
|
| 150 |
+
- Enable TCP_NODELAY for all synchronous sockets. (#651)
|
| 151 |
+
|
| 152 |
+
## 0.17.1 (May 17th, 2023)
|
| 153 |
+
|
| 154 |
+
- If 'retries' is set, then allow retries if an SSL handshake error occurs. (#669)
|
| 155 |
+
- Improve correctness of tracebacks on network exceptions, by raising properly chained exceptions. (#678)
|
| 156 |
+
- Prevent connection-hanging behaviour when HTTP/2 connections are closed by a server-sent 'GoAway' frame. (#679)
|
| 157 |
+
- Fix edge-case exception when removing requests from the connection pool. (#680)
|
| 158 |
+
- Fix pool timeout edge-case. (#688)
|
| 159 |
+
|
| 160 |
+
## 0.17.0 (March 16th, 2023)
|
| 161 |
+
|
| 162 |
+
- Add DEBUG level logging. (#648)
|
| 163 |
+
- Respect HTTP/2 max concurrent streams when settings updates are sent by server. (#652)
|
| 164 |
+
- Increase the allowable HTTP header size to 100kB. (#647)
|
| 165 |
+
- Add `retries` option to SOCKS proxy classes. (#643)
|
| 166 |
+
|
| 167 |
+
## 0.16.3 (December 20th, 2022)
|
| 168 |
+
|
| 169 |
+
- Allow `ws` and `wss` schemes. Allows us to properly support websocket upgrade connections. (#625)
|
| 170 |
+
- Forwarding HTTP proxies use a connection-per-remote-host. Required by some proxy implementations. (#637)
|
| 171 |
+
- Don't raise `RuntimeError` when closing a connection pool with active connections. Removes some error cases when cancellations are used. (#631)
|
| 172 |
+
- Lazy import `anyio`, so that it's no longer a hard dependancy, and isn't imported if unused. (#639)
|
| 173 |
+
|
| 174 |
+
## 0.16.2 (November 25th, 2022)
|
| 175 |
+
|
| 176 |
+
- Revert 'Fix async cancellation behaviour', which introduced race conditions. (#627)
|
| 177 |
+
- Raise `RuntimeError` if attempting to us UNIX domain sockets on Windows. (#619)
|
| 178 |
+
|
| 179 |
+
## 0.16.1 (November 17th, 2022)
|
| 180 |
+
|
| 181 |
+
- Fix HTTP/1.1 interim informational responses, such as "100 Continue". (#605)
|
| 182 |
+
|
| 183 |
+
## 0.16.0 (October 11th, 2022)
|
| 184 |
+
|
| 185 |
+
- Support HTTP/1.1 informational responses. (#581)
|
| 186 |
+
- Fix async cancellation behaviour. (#580)
|
| 187 |
+
- Support `h11` 0.14. (#579)
|
| 188 |
+
|
| 189 |
+
## 0.15.0 (May 17th, 2022)
|
| 190 |
+
|
| 191 |
+
- Drop Python 3.6 support (#535)
|
| 192 |
+
- Ensure HTTP proxy CONNECT requests include `timeout` configuration. (#506)
|
| 193 |
+
- Switch to explicit `typing.Optional` for type hints. (#513)
|
| 194 |
+
- For `trio` map OSError exceptions to `ConnectError`. (#543)
|
| 195 |
+
|
| 196 |
+
## 0.14.7 (February 4th, 2022)
|
| 197 |
+
|
| 198 |
+
- Requests which raise a PoolTimeout need to be removed from the pool queue. (#502)
|
| 199 |
+
- Fix AttributeError that happened when Socks5Connection were terminated. (#501)
|
| 200 |
+
|
| 201 |
+
## 0.14.6 (February 1st, 2022)
|
| 202 |
+
|
| 203 |
+
- Fix SOCKS support for `http://` URLs. (#492)
|
| 204 |
+
- Resolve race condition around exceptions during streaming a response. (#491)
|
| 205 |
+
|
| 206 |
+
## 0.14.5 (January 18th, 2022)
|
| 207 |
+
|
| 208 |
+
- SOCKS proxy support. (#478)
|
| 209 |
+
- Add proxy_auth argument to HTTPProxy. (#481)
|
| 210 |
+
- Improve error message on 'RemoteProtocolError' exception when server disconnects without sending a response. (#479)
|
| 211 |
+
|
| 212 |
+
## 0.14.4 (January 5th, 2022)
|
| 213 |
+
|
| 214 |
+
- Support HTTP/2 on HTTPS tunnelling proxies. (#468)
|
| 215 |
+
- Fix proxy headers missing on HTTP forwarding. (#456)
|
| 216 |
+
- Only instantiate SSL context if required. (#457)
|
| 217 |
+
- More robust HTTP/2 handling. (#253, #439, #440, #441)
|
| 218 |
+
|
| 219 |
+
## 0.14.3 (November 17th, 2021)
|
| 220 |
+
|
| 221 |
+
- Fix race condition when removing closed connections from the pool. (#437)
|
| 222 |
+
|
| 223 |
+
## 0.14.2 (November 16th, 2021)
|
| 224 |
+
|
| 225 |
+
- Failed connections no longer remain in the pool. (Pull #433)
|
| 226 |
+
|
| 227 |
+
## 0.14.1 (November 12th, 2021)
|
| 228 |
+
|
| 229 |
+
- `max_connections` becomes optional. (Pull #429)
|
| 230 |
+
- `certifi` is now included in the install dependancies. (Pull #428)
|
| 231 |
+
- `h2` is now strictly optional. (Pull #428)
|
| 232 |
+
|
| 233 |
+
## 0.14.0 (November 11th, 2021)
|
| 234 |
+
|
| 235 |
+
The 0.14 release is a complete reworking of `httpcore`, comprehensively addressing some underlying issues in the connection pooling, as well as substantially redesigning the API to be more user friendly.
|
| 236 |
+
|
| 237 |
+
Some of the lower-level API design also makes the components more easily testable in isolation, and the package now has 100% test coverage.
|
| 238 |
+
|
| 239 |
+
See [discussion #419](https://github.com/encode/httpcore/discussions/419) for a little more background.
|
| 240 |
+
|
| 241 |
+
There's some other neat bits in there too, such as the "trace" extension, which gives a hook into inspecting the internal events that occur during the request/response cycle. This extension is needed for the HTTPX cli, in order to...
|
| 242 |
+
|
| 243 |
+
* Log the point at which the connection is established, and the IP/port on which it is made.
|
| 244 |
+
* Determine if the outgoing request should log as HTTP/1.1 or HTTP/2, rather than having to assume it's HTTP/2 if the --http2 flag was passed. (Which may not actually be true.)
|
| 245 |
+
* Log SSL version info / certificate info.
|
| 246 |
+
|
| 247 |
+
Note that `curio` support is not currently available in 0.14.0. If you're using `httpcore` with `curio` please get in touch, so we can assess if we ought to prioritize it as a feature or not.
|
| 248 |
+
|
| 249 |
+
## 0.13.7 (September 13th, 2021)
|
| 250 |
+
|
| 251 |
+
- Fix broken error messaging when URL scheme is missing, or a non HTTP(S) scheme is used. (Pull #403)
|
| 252 |
+
|
| 253 |
+
## 0.13.6 (June 15th, 2021)
|
| 254 |
+
|
| 255 |
+
### Fixed
|
| 256 |
+
|
| 257 |
+
- Close sockets when read or write timeouts occur. (Pull #365)
|
| 258 |
+
|
| 259 |
+
## 0.13.5 (June 14th, 2021)
|
| 260 |
+
|
| 261 |
+
### Fixed
|
| 262 |
+
|
| 263 |
+
- Resolved niggles with AnyIO EOF behaviours. (Pull #358, #362)
|
| 264 |
+
|
| 265 |
+
## 0.13.4 (June 9th, 2021)
|
| 266 |
+
|
| 267 |
+
### Added
|
| 268 |
+
|
| 269 |
+
- Improved error messaging when URL scheme is missing, or a non HTTP(S) scheme is used. (Pull #354)
|
| 270 |
+
|
| 271 |
+
### Fixed
|
| 272 |
+
|
| 273 |
+
- Switched to `anyio` as the default backend implementation when running with `asyncio`. Resolves some awkward [TLS timeout issues](https://github.com/encode/httpx/discussions/1511).
|
| 274 |
+
|
| 275 |
+
## 0.13.3 (May 6th, 2021)
|
| 276 |
+
|
| 277 |
+
### Added
|
| 278 |
+
|
| 279 |
+
- Support HTTP/2 prior knowledge, using `httpcore.SyncConnectionPool(http1=False)`. (Pull #333)
|
| 280 |
+
|
| 281 |
+
### Fixed
|
| 282 |
+
|
| 283 |
+
- Handle cases where environment does not provide `select.poll` support. (Pull #331)
|
| 284 |
+
|
| 285 |
+
## 0.13.2 (April 29th, 2021)
|
| 286 |
+
|
| 287 |
+
### Added
|
| 288 |
+
|
| 289 |
+
- Improve error message for specific case of `RemoteProtocolError` where server disconnects without sending a response. (Pull #313)
|
| 290 |
+
|
| 291 |
+
## 0.13.1 (April 28th, 2021)
|
| 292 |
+
|
| 293 |
+
### Fixed
|
| 294 |
+
|
| 295 |
+
- More resiliant testing for closed connections. (Pull #311)
|
| 296 |
+
- Don't raise exceptions on ungraceful connection closes. (Pull #310)
|
| 297 |
+
|
| 298 |
+
## 0.13.0 (April 21st, 2021)
|
| 299 |
+
|
| 300 |
+
The 0.13 release updates the core API in order to match the HTTPX Transport API,
|
| 301 |
+
introduced in HTTPX 0.18 onwards.
|
| 302 |
+
|
| 303 |
+
An example of making requests with the new interface is:
|
| 304 |
+
|
| 305 |
+
```python
|
| 306 |
+
with httpcore.SyncConnectionPool() as http:
|
| 307 |
+
status_code, headers, stream, extensions = http.handle_request(
|
| 308 |
+
method=b'GET',
|
| 309 |
+
url=(b'https', b'example.org', 443, b'/'),
|
| 310 |
+
headers=[(b'host', b'example.org'), (b'user-agent', b'httpcore')]
|
| 311 |
+
stream=httpcore.ByteStream(b''),
|
| 312 |
+
extensions={}
|
| 313 |
+
)
|
| 314 |
+
body = stream.read()
|
| 315 |
+
print(status_code, body)
|
| 316 |
+
```
|
| 317 |
+
|
| 318 |
+
### Changed
|
| 319 |
+
|
| 320 |
+
- The `.request()` method is now `handle_request()`. (Pull #296)
|
| 321 |
+
- The `.arequest()` method is now `.handle_async_request()`. (Pull #296)
|
| 322 |
+
- The `headers` argument is no longer optional. (Pull #296)
|
| 323 |
+
- The `stream` argument is no longer optional. (Pull #296)
|
| 324 |
+
- The `ext` argument is now named `extensions`, and is no longer optional. (Pull #296)
|
| 325 |
+
- The `"reason"` extension keyword is now named `"reason_phrase"`. (Pull #296)
|
| 326 |
+
- The `"reason_phrase"` and `"http_version"` extensions now use byte strings for their values. (Pull #296)
|
| 327 |
+
- The `httpcore.PlainByteStream()` class becomes `httpcore.ByteStream()`. (Pull #296)
|
| 328 |
+
|
| 329 |
+
### Added
|
| 330 |
+
|
| 331 |
+
- Streams now support a `.read()` interface. (Pull #296)
|
| 332 |
+
|
| 333 |
+
### Fixed
|
| 334 |
+
|
| 335 |
+
- Task cancellation no longer leaks connections from the connection pool. (Pull #305)
|
| 336 |
+
|
| 337 |
+
## 0.12.3 (December 7th, 2020)
|
| 338 |
+
|
| 339 |
+
### Fixed
|
| 340 |
+
|
| 341 |
+
- Abort SSL connections on close rather than waiting for remote EOF when using `asyncio`. (Pull #167)
|
| 342 |
+
- Fix exception raised in case of connect timeouts when using the `anyio` backend. (Pull #236)
|
| 343 |
+
- Fix `Host` header precedence for `:authority` in HTTP/2. (Pull #241, #243)
|
| 344 |
+
- Handle extra edge case when detecting for socket readability when using `asyncio`. (Pull #242, #244)
|
| 345 |
+
- Fix `asyncio` SSL warning when using proxy tunneling. (Pull #249)
|
| 346 |
+
|
| 347 |
+
## 0.12.2 (November 20th, 2020)
|
| 348 |
+
|
| 349 |
+
### Fixed
|
| 350 |
+
|
| 351 |
+
- Properly wrap connect errors on the asyncio backend. (Pull #235)
|
| 352 |
+
- Fix `ImportError` occurring on Python 3.9 when using the HTTP/1.1 sync client in a multithreaded context. (Pull #237)
|
| 353 |
+
|
| 354 |
+
## 0.12.1 (November 7th, 2020)
|
| 355 |
+
|
| 356 |
+
### Added
|
| 357 |
+
|
| 358 |
+
- Add connect retries. (Pull #221)
|
| 359 |
+
|
| 360 |
+
### Fixed
|
| 361 |
+
|
| 362 |
+
- Tweak detection of dropped connections, resolving an issue with open files limits on Linux. (Pull #185)
|
| 363 |
+
- Avoid leaking connections when establishing an HTTP tunnel to a proxy has failed. (Pull #223)
|
| 364 |
+
- Properly wrap OS errors when using `trio`. (Pull #225)
|
| 365 |
+
|
| 366 |
+
## 0.12.0 (October 6th, 2020)
|
| 367 |
+
|
| 368 |
+
### Changed
|
| 369 |
+
|
| 370 |
+
- HTTP header casing is now preserved, rather than always sent in lowercase. (#216 and python-hyper/h11#104)
|
| 371 |
+
|
| 372 |
+
### Added
|
| 373 |
+
|
| 374 |
+
- Add Python 3.9 to officially supported versions.
|
| 375 |
+
|
| 376 |
+
### Fixed
|
| 377 |
+
|
| 378 |
+
- Gracefully handle a stdlib asyncio bug when a connection is closed while it is in a paused-for-reading state. (#201)
|
| 379 |
+
|
| 380 |
+
## 0.11.1 (September 28nd, 2020)
|
| 381 |
+
|
| 382 |
+
### Fixed
|
| 383 |
+
|
| 384 |
+
- Add await to async semaphore release() coroutine (#197)
|
| 385 |
+
- Drop incorrect curio classifier (#192)
|
| 386 |
+
|
| 387 |
+
## 0.11.0 (September 22nd, 2020)
|
| 388 |
+
|
| 389 |
+
The Transport API with 0.11.0 has a couple of significant changes.
|
| 390 |
+
|
| 391 |
+
Firstly we've moved changed the request interface in order to allow extensions, which will later enable us to support features
|
| 392 |
+
such as trailing headers, HTTP/2 server push, and CONNECT/Upgrade connections.
|
| 393 |
+
|
| 394 |
+
The interface changes from:
|
| 395 |
+
|
| 396 |
+
```python
|
| 397 |
+
def request(method, url, headers, stream, timeout):
|
| 398 |
+
return (http_version, status_code, reason, headers, stream)
|
| 399 |
+
```
|
| 400 |
+
|
| 401 |
+
To instead including an optional dictionary of extensions on the request and response:
|
| 402 |
+
|
| 403 |
+
```python
|
| 404 |
+
def request(method, url, headers, stream, ext):
|
| 405 |
+
return (status_code, headers, stream, ext)
|
| 406 |
+
```
|
| 407 |
+
|
| 408 |
+
Having an open-ended extensions point will allow us to add later support for various optional features, that wouldn't otherwise be supported without these API changes.
|
| 409 |
+
|
| 410 |
+
In particular:
|
| 411 |
+
|
| 412 |
+
* Trailing headers support.
|
| 413 |
+
* HTTP/2 Server Push
|
| 414 |
+
* sendfile.
|
| 415 |
+
* Exposing raw connection on CONNECT, Upgrade, HTTP/2 bi-di streaming.
|
| 416 |
+
* Exposing debug information out of the API, including template name, template context.
|
| 417 |
+
|
| 418 |
+
Currently extensions are limited to:
|
| 419 |
+
|
| 420 |
+
* request: `timeout` - Optional. Timeout dictionary.
|
| 421 |
+
* response: `http_version` - Optional. Include the HTTP version used on the response.
|
| 422 |
+
* response: `reason` - Optional. Include the reason phrase used on the response. Only valid with HTTP/1.*.
|
| 423 |
+
|
| 424 |
+
See https://github.com/encode/httpx/issues/1274#issuecomment-694884553 for the history behind this.
|
| 425 |
+
|
| 426 |
+
Secondly, the async version of `request` is now namespaced as `arequest`.
|
| 427 |
+
|
| 428 |
+
This allows concrete transports to support both sync and async implementations on the same class.
|
| 429 |
+
|
| 430 |
+
### Added
|
| 431 |
+
|
| 432 |
+
- Add curio support. (Pull #168)
|
| 433 |
+
- Add anyio support, with `backend="anyio"`. (Pull #169)
|
| 434 |
+
|
| 435 |
+
### Changed
|
| 436 |
+
|
| 437 |
+
- Update the Transport API to use 'ext' for optional extensions. (Pull #190)
|
| 438 |
+
- Update the Transport API to use `.request` and `.arequest` so implementations can support both sync and async. (Pull #189)
|
| 439 |
+
|
| 440 |
+
## 0.10.2 (August 20th, 2020)
|
| 441 |
+
|
| 442 |
+
### Added
|
| 443 |
+
|
| 444 |
+
- Added Unix Domain Socket support. (Pull #139)
|
| 445 |
+
|
| 446 |
+
### Fixed
|
| 447 |
+
|
| 448 |
+
- Always include the port on proxy CONNECT requests. (Pull #154)
|
| 449 |
+
- Fix `max_keepalive_connections` configuration. (Pull #153)
|
| 450 |
+
- Fixes behaviour in HTTP/1.1 where server disconnects can be used to signal the end of the response body. (Pull #164)
|
| 451 |
+
|
| 452 |
+
## 0.10.1 (August 7th, 2020)
|
| 453 |
+
|
| 454 |
+
- Include `max_keepalive_connections` on `AsyncHTTPProxy`/`SyncHTTPProxy` classes.
|
| 455 |
+
|
| 456 |
+
## 0.10.0 (August 7th, 2020)
|
| 457 |
+
|
| 458 |
+
The most notable change in the 0.10.0 release is that HTTP/2 support is now fully optional.
|
| 459 |
+
|
| 460 |
+
Use either `pip install httpcore` for HTTP/1.1 support only, or `pip install httpcore[http2]` for HTTP/1.1 and HTTP/2 support.
|
| 461 |
+
|
| 462 |
+
### Added
|
| 463 |
+
|
| 464 |
+
- HTTP/2 support becomes optional. (Pull #121, #130)
|
| 465 |
+
- Add `local_address=...` support. (Pull #100, #134)
|
| 466 |
+
- Add `PlainByteStream`, `IteratorByteStream`, `AsyncIteratorByteStream`. The `AsyncByteSteam` and `SyncByteStream` classes are now pure interface classes. (#133)
|
| 467 |
+
- Add `LocalProtocolError`, `RemoteProtocolError` exceptions. (Pull #129)
|
| 468 |
+
- Add `UnsupportedProtocol` exception. (Pull #128)
|
| 469 |
+
- Add `.get_connection_info()` method. (Pull #102, #137)
|
| 470 |
+
- Add better TRACE logs. (Pull #101)
|
| 471 |
+
|
| 472 |
+
### Changed
|
| 473 |
+
|
| 474 |
+
- `max_keepalive` is deprecated in favour of `max_keepalive_connections`. (Pull #140)
|
| 475 |
+
|
| 476 |
+
### Fixed
|
| 477 |
+
|
| 478 |
+
- Improve handling of server disconnects. (Pull #112)
|
| 479 |
+
|
| 480 |
+
## 0.9.1 (May 27th, 2020)
|
| 481 |
+
|
| 482 |
+
### Fixed
|
| 483 |
+
|
| 484 |
+
- Proper host resolution for sync case, including IPv6 support. (Pull #97)
|
| 485 |
+
- Close outstanding connections when connection pool is closed. (Pull #98)
|
| 486 |
+
|
| 487 |
+
## 0.9.0 (May 21th, 2020)
|
| 488 |
+
|
| 489 |
+
### Changed
|
| 490 |
+
|
| 491 |
+
- URL port becomes an `Optional[int]` instead of `int`. (Pull #92)
|
| 492 |
+
|
| 493 |
+
### Fixed
|
| 494 |
+
|
| 495 |
+
- Honor HTTP/2 max concurrent streams settings. (Pull #89, #90)
|
| 496 |
+
- Remove incorrect debug log. (Pull #83)
|
| 497 |
+
|
| 498 |
+
## 0.8.4 (May 11th, 2020)
|
| 499 |
+
|
| 500 |
+
### Added
|
| 501 |
+
|
| 502 |
+
- Logging via HTTPCORE_LOG_LEVEL and HTTPX_LOG_LEVEL environment variables
|
| 503 |
+
and TRACE level logging. (Pull #79)
|
| 504 |
+
|
| 505 |
+
### Fixed
|
| 506 |
+
|
| 507 |
+
- Reuse of connections on HTTP/2 in close concurrency situations. (Pull #81)
|
| 508 |
+
|
| 509 |
+
## 0.8.3 (May 6rd, 2020)
|
| 510 |
+
|
| 511 |
+
### Fixed
|
| 512 |
+
|
| 513 |
+
- Include `Host` and `Accept` headers on proxy "CONNECT" requests.
|
| 514 |
+
- De-duplicate any headers also contained in proxy_headers.
|
| 515 |
+
- HTTP/2 flag not being passed down to proxy connections.
|
| 516 |
+
|
| 517 |
+
## 0.8.2 (May 3rd, 2020)
|
| 518 |
+
|
| 519 |
+
### Fixed
|
| 520 |
+
|
| 521 |
+
- Fix connections using proxy forwarding requests not being added to the
|
| 522 |
+
connection pool properly. (Pull #70)
|
| 523 |
+
|
| 524 |
+
## 0.8.1 (April 30th, 2020)
|
| 525 |
+
|
| 526 |
+
### Changed
|
| 527 |
+
|
| 528 |
+
- Allow inherintance of both `httpcore.AsyncByteStream`, `httpcore.SyncByteStream` without type conflicts.
|
| 529 |
+
|
| 530 |
+
## 0.8.0 (April 30th, 2020)
|
| 531 |
+
|
| 532 |
+
### Fixed
|
| 533 |
+
|
| 534 |
+
- Fixed tunnel proxy support.
|
| 535 |
+
|
| 536 |
+
### Added
|
| 537 |
+
|
| 538 |
+
- New `TimeoutException` base class.
|
| 539 |
+
|
| 540 |
+
## 0.7.0 (March 5th, 2020)
|
| 541 |
+
|
| 542 |
+
- First integration with HTTPX.
|
llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/RECORD
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
httpcore-0.17.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
httpcore-0.17.3.dist-info/LICENSE.md,sha256=_ctZFUx0y6uhahEkL3dAvqnyPW_rVUeRfYxflKgDkqU,1518
|
| 3 |
+
httpcore-0.17.3.dist-info/METADATA,sha256=FXYdgFJ2kxh_T0yVw4qIdD031yF4wtYjTlU0TLrNjIk,18594
|
| 4 |
+
httpcore-0.17.3.dist-info/RECORD,,
|
| 5 |
+
httpcore-0.17.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
httpcore-0.17.3.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
|
| 7 |
+
httpcore-0.17.3.dist-info/top_level.txt,sha256=kYeSB6l1hBNp7JwgSwLajcsxRlrSCVKOhYKSkdgx798,59
|
| 8 |
+
httpcore/__init__.py,sha256=Dza2gJlD90bgsFlu61Fo9RpTqTj7-mxGdJVA1X-MG_U,3338
|
| 9 |
+
httpcore/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
httpcore/__pycache__/_api.cpython-310.pyc,,
|
| 11 |
+
httpcore/__pycache__/_exceptions.cpython-310.pyc,,
|
| 12 |
+
httpcore/__pycache__/_models.cpython-310.pyc,,
|
| 13 |
+
httpcore/__pycache__/_ssl.cpython-310.pyc,,
|
| 14 |
+
httpcore/__pycache__/_synchronization.cpython-310.pyc,,
|
| 15 |
+
httpcore/__pycache__/_trace.cpython-310.pyc,,
|
| 16 |
+
httpcore/__pycache__/_utils.cpython-310.pyc,,
|
| 17 |
+
httpcore/_api.py,sha256=IBR18qZQ8ETcghJXC1Gd-30WuKYRS0EyF2eC80_OBQ8,3167
|
| 18 |
+
httpcore/_async/__init__.py,sha256=EWdl2v4thnAHzJpqjU4h2a8DUiGAvNiWrkii9pfhTf0,1221
|
| 19 |
+
httpcore/_async/__pycache__/__init__.cpython-310.pyc,,
|
| 20 |
+
httpcore/_async/__pycache__/connection.cpython-310.pyc,,
|
| 21 |
+
httpcore/_async/__pycache__/connection_pool.cpython-310.pyc,,
|
| 22 |
+
httpcore/_async/__pycache__/http11.cpython-310.pyc,,
|
| 23 |
+
httpcore/_async/__pycache__/http2.cpython-310.pyc,,
|
| 24 |
+
httpcore/_async/__pycache__/http_proxy.cpython-310.pyc,,
|
| 25 |
+
httpcore/_async/__pycache__/interfaces.cpython-310.pyc,,
|
| 26 |
+
httpcore/_async/__pycache__/socks_proxy.cpython-310.pyc,,
|
| 27 |
+
httpcore/_async/connection.py,sha256=0LKFUXPkxusvJAUyHSJpy4mMkgf71BtOjtlaMBL4sUs,8420
|
| 28 |
+
httpcore/_async/connection_pool.py,sha256=hj1viqcWZivNmoRu-QZjyuOvAFx3-Ae2rMpuK6OZhEM,14305
|
| 29 |
+
httpcore/_async/http11.py,sha256=z58glbEF4YrDM03KVHkuNXNRpAQaJQ4qyblapA-mk4o,11968
|
| 30 |
+
httpcore/_async/http2.py,sha256=KXwWZxZ-43vxIWzr1aTLErhaCodDzFr-XAvzc4fUb10,23879
|
| 31 |
+
httpcore/_async/http_proxy.py,sha256=6jdp87k6_iNCAaM7bJF8wOw_4mX_xrXGU_c4qDjJxLk,13999
|
| 32 |
+
httpcore/_async/interfaces.py,sha256=J2iq9rs7x3nKS6iCfntjHY0Woast6V_HuXuE8rs3HmA,4486
|
| 33 |
+
httpcore/_async/socks_proxy.py,sha256=7tFg_GuAL6WoV5-emaBaiDEmZBHdVODaQXd7nkOoGC8,13810
|
| 34 |
+
httpcore/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 35 |
+
httpcore/_backends/__pycache__/__init__.cpython-310.pyc,,
|
| 36 |
+
httpcore/_backends/__pycache__/anyio.cpython-310.pyc,,
|
| 37 |
+
httpcore/_backends/__pycache__/auto.cpython-310.pyc,,
|
| 38 |
+
httpcore/_backends/__pycache__/base.cpython-310.pyc,,
|
| 39 |
+
httpcore/_backends/__pycache__/mock.cpython-310.pyc,,
|
| 40 |
+
httpcore/_backends/__pycache__/sync.cpython-310.pyc,,
|
| 41 |
+
httpcore/_backends/__pycache__/trio.cpython-310.pyc,,
|
| 42 |
+
httpcore/_backends/anyio.py,sha256=mU8gtunBSLxESGkU0Iy1ZMgumDlAeMkwBjFE3kZiCnc,5208
|
| 43 |
+
httpcore/_backends/auto.py,sha256=8r0ipGxSwXoCb_xKQAyRwL1UzfXVbO4Ee2y8vYQv3Ic,1654
|
| 44 |
+
httpcore/_backends/base.py,sha256=Qsb8b_PSiVP1ldHHGXHxQzJ1Qlzj2r8KR9KQeANkSbE,3218
|
| 45 |
+
httpcore/_backends/mock.py,sha256=S4IADhC6kE22ge_jR_WHlEUkD6QAsXnwz26DSWZLcG4,4179
|
| 46 |
+
httpcore/_backends/sync.py,sha256=Q2skeGyuAt6ETqPjZkiw-iUU0zh_nFXvCFkrsT-Y9GI,4444
|
| 47 |
+
httpcore/_backends/trio.py,sha256=INOeHEkA8pO6AsSqjColWcayM0FQSyGi1hpaQghjrCs,6078
|
| 48 |
+
httpcore/_exceptions.py,sha256=7zb3KNiG0qmfUNIdFgdaUSbn2Pu3oztghi6Vg7i-LJU,1185
|
| 49 |
+
httpcore/_models.py,sha256=1aM8l5D3CbP5QKXCBsdzAWVCHSm0t7UVrCNVTaXUPI8,16343
|
| 50 |
+
httpcore/_ssl.py,sha256=srqmSNU4iOUvWF-SrJvb8G_YEbHFELOXQOwdDIBTS9c,187
|
| 51 |
+
httpcore/_sync/__init__.py,sha256=JBDIgXt5la1LCJ1sLQeKhjKFpLnpNr8Svs6z2ni3fgg,1141
|
| 52 |
+
httpcore/_sync/__pycache__/__init__.cpython-310.pyc,,
|
| 53 |
+
httpcore/_sync/__pycache__/connection.cpython-310.pyc,,
|
| 54 |
+
httpcore/_sync/__pycache__/connection_pool.cpython-310.pyc,,
|
| 55 |
+
httpcore/_sync/__pycache__/http11.cpython-310.pyc,,
|
| 56 |
+
httpcore/_sync/__pycache__/http2.cpython-310.pyc,,
|
| 57 |
+
httpcore/_sync/__pycache__/http_proxy.cpython-310.pyc,,
|
| 58 |
+
httpcore/_sync/__pycache__/interfaces.cpython-310.pyc,,
|
| 59 |
+
httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc,,
|
| 60 |
+
httpcore/_sync/connection.py,sha256=8IOzYLwK8_GuUPz9fF3z0EARb-ueGeKW6ZDXRPdNluQ,8209
|
| 61 |
+
httpcore/_sync/connection_pool.py,sha256=1iwYLdiq3pi9LBvpMZ8O8gWdb56qqPlm6rp35zeORBQ,13928
|
| 62 |
+
httpcore/_sync/http11.py,sha256=FTg8wAzMu1kSDjCQqQUXIslJ90aFrWnO6eL459K8SYs,11629
|
| 63 |
+
httpcore/_sync/http2.py,sha256=lkpHesGkrwzIA4oHLyClJf5IAwRLcaAFMnmffAahAK4,23343
|
| 64 |
+
httpcore/_sync/http_proxy.py,sha256=PcTIz3XuYT3rKvdaruAtH5W7EQvjofOcUHTv9YXiOc0,13761
|
| 65 |
+
httpcore/_sync/interfaces.py,sha256=EM4PTf-rgkclzisFcrTyx1G8FwraoffE8rbckOznX_o,4365
|
| 66 |
+
httpcore/_sync/socks_proxy.py,sha256=BLRF27DHvsfpdZ7WVzK3Ba3vxN6zk0iD_3xRCzDt-2Q,13595
|
| 67 |
+
httpcore/_synchronization.py,sha256=_d_vHqylvzm1Jh58_0G7i-1VwCg3Gu39Cgd4nWASvP0,8751
|
| 68 |
+
httpcore/_trace.py,sha256=akf5PsWVq3rZjqmXniomU59OY37K7JHoeNDCQ4GU84E,3954
|
| 69 |
+
httpcore/_utils.py,sha256=9QPh5ib4JilWX4dBCC_XO6wdBY4b0kbUGgfV3QfBANc,1525
|
| 70 |
+
httpcore/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
llava_next/lib/python3.10/site-packages/httpcore-0.17.3.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
httpcore
|
| 2 |
+
httpcore/_async
|
| 3 |
+
httpcore/_backends
|
| 4 |
+
httpcore/_sync
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
pip
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors
|
| 2 |
+
|
| 3 |
+
All rights reserved.
|
| 4 |
+
|
| 5 |
+
Redistribution and use in source and binary forms, with or without
|
| 6 |
+
modification, are permitted provided that the following conditions are met:
|
| 7 |
+
|
| 8 |
+
a. Redistributions of source code must retain the above copyright notice,
|
| 9 |
+
this list of conditions and the following disclaimer.
|
| 10 |
+
b. Redistributions in binary form must reproduce the above copyright
|
| 11 |
+
notice, this list of conditions and the following disclaimer in the
|
| 12 |
+
documentation and/or other materials provided with the distribution.
|
| 13 |
+
c. Neither the name of the copyright holder nor the names of its
|
| 14 |
+
contributors may be used to endorse or promote products derived
|
| 15 |
+
from this software without specific prior written permission.
|
| 16 |
+
|
| 17 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 18 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 19 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
| 20 |
+
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
|
| 21 |
+
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 22 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 23 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 24 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
| 25 |
+
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
| 26 |
+
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
|
| 27 |
+
DAMAGE.
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Metadata-Version: 2.1
|
| 2 |
+
Name: mpmath
|
| 3 |
+
Version: 1.3.0
|
| 4 |
+
Summary: Python library for arbitrary-precision floating-point arithmetic
|
| 5 |
+
Home-page: http://mpmath.org/
|
| 6 |
+
Author: Fredrik Johansson
|
| 7 |
+
Author-email: fredrik.johansson@gmail.com
|
| 8 |
+
License: BSD
|
| 9 |
+
Project-URL: Source, https://github.com/fredrik-johansson/mpmath
|
| 10 |
+
Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues
|
| 11 |
+
Project-URL: Documentation, http://mpmath.org/doc/current/
|
| 12 |
+
Classifier: License :: OSI Approved :: BSD License
|
| 13 |
+
Classifier: Topic :: Scientific/Engineering :: Mathematics
|
| 14 |
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
| 15 |
+
Classifier: Programming Language :: Python
|
| 16 |
+
Classifier: Programming Language :: Python :: 2
|
| 17 |
+
Classifier: Programming Language :: Python :: 2.7
|
| 18 |
+
Classifier: Programming Language :: Python :: 3
|
| 19 |
+
Classifier: Programming Language :: Python :: 3.5
|
| 20 |
+
Classifier: Programming Language :: Python :: 3.6
|
| 21 |
+
Classifier: Programming Language :: Python :: 3.7
|
| 22 |
+
Classifier: Programming Language :: Python :: 3.8
|
| 23 |
+
Classifier: Programming Language :: Python :: 3.9
|
| 24 |
+
Classifier: Programming Language :: Python :: Implementation :: CPython
|
| 25 |
+
Classifier: Programming Language :: Python :: Implementation :: PyPy
|
| 26 |
+
License-File: LICENSE
|
| 27 |
+
Provides-Extra: develop
|
| 28 |
+
Requires-Dist: pytest (>=4.6) ; extra == 'develop'
|
| 29 |
+
Requires-Dist: pycodestyle ; extra == 'develop'
|
| 30 |
+
Requires-Dist: pytest-cov ; extra == 'develop'
|
| 31 |
+
Requires-Dist: codecov ; extra == 'develop'
|
| 32 |
+
Requires-Dist: wheel ; extra == 'develop'
|
| 33 |
+
Provides-Extra: docs
|
| 34 |
+
Requires-Dist: sphinx ; extra == 'docs'
|
| 35 |
+
Provides-Extra: gmpy
|
| 36 |
+
Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy'
|
| 37 |
+
Provides-Extra: tests
|
| 38 |
+
Requires-Dist: pytest (>=4.6) ; extra == 'tests'
|
| 39 |
+
|
| 40 |
+
mpmath
|
| 41 |
+
======
|
| 42 |
+
|
| 43 |
+
|pypi version| |Build status| |Code coverage status| |Zenodo Badge|
|
| 44 |
+
|
| 45 |
+
.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg
|
| 46 |
+
:target: https://pypi.python.org/pypi/mpmath
|
| 47 |
+
.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg
|
| 48 |
+
:target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test
|
| 49 |
+
.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg
|
| 50 |
+
:target: https://codecov.io/gh/fredrik-johansson/mpmath
|
| 51 |
+
.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg
|
| 52 |
+
:target: https://zenodo.org/badge/latestdoi/2934512
|
| 53 |
+
|
| 54 |
+
A Python library for arbitrary-precision floating-point arithmetic.
|
| 55 |
+
|
| 56 |
+
Website: http://mpmath.org/
|
| 57 |
+
Main author: Fredrik Johansson <fredrik.johansson@gmail.com>
|
| 58 |
+
|
| 59 |
+
Mpmath is free software released under the New BSD License (see the
|
| 60 |
+
LICENSE file for details)
|
| 61 |
+
|
| 62 |
+
0. History and credits
|
| 63 |
+
----------------------
|
| 64 |
+
|
| 65 |
+
The following people (among others) have contributed major patches
|
| 66 |
+
or new features to mpmath:
|
| 67 |
+
|
| 68 |
+
* Pearu Peterson <pearu.peterson@gmail.com>
|
| 69 |
+
* Mario Pernici <mario.pernici@mi.infn.it>
|
| 70 |
+
* Ondrej Certik <ondrej@certik.cz>
|
| 71 |
+
* Vinzent Steinberg <vinzent.steinberg@gmail.cm>
|
| 72 |
+
* Nimish Telang <ntelang@gmail.com>
|
| 73 |
+
* Mike Taschuk <mtaschuk@ece.ualberta.ca>
|
| 74 |
+
* Case Van Horsen <casevh@gmail.com>
|
| 75 |
+
* Jorn Baayen <jorn.baayen@gmail.com>
|
| 76 |
+
* Chris Smith <smichr@gmail.com>
|
| 77 |
+
* Juan Arias de Reyna <arias@us.es>
|
| 78 |
+
* Ioannis Tziakos <itziakos@gmail.com>
|
| 79 |
+
* Aaron Meurer <asmeurer@gmail.com>
|
| 80 |
+
* Stefan Krastanov <krastanov.stefan@gmail.com>
|
| 81 |
+
* Ken Allen <ken.allen@sbcglobal.net>
|
| 82 |
+
* Timo Hartmann <thartmann15@gmail.com>
|
| 83 |
+
* Sergey B Kirpichev <skirpichev@gmail.com>
|
| 84 |
+
* Kris Kuhlman <kristopher.kuhlman@gmail.com>
|
| 85 |
+
* Paul Masson <paulmasson@analyticphysics.com>
|
| 86 |
+
* Michael Kagalenko <michael.kagalenko@gmail.com>
|
| 87 |
+
* Jonathan Warner <warnerjon12@gmail.com>
|
| 88 |
+
* Max Gaukler <max.gaukler@fau.de>
|
| 89 |
+
* Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
|
| 90 |
+
* Nike Dattani <nike@hpqc.org>
|
| 91 |
+
|
| 92 |
+
Numerous other people have contributed by reporting bugs,
|
| 93 |
+
requesting new features, or suggesting improvements to the
|
| 94 |
+
documentation.
|
| 95 |
+
|
| 96 |
+
For a detailed changelog, including individual contributions,
|
| 97 |
+
see the CHANGES file.
|
| 98 |
+
|
| 99 |
+
Fredrik's work on mpmath during summer 2008 was sponsored by Google
|
| 100 |
+
as part of the Google Summer of Code program.
|
| 101 |
+
|
| 102 |
+
Fredrik's work on mpmath during summer 2009 was sponsored by the
|
| 103 |
+
American Institute of Mathematics under the support of the National Science
|
| 104 |
+
Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms).
|
| 105 |
+
|
| 106 |
+
Any opinions, findings, and conclusions or recommendations expressed in this
|
| 107 |
+
material are those of the author(s) and do not necessarily reflect the
|
| 108 |
+
views of the sponsors.
|
| 109 |
+
|
| 110 |
+
Credit also goes to:
|
| 111 |
+
|
| 112 |
+
* The authors of the GMP library and the Python wrapper
|
| 113 |
+
gmpy, enabling mpmath to become much faster at
|
| 114 |
+
high precision
|
| 115 |
+
* The authors of MPFR, pari/gp, MPFUN, and other arbitrary-
|
| 116 |
+
precision libraries, whose documentation has been helpful
|
| 117 |
+
for implementing many of the algorithms in mpmath
|
| 118 |
+
* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik;
|
| 119 |
+
Wolfram Research for MathWorld and the Wolfram Functions site.
|
| 120 |
+
These are the main references used for special functions
|
| 121 |
+
implementations.
|
| 122 |
+
* George Brandl for developing the Sphinx documentation tool
|
| 123 |
+
used to build mpmath's documentation
|
| 124 |
+
|
| 125 |
+
Release history:
|
| 126 |
+
|
| 127 |
+
* Version 1.3.0 released on March 7, 2023
|
| 128 |
+
* Version 1.2.0 released on February 1, 2021
|
| 129 |
+
* Version 1.1.0 released on December 11, 2018
|
| 130 |
+
* Version 1.0.0 released on September 27, 2017
|
| 131 |
+
* Version 0.19 released on June 10, 2014
|
| 132 |
+
* Version 0.18 released on December 31, 2013
|
| 133 |
+
* Version 0.17 released on February 1, 2011
|
| 134 |
+
* Version 0.16 released on September 24, 2010
|
| 135 |
+
* Version 0.15 released on June 6, 2010
|
| 136 |
+
* Version 0.14 released on February 5, 2010
|
| 137 |
+
* Version 0.13 released on August 13, 2009
|
| 138 |
+
* Version 0.12 released on June 9, 2009
|
| 139 |
+
* Version 0.11 released on January 26, 2009
|
| 140 |
+
* Version 0.10 released on October 15, 2008
|
| 141 |
+
* Version 0.9 released on August 23, 2008
|
| 142 |
+
* Version 0.8 released on April 20, 2008
|
| 143 |
+
* Version 0.7 released on March 12, 2008
|
| 144 |
+
* Version 0.6 released on January 13, 2008
|
| 145 |
+
* Version 0.5 released on November 24, 2007
|
| 146 |
+
* Version 0.4 released on November 3, 2007
|
| 147 |
+
* Version 0.3 released on October 5, 2007
|
| 148 |
+
* Version 0.2 released on October 2, 2007
|
| 149 |
+
* Version 0.1 released on September 27, 2007
|
| 150 |
+
|
| 151 |
+
1. Download & installation
|
| 152 |
+
--------------------------
|
| 153 |
+
|
| 154 |
+
Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested
|
| 155 |
+
with CPython 2.7, 3.5 through 3.7 and for PyPy.
|
| 156 |
+
|
| 157 |
+
The latest release of mpmath can be downloaded from the mpmath
|
| 158 |
+
website and from https://github.com/fredrik-johansson/mpmath/releases
|
| 159 |
+
|
| 160 |
+
It should also be available in the Python Package Index at
|
| 161 |
+
https://pypi.python.org/pypi/mpmath
|
| 162 |
+
|
| 163 |
+
To install latest release of Mpmath with pip, simply run
|
| 164 |
+
|
| 165 |
+
``pip install mpmath``
|
| 166 |
+
|
| 167 |
+
Or unpack the mpmath archive and run
|
| 168 |
+
|
| 169 |
+
``python setup.py install``
|
| 170 |
+
|
| 171 |
+
Mpmath can also be installed using
|
| 172 |
+
|
| 173 |
+
``python -m easy_install mpmath``
|
| 174 |
+
|
| 175 |
+
The latest development code is available from
|
| 176 |
+
https://github.com/fredrik-johansson/mpmath
|
| 177 |
+
|
| 178 |
+
See the main documentation for more detailed instructions.
|
| 179 |
+
|
| 180 |
+
2. Running tests
|
| 181 |
+
----------------
|
| 182 |
+
|
| 183 |
+
The unit tests in mpmath/tests/ can be run via the script
|
| 184 |
+
runtests.py, but it is recommended to run them with py.test
|
| 185 |
+
(https://pytest.org/), especially
|
| 186 |
+
to generate more useful reports in case there are failures.
|
| 187 |
+
|
| 188 |
+
You may also want to check out the demo scripts in the demo
|
| 189 |
+
directory.
|
| 190 |
+
|
| 191 |
+
The master branch is automatically tested by Travis CI.
|
| 192 |
+
|
| 193 |
+
3. Documentation
|
| 194 |
+
----------------
|
| 195 |
+
|
| 196 |
+
Documentation in reStructuredText format is available in the
|
| 197 |
+
doc directory included with the source package. These files
|
| 198 |
+
are human-readable, but can be compiled to prettier HTML using
|
| 199 |
+
the build.py script (requires Sphinx, http://sphinx.pocoo.org/).
|
| 200 |
+
|
| 201 |
+
See setup.txt in the documentation for more information.
|
| 202 |
+
|
| 203 |
+
The most recent documentation is also available in HTML format:
|
| 204 |
+
|
| 205 |
+
http://mpmath.org/doc/current/
|
| 206 |
+
|
| 207 |
+
4. Known problems
|
| 208 |
+
-----------------
|
| 209 |
+
|
| 210 |
+
Mpmath is a work in progress. Major issues include:
|
| 211 |
+
|
| 212 |
+
* Some functions may return incorrect values when given extremely
|
| 213 |
+
large arguments or arguments very close to singularities.
|
| 214 |
+
|
| 215 |
+
* Directed rounding works for arithmetic operations. It is implemented
|
| 216 |
+
heuristically for other operations, and their results may be off by one
|
| 217 |
+
or two units in the last place (even if otherwise accurate).
|
| 218 |
+
|
| 219 |
+
* Some IEEE 754 features are not available. Inifinities and NaN are
|
| 220 |
+
partially supported; denormal rounding is currently not available
|
| 221 |
+
at all.
|
| 222 |
+
|
| 223 |
+
* The interface for switching precision and rounding is not finalized.
|
| 224 |
+
The current method is not threadsafe.
|
| 225 |
+
|
| 226 |
+
5. Help and bug reports
|
| 227 |
+
-----------------------
|
| 228 |
+
|
| 229 |
+
General questions and comments can be sent to the mpmath mailinglist,
|
| 230 |
+
mpmath@googlegroups.com
|
| 231 |
+
|
| 232 |
+
You can also report bugs and send patches to the mpmath issue tracker,
|
| 233 |
+
https://github.com/fredrik-johansson/mpmath/issues
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
| 2 |
+
mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537
|
| 3 |
+
mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630
|
| 4 |
+
mpmath-1.3.0.dist-info/RECORD,,
|
| 5 |
+
mpmath-1.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 6 |
+
mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
|
| 7 |
+
mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7
|
| 8 |
+
mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765
|
| 9 |
+
mpmath/__pycache__/__init__.cpython-310.pyc,,
|
| 10 |
+
mpmath/__pycache__/ctx_base.cpython-310.pyc,,
|
| 11 |
+
mpmath/__pycache__/ctx_fp.cpython-310.pyc,,
|
| 12 |
+
mpmath/__pycache__/ctx_iv.cpython-310.pyc,,
|
| 13 |
+
mpmath/__pycache__/ctx_mp.cpython-310.pyc,,
|
| 14 |
+
mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,,
|
| 15 |
+
mpmath/__pycache__/function_docs.cpython-310.pyc,,
|
| 16 |
+
mpmath/__pycache__/identification.cpython-310.pyc,,
|
| 17 |
+
mpmath/__pycache__/math2.cpython-310.pyc,,
|
| 18 |
+
mpmath/__pycache__/rational.cpython-310.pyc,,
|
| 19 |
+
mpmath/__pycache__/usertools.cpython-310.pyc,,
|
| 20 |
+
mpmath/__pycache__/visualization.cpython-310.pyc,,
|
| 21 |
+
mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162
|
| 22 |
+
mpmath/calculus/__pycache__/__init__.cpython-310.pyc,,
|
| 23 |
+
mpmath/calculus/__pycache__/approximation.cpython-310.pyc,,
|
| 24 |
+
mpmath/calculus/__pycache__/calculus.cpython-310.pyc,,
|
| 25 |
+
mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,,
|
| 26 |
+
mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,,
|
| 27 |
+
mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,,
|
| 28 |
+
mpmath/calculus/__pycache__/odes.cpython-310.pyc,,
|
| 29 |
+
mpmath/calculus/__pycache__/optimization.cpython-310.pyc,,
|
| 30 |
+
mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,,
|
| 31 |
+
mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,,
|
| 32 |
+
mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817
|
| 33 |
+
mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112
|
| 34 |
+
mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226
|
| 35 |
+
mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306
|
| 36 |
+
mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056
|
| 37 |
+
mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908
|
| 38 |
+
mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856
|
| 39 |
+
mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877
|
| 40 |
+
mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432
|
| 41 |
+
mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985
|
| 42 |
+
mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572
|
| 43 |
+
mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211
|
| 44 |
+
mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452
|
| 45 |
+
mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815
|
| 46 |
+
mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512
|
| 47 |
+
mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330
|
| 48 |
+
mpmath/functions/__pycache__/__init__.cpython-310.pyc,,
|
| 49 |
+
mpmath/functions/__pycache__/bessel.cpython-310.pyc,,
|
| 50 |
+
mpmath/functions/__pycache__/elliptic.cpython-310.pyc,,
|
| 51 |
+
mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,,
|
| 52 |
+
mpmath/functions/__pycache__/factorials.cpython-310.pyc,,
|
| 53 |
+
mpmath/functions/__pycache__/functions.cpython-310.pyc,,
|
| 54 |
+
mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,,
|
| 55 |
+
mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,,
|
| 56 |
+
mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,,
|
| 57 |
+
mpmath/functions/__pycache__/rszeta.cpython-310.pyc,,
|
| 58 |
+
mpmath/functions/__pycache__/signals.cpython-310.pyc,,
|
| 59 |
+
mpmath/functions/__pycache__/theta.cpython-310.pyc,,
|
| 60 |
+
mpmath/functions/__pycache__/zeta.cpython-310.pyc,,
|
| 61 |
+
mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,,
|
| 62 |
+
mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938
|
| 63 |
+
mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237
|
| 64 |
+
mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644
|
| 65 |
+
mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273
|
| 66 |
+
mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100
|
| 67 |
+
mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570
|
| 68 |
+
mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097
|
| 69 |
+
mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633
|
| 70 |
+
mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184
|
| 71 |
+
mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703
|
| 72 |
+
mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320
|
| 73 |
+
mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410
|
| 74 |
+
mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858
|
| 75 |
+
mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253
|
| 76 |
+
mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790
|
| 77 |
+
mpmath/libmp/__pycache__/__init__.cpython-310.pyc,,
|
| 78 |
+
mpmath/libmp/__pycache__/backend.cpython-310.pyc,,
|
| 79 |
+
mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,,
|
| 80 |
+
mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,,
|
| 81 |
+
mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,,
|
| 82 |
+
mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,,
|
| 83 |
+
mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,,
|
| 84 |
+
mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,,
|
| 85 |
+
mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,,
|
| 86 |
+
mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360
|
| 87 |
+
mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469
|
| 88 |
+
mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861
|
| 89 |
+
mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624
|
| 90 |
+
mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688
|
| 91 |
+
mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875
|
| 92 |
+
mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021
|
| 93 |
+
mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622
|
| 94 |
+
mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561
|
| 95 |
+
mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94
|
| 96 |
+
mpmath/matrices/__pycache__/__init__.cpython-310.pyc,,
|
| 97 |
+
mpmath/matrices/__pycache__/calculus.cpython-310.pyc,,
|
| 98 |
+
mpmath/matrices/__pycache__/eigen.cpython-310.pyc,,
|
| 99 |
+
mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,,
|
| 100 |
+
mpmath/matrices/__pycache__/linalg.cpython-310.pyc,,
|
| 101 |
+
mpmath/matrices/__pycache__/matrices.cpython-310.pyc,,
|
| 102 |
+
mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609
|
| 103 |
+
mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394
|
| 104 |
+
mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534
|
| 105 |
+
mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958
|
| 106 |
+
mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331
|
| 107 |
+
mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976
|
| 108 |
+
mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
| 109 |
+
mpmath/tests/__pycache__/__init__.cpython-310.pyc,,
|
| 110 |
+
mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,,
|
| 111 |
+
mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,,
|
| 112 |
+
mpmath/tests/__pycache__/runtests.cpython-310.pyc,,
|
| 113 |
+
mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,,
|
| 114 |
+
mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,,
|
| 115 |
+
mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,,
|
| 116 |
+
mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,,
|
| 117 |
+
mpmath/tests/__pycache__/test_convert.cpython-310.pyc,,
|
| 118 |
+
mpmath/tests/__pycache__/test_diff.cpython-310.pyc,,
|
| 119 |
+
mpmath/tests/__pycache__/test_division.cpython-310.pyc,,
|
| 120 |
+
mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,,
|
| 121 |
+
mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,,
|
| 122 |
+
mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,,
|
| 123 |
+
mpmath/tests/__pycache__/test_fp.cpython-310.pyc,,
|
| 124 |
+
mpmath/tests/__pycache__/test_functions.cpython-310.pyc,,
|
| 125 |
+
mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,,
|
| 126 |
+
mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,,
|
| 127 |
+
mpmath/tests/__pycache__/test_hp.cpython-310.pyc,,
|
| 128 |
+
mpmath/tests/__pycache__/test_identify.cpython-310.pyc,,
|
| 129 |
+
mpmath/tests/__pycache__/test_interval.cpython-310.pyc,,
|
| 130 |
+
mpmath/tests/__pycache__/test_levin.cpython-310.pyc,,
|
| 131 |
+
mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,,
|
| 132 |
+
mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,,
|
| 133 |
+
mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,,
|
| 134 |
+
mpmath/tests/__pycache__/test_ode.cpython-310.pyc,,
|
| 135 |
+
mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,,
|
| 136 |
+
mpmath/tests/__pycache__/test_power.cpython-310.pyc,,
|
| 137 |
+
mpmath/tests/__pycache__/test_quad.cpython-310.pyc,,
|
| 138 |
+
mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,,
|
| 139 |
+
mpmath/tests/__pycache__/test_special.cpython-310.pyc,,
|
| 140 |
+
mpmath/tests/__pycache__/test_str.cpython-310.pyc,,
|
| 141 |
+
mpmath/tests/__pycache__/test_summation.cpython-310.pyc,,
|
| 142 |
+
mpmath/tests/__pycache__/test_trig.cpython-310.pyc,,
|
| 143 |
+
mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,,
|
| 144 |
+
mpmath/tests/__pycache__/torture.cpython-310.pyc,,
|
| 145 |
+
mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228
|
| 146 |
+
mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003
|
| 147 |
+
mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189
|
| 148 |
+
mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348
|
| 149 |
+
mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686
|
| 150 |
+
mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187
|
| 151 |
+
mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306
|
| 152 |
+
mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834
|
| 153 |
+
mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466
|
| 154 |
+
mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340
|
| 155 |
+
mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905
|
| 156 |
+
mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778
|
| 157 |
+
mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225
|
| 158 |
+
mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997
|
| 159 |
+
mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955
|
| 160 |
+
mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990
|
| 161 |
+
mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917
|
| 162 |
+
mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461
|
| 163 |
+
mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692
|
| 164 |
+
mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527
|
| 165 |
+
mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090
|
| 166 |
+
mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440
|
| 167 |
+
mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944
|
| 168 |
+
mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196
|
| 169 |
+
mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822
|
| 170 |
+
mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401
|
| 171 |
+
mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227
|
| 172 |
+
mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893
|
| 173 |
+
mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132
|
| 174 |
+
mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848
|
| 175 |
+
mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544
|
| 176 |
+
mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035
|
| 177 |
+
mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799
|
| 178 |
+
mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944
|
| 179 |
+
mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868
|
| 180 |
+
mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029
|
| 181 |
+
mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/REQUESTED
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.38.4)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-any
|
| 5 |
+
|
llava_next/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
mpmath
|
llava_next/lib/python3.10/site-packages/ninja/__init__.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
import os
|
| 3 |
+
import platform
|
| 4 |
+
import subprocess
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
from ._version import version as __version__
|
| 8 |
+
|
| 9 |
+
__all__ = ["__version__", "DATA", "BIN_DIR", "ninja"]
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def __dir__():
|
| 13 |
+
return __all__
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
try:
|
| 17 |
+
from .ninja_syntax import Writer, escape, expand
|
| 18 |
+
except ImportError:
|
| 19 |
+
# Support importing `ninja_syntax` from the source tree
|
| 20 |
+
if not os.path.exists(
|
| 21 |
+
os.path.join(os.path.dirname(__file__), 'ninja_syntax.py')):
|
| 22 |
+
sys.path.insert(0, os.path.abspath(os.path.join(
|
| 23 |
+
os.path.dirname(__file__), '../../Ninja-src/misc')))
|
| 24 |
+
from ninja_syntax import Writer, escape, expand # noqa: F401
|
| 25 |
+
|
| 26 |
+
DATA = os.path.join(os.path.dirname(__file__), 'data')
|
| 27 |
+
|
| 28 |
+
# Support running tests from the source tree
|
| 29 |
+
if not os.path.exists(DATA):
|
| 30 |
+
from skbuild.constants import CMAKE_INSTALL_DIR as SKBUILD_CMAKE_INSTALL_DIR
|
| 31 |
+
from skbuild.constants import set_skbuild_plat_name
|
| 32 |
+
|
| 33 |
+
if platform.system().lower() == "darwin":
|
| 34 |
+
# Since building the project specifying --plat-name or CMAKE_OSX_* variables
|
| 35 |
+
# leads to different SKBUILD_DIR, the code below attempt to guess the most
|
| 36 |
+
# likely plat-name.
|
| 37 |
+
_skbuild_dirs = os.listdir(os.path.join(os.path.dirname(__file__), '..', '..', '_skbuild'))
|
| 38 |
+
if _skbuild_dirs:
|
| 39 |
+
_likely_plat_name = '-'.join(_skbuild_dirs[0].split('-')[:3])
|
| 40 |
+
set_skbuild_plat_name(_likely_plat_name)
|
| 41 |
+
|
| 42 |
+
_data = os.path.abspath(os.path.join(
|
| 43 |
+
os.path.dirname(__file__), '..', '..', SKBUILD_CMAKE_INSTALL_DIR(), 'src/ninja/data'))
|
| 44 |
+
if os.path.exists(_data):
|
| 45 |
+
DATA = _data
|
| 46 |
+
|
| 47 |
+
BIN_DIR = os.path.join(DATA, 'bin')
|
| 48 |
+
|
| 49 |
+
|
def _program(name, args):
    """Run the bundled executable *name* with *args*; return its exit code."""
    executable = os.path.join(BIN_DIR, name)
    command = [executable] + args
    # close_fds=False keeps inherited descriptors open for the child,
    # matching how the real ninja binary would be invoked from a shell.
    return subprocess.call(command, close_fds=False)
def ninja():
    """Console entry point: forward CLI arguments to the bundled ninja
    binary and exit the interpreter with its return code."""
    exit_code = _program('ninja', sys.argv[1:])
    raise SystemExit(exit_code)
llava_next/lib/python3.10/site-packages/ninja/__main__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
from ninja import ninja
|
| 3 |
+
|
| 4 |
+
if __name__ == '__main__':
|
| 5 |
+
ninja()
|
llava_next/lib/python3.10/site-packages/ninja/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.58 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/ninja/__pycache__/__main__.cpython-310.pyc
ADDED
|
Binary file (233 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/ninja/__pycache__/_version.cpython-310.pyc
ADDED
|
Binary file (492 Bytes). View file
|
|
|
llava_next/lib/python3.10/site-packages/ninja/__pycache__/ninja_syntax.cpython-310.pyc
ADDED
|
Binary file (5.96 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/ninja/_version.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# file generated by setuptools_scm
|
| 2 |
+
# don't change, don't track in version control
|
| 3 |
+
TYPE_CHECKING = False
|
| 4 |
+
if TYPE_CHECKING:
|
| 5 |
+
from typing import Tuple, Union
|
| 6 |
+
VERSION_TUPLE = Tuple[Union[int, str], ...]
|
| 7 |
+
else:
|
| 8 |
+
VERSION_TUPLE = object
|
| 9 |
+
|
| 10 |
+
version: str
|
| 11 |
+
__version__: str
|
| 12 |
+
__version_tuple__: VERSION_TUPLE
|
| 13 |
+
version_tuple: VERSION_TUPLE
|
| 14 |
+
|
| 15 |
+
__version__ = version = '1.11.1.1'
|
| 16 |
+
__version_tuple__ = version_tuple = (1, 11, 1, 1)
|
llava_next/lib/python3.10/site-packages/ninja/ninja_syntax.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/python
|
| 2 |
+
|
| 3 |
+
# Copyright 2011 Google Inc. All Rights Reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
|
| 17 |
+
"""Python module for generating .ninja files.
|
| 18 |
+
|
| 19 |
+
Note that this is emphatically not a required piece of Ninja; it's
|
| 20 |
+
just a helpful utility for build-file-generation systems that already
|
| 21 |
+
use Python.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import re
|
| 25 |
+
import textwrap
|
| 26 |
+
|
| 27 |
+
def escape_path(word):
|
| 28 |
+
return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')
|
| 29 |
+
|
| 30 |
+
class Writer(object):
|
| 31 |
+
def __init__(self, output, width=78):
|
| 32 |
+
self.output = output
|
| 33 |
+
self.width = width
|
| 34 |
+
|
| 35 |
+
def newline(self):
|
| 36 |
+
self.output.write('\n')
|
| 37 |
+
|
| 38 |
+
def comment(self, text):
|
| 39 |
+
for line in textwrap.wrap(text, self.width - 2, break_long_words=False,
|
| 40 |
+
break_on_hyphens=False):
|
| 41 |
+
self.output.write('# ' + line + '\n')
|
| 42 |
+
|
| 43 |
+
def variable(self, key, value, indent=0):
|
| 44 |
+
if value is None:
|
| 45 |
+
return
|
| 46 |
+
if isinstance(value, list):
|
| 47 |
+
value = ' '.join(filter(None, value)) # Filter out empty strings.
|
| 48 |
+
self._line('%s = %s' % (key, value), indent)
|
| 49 |
+
|
| 50 |
+
def pool(self, name, depth):
|
| 51 |
+
self._line('pool %s' % name)
|
| 52 |
+
self.variable('depth', depth, indent=1)
|
| 53 |
+
|
| 54 |
+
def rule(self, name, command, description=None, depfile=None,
|
| 55 |
+
generator=False, pool=None, restat=False, rspfile=None,
|
| 56 |
+
rspfile_content=None, deps=None):
|
| 57 |
+
self._line('rule %s' % name)
|
| 58 |
+
self.variable('command', command, indent=1)
|
| 59 |
+
if description:
|
| 60 |
+
self.variable('description', description, indent=1)
|
| 61 |
+
if depfile:
|
| 62 |
+
self.variable('depfile', depfile, indent=1)
|
| 63 |
+
if generator:
|
| 64 |
+
self.variable('generator', '1', indent=1)
|
| 65 |
+
if pool:
|
| 66 |
+
self.variable('pool', pool, indent=1)
|
| 67 |
+
if restat:
|
| 68 |
+
self.variable('restat', '1', indent=1)
|
| 69 |
+
if rspfile:
|
| 70 |
+
self.variable('rspfile', rspfile, indent=1)
|
| 71 |
+
if rspfile_content:
|
| 72 |
+
self.variable('rspfile_content', rspfile_content, indent=1)
|
| 73 |
+
if deps:
|
| 74 |
+
self.variable('deps', deps, indent=1)
|
| 75 |
+
|
| 76 |
+
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
|
| 77 |
+
variables=None, implicit_outputs=None, pool=None, dyndep=None):
|
| 78 |
+
outputs = as_list(outputs)
|
| 79 |
+
out_outputs = [escape_path(x) for x in outputs]
|
| 80 |
+
all_inputs = [escape_path(x) for x in as_list(inputs)]
|
| 81 |
+
|
| 82 |
+
if implicit:
|
| 83 |
+
implicit = [escape_path(x) for x in as_list(implicit)]
|
| 84 |
+
all_inputs.append('|')
|
| 85 |
+
all_inputs.extend(implicit)
|
| 86 |
+
if order_only:
|
| 87 |
+
order_only = [escape_path(x) for x in as_list(order_only)]
|
| 88 |
+
all_inputs.append('||')
|
| 89 |
+
all_inputs.extend(order_only)
|
| 90 |
+
if implicit_outputs:
|
| 91 |
+
implicit_outputs = [escape_path(x)
|
| 92 |
+
for x in as_list(implicit_outputs)]
|
| 93 |
+
out_outputs.append('|')
|
| 94 |
+
out_outputs.extend(implicit_outputs)
|
| 95 |
+
|
| 96 |
+
self._line('build %s: %s' % (' '.join(out_outputs),
|
| 97 |
+
' '.join([rule] + all_inputs)))
|
| 98 |
+
if pool is not None:
|
| 99 |
+
self._line(' pool = %s' % pool)
|
| 100 |
+
if dyndep is not None:
|
| 101 |
+
self._line(' dyndep = %s' % dyndep)
|
| 102 |
+
|
| 103 |
+
if variables:
|
| 104 |
+
if isinstance(variables, dict):
|
| 105 |
+
iterator = iter(variables.items())
|
| 106 |
+
else:
|
| 107 |
+
iterator = iter(variables)
|
| 108 |
+
|
| 109 |
+
for key, val in iterator:
|
| 110 |
+
self.variable(key, val, indent=1)
|
| 111 |
+
|
| 112 |
+
return outputs
|
| 113 |
+
|
| 114 |
+
def include(self, path):
|
| 115 |
+
self._line('include %s' % path)
|
| 116 |
+
|
| 117 |
+
def subninja(self, path):
|
| 118 |
+
self._line('subninja %s' % path)
|
| 119 |
+
|
| 120 |
+
def default(self, paths):
|
| 121 |
+
self._line('default %s' % ' '.join(as_list(paths)))
|
| 122 |
+
|
| 123 |
+
def _count_dollars_before_index(self, s, i):
|
| 124 |
+
"""Returns the number of '$' characters right in front of s[i]."""
|
| 125 |
+
dollar_count = 0
|
| 126 |
+
dollar_index = i - 1
|
| 127 |
+
while dollar_index > 0 and s[dollar_index] == '$':
|
| 128 |
+
dollar_count += 1
|
| 129 |
+
dollar_index -= 1
|
| 130 |
+
return dollar_count
|
| 131 |
+
|
| 132 |
+
def _line(self, text, indent=0):
|
| 133 |
+
"""Write 'text' word-wrapped at self.width characters."""
|
| 134 |
+
leading_space = ' ' * indent
|
| 135 |
+
while len(leading_space) + len(text) > self.width:
|
| 136 |
+
# The text is too wide; wrap if possible.
|
| 137 |
+
|
| 138 |
+
# Find the rightmost space that would obey our width constraint and
|
| 139 |
+
# that's not an escaped space.
|
| 140 |
+
available_space = self.width - len(leading_space) - len(' $')
|
| 141 |
+
space = available_space
|
| 142 |
+
while True:
|
| 143 |
+
space = text.rfind(' ', 0, space)
|
| 144 |
+
if (space < 0 or
|
| 145 |
+
self._count_dollars_before_index(text, space) % 2 == 0):
|
| 146 |
+
break
|
| 147 |
+
|
| 148 |
+
if space < 0:
|
| 149 |
+
# No such space; just use the first unescaped space we can find.
|
| 150 |
+
space = available_space - 1
|
| 151 |
+
while True:
|
| 152 |
+
space = text.find(' ', space + 1)
|
| 153 |
+
if (space < 0 or
|
| 154 |
+
self._count_dollars_before_index(text, space) % 2 == 0):
|
| 155 |
+
break
|
| 156 |
+
if space < 0:
|
| 157 |
+
# Give up on breaking.
|
| 158 |
+
break
|
| 159 |
+
|
| 160 |
+
self.output.write(leading_space + text[0:space] + ' $\n')
|
| 161 |
+
text = text[space+1:]
|
| 162 |
+
|
| 163 |
+
# Subsequent lines are continuations, so indent them.
|
| 164 |
+
leading_space = ' ' * (indent+2)
|
| 165 |
+
|
| 166 |
+
self.output.write(leading_space + text + '\n')
|
| 167 |
+
|
| 168 |
+
def close(self):
|
| 169 |
+
self.output.close()
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def as_list(input):
|
| 173 |
+
if input is None:
|
| 174 |
+
return []
|
| 175 |
+
if isinstance(input, list):
|
| 176 |
+
return input
|
| 177 |
+
return [input]
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def escape(string):
|
| 181 |
+
"""Escape a string such that it can be embedded into a Ninja file without
|
| 182 |
+
further interpretation."""
|
| 183 |
+
assert '\n' not in string, 'Ninja syntax does not allow newlines'
|
| 184 |
+
# We only have one special metacharacter: '$'.
|
| 185 |
+
return string.replace('$', '$$')
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
def expand(string, vars, local_vars={}):
|
| 189 |
+
"""Expand a string containing $vars as Ninja would.
|
| 190 |
+
|
| 191 |
+
Note: doesn't handle the full Ninja variable syntax, but it's enough
|
| 192 |
+
to make configure.py's use of it work.
|
| 193 |
+
"""
|
| 194 |
+
def exp(m):
|
| 195 |
+
var = m.group(1)
|
| 196 |
+
if var == '$':
|
| 197 |
+
return '$'
|
| 198 |
+
return local_vars.get(var, vars.get(var, ''))
|
| 199 |
+
return re.sub(r'\$(\$|\w*)', exp, string)
|
llava_next/lib/python3.10/site-packages/ninja/py.typed
ADDED
|
File without changes
|
llava_next/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc
ADDED
|
Binary file (50.1 kB). View file
|
|
|
llava_next/lib/python3.10/site-packages/pandas/tests/indexing/__pycache__/test_loc.cpython-310.pyc
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c0c347411d047dee0c308f2da6fd90327ea505d0e64771384f6806c2aecd5ff8
|
| 3 |
+
size 107549
|
parrot/lib/python3.10/site-packages/transformers/models/gptj/configuration_gptj.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2021 The EleutherAI and HuggingFace Teams. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" GPT-J model configuration"""
|
| 16 |
+
from collections import OrderedDict
|
| 17 |
+
from typing import Any, List, Mapping, Optional
|
| 18 |
+
|
| 19 |
+
from ... import PreTrainedTokenizer, TensorType, is_torch_available
|
| 20 |
+
from ...configuration_utils import PretrainedConfig
|
| 21 |
+
from ...onnx import OnnxConfigWithPast, PatchingSpec
|
| 22 |
+
from ...utils import logging
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
logger = logging.get_logger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class GPTJConfig(PretrainedConfig):
|
| 29 |
+
r"""
|
| 30 |
+
This is the configuration class to store the configuration of a [`GPTJModel`]. It is used to instantiate a GPT-J
|
| 31 |
+
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
|
| 32 |
+
defaults will yield a similar configuration to that of the GPT-J
|
| 33 |
+
[EleutherAI/gpt-j-6B](https://huggingface.co/EleutherAI/gpt-j-6B) architecture. Configuration objects inherit from
|
| 34 |
+
[`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`]
|
| 35 |
+
for more information.
|
| 36 |
+
|
| 37 |
+
Args:
|
| 38 |
+
vocab_size (`int`, *optional*, defaults to 50400):
|
| 39 |
+
Vocabulary size of the GPT-J model. Defines the number of different tokens that can be represented by the
|
| 40 |
+
`inputs_ids` passed when calling [`GPTJModel`].
|
| 41 |
+
n_positions (`int`, *optional*, defaults to 2048):
|
| 42 |
+
The maximum sequence length that this model might ever be used with. Typically set this to something large
|
| 43 |
+
just in case (e.g., 512 or 1024 or 2048).
|
| 44 |
+
n_embd (`int`, *optional*, defaults to 4096):
|
| 45 |
+
Dimensionality of the embeddings and hidden states.
|
| 46 |
+
n_layer (`int`, *optional*, defaults to 28):
|
| 47 |
+
Number of hidden layers in the Transformer encoder.
|
| 48 |
+
n_head (`int`, *optional*, defaults to 16):
|
| 49 |
+
Number of attention heads for each attention layer in the Transformer encoder.
|
| 50 |
+
rotary_dim (`int`, *optional*, defaults to 64):
|
| 51 |
+
Number of dimensions in the embedding that Rotary Position Embedding is applied to.
|
| 52 |
+
n_inner (`int`, *optional*, defaults to None):
|
| 53 |
+
Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
|
| 54 |
+
activation_function (`str`, *optional*, defaults to `"gelu_new"`):
|
| 55 |
+
Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
|
| 56 |
+
resid_pdrop (`float`, *optional*, defaults to 0.1):
|
| 57 |
+
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
|
| 58 |
+
embd_pdrop (`int`, *optional*, defaults to 0.1):
|
| 59 |
+
The dropout ratio for the embeddings.
|
| 60 |
+
attn_pdrop (`float`, *optional*, defaults to 0.1):
|
| 61 |
+
The dropout ratio for the attention.
|
| 62 |
+
layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
|
| 63 |
+
The epsilon to use in the layer normalization layers.
|
| 64 |
+
initializer_range (`float`, *optional*, defaults to 0.02):
|
| 65 |
+
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| 66 |
+
use_cache (`bool`, *optional*, defaults to `True`):
|
| 67 |
+
Whether or not the model should return the last key/values attentions (not used by all models).
|
| 68 |
+
|
| 69 |
+
Example:
|
| 70 |
+
|
| 71 |
+
```python
|
| 72 |
+
>>> from transformers import GPTJModel, GPTJConfig
|
| 73 |
+
|
| 74 |
+
>>> # Initializing a GPT-J 6B configuration
|
| 75 |
+
>>> configuration = GPTJConfig()
|
| 76 |
+
|
| 77 |
+
>>> # Initializing a model from the configuration
|
| 78 |
+
>>> model = GPTJModel(configuration)
|
| 79 |
+
|
| 80 |
+
>>> # Accessing the model configuration
|
| 81 |
+
>>> configuration = model.config
|
| 82 |
+
```"""
|
| 83 |
+
|
| 84 |
+
model_type = "gptj"
|
| 85 |
+
attribute_map = {
|
| 86 |
+
"max_position_embeddings": "n_positions",
|
| 87 |
+
"hidden_size": "n_embd",
|
| 88 |
+
"num_attention_heads": "n_head",
|
| 89 |
+
"num_hidden_layers": "n_layer",
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
def __init__(
|
| 93 |
+
self,
|
| 94 |
+
vocab_size=50400,
|
| 95 |
+
n_positions=2048,
|
| 96 |
+
n_embd=4096,
|
| 97 |
+
n_layer=28,
|
| 98 |
+
n_head=16,
|
| 99 |
+
rotary_dim=64,
|
| 100 |
+
n_inner=None,
|
| 101 |
+
activation_function="gelu_new",
|
| 102 |
+
resid_pdrop=0.0,
|
| 103 |
+
embd_pdrop=0.0,
|
| 104 |
+
attn_pdrop=0.0,
|
| 105 |
+
layer_norm_epsilon=1e-5,
|
| 106 |
+
initializer_range=0.02,
|
| 107 |
+
use_cache=True,
|
| 108 |
+
bos_token_id=50256,
|
| 109 |
+
eos_token_id=50256,
|
| 110 |
+
tie_word_embeddings=False,
|
| 111 |
+
**kwargs,
|
| 112 |
+
):
|
| 113 |
+
self.vocab_size = vocab_size
|
| 114 |
+
self.n_positions = n_positions
|
| 115 |
+
self.n_embd = n_embd
|
| 116 |
+
self.n_layer = n_layer
|
| 117 |
+
self.n_head = n_head
|
| 118 |
+
self.n_inner = n_inner
|
| 119 |
+
self.rotary_dim = rotary_dim
|
| 120 |
+
self.activation_function = activation_function
|
| 121 |
+
self.resid_pdrop = resid_pdrop
|
| 122 |
+
self.embd_pdrop = embd_pdrop
|
| 123 |
+
self.attn_pdrop = attn_pdrop
|
| 124 |
+
self.layer_norm_epsilon = layer_norm_epsilon
|
| 125 |
+
self.initializer_range = initializer_range
|
| 126 |
+
self.use_cache = use_cache
|
| 127 |
+
|
| 128 |
+
self.bos_token_id = bos_token_id
|
| 129 |
+
self.eos_token_id = eos_token_id
|
| 130 |
+
|
| 131 |
+
super().__init__(
|
| 132 |
+
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
|
# Copied from transformers.models.gpt2.configuration_gpt2.GPT2OnnxConfig
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J, describing the exported graph's
    input signature and how to build dummy inputs for tracing."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        # ONNX export needs a pad token; default to 0 when unset.
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec of the exported inputs (axis index -> name)."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            # With a KV cache the mask covers past + current tokens.
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        # Forward to the wrapped model config.
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        # Forward to the wrapped model config.
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for ONNX tracing, optionally with zeroed
        past_key_values and a correspondingly extended attention mask."""
        # Skip OnnxConfigWithPast's override and use the plain base-class
        # dummy inputs; the past entries are added explicitly below.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                # One (key, value) pair of zeros per layer:
                # (batch, heads, past_len, head_dim).
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Prepend... actually append ones so the mask also covers the
            # dummy past positions; dtype matches the existing mask.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        # Minimum opset this export is validated against.
        return 13
parrot/lib/python3.10/site-packages/transformers/models/gptj/modeling_tf_gptj.py
ADDED
|
@@ -0,0 +1,1099 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
""" TF 2.0 GPT-J model."""
|
| 16 |
+
|
| 17 |
+
from __future__ import annotations
|
| 18 |
+
|
| 19 |
+
from typing import Optional, Tuple, Union
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
import tensorflow as tf
|
| 23 |
+
|
| 24 |
+
from ...activations_tf import get_tf_activation
|
| 25 |
+
from ...file_utils import (
|
| 26 |
+
add_code_sample_docstrings,
|
| 27 |
+
add_start_docstrings,
|
| 28 |
+
add_start_docstrings_to_model_forward,
|
| 29 |
+
)
|
| 30 |
+
from ...modeling_tf_outputs import (
|
| 31 |
+
TFBaseModelOutputWithPast,
|
| 32 |
+
TFCausalLMOutputWithPast,
|
| 33 |
+
TFQuestionAnsweringModelOutput,
|
| 34 |
+
TFSequenceClassifierOutputWithPast,
|
| 35 |
+
)
|
| 36 |
+
from ...modeling_tf_utils import (
|
| 37 |
+
TFCausalLanguageModelingLoss,
|
| 38 |
+
TFModelInputType,
|
| 39 |
+
TFPreTrainedModel,
|
| 40 |
+
TFQuestionAnsweringLoss,
|
| 41 |
+
TFSequenceClassificationLoss,
|
| 42 |
+
TFSharedEmbeddings,
|
| 43 |
+
get_initializer,
|
| 44 |
+
keras,
|
| 45 |
+
keras_serializable,
|
| 46 |
+
unpack_inputs,
|
| 47 |
+
)
|
| 48 |
+
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
|
| 49 |
+
from ...utils import logging
|
| 50 |
+
from .configuration_gptj import GPTJConfig
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
logger = logging.get_logger(__name__)
|
| 54 |
+
|
| 55 |
+
_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
|
| 56 |
+
_CONFIG_FOR_DOC = "GPTJConfig"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
    """Build a `(num_pos, dim)` table of sinusoidal position encodings for rotary attention.

    The first `dim // 2` columns hold sines, the remaining columns hold cosines.
    """
    # One inverse frequency per pair of feature dimensions: 1 / 10000^(2i/dim).
    frequencies = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
    positions = tf.range(num_pos, dtype=tf.float32)
    # Outer product positions x frequencies -> phase angle per (position, frequency).
    angles = tf.cast(tf.einsum("i , j -> i j", positions, frequencies), tf.float32)
    # Sines first, cosines second, concatenated along the feature axis.
    return tf.concat((tf.sin(angles), tf.cos(angles)), axis=1)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
    """Rotate adjacent feature pairs: (x1, x2, x3, x4, ...) -> (-x2, x1, -x4, x3, ...).

    Operates on the last axis of a rank-4 tensor; used by `apply_rotary_pos_emb`.
    """
    # Pair each odd-indexed element (negated) with the preceding even-indexed one.
    paired = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
    # Collapse the trailing (pairs, 2) axes back into a single feature axis.
    paired_shape = shape_list(paired)
    flat_shape = paired_shape[:-2] + [tf.math.reduce_prod(paired_shape[-2:])]
    return tf.reshape(paired, flat_shape)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
    """Apply rotary position embeddings to `tensor`, given a (sin, cos) pair in `sincos`."""
    sin_part, cos_part = sincos
    # Insert a head axis and duplicate each value along the feature axis so the
    # tables line up with the interleaved output of `rotate_every_two`.
    sin_part = tf.repeat(sin_part[:, :, None, :], 2, 3)
    cos_part = tf.repeat(cos_part[:, :, None, :], 2, 3)
    return (tensor * cos_part) + (rotate_every_two(tensor) * sin_part)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class TFGPTJAttention(keras.layers.Layer):
    """GPT-J self-attention layer with rotary position embeddings and a causal mask.

    Queries/keys/values are produced by bias-free Dense projections; rotary
    embeddings are applied to the first `rotary_dim` feature dimensions (or all
    of them when `config.rotary_dim` is None).
    """

    def __init__(self, config: GPTJConfig, **kwargs):
        super().__init__(**kwargs)

        self.embed_dim = config.hidden_size
        self.num_attention_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_attention_heads
        if self.head_dim * self.num_attention_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
                f" `num_attention_heads`: {self.num_attention_heads})."
            )
        # Attention scores are divided by sqrt(head_dim) in `_attn`.
        self.scale_attn = self.head_dim**0.5
        # Number of leading feature dims that receive rotary embeddings; may be None.
        self.rotary_dim = config.rotary_dim

        self.attn_dropout = keras.layers.Dropout(config.attn_pdrop)
        self.resid_dropout = keras.layers.Dropout(config.resid_pdrop)

        # GPT-J projection layers carry no bias terms.
        self.q_proj = keras.layers.Dense(
            self.embed_dim,
            use_bias=False,
            kernel_initializer=get_initializer(config.initializer_range),
            name="q_proj",
        )
        self.k_proj = keras.layers.Dense(
            self.embed_dim,
            use_bias=False,
            kernel_initializer=get_initializer(config.initializer_range),
            name="k_proj",
        )
        self.v_proj = keras.layers.Dense(
            self.embed_dim,
            use_bias=False,
            kernel_initializer=get_initializer(config.initializer_range),
            name="v_proj",
        )
        self.out_proj = keras.layers.Dense(
            self.embed_dim,
            use_bias=False,
            kernel_initializer=get_initializer(config.initializer_range),
            name="out_proj",
        )

        self.max_positions = config.max_position_embeddings
        # Precomputed (1, 1, max_pos, max_pos) lower-triangular mask; sliced per call.
        self.lower_triangle_mask = tf.reshape(
            tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
            (1, 1, self.max_positions, self.max_positions),
        )
        pos_embd_dim = self.rotary_dim or self.embed_dim
        self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)

    def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
        """Return the boolean causal mask slice for the given query/key lengths.

        The row offset `key_length - query_length` accounts for cached past keys.
        """
        return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)

    @staticmethod
    def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
        """Large negative fill value used to zero out masked positions after softmax."""
        return tf.cast(tf.constant(-1e9), dtype)

    def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
        """
        Splits hidden dim into attn_head_size and num_attention_heads
        """
        new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
        hidden_states = tf.reshape(hidden_states, new_shape)
        # Rotary embeddings are applied before the head axis is moved, so skip the transpose here.
        if rotary:
            return hidden_states
        if len(shape_list(hidden_states)) == 4:
            return tf.transpose(hidden_states, (0, 2, 1, 3))  # (batch, head, seq_length, head_features)
        if len(shape_list(hidden_states)) == 5:
            return tf.transpose(hidden_states, (0, 1, 3, 2, 4))  # (batch, blocks, head, block_length, head_features)
        raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")

    def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden dim
        """
        if len(shape_list(hidden_states)) == 4:
            hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
        elif len(shape_list(hidden_states)) == 5:
            hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
        else:
            raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
        new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
        return tf.reshape(hidden_states, new_shape)

    def _attn(
        self,
        query: tf.Tensor,
        key: tf.Tensor,
        value: tf.Tensor,
        attention_mask: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
    ) -> Tuple[tf.Tensor, tf.Tensor]:
        """Compute scaled-dot-product attention; returns (attn_output, attn_weights)."""
        # compute causal mask from causal mask buffer
        query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
        causal_mask = self.get_causal_mask(key_length, query_length)

        # Keep the attention weights computation in fp32 to avoid overflow issues
        query = tf.cast(query, tf.float32)
        key = tf.cast(key, tf.float32)

        attn_weights = tf.matmul(query, key, transpose_b=True)
        # Future positions are replaced by a large negative bias before scaling.
        attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))

        attn_weights = attn_weights / self.scale_attn

        if attention_mask is not None:
            # Apply the attention mask (additive: masked positions carry -10000.0)
            attn_weights = attn_weights + attention_mask

        attn_weights = stable_softmax(attn_weights, axis=-1)
        attn_weights = tf.cast(attn_weights, value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = tf.matmul(attn_weights, value)

        return attn_output, attn_weights

    def call(
        self,
        hidden_states: tf.Tensor,
        layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
        attention_mask: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        use_cache: bool = False,
        output_attentions: bool = False,
    ):
        """Run self-attention; returns (attn_output, present[, attn_weights]).

        `present` is the (key, value) cache tuple when `use_cache` else None.
        """
        query = self.q_proj(hidden_states)
        key = self.k_proj(hidden_states)
        value = self.v_proj(hidden_states)

        # Query/key stay in (batch, seq, head, head_dim) layout until after rotary embedding.
        query = self._split_heads(query, True)
        key = self._split_heads(key, True)
        value = self._split_heads(value, False)

        # Gather the sinusoid rows for these positions and split into (sin, cos).
        sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
        sincos = tf.split(sincos, 2, axis=-1)
        if self.rotary_dim is not None:
            # Only the first `rotary_dim` features are rotated; the rest pass through.
            k_rot = key[:, :, :, : self.rotary_dim]
            k_pass = key[:, :, :, self.rotary_dim :]

            q_rot = query[:, :, :, : self.rotary_dim]
            q_pass = query[:, :, :, self.rotary_dim :]

            k_rot = apply_rotary_pos_emb(k_rot, sincos)
            q_rot = apply_rotary_pos_emb(q_rot, sincos)

            key = tf.concat((k_rot, k_pass), axis=-1)
            query = tf.concat((q_rot, q_pass), axis=-1)
        else:
            key = apply_rotary_pos_emb(key, sincos)
            query = apply_rotary_pos_emb(query, sincos)

        # Move the head axis forward: (batch, head, seq, head_dim).
        key = tf.transpose(key, (0, 2, 1, 3))
        query = tf.transpose(query, (0, 2, 1, 3))

        # Prepend cached keys/values from previous decoding steps along the sequence axis.
        if layer_past is not None:
            past_key = layer_past[0]
            past_value = layer_past[1]
            key = tf.concat((past_key, key), axis=-2)
            value = tf.concat((past_value, value), axis=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        # compute self-attention: V x Softmax(QK^T)
        attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)

    def build(self, input_shape=None):
        """Build each Dense sublayer under its own name scope (checkpoint compatibility)."""
        if self.built:
            return
        self.built = True
        if getattr(self, "q_proj", None) is not None:
            with tf.name_scope(self.q_proj.name):
                self.q_proj.build([None, None, self.embed_dim])
        if getattr(self, "k_proj", None) is not None:
            with tf.name_scope(self.k_proj.name):
                self.k_proj.build([None, None, self.embed_dim])
        if getattr(self, "v_proj", None) is not None:
            with tf.name_scope(self.v_proj.name):
                self.v_proj.build([None, None, self.embed_dim])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.embed_dim])
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
class TFGPTJMLP(keras.layers.Layer):
    """GPT-J feed-forward sublayer: Dense up-projection, activation, Dense down-projection, dropout."""

    def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
        super().__init__(**kwargs)
        hidden_width = config.n_embd

        # Project up to the intermediate width, then back down to the embedding width.
        self.fc_in = keras.layers.Dense(
            intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
        )
        self.fc_out = keras.layers.Dense(
            hidden_width, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
        )

        self.act = get_tf_activation(config.activation_function)
        self.dropout = keras.layers.Dropout(config.embd_pdrop)
        # Widths remembered for build().
        self.embed_dim = config.n_embd
        self.intermediate_size = intermediate_size

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        """Apply the two-layer MLP with dropout to `hidden_states`."""
        projected = self.act(self.fc_in(hidden_states))
        return self.dropout(self.fc_out(projected))

    def build(self, input_shape=None):
        """Build both Dense sublayers under their own name scopes."""
        if self.built:
            return
        self.built = True
        for sublayer, input_dim in (
            (getattr(self, "fc_in", None), self.embed_dim),
            (getattr(self, "fc_out", None), self.intermediate_size),
        ):
            if sublayer is not None:
                with tf.name_scope(sublayer.name):
                    sublayer.build([None, None, input_dim])
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
class TFGPTJBlock(keras.layers.Layer):
    """One GPT-J transformer block: pre-norm, with attention and MLP on parallel residual branches."""

    def __init__(self, config: GPTJConfig, **kwargs):
        super().__init__(**kwargs)
        # Feed-forward width defaults to 4x the embedding size when not configured.
        inner_dim = 4 * config.n_embd if config.n_inner is None else config.n_inner
        self.ln_1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
        self.attn = TFGPTJAttention(config, name="attn")
        self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
        self.config = config

    def call(
        self,
        hidden_states: tf.Tensor,
        layer_past: tf.Tensor | None = None,
        attention_mask: tf.Tensor | None = None,
        position_ids: tf.Tensor | None = None,
        head_mask: tf.Tensor | None = None,
        use_cache: bool = False,
        output_attentions: bool = False,
    ):
        """Run one block; returns (hidden_states, present?, attentions?)."""
        shortcut = hidden_states
        normed = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states=normed,
            layer_past=layer_past,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )  # attn_outputs: attn_output, present, (attentions)
        attn_output = attn_outputs[0]

        # Attention and MLP both read the same normalized input (parallel residual).
        mlp_output = self.mlp(normed)
        hidden_states = attn_output + mlp_output + shortcut

        # Keep `present` only when caching; attention weights (if requested) always trail.
        trailing = attn_outputs[1:] if use_cache else attn_outputs[2:]
        return (hidden_states,) + trailing  # hidden_states, present, (attentions)

    def build(self, input_shape=None):
        """Build each sublayer under its own name scope."""
        if self.built:
            return
        self.built = True
        for sublayer, shape in (
            (getattr(self, "ln_1", None), [None, None, self.config.n_embd]),
            (getattr(self, "attn", None), None),
            (getattr(self, "mlp", None), None),
        ):
            if sublayer is not None:
                with tf.name_scope(sublayer.name):
                    sublayer.build(shape)
|
| 375 |
+
|
| 376 |
+
|
| 377 |
+
@keras_serializable
class TFGPTJMainLayer(keras.layers.Layer):
    """Core GPT-J stack: shared token embeddings, dropout, `n_layer` blocks, final LayerNorm.

    Shared between the bare model and all task heads via the `transformer` attribute.
    """

    config_class = GPTJConfig

    def __init__(self, config: GPTJConfig, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

        self.config = config
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.use_cache = config.use_cache
        self.return_dict = config.use_return_dict

        self.num_hidden_layers = config.n_layer
        self.n_embd = config.n_embd
        self.n_positions = config.n_positions
        self.initializer_range = config.initializer_range

        # Token embedding matrix; also used to embed token_type_ids in call().
        self.wte = TFSharedEmbeddings(
            config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
        )
        self.drop = keras.layers.Dropout(config.embd_pdrop)
        self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
        self.ln_f = keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
        self.embed_dim = config.n_embd

    def get_input_embeddings(self):
        """Return the shared token embedding layer."""
        return self.wte

    def set_input_embeddings(self, value: tf.Tensor):
        """Replace the embedding weights and update the tracked vocab size."""
        self.wte.weight = value
        self.wte.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        # Head pruning is not implemented for the TF GPT-J port.
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
        """Run the full transformer stack.

        Exactly one of `input_ids` / `inputs_embeds` must be given. Returns a
        `TFBaseModelOutputWithPast` when `return_dict` else a tuple of the
        non-None values (hidden state, presents, hidden states, attentions).
        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            # Flatten any leading dims into a single batch axis.
            input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if past_key_values is None:
            past_length = 0
            past_key_values = [None] * len(self.h)
        else:
            # Sequence length of the cached keys in the first layer.
            past_length = shape_list(past_key_values[0][0])[-2]

        if position_ids is None:
            # Positions continue from the cached prefix length.
            position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)

        if attention_mask is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask_shape = shape_list(attention_mask)
            attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            one_cst = tf.constant(1.0)
            attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
            attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_hidden_layers

        position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
            inputs_embeds = self.wte(input_ids, mode="embedding")

        if token_type_ids is not None:
            token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
            # Token types are embedded with the same shared matrix as tokens.
            token_type_embeds = self.wte(token_type_ids, mode="embedding")
        else:
            token_type_embeds = tf.constant(0.0)

        token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
        hidden_states = inputs_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states, training=training)

        # Restore the original leading dims when emitting hidden states.
        output_shape = input_shape + [shape_list(hidden_states)[-1]]

        presents = () if use_cache else None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)

            outputs = block(
                hidden_states=hidden_states,
                layer_past=layer_past,
                attention_mask=attention_mask,
                position_ids=position_ids,
                head_mask=head_mask[i],
                use_cache=use_cache,
                output_attentions=output_attentions,
                training=training,
            )

            hidden_states = outputs[0]
            if use_cache:
                presents = presents + (outputs[1],)

            if output_attentions:
                # Attention weights sit at index 1 when no cache entry precedes them.
                all_attentions = all_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.ln_f(hidden_states)

        hidden_states = tf.reshape(hidden_states, output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )

    def build(self, input_shape=None):
        """Build the embedding, final LayerNorm, and every block under its name scope."""
        if self.built:
            return
        self.built = True
        if getattr(self, "wte", None) is not None:
            with tf.name_scope(self.wte.name):
                self.wte.build(None)
        if getattr(self, "ln_f", None) is not None:
            with tf.name_scope(self.ln_f.name):
                self.ln_f.build([None, None, self.embed_dim])
        if getattr(self, "h", None) is not None:
            for layer in self.h:
                with tf.name_scope(layer.name):
                    layer.build(None)
|
| 559 |
+
|
| 560 |
+
|
| 561 |
+
class TFGPTJPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPTJConfig
    # Attribute name holding the TFGPTJMainLayer in subclasses; used when mapping checkpoints.
    base_model_prefix = "transformer"
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
GPTJ_START_DOCSTRING = r"""
|
| 574 |
+
|
| 575 |
+
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| 576 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
| 577 |
+
etc.)
|
| 578 |
+
|
| 579 |
+
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
|
| 580 |
+
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
|
| 581 |
+
behavior.
|
| 582 |
+
|
| 583 |
+
<Tip>
|
| 584 |
+
|
| 585 |
+
TensorFlow models and layers in `transformers` accept two formats as input:
|
| 586 |
+
|
| 587 |
+
- having all inputs as keyword arguments (like PyTorch models), or
|
| 588 |
+
- having all inputs as a list, tuple or dict in the first positional argument.
|
| 589 |
+
|
| 590 |
+
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
|
| 591 |
+
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
|
| 592 |
+
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
|
| 593 |
+
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
|
| 594 |
+
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
|
| 595 |
+
positional argument:
|
| 596 |
+
|
| 597 |
+
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
|
| 598 |
+
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
|
| 599 |
+
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
|
| 600 |
+
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
|
| 601 |
+
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
|
| 602 |
+
|
| 603 |
+
Note that when creating models and layers with
|
| 604 |
+
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
|
| 605 |
+
about any of this, as you can just pass inputs like you would to any other Python function!
|
| 606 |
+
|
| 607 |
+
</Tip>
|
| 608 |
+
|
| 609 |
+
Parameters:
|
| 610 |
+
config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
|
| 611 |
+
Initializing with a config file does not load the weights associated with the model, only the
|
| 612 |
+
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
|
| 613 |
+
"""
|
| 614 |
+
|
| 615 |
+
GPTJ_INPUTS_DOCSTRING = r"""
|
| 616 |
+
Args:
|
| 617 |
+
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
|
| 618 |
+
`input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
|
| 619 |
+
input past key value states). Indices of input sequence tokens in the vocabulary.
|
| 620 |
+
|
| 621 |
+
If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
|
| 622 |
+
|
| 623 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
|
| 624 |
+
[`PreTrainedTokenizer.encode`] for details.
|
| 625 |
+
|
| 626 |
+
[What are input IDs?](../glossary#input-ids)
|
| 627 |
+
past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
|
| 628 |
+
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
|
| 629 |
+
`past` output below). Can be used to speed up sequential decoding. The token ids which have their past
|
| 630 |
+
given to this model should not be passed as input ids as they have already been computed.
|
| 631 |
+
attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
| 632 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
| 633 |
+
|
| 634 |
+
- 1 for tokens that are **not masked**,
|
| 635 |
+
- 0 for tokens that are **masked**.
|
| 636 |
+
|
| 637 |
+
[What are attention masks?](../glossary#attention-mask)
|
| 638 |
+
token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
| 639 |
+
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
|
| 640 |
+
1]`:
|
| 641 |
+
|
| 642 |
+
- 0 corresponds to a *sentence A* token,
|
| 643 |
+
- 1 corresponds to a *sentence B* token.
|
| 644 |
+
|
| 645 |
+
[What are token type IDs?](../glossary#token-type-ids)
|
| 646 |
+
position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
|
| 647 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| 648 |
+
config.max_position_embeddings - 1]`.
|
| 649 |
+
|
| 650 |
+
[What are position IDs?](../glossary#position-ids)
|
| 651 |
+
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
|
| 652 |
+
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
|
| 653 |
+
|
| 654 |
+
- 1 indicates the head is **not masked**,
|
| 655 |
+
- 0 indicates the head is **masked**.
|
| 656 |
+
|
| 657 |
+
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| 658 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| 659 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| 660 |
+
model's internal embedding lookup matrix.
|
| 661 |
+
output_attentions (`bool`, *optional*):
|
| 662 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| 663 |
+
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
|
| 664 |
+
config will be used instead.
|
| 665 |
+
output_hidden_states (`bool`, *optional*):
|
| 666 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| 667 |
+
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
|
| 668 |
+
used instead.
|
| 669 |
+
return_dict (`bool`, *optional*):
|
| 670 |
+
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
|
| 671 |
+
in eager mode, in graph mode the value will always be set to True.
|
| 672 |
+
training (`bool`, *optional*, defaults to `False`):
|
| 673 |
+
Whether or not to use the model in training mode (some modules like dropout modules have different
|
| 674 |
+
behaviors between training and evaluation).
|
| 675 |
+
"""
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
@add_start_docstrings(
    "The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
    GPTJ_START_DOCSTRING,
)
class TFGPTJModel(TFGPTJPreTrainedModel):
    """Bare GPT-J transformer: a thin wrapper around `TFGPTJMainLayer` with no task-specific head."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # All of the real computation lives in the shared main layer.
        self.transformer = TFGPTJMainLayer(config, name="transformer")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
        r"""
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past`). Set to `False` during training, `True` during generation
        """
        # Pure pass-through: every argument is forwarded to the main layer unchanged.
        return self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        transformer = getattr(self, "transformer", None)
        if transformer is not None:
            # Build the sub-layer inside its own name scope so weight names match checkpoints.
            with tf.name_scope(transformer.name):
                transformer.build(None)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
@add_start_docstrings(
    """
    The GPT-J Model transformer with a language modeling head on top.
    """,
    GPTJ_START_DOCSTRING,
)
class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
    """GPT-J with a dense language-modeling head projecting hidden states to vocabulary logits."""

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFGPTJMainLayer(config, name="transformer")
        # Projection from hidden size to vocab size; its weights are created lazily in `build`.
        self.lm_head = keras.layers.Dense(
            config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
        )
        self.config = config

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        position_ids = kwargs.get("position_ids", None)
        attention_mask = kwargs.get("attention_mask", None)

        # With a cache present, only the newest token has to be fed to the model.
        if past_key_values:
            inputs = tf.expand_dims(inputs[:, -1], -1)
            if token_type_ids is not None:
                token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)

        if attention_mask is not None and position_ids is None:
            # Derive positions from the mask: exclusive cumsum gives 0-based indices that skip padding.
            position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
            if past_key_values:
                position_ids = tf.expand_dims(position_ids[:, -1], -1)

        return {
            "input_ids": inputs,
            "attention_mask": attention_mask,
            "position_ids": position_ids,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
            "token_type_ids": token_type_ids,
        }

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFCausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
        r"""
        labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        lm_logits = self.lm_head(transformer_outputs[0])

        loss = None
        if labels is not None:
            # Drop the last logit and the first label so each logit predicts the *next* token.
            loss = self.hf_compute_loss(labels[:, 1:], lm_logits[:, :-1])

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFCausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each sub-layer in its own name scope so weight names match checkpoints.
        for layer, shape in (
            (getattr(self, "transformer", None), None),
            (getattr(self, "lm_head", None), [None, None, self.config.n_embd]),
        ):
            if layer is not None:
                with tf.name_scope(layer.name):
                    layer.build(shape)
|
| 863 |
+
|
| 864 |
+
|
| 865 |
+
@add_start_docstrings(
    """
    The GPT-J Model transformer with a sequence classification head on top (linear layer).

    [`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT, GPT-2, GPT-Neo) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    GPTJ_START_DOCSTRING,
)
class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
    # Checkpoint keys that are expected to be absent for this head and should not warn on load.
    _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.transformer = TFGPTJMainLayer(config, name="transformer")
        # Classification head: projects the selected hidden state to `num_labels` scores, no bias.
        self.score = keras.layers.Dense(
            self.num_labels,
            use_bias=False,
            kernel_initializer=get_initializer(config.initializer_range),
            name="score",
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
        r"""
        labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        hidden_states = transformer_outputs[0]
        # Per-position scores; the classification logits are then taken at the last real token.
        logits = self.score(hidden_states)
        logits_shape = shape_list(logits)
        in_logits = None
        if self.config.pad_token_id is None:
            # No pad token configured: fall back to the last position of every row.
            sequence_lengths = -1
        else:
            if input_ids is not None:
                # argmax over the pad-equality mask finds the FIRST pad position per row;
                # minus 1 gives the index of the last non-pad token.
                # NOTE(review): this assumes right-padding — TODO confirm with tokenizer usage.
                sequence_lengths = (
                    tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
                    - 1
                )
                # Rows with no padding produce argmax 0 -> index -1; replace those with seq_len - 1.
                # NOTE(review): `shape_list(input_ids[-1])` is the shape of the LAST ROW, i.e. a
                # 1-element list [seq_len]; it broadcasts against the per-row indices. Presumably
                # intentional, but `shape_list(input_ids)[-1]` would read more directly — verify.
                sequence_lengths = tf.where(
                    sequence_lengths >= 0,
                    sequence_lengths,
                    tf.cast(shape_list(input_ids[-1]), sequence_lengths.dtype) - 1,
                )
                # Gather one hidden-state score vector per batch row at its last-token index.
                in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
            else:
                # `inputs_embeds` path: padding cannot be detected, use the last position.
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
                )
        loss = None

        if labels is not None:
            if self.config.pad_token_id is None and logits_shape[0] != 1:
                raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")

            if not tf.is_tensor(sequence_lengths):
                # sequence_lengths is the Python int -1 here: slice the last position directly.
                in_logits = logits[0 : logits_shape[0], sequence_lengths]

            loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
        # Prefer the gathered last-token logits when available, else the full per-position logits.
        pooled_logits = in_logits if in_logits is not None else logits

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        # Build sub-layers inside their own name scopes so weight names match checkpoints.
        if self.built:
            return
        self.built = True
        if getattr(self, "transformer", None) is not None:
            with tf.name_scope(self.transformer.name):
                self.transformer.build(None)
        if getattr(self, "score", None) is not None:
            with tf.name_scope(self.score.name):
                self.score.build([None, None, self.config.n_embd])
|
| 997 |
+
|
| 998 |
+
|
| 999 |
+
@add_start_docstrings(
    """
    The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    GPTJ_START_DOCSTRING,
)
class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
    # Checkpoint keys that are expected to be absent for this head and should not warn on load.
    _keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.transformer = TFGPTJMainLayer(config, name="transformer")
        # Two-unit projection producing start/end span scores per position.
        self.qa_outputs = keras.layers.Dense(
            self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        start_positions: np.ndarray | tf.Tensor | None = None,
        end_positions: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
        r"""
        start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        transformer_outputs = self.transformer(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = transformer_outputs[0]

        # Split the two-unit projection into per-position start and end scores.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        loss = None
        if start_positions is not None and end_positions is not None:
            labels = {"start_position": start_positions, "end_position": end_positions}
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Build each sub-layer in its own name scope so weight names match checkpoints.
        for layer, shape in (
            (getattr(self, "transformer", None), None),
            (getattr(self, "qa_outputs", None), [None, None, self.config.hidden_size]),
        ):
            if layer is not None:
                with tf.name_scope(layer.name):
                    layer.build(shape)
|
parrot/lib/python3.10/site-packages/transformers/utils/__init__.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
# coding=utf-8
|
| 3 |
+
|
| 4 |
+
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
|
| 5 |
+
#
|
| 6 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 7 |
+
# you may not use this file except in compliance with the License.
|
| 8 |
+
# You may obtain a copy of the License at
|
| 9 |
+
#
|
| 10 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 11 |
+
#
|
| 12 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 13 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 14 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 15 |
+
# See the License for the specific language governing permissions and
|
| 16 |
+
# limitations under the License.
|
| 17 |
+
|
| 18 |
+
from huggingface_hub import get_full_repo_name # for backward compatibility
|
| 19 |
+
from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY as DISABLE_TELEMETRY # for backward compatibility
|
| 20 |
+
from packaging import version
|
| 21 |
+
|
| 22 |
+
from .. import __version__
|
| 23 |
+
from .backbone_utils import BackboneConfigMixin, BackboneMixin
|
| 24 |
+
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
|
| 25 |
+
from .doc import (
|
| 26 |
+
add_code_sample_docstrings,
|
| 27 |
+
add_end_docstrings,
|
| 28 |
+
add_start_docstrings,
|
| 29 |
+
add_start_docstrings_to_model_forward,
|
| 30 |
+
copy_func,
|
| 31 |
+
replace_return_docstrings,
|
| 32 |
+
)
|
| 33 |
+
from .generic import (
|
| 34 |
+
ContextManagers,
|
| 35 |
+
ExplicitEnum,
|
| 36 |
+
ModelOutput,
|
| 37 |
+
PaddingStrategy,
|
| 38 |
+
TensorType,
|
| 39 |
+
add_model_info_to_auto_map,
|
| 40 |
+
cached_property,
|
| 41 |
+
can_return_loss,
|
| 42 |
+
expand_dims,
|
| 43 |
+
find_labels,
|
| 44 |
+
flatten_dict,
|
| 45 |
+
infer_framework,
|
| 46 |
+
is_jax_tensor,
|
| 47 |
+
is_numpy_array,
|
| 48 |
+
is_tensor,
|
| 49 |
+
is_tf_symbolic_tensor,
|
| 50 |
+
is_tf_tensor,
|
| 51 |
+
is_torch_device,
|
| 52 |
+
is_torch_dtype,
|
| 53 |
+
is_torch_tensor,
|
| 54 |
+
reshape,
|
| 55 |
+
squeeze,
|
| 56 |
+
strtobool,
|
| 57 |
+
tensor_size,
|
| 58 |
+
to_numpy,
|
| 59 |
+
to_py_obj,
|
| 60 |
+
transpose,
|
| 61 |
+
working_or_temp_dir,
|
| 62 |
+
)
|
| 63 |
+
from .hub import (
|
| 64 |
+
CLOUDFRONT_DISTRIB_PREFIX,
|
| 65 |
+
HF_MODULES_CACHE,
|
| 66 |
+
HUGGINGFACE_CO_PREFIX,
|
| 67 |
+
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
|
| 68 |
+
PYTORCH_PRETRAINED_BERT_CACHE,
|
| 69 |
+
PYTORCH_TRANSFORMERS_CACHE,
|
| 70 |
+
S3_BUCKET_PREFIX,
|
| 71 |
+
TRANSFORMERS_CACHE,
|
| 72 |
+
TRANSFORMERS_DYNAMIC_MODULE_NAME,
|
| 73 |
+
EntryNotFoundError,
|
| 74 |
+
PushInProgress,
|
| 75 |
+
PushToHubMixin,
|
| 76 |
+
RepositoryNotFoundError,
|
| 77 |
+
RevisionNotFoundError,
|
| 78 |
+
cached_file,
|
| 79 |
+
default_cache_path,
|
| 80 |
+
define_sagemaker_information,
|
| 81 |
+
download_url,
|
| 82 |
+
extract_commit_hash,
|
| 83 |
+
get_cached_models,
|
| 84 |
+
get_file_from_repo,
|
| 85 |
+
has_file,
|
| 86 |
+
http_user_agent,
|
| 87 |
+
is_offline_mode,
|
| 88 |
+
is_remote_url,
|
| 89 |
+
move_cache,
|
| 90 |
+
send_example_telemetry,
|
| 91 |
+
try_to_load_from_cache,
|
| 92 |
+
)
|
| 93 |
+
from .import_utils import (
|
| 94 |
+
ACCELERATE_MIN_VERSION,
|
| 95 |
+
ENV_VARS_TRUE_AND_AUTO_VALUES,
|
| 96 |
+
ENV_VARS_TRUE_VALUES,
|
| 97 |
+
TORCH_FX_REQUIRED_VERSION,
|
| 98 |
+
USE_JAX,
|
| 99 |
+
USE_TF,
|
| 100 |
+
USE_TORCH,
|
| 101 |
+
XLA_FSDPV2_MIN_VERSION,
|
| 102 |
+
DummyObject,
|
| 103 |
+
OptionalDependencyNotAvailable,
|
| 104 |
+
_LazyModule,
|
| 105 |
+
ccl_version,
|
| 106 |
+
direct_transformers_import,
|
| 107 |
+
get_torch_version,
|
| 108 |
+
is_accelerate_available,
|
| 109 |
+
is_apex_available,
|
| 110 |
+
is_aqlm_available,
|
| 111 |
+
is_auto_awq_available,
|
| 112 |
+
is_auto_gptq_available,
|
| 113 |
+
is_av_available,
|
| 114 |
+
is_bitsandbytes_available,
|
| 115 |
+
is_bs4_available,
|
| 116 |
+
is_coloredlogs_available,
|
| 117 |
+
is_cv2_available,
|
| 118 |
+
is_cython_available,
|
| 119 |
+
is_datasets_available,
|
| 120 |
+
is_decord_available,
|
| 121 |
+
is_detectron2_available,
|
| 122 |
+
is_eetq_available,
|
| 123 |
+
is_essentia_available,
|
| 124 |
+
is_faiss_available,
|
| 125 |
+
is_flash_attn_2_available,
|
| 126 |
+
is_flash_attn_greater_or_equal_2_10,
|
| 127 |
+
is_flax_available,
|
| 128 |
+
is_fsdp_available,
|
| 129 |
+
is_ftfy_available,
|
| 130 |
+
is_g2p_en_available,
|
| 131 |
+
is_galore_torch_available,
|
| 132 |
+
is_gguf_available,
|
| 133 |
+
is_hqq_available,
|
| 134 |
+
is_in_notebook,
|
| 135 |
+
is_ipex_available,
|
| 136 |
+
is_jieba_available,
|
| 137 |
+
is_jinja_available,
|
| 138 |
+
is_jumanpp_available,
|
| 139 |
+
is_kenlm_available,
|
| 140 |
+
is_keras_nlp_available,
|
| 141 |
+
is_levenshtein_available,
|
| 142 |
+
is_librosa_available,
|
| 143 |
+
is_mlx_available,
|
| 144 |
+
is_natten_available,
|
| 145 |
+
is_ninja_available,
|
| 146 |
+
is_nltk_available,
|
| 147 |
+
is_onnx_available,
|
| 148 |
+
is_openai_available,
|
| 149 |
+
is_optimum_available,
|
| 150 |
+
is_pandas_available,
|
| 151 |
+
is_peft_available,
|
| 152 |
+
is_phonemizer_available,
|
| 153 |
+
is_pretty_midi_available,
|
| 154 |
+
is_protobuf_available,
|
| 155 |
+
is_psutil_available,
|
| 156 |
+
is_py3nvml_available,
|
| 157 |
+
is_pyctcdecode_available,
|
| 158 |
+
is_pytesseract_available,
|
| 159 |
+
is_pytest_available,
|
| 160 |
+
is_pytorch_quantization_available,
|
| 161 |
+
is_quanto_available,
|
| 162 |
+
is_rjieba_available,
|
| 163 |
+
is_sacremoses_available,
|
| 164 |
+
is_safetensors_available,
|
| 165 |
+
is_sagemaker_dp_enabled,
|
| 166 |
+
is_sagemaker_mp_enabled,
|
| 167 |
+
is_scipy_available,
|
| 168 |
+
is_sentencepiece_available,
|
| 169 |
+
is_seqio_available,
|
| 170 |
+
is_sklearn_available,
|
| 171 |
+
is_soundfile_availble,
|
| 172 |
+
is_spacy_available,
|
| 173 |
+
is_speech_available,
|
| 174 |
+
is_sudachi_available,
|
| 175 |
+
is_sudachi_projection_available,
|
| 176 |
+
is_tensorflow_probability_available,
|
| 177 |
+
is_tensorflow_text_available,
|
| 178 |
+
is_tf2onnx_available,
|
| 179 |
+
is_tf_available,
|
| 180 |
+
is_timm_available,
|
| 181 |
+
is_tokenizers_available,
|
| 182 |
+
is_torch_available,
|
| 183 |
+
is_torch_bf16_available,
|
| 184 |
+
is_torch_bf16_available_on_device,
|
| 185 |
+
is_torch_bf16_cpu_available,
|
| 186 |
+
is_torch_bf16_gpu_available,
|
| 187 |
+
is_torch_compile_available,
|
| 188 |
+
is_torch_cuda_available,
|
| 189 |
+
is_torch_fp16_available_on_device,
|
| 190 |
+
is_torch_fx_available,
|
| 191 |
+
is_torch_fx_proxy,
|
| 192 |
+
is_torch_mlu_available,
|
| 193 |
+
is_torch_mps_available,
|
| 194 |
+
is_torch_neuroncore_available,
|
| 195 |
+
is_torch_npu_available,
|
| 196 |
+
is_torch_sdpa_available,
|
| 197 |
+
is_torch_tensorrt_fx_available,
|
| 198 |
+
is_torch_tf32_available,
|
| 199 |
+
is_torch_tpu_available,
|
| 200 |
+
is_torch_xla_available,
|
| 201 |
+
is_torch_xpu_available,
|
| 202 |
+
is_torchaudio_available,
|
| 203 |
+
is_torchdistx_available,
|
| 204 |
+
is_torchdynamo_available,
|
| 205 |
+
is_torchdynamo_compiling,
|
| 206 |
+
is_torchvision_available,
|
| 207 |
+
is_training_run_on_sagemaker,
|
| 208 |
+
is_vision_available,
|
| 209 |
+
requires_backends,
|
| 210 |
+
torch_only_method,
|
| 211 |
+
)
|
| 212 |
+
from .peft_utils import (
|
| 213 |
+
ADAPTER_CONFIG_NAME,
|
| 214 |
+
ADAPTER_SAFE_WEIGHTS_NAME,
|
| 215 |
+
ADAPTER_WEIGHTS_NAME,
|
| 216 |
+
check_peft_version,
|
| 217 |
+
find_adapter_config_file,
|
| 218 |
+
)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
# Canonical filenames used when saving and loading model artifacts, per framework.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
# Image processors share the feature-extractor config filename.
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
PROCESSOR_NAME = "processor_config.json"
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

# SentencePiece's word-boundary marker character (U+2581 LOWER ONE EIGHTH BLOCK).
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

# Tiny fixed inputs used to trace/build models without real data.
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
def check_min_version(min_version):
    """Raise `ImportError` if the installed Transformers version is older than `min_version`."""
    if version.parse(__version__) >= version.parse(min_version):
        return
    # Dev versions can only come from a source install, so point users there.
    if "dev" in min_version:
        error_message = (
            "This example requires a source install from HuggingFace Transformers (see "
            "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
        )
    else:
        error_message = f"This example requires a minimum version of {min_version},"
    error_message += f" but the version found is {__version__}.\n"
    raise ImportError(
        error_message
        + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
        "versions of HuggingFace Transformers."
    )
|
parrot/lib/python3.10/site-packages/transformers/utils/backbone_utils.py
ADDED
|
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2023 The HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
""" Collection of utils to be used by backbones and their components."""
|
| 17 |
+
|
| 18 |
+
import enum
|
| 19 |
+
import inspect
|
| 20 |
+
from typing import Iterable, List, Optional, Tuple, Union
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class BackboneType(enum.Enum):
    """Which library implements a backbone model."""

    # Backbone wrapped from the `timm` library.
    TIMM = "timm"
    # Backbone implemented natively in `transformers`.
    TRANSFORMERS = "transformers"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def verify_out_features_out_indices(
    out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]
):
    """
    Verify that out_indices and out_features are valid for the given stage_names.

    Raises `ValueError` if either selector is of the wrong type, references unknown or duplicate
    stages, is out of order with respect to `stage_names`, or if the two selectors disagree.
    """
    if stage_names is None:
        raise ValueError("Stage_names must be set for transformers backbones")

    if out_features is not None:
        if not isinstance(out_features, list):
            raise ValueError(f"out_features must be a list got {type(out_features)}")
        if any(feat not in stage_names for feat in out_features):
            raise ValueError(f"out_features must be a subset of stage_names: {stage_names} got {out_features}")
        if len(out_features) != len(set(out_features)):
            raise ValueError(f"out_features must not contain any duplicates, got {out_features}")
        if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]):
            raise ValueError(
                f"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}"
            )

    if out_indices is not None:
        if not isinstance(out_indices, (list, tuple)):
            raise ValueError(f"out_indices must be a list or tuple, got {type(out_indices)}")
        # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,]
        positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices)
        # Bug fix: the original used `any(idx for idx in positive_indices if idx not in range(...))`,
        # which tests the truthiness of the index values rather than the membership check itself.
        # Assert the membership check directly.
        if any(idx not in range(len(stage_names)) for idx in positive_indices):
            raise ValueError(f"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}")
        if len(positive_indices) != len(set(positive_indices)):
            msg = f"out_indices must not contain any duplicates, got {out_indices}"
            msg += f"(equivalent to {positive_indices}))" if positive_indices != out_indices else ""
            raise ValueError(msg)
        if positive_indices != tuple(sorted(positive_indices)):
            sorted_negative = tuple(idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0]))
            raise ValueError(
                f"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}"
            )

    if out_features is not None and out_indices is not None:
        if len(out_features) != len(out_indices):
            raise ValueError("out_features and out_indices should have the same length if both are set")
        if out_features != [stage_names[idx] for idx in out_indices]:
            raise ValueError("out_features and out_indices should correspond to the same stages if both are set")
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _align_output_features_output_indices(
|
| 74 |
+
out_features: Optional[List[str]],
|
| 75 |
+
out_indices: Optional[Union[List[int], Tuple[int]]],
|
| 76 |
+
stage_names: List[str],
|
| 77 |
+
):
|
| 78 |
+
"""
|
| 79 |
+
Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.
|
| 80 |
+
|
| 81 |
+
The logic is as follows:
|
| 82 |
+
- `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the
|
| 83 |
+
`out_indices`.
|
| 84 |
+
- `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the
|
| 85 |
+
`out_features`.
|
| 86 |
+
- `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.
|
| 87 |
+
- `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.
|
| 88 |
+
|
| 89 |
+
Args:
|
| 90 |
+
out_features (`List[str]`): The names of the features for the backbone to output.
|
| 91 |
+
out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
|
| 92 |
+
stage_names (`List[str]`): The names of the stages of the backbone.
|
| 93 |
+
"""
|
| 94 |
+
if out_indices is None and out_features is None:
|
| 95 |
+
out_indices = [len(stage_names) - 1]
|
| 96 |
+
out_features = [stage_names[-1]]
|
| 97 |
+
elif out_indices is None and out_features is not None:
|
| 98 |
+
out_indices = [stage_names.index(layer) for layer in out_features]
|
| 99 |
+
elif out_features is None and out_indices is not None:
|
| 100 |
+
out_features = [stage_names[idx] for idx in out_indices]
|
| 101 |
+
return out_features, out_indices
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def get_aligned_output_features_output_indices(
    out_features: Optional[List[str]],
    out_indices: Optional[Union[List[int], Tuple[int]]],
    stage_names: List[str],
) -> Tuple[List[str], List[int]]:
    """
    Return an aligned (`out_features`, `out_indices`) pair for `stage_names`.

    Unset selectors are filled in from the other one (or default to the last stage when both
    are unset); inputs and the derived outputs are both validated with
    `verify_out_features_out_indices`, raising `ValueError` on any inconsistency.

    Args:
        out_features (`List[str]`): The names of the features for the backbone to output.
        out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.
        stage_names (`List[str]`): The names of the stages of the backbone.
    """
    # Reject inconsistent or malformed inputs up front.
    verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)

    aligned_features, aligned_indices = _align_output_features_output_indices(
        out_features=out_features, out_indices=out_indices, stage_names=stage_names
    )

    # Sanity-check the derived values as well before handing them back.
    verify_out_features_out_indices(
        out_features=aligned_features, out_indices=aligned_indices, stage_names=stage_names
    )
    return aligned_features, aligned_indices
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class BackboneMixin:
    """
    Mixin providing the shared backbone API — stage bookkeeping and `out_features` /
    `out_indices` selection — for both timm-wrapped and native transformers backbones.
    """

    # Which implementation backs this model; assigned in `_init_backbone`.
    backbone_type: Optional[BackboneType] = None

    def _init_timm_backbone(self, config) -> None:
        """
        Initialize stage bookkeeping from a timm backbone. The backbone must already be
        loaded to `self._backbone` before this is called.
        """
        if getattr(self, "_backbone", None) is None:
            raise ValueError("self._backbone must be set before calling _init_timm_backbone")

        # These will disagree with the defaults for the transformers models e.g. for resnet50
        # the transformer model has out_features = ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
        # the timm model has out_features = ['act', 'layer1', 'layer2', 'layer3', 'layer4']
        self.stage_names = [stage["module"] for stage in self._backbone.feature_info.info]
        self.num_features = [stage["num_chs"] for stage in self._backbone.feature_info.info]
        out_indices = self._backbone.feature_info.out_indices
        out_features = self._backbone.feature_info.module_name()

        # We verify the out indices and out features are valid
        verify_out_features_out_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
        self._out_features, self._out_indices = out_features, out_indices

    def _init_transformers_backbone(self, config) -> None:
        """Initialize stage bookkeeping from a native transformers backbone config."""
        # `stage_names` is required; let getattr raise AttributeError if it is missing.
        stage_names = getattr(config, "stage_names")
        out_features = getattr(config, "out_features", None)
        out_indices = getattr(config, "out_indices", None)

        self.stage_names = stage_names
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=stage_names
        )
        # Number of channels for each stage. This is set in the transformer backbone model init
        self.num_features = None

    def _init_backbone(self, config) -> None:
        """
        Method to initialize the backbone. This method is called by the constructor of the base class after the
        pretrained model weights have been loaded.
        """
        self.config = config

        self.use_timm_backbone = getattr(config, "use_timm_backbone", False)
        self.backbone_type = BackboneType.TIMM if self.use_timm_backbone else BackboneType.TRANSFORMERS

        # Dispatch to the matching initializer.
        if self.backbone_type == BackboneType.TIMM:
            self._init_timm_backbone(config)
        elif self.backbone_type == BackboneType.TRANSFORMERS:
            self._init_transformers_backbone(config)
        else:
            raise ValueError(f"backbone_type {self.backbone_type} not supported.")

    @property
    def out_features(self):
        # Names of the stages the backbone returns from its forward pass.
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: List[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=None, stage_names=self.stage_names
        )

    @property
    def out_indices(self):
        # Positions (into `stage_names`) of the stages the backbone returns.
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[Tuple[int], List[int]]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=out_indices, stage_names=self.stage_names
        )

    @property
    def out_feature_channels(self):
        # the current backbones will output the number of channels for each stage
        # even if that stage is not in the out_features list.
        return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)}

    @property
    def channels(self):
        # Channel counts restricted to the selected `out_features`, in selection order.
        return [self.out_feature_channels[name] for name in self.out_features]

    def forward_with_filtered_kwargs(self, *args, **kwargs):
        # Drop any kwargs that `forward` does not accept, then delegate to the model call.
        signature = dict(inspect.signature(self.forward).parameters)
        filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature}
        return self(*args, **filtered_kwargs)

    def forward(
        self,
        pixel_values,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        # Abstract: concrete backbone models must implement the forward pass.
        raise NotImplementedError("This method should be implemented by the derived class.")

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        """
        output = super().to_dict()
        # Expose the private attributes under their public names.
        output["out_features"] = output.pop("_out_features")
        output["out_indices"] = output.pop("_out_indices")
        return output
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class BackboneConfigMixin:
    """
    A Mixin to support handling the `out_features` and `out_indices` attributes for the backbone configurations.
    """

    @property
    def out_features(self):
        # Names of the stages selected for output.
        return self._out_features

    @out_features.setter
    def out_features(self, out_features: List[str]):
        """
        Set the out_features attribute. This will also update the out_indices attribute to match the new out_features.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=None, stage_names=self.stage_names
        )

    @property
    def out_indices(self):
        # Positions (into `stage_names`) of the stages selected for output.
        return self._out_indices

    @out_indices.setter
    def out_indices(self, out_indices: Union[Tuple[int], List[int]]):
        """
        Set the out_indices attribute. This will also update the out_features attribute to match the new out_indices.
        """
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=None, out_indices=out_indices, stage_names=self.stage_names
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default `to_dict()` from `PretrainedConfig` to
        include the `out_features` and `out_indices` attributes.
        """
        output = super().to_dict()
        # Expose the private attributes under their public names.
        output["out_features"] = output.pop("_out_features")
        output["out_indices"] = output.pop("_out_indices")
        return output
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def load_backbone(config):
    """
    Loads the backbone model from a config object.

    If the config is from the backbone model itself, then we return a backbone model with randomly initialized
    weights.

    If the config is from the parent model of the backbone model itself, then we load the pretrained backbone weights
    if specified.

    Args:
        config: Either a backbone config, or a parent-model config carrying the backbone-related
            attributes (`backbone_config`, `use_timm_backbone`, `use_pretrained_backbone`,
            `backbone`, `backbone_kwargs`).

    Returns:
        The instantiated backbone model.

    Raises:
        ValueError: if the backbone-related attributes are mutually inconsistent or insufficient.
    """
    # Imported locally to avoid a circular import at module load time.
    from transformers import AutoBackbone, AutoConfig

    backbone_config = getattr(config, "backbone_config", None)
    use_timm_backbone = getattr(config, "use_timm_backbone", None)
    use_pretrained_backbone = getattr(config, "use_pretrained_backbone", None)
    backbone_checkpoint = getattr(config, "backbone", None)
    backbone_kwargs = getattr(config, "backbone_kwargs", None)

    backbone_kwargs = {} if backbone_kwargs is None else backbone_kwargs

    if backbone_kwargs and backbone_config is not None:
        raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")

    # If there is a backbone_config and a backbone checkpoint, and use_pretrained_backbone=False then the desired
    # behaviour is ill-defined: do you want to load from the checkpoint's config or the backbone_config?
    if backbone_config is not None and backbone_checkpoint is not None and use_pretrained_backbone is not None:
        raise ValueError("Cannot specify both config.backbone_config and config.backbone")

    # If any of the following are set, then the config passed in is from a model which contains a backbone.
    # Bug fix: the original guard repeated `backbone_checkpoint is None` twice and never checked
    # `use_pretrained_backbone`, so a parent config whose only backbone attribute was
    # `use_pretrained_backbone` was mistakenly treated as a backbone config itself.
    if (
        backbone_config is None
        and use_timm_backbone is None
        and backbone_checkpoint is None
        and use_pretrained_backbone is None
    ):
        return AutoBackbone.from_config(config=config, **backbone_kwargs)

    # config from the parent model that has a backbone
    if use_timm_backbone:
        if backbone_checkpoint is None:
            raise ValueError("config.backbone must be set if use_timm_backbone is True")
        # Because of how timm backbones were originally added to models, we need to pass in use_pretrained_backbone
        # to determine whether to load the pretrained weights.
        backbone = AutoBackbone.from_pretrained(
            backbone_checkpoint,
            use_timm_backbone=use_timm_backbone,
            use_pretrained_backbone=use_pretrained_backbone,
            **backbone_kwargs,
        )
    elif use_pretrained_backbone:
        if backbone_checkpoint is None:
            raise ValueError("config.backbone must be set if use_pretrained_backbone is True")
        backbone = AutoBackbone.from_pretrained(backbone_checkpoint, **backbone_kwargs)
    else:
        # Randomly initialized backbone: build a config first if only a checkpoint name was given.
        if backbone_config is None and backbone_checkpoint is None:
            raise ValueError("Either config.backbone_config or config.backbone must be set")
        if backbone_config is None:
            backbone_config = AutoConfig.from_pretrained(backbone_checkpoint, **backbone_kwargs)
        backbone = AutoBackbone.from_config(config=backbone_config)
    return backbone
|
parrot/lib/python3.10/site-packages/transformers/utils/bitsandbytes.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings


# This module is a backward-compatibility shim: it emits a deprecation warning
# at import time and re-exports the symbols from their new home.
warnings.warn(
    "transformers.utils.bitsandbytes module is deprecated and will be removed in a future version. Please import bitsandbytes modules directly from transformers.integrations",
    FutureWarning,
)

# Re-export so `from transformers.utils.bitsandbytes import ...` keeps working.
from ..integrations import (  # noqa
    get_keys_to_not_convert,
    replace_8bit_linear,
    replace_with_bnb_linear,
    set_module_8bit_tensor_to_device,
    set_module_quantized_tensor_to_device,
)
|
parrot/lib/python3.10/site-packages/transformers/utils/constants.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Per-channel RGB normalization statistics (pixel values scaled to [0, 1]) used
# by image processors when standardizing inputs.
IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]
# "Standard" 0.5 mean/std maps the [0, 1] pixel range onto [-1, 1].
IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5]
IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5]
# Statistics used by OpenAI's CLIP preprocessing pipeline.
OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
|