ZTWHHH commited on
Commit
c4678bb
·
verified ·
1 Parent(s): 475b4dd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/libssl.so +3 -0
  3. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/INSTALLER +1 -0
  4. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/LICENSE.txt +13 -0
  5. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/METADATA +246 -0
  6. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/RECORD +120 -0
  7. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/REQUESTED +0 -0
  8. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/WHEEL +6 -0
  9. parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/top_level.txt +1 -0
  10. parrot/lib/python3.10/site-packages/annotated_types/__init__.py +432 -0
  11. parrot/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/annotated_types/py.typed +0 -0
  14. parrot/lib/python3.10/site-packages/annotated_types/test_cases.py +151 -0
  15. parrot/lib/python3.10/site-packages/gitdb/__init__.py +38 -0
  16. parrot/lib/python3.10/site-packages/gitdb/__pycache__/__init__.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/gitdb/__pycache__/base.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/gitdb/__pycache__/const.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/gitdb/__pycache__/exc.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/gitdb/__pycache__/fun.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/gitdb/__pycache__/pack.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/gitdb/__pycache__/stream.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/gitdb/__pycache__/typ.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/gitdb/__pycache__/util.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/gitdb/base.py +315 -0
  26. parrot/lib/python3.10/site-packages/gitdb/const.py +4 -0
  27. parrot/lib/python3.10/site-packages/gitdb/db/__init__.py +11 -0
  28. parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/git.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/loose.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/mem.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/pack.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/gitdb/db/base.py +278 -0
  33. parrot/lib/python3.10/site-packages/gitdb/db/git.py +85 -0
  34. parrot/lib/python3.10/site-packages/gitdb/db/loose.py +254 -0
  35. parrot/lib/python3.10/site-packages/gitdb/db/mem.py +110 -0
  36. parrot/lib/python3.10/site-packages/gitdb/db/pack.py +206 -0
  37. parrot/lib/python3.10/site-packages/gitdb/db/ref.py +82 -0
  38. parrot/lib/python3.10/site-packages/gitdb/exc.py +57 -0
  39. parrot/lib/python3.10/site-packages/gitdb/fun.py +704 -0
  40. parrot/lib/python3.10/site-packages/gitdb/pack.py +1031 -0
  41. parrot/lib/python3.10/site-packages/gitdb/stream.py +730 -0
  42. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/__init__.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/lib.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_base.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_example.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_pack.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_stream.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_util.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/gitdb/test/lib.py +192 -0
  50. parrot/lib/python3.10/site-packages/gitdb/test/test_base.py +105 -0
.gitattributes CHANGED
@@ -149,3 +149,4 @@ parrot/lib/libquadmath.so filter=lfs diff=lfs merge=lfs -text
149
  parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
150
  parrot/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
151
  parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed.ttf filter=lfs diff=lfs merge=lfs -text
 
 
149
  parrot/lib/python3.10/site-packages/pyarrow/_dataset_parquet.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
150
  parrot/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
151
  parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed.ttf filter=lfs diff=lfs merge=lfs -text
152
+ parrot/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
parrot/lib/libssl.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3480c91df4e0c1a33514955568641405e37924f680e8ba42f494a209640516c6
3
+ size 775712
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright aio-libs contributors.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/METADATA ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: aiohttp
3
+ Version: 3.10.6
4
+ Summary: Async http client/server framework (asyncio)
5
+ Home-page: https://github.com/aio-libs/aiohttp
6
+ Maintainer: aiohttp team <team@aiohttp.org>
7
+ Maintainer-email: team@aiohttp.org
8
+ License: Apache 2
9
+ Project-URL: Chat: Matrix, https://matrix.to/#/#aio-libs:matrix.org
10
+ Project-URL: Chat: Matrix Space, https://matrix.to/#/#aio-libs-space:matrix.org
11
+ Project-URL: CI: GitHub Actions, https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI
12
+ Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiohttp
13
+ Project-URL: Docs: Changelog, https://docs.aiohttp.org/en/stable/changes.html
14
+ Project-URL: Docs: RTD, https://docs.aiohttp.org
15
+ Project-URL: GitHub: issues, https://github.com/aio-libs/aiohttp/issues
16
+ Project-URL: GitHub: repo, https://github.com/aio-libs/aiohttp
17
+ Classifier: Development Status :: 5 - Production/Stable
18
+ Classifier: Framework :: AsyncIO
19
+ Classifier: Intended Audience :: Developers
20
+ Classifier: License :: OSI Approved :: Apache Software License
21
+ Classifier: Operating System :: POSIX
22
+ Classifier: Operating System :: MacOS :: MacOS X
23
+ Classifier: Operating System :: Microsoft :: Windows
24
+ Classifier: Programming Language :: Python
25
+ Classifier: Programming Language :: Python :: 3
26
+ Classifier: Programming Language :: Python :: 3.8
27
+ Classifier: Programming Language :: Python :: 3.9
28
+ Classifier: Programming Language :: Python :: 3.10
29
+ Classifier: Programming Language :: Python :: 3.11
30
+ Classifier: Programming Language :: Python :: 3.12
31
+ Classifier: Programming Language :: Python :: 3.13
32
+ Classifier: Topic :: Internet :: WWW/HTTP
33
+ Requires-Python: >=3.8
34
+ Description-Content-Type: text/x-rst
35
+ License-File: LICENSE.txt
36
+ Requires-Dist: aiohappyeyeballs >=2.3.0
37
+ Requires-Dist: aiosignal >=1.1.2
38
+ Requires-Dist: attrs >=17.3.0
39
+ Requires-Dist: frozenlist >=1.1.1
40
+ Requires-Dist: multidict <7.0,>=4.5
41
+ Requires-Dist: yarl <2.0,>=1.12.0
42
+ Requires-Dist: async-timeout <5.0,>=4.0 ; python_version < "3.11"
43
+ Provides-Extra: speedups
44
+ Requires-Dist: brotlicffi ; (platform_python_implementation != "CPython") and extra == 'speedups'
45
+ Requires-Dist: Brotli ; (platform_python_implementation == "CPython") and extra == 'speedups'
46
+ Requires-Dist: aiodns >=3.2.0 ; (sys_platform == "linux" or sys_platform == "darwin") and extra == 'speedups'
47
+
48
+ ==================================
49
+ Async http client/server framework
50
+ ==================================
51
+
52
+ .. image:: https://raw.githubusercontent.com/aio-libs/aiohttp/master/docs/aiohttp-plain.svg
53
+ :height: 64px
54
+ :width: 64px
55
+ :alt: aiohttp logo
56
+
57
+ |
58
+
59
+ .. image:: https://github.com/aio-libs/aiohttp/workflows/CI/badge.svg
60
+ :target: https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI
61
+ :alt: GitHub Actions status for master branch
62
+
63
+ .. image:: https://codecov.io/gh/aio-libs/aiohttp/branch/master/graph/badge.svg
64
+ :target: https://codecov.io/gh/aio-libs/aiohttp
65
+ :alt: codecov.io status for master branch
66
+
67
+ .. image:: https://badge.fury.io/py/aiohttp.svg
68
+ :target: https://pypi.org/project/aiohttp
69
+ :alt: Latest PyPI package version
70
+
71
+ .. image:: https://readthedocs.org/projects/aiohttp/badge/?version=latest
72
+ :target: https://docs.aiohttp.org/
73
+ :alt: Latest Read The Docs
74
+
75
+ .. image:: https://img.shields.io/matrix/aio-libs:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat
76
+ :target: https://matrix.to/#/%23aio-libs:matrix.org
77
+ :alt: Matrix Room — #aio-libs:matrix.org
78
+
79
+ .. image:: https://img.shields.io/matrix/aio-libs-space:matrix.org?label=Discuss%20on%20Matrix%20at%20%23aio-libs-space%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat
80
+ :target: https://matrix.to/#/%23aio-libs-space:matrix.org
81
+ :alt: Matrix Space — #aio-libs-space:matrix.org
82
+
83
+
84
+ Key Features
85
+ ============
86
+
87
+ - Supports both client and server side of HTTP protocol.
88
+ - Supports both client and server Web-Sockets out-of-the-box and avoids
89
+ Callback Hell.
90
+ - Provides Web-server with middleware and pluggable routing.
91
+
92
+
93
+ Getting started
94
+ ===============
95
+
96
+ Client
97
+ ------
98
+
99
+ To get something from the web:
100
+
101
+ .. code-block:: python
102
+
103
+ import aiohttp
104
+ import asyncio
105
+
106
+ async def main():
107
+
108
+ async with aiohttp.ClientSession() as session:
109
+ async with session.get('http://python.org') as response:
110
+
111
+ print("Status:", response.status)
112
+ print("Content-type:", response.headers['content-type'])
113
+
114
+ html = await response.text()
115
+ print("Body:", html[:15], "...")
116
+
117
+ asyncio.run(main())
118
+
119
+ This prints:
120
+
121
+ .. code-block::
122
+
123
+ Status: 200
124
+ Content-type: text/html; charset=utf-8
125
+ Body: <!doctype html> ...
126
+
127
+ Coming from `requests <https://requests.readthedocs.io/>`_ ? Read `why we need so many lines <https://aiohttp.readthedocs.io/en/latest/http_request_lifecycle.html>`_.
128
+
129
+ Server
130
+ ------
131
+
132
+ An example using a simple server:
133
+
134
+ .. code-block:: python
135
+
136
+ # examples/server_simple.py
137
+ from aiohttp import web
138
+
139
+ async def handle(request):
140
+ name = request.match_info.get('name', "Anonymous")
141
+ text = "Hello, " + name
142
+ return web.Response(text=text)
143
+
144
+ async def wshandle(request):
145
+ ws = web.WebSocketResponse()
146
+ await ws.prepare(request)
147
+
148
+ async for msg in ws:
149
+ if msg.type == web.WSMsgType.text:
150
+ await ws.send_str("Hello, {}".format(msg.data))
151
+ elif msg.type == web.WSMsgType.binary:
152
+ await ws.send_bytes(msg.data)
153
+ elif msg.type == web.WSMsgType.close:
154
+ break
155
+
156
+ return ws
157
+
158
+
159
+ app = web.Application()
160
+ app.add_routes([web.get('/', handle),
161
+ web.get('/echo', wshandle),
162
+ web.get('/{name}', handle)])
163
+
164
+ if __name__ == '__main__':
165
+ web.run_app(app)
166
+
167
+
168
+ Documentation
169
+ =============
170
+
171
+ https://aiohttp.readthedocs.io/
172
+
173
+
174
+ Demos
175
+ =====
176
+
177
+ https://github.com/aio-libs/aiohttp-demos
178
+
179
+
180
+ External links
181
+ ==============
182
+
183
+ * `Third party libraries
184
+ <http://aiohttp.readthedocs.io/en/latest/third_party.html>`_
185
+ * `Built with aiohttp
186
+ <http://aiohttp.readthedocs.io/en/latest/built_with.html>`_
187
+ * `Powered by aiohttp
188
+ <http://aiohttp.readthedocs.io/en/latest/powered_by.html>`_
189
+
190
+ Feel free to make a Pull Request for adding your link to these pages!
191
+
192
+
193
+ Communication channels
194
+ ======================
195
+
196
+ *aio-libs Discussions*: https://github.com/aio-libs/aiohttp/discussions
197
+
198
+ *Matrix*: `#aio-libs:matrix.org <https://matrix.to/#/#aio-libs:matrix.org>`_
199
+
200
+ We support `Stack Overflow
201
+ <https://stackoverflow.com/questions/tagged/aiohttp>`_.
202
+ Please add *aiohttp* tag to your question there.
203
+
204
+ Requirements
205
+ ============
206
+
207
+ - attrs_
208
+ - multidict_
209
+ - yarl_
210
+ - frozenlist_
211
+
212
+ Optionally you may install the aiodns_ library (highly recommended for sake of speed).
213
+
214
+ .. _aiodns: https://pypi.python.org/pypi/aiodns
215
+ .. _attrs: https://github.com/python-attrs/attrs
216
+ .. _multidict: https://pypi.python.org/pypi/multidict
217
+ .. _frozenlist: https://pypi.org/project/frozenlist/
218
+ .. _yarl: https://pypi.python.org/pypi/yarl
219
+ .. _async-timeout: https://pypi.python.org/pypi/async_timeout
220
+
221
+ License
222
+ =======
223
+
224
+ ``aiohttp`` is offered under the Apache 2 license.
225
+
226
+
227
+ Keepsafe
228
+ ========
229
+
230
+ The aiohttp community would like to thank Keepsafe
231
+ (https://www.getkeepsafe.com) for its support in the early days of
232
+ the project.
233
+
234
+
235
+ Source code
236
+ ===========
237
+
238
+ The latest developer version is available in a GitHub repository:
239
+ https://github.com/aio-libs/aiohttp
240
+
241
+ Benchmarks
242
+ ==========
243
+
244
+ If you are interested in efficiency, the AsyncIO community maintains a
245
+ list of benchmarks on the official wiki:
246
+ https://github.com/python/asyncio/wiki/Benchmarks
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/RECORD ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiohttp-3.10.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ aiohttp-3.10.6.dist-info/LICENSE.txt,sha256=n4DQ2311WpQdtFchcsJw7L2PCCuiFd3QlZhZQu2Uqes,588
3
+ aiohttp-3.10.6.dist-info/METADATA,sha256=50HgoGbIfzYQGjbzr8R6Q-nD5LLpgVPt-Jymi_fJw9Q,7575
4
+ aiohttp-3.10.6.dist-info/RECORD,,
5
+ aiohttp-3.10.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ aiohttp-3.10.6.dist-info/WHEEL,sha256=VXRyidHovicsPXAYYBPK-lnsPgFrrhXkyzySBEhHzcg,151
7
+ aiohttp-3.10.6.dist-info/top_level.txt,sha256=iv-JIaacmTl-hSho3QmphcKnbRRYx1st47yjz_178Ro,8
8
+ aiohttp/.hash/_cparser.pxd.hash,sha256=hYa9Vje-oMs2eh_7MfCPOh2QW_1x1yCjcZuc7AmwLd0,121
9
+ aiohttp/.hash/_find_header.pxd.hash,sha256=_mbpD6vM-CVCKq3ulUvsOAz5Wdo88wrDzfpOsMQaMNA,125
10
+ aiohttp/.hash/_helpers.pyi.hash,sha256=Ew4BZDc2LqFwszgZZUHHrJvw5P8HBhJ700n1Ntg52hE,121
11
+ aiohttp/.hash/_helpers.pyx.hash,sha256=gHk--W5adjp8iqCNNIj6-FrROPFNV7NC0Zu97f3mx5Y,121
12
+ aiohttp/.hash/_http_parser.pyx.hash,sha256=ZBiVMEMMqlfeIhQReFEMF0iOYI3CaKsvyJTRTbo0qXM,125
13
+ aiohttp/.hash/_http_writer.pyx.hash,sha256=3Qg3T3D-Ud73elzPHBufK0yEu9tP5jsu6g-aPKQY9gE,125
14
+ aiohttp/.hash/_websocket.pyx.hash,sha256=M97f-Yti-4vnE4GNTD1s_DzKs-fG_ww3jle6EUvixnE,123
15
+ aiohttp/.hash/hdrs.py.hash,sha256=2oEszMWjYFTHoF2w4OcFCoM7osv4vY9KLLJCu9HP0xI,116
16
+ aiohttp/__init__.py,sha256=1oDlP49upLXN5wfLI59VVt5jic32ZFNifD6vs9j4usA,7698
17
+ aiohttp/__pycache__/__init__.cpython-310.pyc,,
18
+ aiohttp/__pycache__/abc.cpython-310.pyc,,
19
+ aiohttp/__pycache__/base_protocol.cpython-310.pyc,,
20
+ aiohttp/__pycache__/client.cpython-310.pyc,,
21
+ aiohttp/__pycache__/client_exceptions.cpython-310.pyc,,
22
+ aiohttp/__pycache__/client_proto.cpython-310.pyc,,
23
+ aiohttp/__pycache__/client_reqrep.cpython-310.pyc,,
24
+ aiohttp/__pycache__/client_ws.cpython-310.pyc,,
25
+ aiohttp/__pycache__/compression_utils.cpython-310.pyc,,
26
+ aiohttp/__pycache__/connector.cpython-310.pyc,,
27
+ aiohttp/__pycache__/cookiejar.cpython-310.pyc,,
28
+ aiohttp/__pycache__/formdata.cpython-310.pyc,,
29
+ aiohttp/__pycache__/hdrs.cpython-310.pyc,,
30
+ aiohttp/__pycache__/helpers.cpython-310.pyc,,
31
+ aiohttp/__pycache__/http.cpython-310.pyc,,
32
+ aiohttp/__pycache__/http_exceptions.cpython-310.pyc,,
33
+ aiohttp/__pycache__/http_parser.cpython-310.pyc,,
34
+ aiohttp/__pycache__/http_websocket.cpython-310.pyc,,
35
+ aiohttp/__pycache__/http_writer.cpython-310.pyc,,
36
+ aiohttp/__pycache__/locks.cpython-310.pyc,,
37
+ aiohttp/__pycache__/log.cpython-310.pyc,,
38
+ aiohttp/__pycache__/multipart.cpython-310.pyc,,
39
+ aiohttp/__pycache__/payload.cpython-310.pyc,,
40
+ aiohttp/__pycache__/payload_streamer.cpython-310.pyc,,
41
+ aiohttp/__pycache__/pytest_plugin.cpython-310.pyc,,
42
+ aiohttp/__pycache__/resolver.cpython-310.pyc,,
43
+ aiohttp/__pycache__/streams.cpython-310.pyc,,
44
+ aiohttp/__pycache__/tcp_helpers.cpython-310.pyc,,
45
+ aiohttp/__pycache__/test_utils.cpython-310.pyc,,
46
+ aiohttp/__pycache__/tracing.cpython-310.pyc,,
47
+ aiohttp/__pycache__/typedefs.cpython-310.pyc,,
48
+ aiohttp/__pycache__/web.cpython-310.pyc,,
49
+ aiohttp/__pycache__/web_app.cpython-310.pyc,,
50
+ aiohttp/__pycache__/web_exceptions.cpython-310.pyc,,
51
+ aiohttp/__pycache__/web_fileresponse.cpython-310.pyc,,
52
+ aiohttp/__pycache__/web_log.cpython-310.pyc,,
53
+ aiohttp/__pycache__/web_middlewares.cpython-310.pyc,,
54
+ aiohttp/__pycache__/web_protocol.cpython-310.pyc,,
55
+ aiohttp/__pycache__/web_request.cpython-310.pyc,,
56
+ aiohttp/__pycache__/web_response.cpython-310.pyc,,
57
+ aiohttp/__pycache__/web_routedef.cpython-310.pyc,,
58
+ aiohttp/__pycache__/web_runner.cpython-310.pyc,,
59
+ aiohttp/__pycache__/web_server.cpython-310.pyc,,
60
+ aiohttp/__pycache__/web_urldispatcher.cpython-310.pyc,,
61
+ aiohttp/__pycache__/web_ws.cpython-310.pyc,,
62
+ aiohttp/__pycache__/worker.cpython-310.pyc,,
63
+ aiohttp/_cparser.pxd,sha256=8jGIg-VJ9p3llwCakUYDsPGxA4HiZe9dmK9Jmtlz-5g,4318
64
+ aiohttp/_find_header.pxd,sha256=0GfwFCPN2zxEKTO1_MA5sYq2UfzsG8kcV3aTqvwlz3g,68
65
+ aiohttp/_headers.pxi,sha256=n701k28dVPjwRnx5j6LpJhLTfj7dqu2vJt7f0O60Oyg,2007
66
+ aiohttp/_helpers.cpython-310-x86_64-linux-gnu.so,sha256=NXad7lYxp-vH7g2yQE5o19-aKYx1Ib_Ju_X_K9Ouo1U,414160
67
+ aiohttp/_helpers.pyi,sha256=ZoKiJSS51PxELhI2cmIr5737YjjZcJt7FbIRO3ym1Ss,202
68
+ aiohttp/_helpers.pyx,sha256=GdmPCO_VWkDJmy_EyDQdp-5cwUOxpZGBAUw_Q6PpVCM,1006
69
+ aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so,sha256=msUDiiF60CJyt8sMX1CE9S7pzHfZt14YRfcN8VXwNaw,2616448
70
+ aiohttp/_http_parser.pyx,sha256=4tli5RoYO24nI8HLl7nxHHlb7ccJOuHrA4pwQN2PTXA,28395
71
+ aiohttp/_http_writer.cpython-310-x86_64-linux-gnu.so,sha256=VDZWkJPdac8PALAYqRidCPst3UW2UEneQpcZ8VQPp3c,459376
72
+ aiohttp/_http_writer.pyx,sha256=aIHAp8g4ZV5kbGRdmZce-vXjELw2M6fGKyJuOdgYQqw,4575
73
+ aiohttp/_websocket.cpython-310-x86_64-linux-gnu.so,sha256=GaDatkEeK5-oiBiitEhrbgcVO3Yg0j_-Gg26GU03GLA,234048
74
+ aiohttp/_websocket.pyx,sha256=1XuOSNDCbyDrzF5uMA2isqausSs8l2jWTLDlNDLM9Io,1561
75
+ aiohttp/abc.py,sha256=NaAMNASsemeZ-42PcN8w3O50u5h2e2qKX6ITk9C5ub4,6097
76
+ aiohttp/base_protocol.py,sha256=aZEoolVFSeRq_I-51QjhKljCtfjAOD3T8PbO6DPGaMk,2940
77
+ aiohttp/client.py,sha256=Y8bcgtvEBRWjcafMJ8XXWq_TzDBYlw9cJmPnupKNJV0,52148
78
+ aiohttp/client_exceptions.py,sha256=ih-Rb30CoBkcI0qY0nnvscZCw4x-2A5b0uZfm5oGwxo,10927
79
+ aiohttp/client_proto.py,sha256=BjILAejNi6ezOY16AQzROl7aIl4jphZ_TMROVnljk-M,10490
80
+ aiohttp/client_reqrep.py,sha256=ts_jhvL4RJiaoOqjQfNXj5PGxSF2w5L-9xBTwKPiX-M,42634
81
+ aiohttp/client_ws.py,sha256=s1cFjk7pT61CLnZDZEqS6SJdIyWMs2kr6Q8WS_Vwej8,14275
82
+ aiohttp/compression_utils.py,sha256=Yp6TfmFKrKwDcQqSY9mIPoeXvRGA1fWSm6FdHJ1jEnY,5055
83
+ aiohttp/connector.py,sha256=YAasT4pyP7FjYbwrGSkgR4R_KaUJLGLZAKHKz4bQCEU,57340
84
+ aiohttp/cookiejar.py,sha256=XbJsSrIQ5gBL0nGeCDXYDbMpyc1_mCCXWw08hja71Ao,17162
85
+ aiohttp/formdata.py,sha256=WjHA1mieKlWwI5O3hi3-siqN0dWz_X04oXNNZje2z7Q,6521
86
+ aiohttp/hdrs.py,sha256=uzn5agn_jXid2h-ky6Y0ZAQ8BrPeTGLDGr-weiMctso,4613
87
+ aiohttp/helpers.py,sha256=nA4nVhZLYzJk2VPmiZzGgJBrno_N5EMP9svW_kEyltY,30418
88
+ aiohttp/http.py,sha256=8o8j8xH70OWjnfTWA9V44NR785QPxEPrUtzMXiAVpwc,1842
89
+ aiohttp/http_exceptions.py,sha256=rw6EER4AresvBZw0_V6eifa_3jrNZUnysMT2GLEWzSE,2715
90
+ aiohttp/http_parser.py,sha256=OhsZlHiTj002TjcI-SUh9jRXEjPJVdKMK3hmGW0a7qc,36451
91
+ aiohttp/http_websocket.py,sha256=h7PldJwuTJGHQg9Anfble0o-V5QA0yyUhMKRP3_9eCU,27369
92
+ aiohttp/http_writer.py,sha256=duTHOdlMzbQqZyqZwBp4NHGoCA6PUekPhsFIQzIXu5k,5975
93
+ aiohttp/locks.py,sha256=wRYFo1U82LwBBdqwU24JEPaoTAlKaaJd2FtfDKhkTb4,1136
94
+ aiohttp/log.py,sha256=BbNKx9e3VMIm0xYjZI0IcBBoS7wjdeIeSaiJE7-qK2g,325
95
+ aiohttp/multipart.py,sha256=Zo9wekrwsLyDPpE8bEk6rjhTRx6HfgMU0v8H6v7AVnM,36983
96
+ aiohttp/payload.py,sha256=DJla14SSy4XREiLPwyLA28QUT9NHtSBCGSawO_1h7K8,14898
97
+ aiohttp/payload_streamer.py,sha256=ZzEYyfzcjGWkVkK3XR2pBthSCSIykYvY3Wr5cGQ2eTc,2211
98
+ aiohttp/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7
99
+ aiohttp/pytest_plugin.py,sha256=JvGt8Yyoy_LUegfRo72bk8psPZnfl9JXIV2LLl0JX8M,12101
100
+ aiohttp/resolver.py,sha256=iUsSU_W72sZgnUx5VRCT9JqJnF-QRhNKjDUBJor6WWw,6458
101
+ aiohttp/streams.py,sha256=k7j05wdgCNzLU0hFMlgLuNb7x643hMLGBBxbokUnNiE,21223
102
+ aiohttp/tcp_helpers.py,sha256=BSadqVWaBpMFDRWnhaaR941N9MiDZ7bdTrxgCb0CW-M,961
103
+ aiohttp/test_utils.py,sha256=XhcaLPl4K5OR6k6WFlU5g06Sad7ZR8ApcrePIgeYxFY,21995
104
+ aiohttp/tracing.py,sha256=66XQwtdR5DHv8p953eeNL0l8o6iHDaNwH9bBaybHXD4,15137
105
+ aiohttp/typedefs.py,sha256=VAdpiUqR0JPrHq8gL6vUl7exPpJ9_HJlg2LXPqyGNnA,2070
106
+ aiohttp/web.py,sha256=uBo6hdXOGccdGpJBqQEQtrvg0wxjvpPFU1G6yzjo16g,18217
107
+ aiohttp/web_app.py,sha256=T5VIrfmT1xUyo4m7yEH_wy9f5Azsu4aJO4u7OjNKork,19155
108
+ aiohttp/web_exceptions.py,sha256=7nIuiwhZ39vJJ9KrWqArA5QcWbUdqkz2CLwEpJapeN8,10360
109
+ aiohttp/web_fileresponse.py,sha256=2BeMj0CR0qp5Z0Qf-OcUmCNAEcWPr0qMuf3UagXUFR8,13862
110
+ aiohttp/web_log.py,sha256=DOfOxGyh2U7K5K_w6O7ILdfGcs4qOdzHxOwj2-k3c6c,7801
111
+ aiohttp/web_middlewares.py,sha256=sFI0AgeNjdyAjuz92QtMIpngmJSOxrqe2Jfbs4BNUu0,4165
112
+ aiohttp/web_protocol.py,sha256=9NgIL20n3BoEVfSxaETPQwA3YnnntrAyIK47GFd-JXE,24868
113
+ aiohttp/web_request.py,sha256=NHt0Biw6mrpT-_NJ_xZYnBJfOlrIfZjRoB9e4Yioaz4,29786
114
+ aiohttp/web_response.py,sha256=tlomtJ7T2j0oOVBSAzuepfUetdeUjsZcSXTEMbrSBFw,28079
115
+ aiohttp/web_routedef.py,sha256=6PpuqK4SOFFdbYx_jNx-EvauiXO8K_qQeh4oruVFW7A,6116
116
+ aiohttp/web_runner.py,sha256=WAe6mWpHZpJdcKHEbWPuJ8OdxMhaIjpO9Ncp7jOmeOU,11683
117
+ aiohttp/web_server.py,sha256=jx3sQSQhyEP3V4loJZpqIiPXctXk7sTVFEkXcrxnjTw,2764
118
+ aiohttp/web_urldispatcher.py,sha256=ktz83jldMFD-NNq3XnFfj65FkfFwHcRDZPx1Ci08tnY,43719
119
+ aiohttp/web_ws.py,sha256=XXXO1hpdTXticCrm2qILxKNsOfMpeg-o98yWnfiw12E,21654
120
+ aiohttp/worker.py,sha256=bkozEd2rAzQS0qs4knnnplOmaZ4TNdYtqWXSXx9djEc,7965
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/REQUESTED ADDED
File without changes
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (75.1.0)
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
parrot/lib/python3.10/site-packages/aiohttp-3.10.6.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ aiohttp
parrot/lib/python3.10/site-packages/annotated_types/__init__.py ADDED
@@ -0,0 +1,432 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import sys
3
+ import types
4
+ from dataclasses import dataclass
5
+ from datetime import tzinfo
6
+ from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union
7
+
8
+ if sys.version_info < (3, 8):
9
+ from typing_extensions import Protocol, runtime_checkable
10
+ else:
11
+ from typing import Protocol, runtime_checkable
12
+
13
+ if sys.version_info < (3, 9):
14
+ from typing_extensions import Annotated, Literal
15
+ else:
16
+ from typing import Annotated, Literal
17
+
18
+ if sys.version_info < (3, 10):
19
+ EllipsisType = type(Ellipsis)
20
+ KW_ONLY = {}
21
+ SLOTS = {}
22
+ else:
23
+ from types import EllipsisType
24
+
25
+ KW_ONLY = {"kw_only": True}
26
+ SLOTS = {"slots": True}
27
+
28
+
29
+ __all__ = (
30
+ 'BaseMetadata',
31
+ 'GroupedMetadata',
32
+ 'Gt',
33
+ 'Ge',
34
+ 'Lt',
35
+ 'Le',
36
+ 'Interval',
37
+ 'MultipleOf',
38
+ 'MinLen',
39
+ 'MaxLen',
40
+ 'Len',
41
+ 'Timezone',
42
+ 'Predicate',
43
+ 'LowerCase',
44
+ 'UpperCase',
45
+ 'IsDigits',
46
+ 'IsFinite',
47
+ 'IsNotFinite',
48
+ 'IsNan',
49
+ 'IsNotNan',
50
+ 'IsInfinite',
51
+ 'IsNotInfinite',
52
+ 'doc',
53
+ 'DocInfo',
54
+ '__version__',
55
+ )
56
+
57
+ __version__ = '0.7.0'
58
+
59
+
60
+ T = TypeVar('T')
61
+
62
+
63
+ # arguments that start with __ are considered
64
+ # positional only
65
+ # see https://peps.python.org/pep-0484/#positional-only-arguments
66
+
67
+
68
+ class SupportsGt(Protocol):
69
+ def __gt__(self: T, __other: T) -> bool:
70
+ ...
71
+
72
+
73
+ class SupportsGe(Protocol):
74
+ def __ge__(self: T, __other: T) -> bool:
75
+ ...
76
+
77
+
78
+ class SupportsLt(Protocol):
79
+ def __lt__(self: T, __other: T) -> bool:
80
+ ...
81
+
82
+
83
+ class SupportsLe(Protocol):
84
+ def __le__(self: T, __other: T) -> bool:
85
+ ...
86
+
87
+
88
+ class SupportsMod(Protocol):
89
+ def __mod__(self: T, __other: T) -> T:
90
+ ...
91
+
92
+
93
+ class SupportsDiv(Protocol):
94
+ def __div__(self: T, __other: T) -> T:
95
+ ...
96
+
97
+
98
+ class BaseMetadata:
99
+ """Base class for all metadata.
100
+
101
+ This exists mainly so that implementers
102
+ can do `isinstance(..., BaseMetadata)` while traversing field annotations.
103
+ """
104
+
105
+ __slots__ = ()
106
+
107
+
108
+ @dataclass(frozen=True, **SLOTS)
109
+ class Gt(BaseMetadata):
110
+ """Gt(gt=x) implies that the value must be greater than x.
111
+
112
+ It can be used with any type that supports the ``>`` operator,
113
+ including numbers, dates and times, strings, sets, and so on.
114
+ """
115
+
116
+ gt: SupportsGt
117
+
118
+
119
+ @dataclass(frozen=True, **SLOTS)
120
+ class Ge(BaseMetadata):
121
+ """Ge(ge=x) implies that the value must be greater than or equal to x.
122
+
123
+ It can be used with any type that supports the ``>=`` operator,
124
+ including numbers, dates and times, strings, sets, and so on.
125
+ """
126
+
127
+ ge: SupportsGe
128
+
129
+
130
+ @dataclass(frozen=True, **SLOTS)
131
+ class Lt(BaseMetadata):
132
+ """Lt(lt=x) implies that the value must be less than x.
133
+
134
+ It can be used with any type that supports the ``<`` operator,
135
+ including numbers, dates and times, strings, sets, and so on.
136
+ """
137
+
138
+ lt: SupportsLt
139
+
140
+
141
+ @dataclass(frozen=True, **SLOTS)
142
+ class Le(BaseMetadata):
143
+ """Le(le=x) implies that the value must be less than or equal to x.
144
+
145
+ It can be used with any type that supports the ``<=`` operator,
146
+ including numbers, dates and times, strings, sets, and so on.
147
+ """
148
+
149
+ le: SupportsLe
150
+
151
+
152
+ @runtime_checkable
153
+ class GroupedMetadata(Protocol):
154
+ """A grouping of multiple objects, like typing.Unpack.
155
+
156
+ `GroupedMetadata` on its own is not metadata and has no meaning.
157
+ All of the constraints and metadata should be fully expressable
158
+ in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
159
+
160
+ Concrete implementations should override `GroupedMetadata.__iter__()`
161
+ to add their own metadata.
162
+ For example:
163
+
164
+ >>> @dataclass
165
+ >>> class Field(GroupedMetadata):
166
+ >>> gt: float | None = None
167
+ >>> description: str | None = None
168
+ ...
169
+ >>> def __iter__(self) -> Iterable[object]:
170
+ >>> if self.gt is not None:
171
+ >>> yield Gt(self.gt)
172
+ >>> if self.description is not None:
173
+ >>> yield Description(self.gt)
174
+
175
+ Also see the implementation of `Interval` below for an example.
176
+
177
+ Parsers should recognize this and unpack it so that it can be used
178
+ both with and without unpacking:
179
+
180
+ - `Annotated[int, Field(...)]` (parser must unpack Field)
181
+ - `Annotated[int, *Field(...)]` (PEP-646)
182
+ """ # noqa: trailing-whitespace
183
+
184
+ @property
185
+ def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
186
+ return True
187
+
188
+ def __iter__(self) -> Iterator[object]:
189
+ ...
190
+
191
+ if not TYPE_CHECKING:
192
+ __slots__ = () # allow subclasses to use slots
193
+
194
+ def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
195
+ # Basic ABC like functionality without the complexity of an ABC
196
+ super().__init_subclass__(*args, **kwargs)
197
+ if cls.__iter__ is GroupedMetadata.__iter__:
198
+ raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
199
+
200
+ def __iter__(self) -> Iterator[object]: # noqa: F811
201
+ raise NotImplementedError # more helpful than "None has no attribute..." type errors
202
+
203
+
204
@dataclass(frozen=True, **KW_ONLY, **SLOTS)
class Interval(GroupedMetadata):
    """Interval can express inclusive or exclusive bounds with a single object.

    It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
    are interpreted the same way as the single-bound constraints.
    """

    gt: Union[SupportsGt, None] = None
    ge: Union[SupportsGe, None] = None
    lt: Union[SupportsLt, None] = None
    le: Union[SupportsLe, None] = None

    def __iter__(self) -> Iterator[BaseMetadata]:
        """Unpack an Interval into zero or more single-bound constraints."""
        # Emit one single-bound metadata object per bound that was supplied,
        # in gt/ge/lt/le order.
        for bound_value, bound_cls in (
            (self.gt, Gt),
            (self.ge, Ge),
            (self.lt, Lt),
            (self.le, Le),
        ):
            if bound_value is not None:
                yield bound_cls(bound_value)
227
+
228
+
229
@dataclass(frozen=True, **SLOTS)
class MultipleOf(BaseMetadata):
    """MultipleOf(multiple_of=x) might be interpreted in two ways:

    1. Python semantics, implying ``value % multiple_of == 0``, or
    2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``

    We encourage users to be aware of these two common interpretations,
    and libraries to carefully document which they implement.
    """

    # The divisor; it only needs to support ``/`` or ``%``, depending on which
    # of the two interpretations above the consuming library implements.
    multiple_of: Union[SupportsDiv, SupportsMod]
241
+
242
+
243
@dataclass(frozen=True, **SLOTS)
class MinLen(BaseMetadata):
    """
    MinLen() implies minimum inclusive length,
    e.g. ``len(value) >= min_length``.
    """

    # Inclusive lower bound on len(value); annotated as non-negative.
    min_length: Annotated[int, Ge(0)]
251
+
252
+
253
@dataclass(frozen=True, **SLOTS)
class MaxLen(BaseMetadata):
    """
    MaxLen() implies maximum inclusive length,
    e.g. ``len(value) <= max_length``.
    """

    # Inclusive upper bound on len(value); annotated as non-negative.
    max_length: Annotated[int, Ge(0)]
261
+
262
+
263
@dataclass(frozen=True, **SLOTS)
class Len(GroupedMetadata):
    """
    Len() implies that ``min_length <= len(value) <= max_length``.

    Upper bound may be omitted or ``None`` to indicate no upper length bound.
    """

    # Inclusive lower bound on len(value); defaults to the vacuous bound 0.
    min_length: Annotated[int, Ge(0)] = 0
    # Inclusive upper bound on len(value); None means unbounded.
    max_length: Optional[Annotated[int, Ge(0)]] = None

    def __iter__(self) -> Iterator[BaseMetadata]:
        """Unpack a Len into zero or more single-bounds."""
        # (docstring fix: previously read "into zone or more".)
        # A minimum of 0 is always satisfied, so only emit MinLen for
        # positive lower bounds.
        if self.min_length > 0:
            yield MinLen(self.min_length)
        if self.max_length is not None:
            yield MaxLen(self.max_length)
280
+
281
+
282
@dataclass(frozen=True, **SLOTS)
class Timezone(BaseMetadata):
    """Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).

    ``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
    ``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
    tz-aware but any timezone is allowed.

    You may also pass a specific timezone string or tzinfo object such as
    ``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
    you only allow a specific timezone, though we note that this is often
    a symptom of poor design.
    """

    # None -> must be naive; Ellipsis -> any aware tz; str/tzinfo -> that
    # specific timezone only (see class docstring).
    tz: Union[str, tzinfo, EllipsisType, None]
297
+
298
+
299
@dataclass(frozen=True, **SLOTS)
class Unit(BaseMetadata):
    """Indicates that the value is a physical quantity with the specified unit.

    It is intended for usage with numeric types, where the value represents the
    magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]``
    or ``speed: Annotated[float, Unit('m/s')]``.

    Interpretation of the unit string is left to the discretion of the consumer.
    It is suggested to follow conventions established by python libraries that work
    with physical quantities, such as

    - ``pint`` : <https://pint.readthedocs.io/en/stable/>
    - ``astropy.units``: <https://docs.astropy.org/en/stable/units/>

    For indicating a quantity with a certain dimensionality but without a specific unit
    it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`.
    Note, however, ``annotated_types`` itself makes no use of the unit string.
    """

    # Free-form unit label; this library attaches no semantics to it.
    unit: str
320
+
321
+
322
@dataclass(frozen=True, **SLOTS)
class Predicate(BaseMetadata):
    """``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.

    Users should prefer statically inspectable metadata, but if you need the full
    power and flexibility of arbitrary runtime predicates... here it is.

    We provide a few predefined predicates for common string constraints:
    ``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
    ``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which
    can be given special handling, and avoid indirection like ``lambda s: s.lower()``.

    Some libraries might have special logic to handle certain predicates, e.g. by
    checking for `str.isdigit` and using its presence to both call custom logic to
    enforce digit-only strings, and customise some generated external schema.

    We do not specify what behaviour should be expected for predicates that raise
    an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
    skip invalid constraints, or statically raise an error; or it might try calling it
    and then propagate or discard the resulting exception.
    """

    # The predicate; called with the value being validated, truthy == valid.
    func: Callable[[Any], bool]

    def __repr__(self) -> str:
        # Lambdas (or anything without a __name__) fall back to the raw repr.
        if getattr(self.func, "__name__", "<lambda>") == "<lambda>":
            return f"{self.__class__.__name__}({self.func!r})"
        # Bound methods: qualify with the owner's name when it has one.
        if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and (
            namespace := getattr(self.func.__self__, "__name__", None)
        ):
            return f"{self.__class__.__name__}({namespace}.{self.func.__name__})"
        if isinstance(self.func, type(str.isascii)):  # method descriptor
            return f"{self.__class__.__name__}({self.func.__qualname__})"
        # Plain functions: just the bare name.
        return f"{self.__class__.__name__}({self.func.__name__})"
356
+
357
+
358
@dataclass
class Not:
    """Wraps a predicate so that ``Not(func)(v)`` is ``not func(v)``."""

    func: Callable[[Any], bool]

    def __call__(self, __v: Any) -> bool:
        """Return the boolean negation of ``self.func(__v)``."""
        verdict = self.func(__v)
        return not verdict
364
+
365
+
366
# Pre-built Annotated aliases for common string/number predicates.
_StrType = TypeVar("_StrType", bound=str)

LowerCase = Annotated[_StrType, Predicate(str.islower)]
"""
Return True if the string is a lowercase string, False otherwise.

A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
"""  # noqa: E501
UpperCase = Annotated[_StrType, Predicate(str.isupper)]
"""
Return True if the string is an uppercase string, False otherwise.

A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
"""  # noqa: E501
IsDigit = Annotated[_StrType, Predicate(str.isdigit)]
IsDigits = IsDigit  # type: ignore  # plural for backwards compatibility, see #63
# NOTE: the docstring below attaches to IsDigits (the alias), not IsDigit.
"""
Return True if the string is a digit string, False otherwise.

A string is a digit string if all characters in the string are digits and there is at least one character in the string.
"""  # noqa: E501
IsAscii = Annotated[_StrType, Predicate(str.isascii)]
"""
Return True if all characters in the string are ASCII, False otherwise.

ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
"""

_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
"""Return True if x is one of infinity or NaN, and False otherwise"""
IsNan = Annotated[_NumericType, Predicate(math.isnan)]
"""Return True if x is a NaN (not a number), and False otherwise."""
IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
"""Return True if x is anything but NaN (not a number), and False otherwise."""
IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
"""Return True if x is a positive or negative infinity, and False otherwise."""
IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
"""Return True if x is neither a positive or negative infinity, and False otherwise."""
407
+
408
try:
    from typing_extensions import DocInfo, doc  # type: ignore [attr-defined]
except ImportError:
    # typing_extensions is optional; provide an equivalent fallback.

    @dataclass(frozen=True, **SLOTS)
    class DocInfo:  # type: ignore [no-redef]
        """
        The return value of doc(), mainly to be used by tools that want to extract the
        Annotated documentation at runtime.
        """
        # (fix: the docstring previously opened with a stray quote, `""" "`.)

        documentation: str
        """The documentation string passed to doc()."""

    def doc(
        documentation: str,
    ) -> DocInfo:
        """
        Add documentation to a type annotation inside of Annotated.

        For example:

        >>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
        """
        return DocInfo(documentation)
parrot/lib/python3.10/site-packages/annotated_types/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
parrot/lib/python3.10/site-packages/annotated_types/__pycache__/test_cases.cpython-310.pyc ADDED
Binary file (5.62 kB). View file
 
parrot/lib/python3.10/site-packages/annotated_types/py.typed ADDED
File without changes
parrot/lib/python3.10/site-packages/annotated_types/test_cases.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ import sys
3
+ from datetime import date, datetime, timedelta, timezone
4
+ from decimal import Decimal
5
+ from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple
6
+
7
+ if sys.version_info < (3, 9):
8
+ from typing_extensions import Annotated
9
+ else:
10
+ from typing import Annotated
11
+
12
+ import annotated_types as at
13
+
14
+
15
class Case(NamedTuple):
    """
    A test case for `annotated_types`.
    """

    # The Annotated[...] type under test.
    annotation: Any
    # Values that must satisfy the annotation's constraints.
    valid_cases: Iterable[Any]
    # Values that must violate the annotation's constraints.
    invalid_cases: Iterable[Any]
23
+
24
+
25
def cases() -> Iterable[Case]:
    """Yield one `Case` fixture per constraint shipped by `annotated_types`.

    Each case pairs an ``Annotated[...]`` annotation with values that should
    pass and values that should fail, so downstream validator libraries can
    verify their implementations against a shared suite.
    """
    # Gt, Ge, Lt, Le
    yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1))
    yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1))
    yield Case(
        Annotated[datetime, at.Gt(datetime(2000, 1, 1))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
    )
    yield Case(
        Annotated[datetime, at.Gt(date(2000, 1, 1))],
        [date(2000, 1, 2), date(2000, 1, 3)],
        [date(2000, 1, 1), date(1999, 12, 31)],
    )
    yield Case(
        Annotated[datetime, at.Gt(Decimal('1.123'))],
        [Decimal('1.1231'), Decimal('123')],
        [Decimal('1.123'), Decimal('0')],
    )

    yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1))
    yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1))
    yield Case(
        Annotated[datetime, at.Ge(datetime(2000, 1, 1))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(1998, 1, 1), datetime(1999, 12, 31)],
    )

    yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4))
    yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9))
    yield Case(
        Annotated[datetime, at.Lt(datetime(2000, 1, 1))],
        [datetime(1999, 12, 31), datetime(1999, 12, 31)],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
    )

    yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000))
    yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9))
    yield Case(
        Annotated[datetime, at.Le(datetime(2000, 1, 1))],
        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
    )

    # Interval
    yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1))
    yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1))
    yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1))
    yield Case(
        Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(2000, 1, 1), datetime(2000, 1, 4)],
    )

    yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4))
    yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1))

    # lengths

    yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
    yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
    yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
    yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))

    yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10))
    yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10))
    yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
    yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))

    yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10))
    yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234'))

    yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}])
    yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4}))
    yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4)))

    # Timezone

    yield Case(
        Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)]
    )
    yield Case(
        Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)]
    )
    yield Case(
        Annotated[datetime, at.Timezone(timezone.utc)],
        [datetime(2000, 1, 1, tzinfo=timezone.utc)],
        [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
    )
    yield Case(
        Annotated[datetime, at.Timezone('Europe/London')],
        [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))],
        [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
    )

    # Quantity

    yield Case(Annotated[float, at.Unit(unit='m')], (5, 4.2), ('5m', '4.2m'))

    # predicate types

    yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom'])
    yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC'])
    yield Case(at.IsDigit[str], ['123'], ['', 'ab', 'a1b2'])
    yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀'])

    yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5])

    yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf])
    yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23])
    yield Case(at.IsNan[float], [math.nan], [1.23, math.inf])
    yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan])
    yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23])
    yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf])

    # check stacked predicates
    yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan])

    # doc
    yield Case(Annotated[int, at.doc("A number")], [1, 2], [])

    # custom GroupedMetadata
    class MyCustomGroupedMetadata(at.GroupedMetadata):
        def __iter__(self) -> Iterator[at.Predicate]:
            yield at.Predicate(lambda x: float(x).is_integer())

    yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5])
parrot/lib/python3.10/site-packages/gitdb/__init__.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Initialize the object database module"""
6
+
7
+ import sys
8
+ import os
9
+
10
+ #{ Initialization
11
+
12
+
13
+ def _init_externals():
14
+ """Initialize external projects by putting them into the path"""
15
+ if 'PYOXIDIZER' not in os.environ:
16
+ where = os.path.join(os.path.dirname(__file__), 'ext', 'smmap')
17
+ if os.path.exists(where):
18
+ sys.path.append(where)
19
+
20
+ import smmap
21
+ del smmap
22
+ # END handle imports
23
+
24
+ #} END initialization
25
+
26
# Make the optional bundled smmap checkout importable before the default
# imports below pull it in.
_init_externals()

__author__ = "Sebastian Thiel"
__contact__ = "byronimo@gmail.com"
__homepage__ = "https://github.com/gitpython-developers/gitdb"
version_info = (4, 0, 11)  # (major, minor, patch)
__version__ = '.'.join(str(i) for i in version_info)


# default imports
from gitdb.base import *
from gitdb.db import *
from gitdb.stream import *
parrot/lib/python3.10/site-packages/gitdb/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/base.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/const.cpython-310.pyc ADDED
Binary file (287 Bytes). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/exc.cpython-310.pyc ADDED
Binary file (2.17 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/fun.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/pack.cpython-310.pyc ADDED
Binary file (30.3 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/stream.cpython-310.pyc ADDED
Binary file (17.9 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/typ.cpython-310.pyc ADDED
Binary file (349 Bytes). View file
 
parrot/lib/python3.10/site-packages/gitdb/__pycache__/util.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/base.py ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Module with basic data structures - they are designed to be lightweight and fast"""
6
+ from gitdb.util import bin_to_hex
7
+
8
+ from gitdb.fun import (
9
+ type_id_to_type_map,
10
+ type_to_type_id_map
11
+ )
12
+
13
+ __all__ = ('OInfo', 'OPackInfo', 'ODeltaPackInfo',
14
+ 'OStream', 'OPackStream', 'ODeltaPackStream',
15
+ 'IStream', 'InvalidOInfo', 'InvalidOStream')
16
+
17
+ #{ ODB Bases
18
+
19
+
20
class OInfo(tuple):

    """Read-only record describing one object in an object database.

    Exposes the object's binary sha, its type string and its uncompressed
    size in bytes.  Fields are reachable both by index and by attribute::

        assert dbi[0] == dbi.binsha
        assert dbi[1] == dbi.type
        assert dbi[2] == dbi.size

    The type is designed to be as lightweight as possible."""
    __slots__ = ()

    def __new__(cls, sha, type, size):
        # Immutable tuple subclass: all state is fixed at construction.
        return tuple.__new__(cls, (sha, type, size))

    def __init__(self, *args):
        tuple.__init__(self)

    #{ Interface
    @property
    def binsha(self):
        """:return: our sha as binary, 20 bytes"""
        return self[0]

    @property
    def hexsha(self):
        """:return: our sha, hex encoded, 40 bytes"""
        return bin_to_hex(self[0])

    @property
    def type(self):
        """:return: the object's type string"""
        return self[1]

    @property
    def type_id(self):
        """:return: numerical type id corresponding to our type string"""
        return type_to_type_id_map[self[1]]

    @property
    def size(self):
        """:return: uncompressed size of the object in bytes"""
        return self[2]
    #} END interface
64
+
65
+
66
class OPackInfo(tuple):

    """As OInfo, but provides a type_id property to retrieve the numerical type id, and
    does not include a sha.

    Additionally, the pack_offset is the absolute offset into the packfile at which
    all object information is located. The data_offset property points to the absolute
    location in the pack at which that actual data stream can be found."""
    __slots__ = ()

    def __new__(cls, packoffset, type, size):
        # Layout: (pack_offset, numeric type id, uncompressed size).
        return tuple.__new__(cls, (packoffset, type, size))

    def __init__(self, *args):
        tuple.__init__(self)

    #{ Interface

    @property
    def pack_offset(self):
        """:return: absolute offset of this object within the packfile"""
        return self[0]

    @property
    def type(self):
        """:return: type string derived from our numerical type id"""
        return type_id_to_type_map[self[1]]

    @property
    def type_id(self):
        """:return: numerical type id as stored in the pack"""
        return self[1]

    @property
    def size(self):
        """:return: uncompressed size in bytes"""
        return self[2]

    #} END interface
101
+
102
+
103
class ODeltaPackInfo(OPackInfo):

    """Adds delta specific information,
    Either the 20 byte sha which points to some object in the database,
    or the negative offset from the pack_offset, so that pack_offset - delta_info yields
    the pack offset of the base object"""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size, delta_info):
        # Extends the parent tuple layout with delta_info at index 3.
        return tuple.__new__(cls, (packoffset, type, size, delta_info))

    #{ Interface
    @property
    def delta_info(self):
        # Either a 20-byte binary sha (ref-delta) or a negative offset
        # relative to pack_offset (ofs-delta) -- see class docstring.
        return self[3]
    #} END interface
119
+
120
+
121
class OStream(OInfo):

    """Base for object streams retrieved from the database, providing additional
    information about the stream.
    Generally, ODB streams are read-only as objects are immutable"""
    __slots__ = tuple()

    def __new__(cls, sha, type, size, stream, *args, **kwargs):
        """Helps with the initialization of subclasses"""
        # Extra positional/keyword args are accepted (and ignored) so that
        # subclasses with richer signatures can reuse this constructor.
        return tuple.__new__(cls, (sha, type, size, stream))

    def __init__(self, *args, **kwargs):
        tuple.__init__(self)

    #{ Stream Reader Interface

    def read(self, size=-1):
        # Delegate to the wrapped stream stored at index 3.
        return self[3].read(size)

    @property
    def stream(self):
        return self[3]

    #} END stream reader interface
145
+
146
+
147
class ODeltaStream(OStream):

    """Uses size info of its stream, delaying reads"""

    def __new__(cls, sha, type, size, stream, *args, **kwargs):
        """Helps with the initialization of subclasses"""
        return tuple.__new__(cls, (sha, type, size, stream))

    #{ Stream Reader Interface

    @property
    def size(self):
        # Unlike OInfo.size (self[2]), the size is read lazily from the
        # wrapped stream, which may only know it after delta resolution.
        return self[3].size

    #} END stream reader interface
162
+
163
+
164
class OPackStream(OPackInfo):

    """Next to pack object information, a stream outputting an undeltified base object
    is provided"""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size, stream, *args):
        """Helps with the initialization of subclasses"""
        return tuple.__new__(cls, (packoffset, type, size, stream))

    #{ Stream Reader Interface
    def read(self, size=-1):
        # Delegate to the wrapped stream stored at index 3.
        return self[3].read(size)

    @property
    def stream(self):
        return self[3]
    #} END stream reader interface
182
+
183
+
184
class ODeltaPackStream(ODeltaPackInfo):

    """Provides a stream outputting the uncompressed offset delta information"""
    __slots__ = tuple()

    def __new__(cls, packoffset, type, size, delta_info, stream):
        return tuple.__new__(cls, (packoffset, type, size, delta_info, stream))

    #{ Stream Reader Interface
    def read(self, size=-1):
        # The stream lives at index 4 here; index 3 holds delta_info.
        return self[4].read(size)

    @property
    def stream(self):
        return self[4]
    #} END stream reader interface
200
+
201
+
202
class IStream(list):

    """Represents an input content stream to be fed into the ODB. It is mutable to allow
    the ODB to record information about the operations outcome right in this instance.

    It provides interfaces for the OStream and a StreamReader to allow the instance
    to blend in without prior conversion.

    The only method your content stream must support is 'read'"""
    __slots__ = tuple()

    # Internal list layout: [binsha, type, size, stream, error]

    def __new__(cls, type, size, stream, sha=None):
        return list.__new__(cls, (sha, type, size, stream, None))

    def __init__(self, type, size, stream, sha=None):
        list.__init__(self, (sha, type, size, stream, None))

    #{ Interface
    @property
    def hexsha(self):
        """:return: our sha, hex encoded, 40 bytes"""
        return bin_to_hex(self[0])

    def _error(self):
        """:return: the error that occurred when processing the stream, or None"""
        return self[4]

    def _set_error(self, exc):
        """Set this input stream to the given exc, may be None to reset the error"""
        self[4] = exc

    error = property(_error, _set_error)

    #} END interface

    #{ Stream Reader Interface

    def read(self, size=-1):
        """Implements a simple stream reader interface, passing the read call on
        to our internal stream"""
        return self[3].read(size)

    #} END stream reader interface

    #{ interface

    # Read/write properties below expose the mutable list slots so the ODB
    # can record outcomes (e.g. the computed binsha) directly on this object.

    def _set_binsha(self, binsha):
        self[0] = binsha

    def _binsha(self):
        return self[0]

    binsha = property(_binsha, _set_binsha)

    def _type(self):
        return self[1]

    def _set_type(self, type):
        self[1] = type

    type = property(_type, _set_type)

    def _size(self):
        return self[2]

    def _set_size(self, size):
        self[2] = size

    size = property(_size, _set_size)

    def _stream(self):
        return self[3]

    def _set_stream(self, stream):
        self[3] = stream

    stream = property(_stream, _set_stream)

    #} END odb info interface
281
+
282
+
283
+ class InvalidOInfo(tuple):
284
+
285
+ """Carries information about a sha identifying an object which is invalid in
286
+ the queried database. The exception attribute provides more information about
287
+ the cause of the issue"""
288
+ __slots__ = tuple()
289
+
290
+ def __new__(cls, sha, exc):
291
+ return tuple.__new__(cls, (sha, exc))
292
+
293
+ def __init__(self, sha, exc):
294
+ tuple.__init__(self, (sha, exc))
295
+
296
+ @property
297
+ def binsha(self):
298
+ return self[0]
299
+
300
+ @property
301
+ def hexsha(self):
302
+ return bin_to_hex(self[0])
303
+
304
+ @property
305
+ def error(self):
306
+ """:return: exception instance explaining the failure"""
307
+ return self[1]
308
+
309
+
310
class InvalidOStream(InvalidOInfo):

    """Carries information about an invalid ODB stream"""
    # Same payload as InvalidOInfo; the distinct type lets callers tell a
    # failed stream() request apart from a failed info() request.
    __slots__ = tuple()

#} END ODB Bases
parrot/lib/python3.10/site-packages/gitdb/const.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Single-byte constants used when building/parsing object headers.
BYTE_SPACE = b' '
NULL_BYTE = b'\0'
# The all-zero sha in hex (40 chars) and binary (20 bytes) form.
NULL_HEX_SHA = "0" * 40
NULL_BIN_SHA = NULL_BYTE * 20
parrot/lib/python3.10/site-packages/gitdb/db/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+
6
+ from gitdb.db.base import *
7
+ from gitdb.db.loose import *
8
+ from gitdb.db.mem import *
9
+ from gitdb.db.pack import *
10
+ from gitdb.db.git import *
11
+ from gitdb.db.ref import *
parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/git.cpython-310.pyc ADDED
Binary file (2.31 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/loose.cpython-310.pyc ADDED
Binary file (5.71 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/mem.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/db/__pycache__/pack.cpython-310.pyc ADDED
Binary file (6.13 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/db/base.py ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Contains implementations of database retrieveing objects"""
6
+ from gitdb.util import (
7
+ join,
8
+ LazyMixin,
9
+ hex_to_bin
10
+ )
11
+
12
+ from gitdb.utils.encoding import force_text
13
+ from gitdb.exc import (
14
+ BadObject,
15
+ AmbiguousObjectName
16
+ )
17
+
18
+ from itertools import chain
19
+ from functools import reduce
20
+
21
+
22
+ __all__ = ('ObjectDBR', 'ObjectDBW', 'FileDBBase', 'CompoundDB', 'CachingDB')
23
+
24
+
25
class ObjectDBR:

    """Defines an interface for object database lookup.
    Objects are identified either by their 20 byte bin sha"""

    def __contains__(self, sha):
        # Fix: previously returned the attribute ``self.has_obj`` without
        # calling anything -- an always-truthy method object (or an
        # AttributeError, since no ``has_obj`` is defined).  Membership must
        # delegate to the query interface below.
        return self.has_object(sha)

    #{ Query Interface
    def has_object(self, sha):
        """
        Whether the object identified by the given 20 bytes
        binary sha is contained in the database

        :return: True if the object identified by the given 20 bytes
            binary sha is contained in the database"""
        raise NotImplementedError("To be implemented in subclass")

    def info(self, sha):
        """ :return: OInfo instance
        :param sha: bytes binary sha
        :raise BadObject:"""
        raise NotImplementedError("To be implemented in subclass")

    def stream(self, sha):
        """:return: OStream instance
        :param sha: 20 bytes binary sha
        :raise BadObject:"""
        raise NotImplementedError("To be implemented in subclass")

    def size(self):
        """:return: amount of objects in this database"""
        raise NotImplementedError()

    def sha_iter(self):
        """Return iterator yielding 20 byte shas for all objects in this data base"""
        raise NotImplementedError()

    #} END query interface
64
+
65
+
66
class ObjectDBW:

    """Defines an interface to create objects in the database"""

    def __init__(self, *args, **kwargs):
        # Optional override for the stream new objects are written to.
        self._ostream = None

    #{ Edit Interface
    def set_ostream(self, stream):
        """
        Adjusts the stream to which all data should be sent when storing new objects

        :param stream: if not None, the stream to use, if None the default stream
            will be used.
        :return: previously installed stream, or None if there was no override
        :raise TypeError: if the stream doesn't have the supported functionality"""
        previous, self._ostream = self._ostream, stream
        return previous

    def ostream(self):
        """
        Return the output stream

        :return: overridden output stream this instance will write to, or None
            if it will write to the default stream"""
        return self._ostream

    def store(self, istream):
        """
        Create a new object in the database
        :return: the input istream object with its sha set to its corresponding value

        :param istream: IStream compatible instance. If its sha is already set
            to a value, the object will just be stored in the our database format,
            in which case the input stream is expected to be in object format ( header + contents ).
        :raise IOError: if data could not be written"""
        raise NotImplementedError("To be implemented in subclass")

    #} END edit interface
106
+
107
+
108
class FileDBBase:

    """Basic file-database support: answers path queries relative to a fixed
    root directory, so sub-databases can locate their data files."""

    def __init__(self, root_path):
        """Set this instance up to look for its files beneath ``root_path``.

        All subsequent operations are resolved relative to that path.
        :raise InvalidDBRoot:
        **Note:** No accessibility check is performed here - the root might not
        be reachable yet, and only needs to become reachable before the first
        actual access."""
        super().__init__()
        self._root_path = root_path

    #{ Interface
    def root_path(self):
        """:return: path at which this database operates"""
        return self._root_path

    def db_path(self, rela_path):
        """
        :return: ``rela_path`` joined onto our database root, allowing
            potential access to data files"""
        return join(self._root_path, force_text(rela_path))
    #} END interface
134
+
135
+
136
class CachingDB:

    """A database which uses caches to speed-up access"""

    #{ Interface
    def update_cache(self, force=False):
        """
        Call this method if the underlying data changed to trigger an update
        of the internal caching structures.

        :param force: if True, the update must be performed. Otherwise the implementation
            may decide not to perform an update if it thinks nothing has changed.
        :return: True if an update was performed as something change indeed"""
        # Interface stub: there is no body, so this base implementation
        # returns None. Subclasses provide the actual cache invalidation.

    #} END interface
151
+
152
+
153
def _databases_recursive(database, output):
    """Flatten ``database`` into the ``output`` list, in lookup order.

    Handles Loose, Packed and compound databases: non-compound databases are
    appended as-is, compound ones are expanded recursively."""
    if not isinstance(database, CompoundDB):
        output.append(database)
        return
    # Append the plain children first, then descend into compound children,
    # preserving the original lookup order.
    children = database.databases()
    output.extend(child for child in children if not isinstance(child, CompoundDB))
    for compound_child in children:
        if isinstance(compound_child, CompoundDB):
            _databases_recursive(compound_child, output)
164
+
165
+
166
class CompoundDB(ObjectDBR, LazyMixin, CachingDB):

    """A database which delegates calls to sub-databases.

    Databases are stored in the lazy-loaded _dbs attribute.
    Define _set_cache_ to update it with your databases"""

    def _set_cache_(self, attr):
        # Presumably invoked by LazyMixin on first access of a missing
        # attribute — TODO confirm against gitdb.util.LazyMixin.
        if attr == '_dbs':
            # ordered list of sub-databases queried in sequence
            self._dbs = list()
        elif attr == '_db_cache':
            # maps a binary sha to the sub-database that contains it
            self._db_cache = dict()
        else:
            super()._set_cache_(attr)

    def _db_query(self, sha):
        """:return: database containing the given 20 byte sha
        :raise BadObject:"""
        # most databases use binary representations, prevent converting
        # it every time a database is being queried
        try:
            return self._db_cache[sha]
        except KeyError:
            pass
        # END first level cache

        # cache miss: probe each sub-database in order and memoize the hit
        for db in self._dbs:
            if db.has_object(sha):
                self._db_cache[sha] = db
                return db
        # END for each database
        raise BadObject(sha)

    #{ ObjectDBR interface

    def has_object(self, sha):
        try:
            self._db_query(sha)
            return True
        except BadObject:
            return False
        # END handle exceptions

    def info(self, sha):
        # delegate to whichever sub-database owns the sha
        return self._db_query(sha).info(sha)

    def stream(self, sha):
        # delegate to whichever sub-database owns the sha
        return self._db_query(sha).stream(sha)

    def size(self):
        """:return: total size of all contained databases"""
        return reduce(lambda x, y: x + y, (db.size() for db in self._dbs), 0)

    def sha_iter(self):
        # concatenation of all sub-database iterators, in lookup order
        return chain(*(db.sha_iter() for db in self._dbs))

    #} END object DBR Interface

    #{ Interface

    def databases(self):
        """:return: tuple of database instances we use for lookups"""
        return tuple(self._dbs)

    def update_cache(self, force=False):
        # something might have changed, clear everything
        self._db_cache.clear()
        stat = False
        for db in self._dbs:
            if isinstance(db, CachingDB):
                stat |= db.update_cache(force)
            # END if is caching db
        # END for each database to update
        return stat

    def partial_to_complete_sha_hex(self, partial_hexsha):
        """
        :return: 20 byte binary sha1 from the given less-than-40 byte hexsha (bytes or str)
        :param partial_hexsha: hexsha with less than 40 byte
        :raise AmbiguousObjectName: """
        databases = list()
        _databases_recursive(self, databases)
        partial_hexsha = force_text(partial_hexsha)
        len_partial_hexsha = len(partial_hexsha)
        # An odd number of hex digits cannot be converted to whole bytes; pad
        # with '0' so hex_to_bin succeeds. The true (unpadded) length is still
        # passed on via len_partial_hexsha so matching remains exact.
        if len_partial_hexsha % 2 != 0:
            partial_binsha = hex_to_bin(partial_hexsha + "0")
        else:
            partial_binsha = hex_to_bin(partial_hexsha)
        # END assure successful binary conversion

        candidate = None
        for db in databases:
            full_bin_sha = None
            try:
                # loose/compound dbs resolve from hex, pack dbs from binary
                if hasattr(db, 'partial_to_complete_sha_hex'):
                    full_bin_sha = db.partial_to_complete_sha_hex(partial_hexsha)
                else:
                    full_bin_sha = db.partial_to_complete_sha(partial_binsha, len_partial_hexsha)
                # END handle database type
            except BadObject:
                continue
            # END ignore bad objects
            if full_bin_sha:
                # two different resolutions for the same prefix -> ambiguous
                if candidate and candidate != full_bin_sha:
                    raise AmbiguousObjectName(partial_hexsha)
                candidate = full_bin_sha
            # END handle candidate
        # END for each db
        if not candidate:
            raise BadObject(partial_binsha)
        return candidate

    #} END interface
parrot/lib/python3.10/site-packages/gitdb/db/git.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ from gitdb.db.base import (
6
+ CompoundDB,
7
+ ObjectDBW,
8
+ FileDBBase
9
+ )
10
+
11
+ from gitdb.db.loose import LooseObjectDB
12
+ from gitdb.db.pack import PackedDB
13
+ from gitdb.db.ref import ReferenceDB
14
+
15
+ from gitdb.exc import InvalidDBRoot
16
+
17
+ import os
18
+
19
+ __all__ = ('GitDB', )
20
+
21
+
22
class GitDB(FileDBBase, ObjectDBW, CompoundDB):

    """A git-style object database, which contains all objects in the 'objects'
    subdirectory

    ``IMPORTANT``: The usage of this implementation is highly discouraged as it fails to release file-handles.
    This can be a problem with long-running processes and/or big repositories.
    """
    # Configuration
    # Sub-database classes; override these to substitute implementations.
    PackDBCls = PackedDB
    LooseDBCls = LooseObjectDB
    ReferenceDBCls = ReferenceDB

    # Directories (relative to the objects root)
    packs_dir = 'pack'
    loose_dir = ''
    alternates_dir = os.path.join('info', 'alternates')

    def __init__(self, root_path):
        """Initialize ourselves on a git objects directory"""
        super().__init__(root_path)

    def _set_cache_(self, attr):
        # Both lazy attributes are produced by the same scan, so either name
        # triggers the full initialization.
        if attr == '_dbs' or attr == '_loose_db':
            self._dbs = list()
            loose_db = None
            # Lookup priority follows tuple order: packs, then loose objects,
            # then alternates. Only directories that exist become sub-dbs.
            for subpath, dbcls in ((self.packs_dir, self.PackDBCls),
                                   (self.loose_dir, self.LooseDBCls),
                                   (self.alternates_dir, self.ReferenceDBCls)):
                path = self.db_path(subpath)
                if os.path.exists(path):
                    self._dbs.append(dbcls(path))
                    if dbcls is self.LooseDBCls:
                        loose_db = self._dbs[-1]
                    # END remember loose db
                # END check path exists
            # END for each db type

            # should have at least one subdb
            if not self._dbs:
                raise InvalidDBRoot(self.root_path())
            # END handle error

            # we the first one should have the store method
            assert loose_db is not None and hasattr(loose_db, 'store'), "First database needs store functionality"

            # finally set the value
            self._loose_db = loose_db
        else:
            super()._set_cache_(attr)
        # END handle attrs

    #{ ObjectDBW interface
    # All writes are delegated to the loose object database: new objects are
    # always written as loose objects, never directly into packs.

    def store(self, istream):
        return self._loose_db.store(istream)

    def ostream(self):
        return self._loose_db.ostream()

    def set_ostream(self, ostream):
        return self._loose_db.set_ostream(ostream)

    #} END objectdbw interface
parrot/lib/python3.10/site-packages/gitdb/db/loose.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ from gitdb.db.base import (
6
+ FileDBBase,
7
+ ObjectDBR,
8
+ ObjectDBW
9
+ )
10
+
11
+ from gitdb.exc import (
12
+ BadObject,
13
+ AmbiguousObjectName
14
+ )
15
+
16
+ from gitdb.stream import (
17
+ DecompressMemMapReader,
18
+ FDCompressedSha1Writer,
19
+ FDStream,
20
+ Sha1Writer
21
+ )
22
+
23
+ from gitdb.base import (
24
+ OStream,
25
+ OInfo
26
+ )
27
+
28
+ from gitdb.util import (
29
+ file_contents_ro_filepath,
30
+ ENOENT,
31
+ hex_to_bin,
32
+ bin_to_hex,
33
+ exists,
34
+ chmod,
35
+ isfile,
36
+ remove,
37
+ rename,
38
+ dirname,
39
+ basename,
40
+ join
41
+ )
42
+
43
+ from gitdb.fun import (
44
+ chunk_size,
45
+ loose_object_header_info,
46
+ write_object,
47
+ stream_copy
48
+ )
49
+
50
+ from gitdb.utils.encoding import force_bytes
51
+
52
+ import tempfile
53
+ import os
54
+ import sys
55
+
56
+
57
+ __all__ = ('LooseObjectDB', )
58
+
59
+
60
class LooseObjectDB(FileDBBase, ObjectDBR, ObjectDBW):

    """A database which operates on loose object files, one file per object,
    stored beneath the database root as ``<first-2-hex-chars>/<remaining-38>``."""

    # CONFIGURATION
    # chunks in which data will be copied between streams
    stream_chunk_size = chunk_size

    # On windows we need to keep it writable, otherwise it cannot be removed
    # either
    new_objects_mode = int("444", 8)
    if os.name == 'nt':
        new_objects_mode = int("644", 8)

    def __init__(self, root_path):
        super().__init__(root_path)
        # cache of hexsha -> verified on-disk file path
        self._hexsha_to_file = dict()
        # Additional Flags - might be set to 0 after the first failure
        # Depending on the root, this might work for some mounts, for others not, which
        # is why it is per instance
        self._fd_open_flags = getattr(os, 'O_NOATIME', 0)

    #{ Interface
    def object_path(self, hexsha):
        """
        :return: path at which the object with the given hexsha would be stored,
            relative to the database root"""
        return join(hexsha[:2], hexsha[2:])

    def readable_db_object_path(self, hexsha):
        """
        :return: readable object path to the object identified by hexsha
        :raise BadObject: If the object file does not exist"""
        try:
            return self._hexsha_to_file[hexsha]
        except KeyError:
            pass
        # END ignore cache misses

        # try filesystem
        path = self.db_path(self.object_path(hexsha))
        if exists(path):
            self._hexsha_to_file[hexsha] = path
            return path
        # END handle cache
        raise BadObject(hexsha)

    def partial_to_complete_sha_hex(self, partial_hexsha):
        """:return: 20 byte binary sha1 string which matches the given name uniquely
        :param partial_hexsha: hexadecimal partial name (bytes or ascii string)
        :raise AmbiguousObjectName:
        :raise BadObject: """
        # Linear scan over all loose objects; fine for the typically small
        # number of loose objects.
        candidate = None
        for binsha in self.sha_iter():
            if bin_to_hex(binsha).startswith(force_bytes(partial_hexsha)):
                # a second distinct match makes the prefix ambiguous
                if candidate is not None:
                    raise AmbiguousObjectName(partial_hexsha)
                candidate = binsha
        # END for each object
        if candidate is None:
            raise BadObject(partial_hexsha)
        return candidate

    #} END interface

    def _map_loose_object(self, sha):
        """
        :return: memory map of that file to allow random read access
        :raise BadObject: if object could not be located"""
        db_path = self.db_path(self.object_path(bin_to_hex(sha)))
        try:
            return file_contents_ro_filepath(db_path, flags=self._fd_open_flags)
        except OSError as e:
            if e.errno != ENOENT:
                # Failure other than "file missing" may be caused by our
                # O_NOATIME flag - try again without it.
                try:
                    return file_contents_ro_filepath(db_path)
                except OSError as new_e:
                    raise BadObject(sha) from new_e
                # didn't work because of our flag, don't try it again
                self._fd_open_flags = 0
            else:
                raise BadObject(sha) from e
            # END handle error
        # END exception handling

    def set_ostream(self, stream):
        """:raise TypeError: if the stream does not support the Sha1Writer interface"""
        if stream is not None and not isinstance(stream, Sha1Writer):
            # BUGFIX: corrected typo in the error message ("musst" -> "must")
            raise TypeError("Output stream must support the %s interface" % Sha1Writer.__name__)
        return super().set_ostream(stream)

    def info(self, sha):
        """:return: OInfo for the object at ``sha``, releasing the mapping
        before returning"""
        m = self._map_loose_object(sha)
        try:
            typ, size = loose_object_header_info(m)
            return OInfo(sha, typ, size)
        finally:
            if hasattr(m, 'close'):
                m.close()
        # END assure release of system resources

    def stream(self, sha):
        """:return: OStream over the decompressed object data; the mapping is
        closed when the reader is deleted"""
        m = self._map_loose_object(sha)
        typ, size, stream = DecompressMemMapReader.new(m, close_on_deletion=True)
        return OStream(sha, typ, size, stream)

    def has_object(self, sha):
        try:
            self.readable_db_object_path(bin_to_hex(sha))
            return True
        except BadObject:
            return False
        # END check existence

    def store(self, istream):
        """Store ``istream`` as a loose object file.

        note: The sha we produce will be hex by nature.
        :return: the input istream with its binsha set"""
        tmp_path = None
        writer = self.ostream()
        if writer is None:
            # open a tmp file to write the data to
            fd, tmp_path = tempfile.mkstemp(prefix='obj', dir=self._root_path)

            # A pre-set binsha means the input is already in object format and
            # only needs to be copied; otherwise compress and hash on the fly.
            if istream.binsha is None:
                writer = FDCompressedSha1Writer(fd)
            else:
                writer = FDStream(fd)
            # END handle direct stream copies
        # END handle custom writer

        try:
            try:
                if istream.binsha is not None:
                    # copy as much as possible, the actual uncompressed item size might
                    # be smaller than the compressed version
                    stream_copy(istream.read, writer.write, sys.maxsize, self.stream_chunk_size)
                else:
                    # write object with header, we have to make a new one
                    write_object(istream.type, istream.size, istream.read, writer.write,
                                 chunk_size=self.stream_chunk_size)
                # END handle direct stream copies
            finally:
                if tmp_path:
                    writer.close()
            # END assure target stream is closed
        except BaseException:
            # clean up the temp file on ANY failure (including interrupts),
            # then re-raise unchanged
            if tmp_path:
                os.remove(tmp_path)
            raise
        # END assure tmpfile removal on error

        if istream.binsha:
            hexsha = istream.hexsha
        else:
            hexsha = writer.sha(as_hex=True)
        # END handle sha

        if tmp_path:
            obj_path = self.db_path(self.object_path(hexsha))
            obj_dir = dirname(obj_path)
            os.makedirs(obj_dir, exist_ok=True)
            # END handle destination directory
            # rename onto existing doesn't work on NTFS
            if isfile(obj_path):
                remove(tmp_path)
            else:
                rename(tmp_path, obj_path)
            # end rename only if needed

            # make sure its readable for all ! It started out as rw-- tmp file
            # but needs to be rwrr
            chmod(obj_path, self.new_objects_mode)
        # END handle dry_run

        istream.binsha = hex_to_bin(hexsha)
        return istream

    def sha_iter(self):
        # find all files which look like an object, extract sha from there:
        # directory name carries the first 2 hex chars, file name the other 38
        for root, dirs, files in os.walk(self.root_path()):
            root_base = basename(root)
            if len(root_base) != 2:
                continue

            for f in files:
                if len(f) != 38:
                    continue
                yield hex_to_bin(root_base + f)
            # END for each file
        # END for each walk iteration

    def size(self):
        # materializes the full iterator; cost is proportional to object count
        return len(tuple(self.sha_iter()))
parrot/lib/python3.10/site-packages/gitdb/db/mem.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Contains the MemoryDatabase implementation"""
6
+ from gitdb.db.loose import LooseObjectDB
7
+ from gitdb.db.base import (
8
+ ObjectDBR,
9
+ ObjectDBW
10
+ )
11
+
12
+ from gitdb.base import (
13
+ OStream,
14
+ IStream,
15
+ )
16
+
17
+ from gitdb.exc import (
18
+ BadObject,
19
+ UnsupportedOperation
20
+ )
21
+
22
+ from gitdb.stream import (
23
+ ZippedStoreShaWriter,
24
+ DecompressMemMapReader,
25
+ )
26
+
27
+ from io import BytesIO
28
+
29
+ __all__ = ("MemoryDB", )
30
+
31
+
32
class MemoryDB(ObjectDBR, ObjectDBW):

    """A memory database stores everything to memory, providing fast IO and object
    retrieval. It should be used to buffer results and obtain SHAs before writing
    it to the actual physical storage, as it allows to query whether object already
    exists in the target storage before introducing actual IO"""

    def __init__(self):
        super().__init__()
        # A throwaway LooseObjectDB used purely for its sha/compression
        # machinery; its root path is never touched because we redirect its
        # output stream into memory in store().
        self._db = LooseObjectDB("path/doesnt/matter")

        # maps 20 byte shas to their OStream objects
        self._cache = dict()

    def set_ostream(self, stream):
        # The output destination is fixed (memory), so overriding it is an error.
        raise UnsupportedOperation("MemoryDB's always stream into memory")

    def store(self, istream):
        """Compute the object's sha and keep its compressed form in memory.

        :return: the input istream with its binsha set"""
        zstream = ZippedStoreShaWriter()
        self._db.set_ostream(zstream)

        istream = self._db.store(istream)
        zstream.close()  # close to flush
        zstream.seek(0)

        # don't provide a size, the stream is written in object format, hence the
        # header needs decompression
        decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
        self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)

        return istream

    def has_object(self, sha):
        return sha in self._cache

    def info(self, sha):
        # we always return streams, which are infos as well
        return self.stream(sha)

    def stream(self, sha):
        """:return: cached OStream for ``sha``
        :raise BadObject: if the sha was never stored here"""
        try:
            ostream = self._cache[sha]
            # rewind stream for the next one to read
            ostream.stream.seek(0)
            return ostream
        except KeyError as e:
            raise BadObject(sha) from e
        # END exception handling

    def size(self):
        return len(self._cache)

    def sha_iter(self):
        # a dict keys view: iterable of all stored 20 byte binary shas
        return self._cache.keys()

    #{ Interface
    def stream_copy(self, sha_iter, odb):
        """Copy the streams as identified by sha's yielded by sha_iter into the given odb
        The streams will be copied directly
        **Note:** the object will only be written if it did not exist in the target db

        :return: amount of streams actually copied into odb. If smaller than the amount
            of input shas, one or more objects did already exist in odb"""
        count = 0
        for sha in sha_iter:
            # skip objects the target already has
            if odb.has_object(sha):
                continue
            # END check object existence

            ostream = self.stream(sha)
            # compressed data including header
            sio = BytesIO(ostream.stream.data())
            istream = IStream(ostream.type, ostream.size, sio, sha)

            odb.store(istream)
            count += 1
        # END for each sha
        return count
    #} END interface
parrot/lib/python3.10/site-packages/gitdb/db/pack.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Module containing a database to deal with packs"""
6
+ from gitdb.db.base import (
7
+ FileDBBase,
8
+ ObjectDBR,
9
+ CachingDB
10
+ )
11
+
12
+ from gitdb.util import LazyMixin
13
+
14
+ from gitdb.exc import (
15
+ BadObject,
16
+ UnsupportedOperation,
17
+ AmbiguousObjectName
18
+ )
19
+
20
+ from gitdb.pack import PackEntity
21
+
22
+ from functools import reduce
23
+
24
+ import os
25
+ import glob
26
+
27
+ __all__ = ('PackedDB', )
28
+
29
+ #{ Utilities
30
+
31
+
32
class PackedDB(FileDBBase, ObjectDBR, CachingDB, LazyMixin):

    """A database operating on a set of object packs"""

    # sort the priority list every N queries
    # Higher values are better, performance tests don't show this has
    # any effect, but it should have one
    _sort_interval = 500

    def __init__(self, root_path):
        super().__init__(root_path)
        # list of lists with three items:
        # * hits - number of times the pack was hit with a request
        # * entity - Pack entity instance
        # * sha_to_index - PackIndexFile.sha_to_index method for direct cache query
        # self._entities = list() # lazy loaded list
        self._hit_count = 0  # amount of hits
        self._st_mtime = 0  # last modification data of our root path

    def _set_cache_(self, attr):
        # lazy init hook: first access of _entities triggers a full pack scan
        if attr == '_entities':
            self._entities = list()
            self.update_cache(force=True)
        # END handle entities initialization

    def _sort_entities(self):
        # most frequently hit packs first, so lookups probe them early
        self._entities.sort(key=lambda l: l[0], reverse=True)

    def _pack_info(self, sha):
        """:return: tuple(entity, index) for an item at the given sha
        :param sha: 20 or 40 byte sha
        :raise BadObject:
        **Note:** This method is not thread-safe, but may be hit in multi-threaded
            operation. The worst thing that can happen though is a counter that
            was not incremented, or the list being in wrong order. So we safe
            the time for locking here, lets see how that goes"""
        # periodically re-sort by hit count so hot packs stay in front
        if self._hit_count % self._sort_interval == 0:
            self._sort_entities()
        # END update sorting

        for item in self._entities:
            index = item[2](sha)
            if index is not None:
                item[0] += 1  # one hit for you
                self._hit_count += 1  # general hit count
                return (item[1], index)
            # END index found in pack
        # END for each item

        # no hit, see whether we have to update packs
        # NOTE: considering packs don't change very often, we safe this call
        # and leave it to the super-caller to trigger that
        raise BadObject(sha)

    #{ Object DB Read

    def has_object(self, sha):
        try:
            self._pack_info(sha)
            return True
        except BadObject:
            return False
        # END exception handling

    def info(self, sha):
        entity, index = self._pack_info(sha)
        return entity.info_at_index(index)

    def stream(self, sha):
        entity, index = self._pack_info(sha)
        return entity.stream_at_index(index)

    def sha_iter(self):
        # yield every sha of every pack, pack by pack
        for entity in self.entities():
            index = entity.index()
            sha_by_index = index.sha
            for index in range(index.size()):
                yield sha_by_index(index)
            # END for each index
        # END for each entity

    def size(self):
        # total object count across all pack indices
        sizes = [item[1].index().size() for item in self._entities]
        return reduce(lambda x, y: x + y, sizes, 0)

    #} END object db read

    #{ object db write

    def store(self, istream):
        """Storing individual objects is not feasible as a pack is designed to
        hold multiple objects. Writing or rewriting packs for single objects is
        inefficient"""
        raise UnsupportedOperation()

    #} END object db write

    #{ Interface

    def update_cache(self, force=False):
        """
        Update our cache with the actually existing packs on disk. Add new ones,
        and remove deleted ones. We keep the unchanged ones

        :param force: If True, the cache will be updated even though the directory
            does not appear to have changed according to its modification timestamp.
        :return: True if the packs have been updated so there is new information,
            False if there was no change to the pack database"""
        stat = os.stat(self.root_path())
        if not force and stat.st_mtime <= self._st_mtime:
            return False
        # END abort early on no change
        self._st_mtime = stat.st_mtime

        # packs are supposed to be prefixed with pack- by git-convention
        # get all pack files, figure out what changed
        pack_files = set(glob.glob(os.path.join(self.root_path(), "pack-*.pack")))
        our_pack_files = {item[1].pack().path() for item in self._entities}

        # new packs
        for pack_file in (pack_files - our_pack_files):
            # init the hit-counter/priority with the size, a good measure for hit-
            # probability. Its implemented so that only 12 bytes will be read
            entity = PackEntity(pack_file)
            self._entities.append([entity.pack().size(), entity, entity.index().sha_to_index])
        # END for each new packfile

        # removed packs
        for pack_file in (our_pack_files - pack_files):
            del_index = -1
            for i, item in enumerate(self._entities):
                if item[1].pack().path() == pack_file:
                    del_index = i
                    break
                # END found index
            # END for each entity
            assert del_index != -1
            del(self._entities[del_index])
        # END for each removed pack

        # reinitialize priorities
        self._sort_entities()
        return True

    def entities(self):
        """:return: list of pack entities operated upon by this database"""
        return [item[1] for item in self._entities]

    def partial_to_complete_sha(self, partial_binsha, canonical_length):
        """:return: 20 byte sha as inferred by the given partial binary sha
        :param partial_binsha: binary sha with less than 20 bytes
        :param canonical_length: length of the corresponding canonical representation.
            It is required as binary sha's cannot display whether the original hex sha
            had an odd or even number of characters
        :raise AmbiguousObjectName:
        :raise BadObject: """
        candidate = None
        for item in self._entities:
            item_index = item[1].index().partial_sha_to_index(partial_binsha, canonical_length)
            if item_index is not None:
                sha = item[1].index().sha(item_index)
                # two distinct full shas for the same prefix -> ambiguous
                if candidate and candidate != sha:
                    raise AmbiguousObjectName(partial_binsha)
                candidate = sha
            # END handle full sha could be found
        # END for each entity

        if candidate:
            return candidate

        # still not found ?
        raise BadObject(partial_binsha)

    #} END interface
parrot/lib/python3.10/site-packages/gitdb/db/ref.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ import codecs
6
+ from gitdb.db.base import (
7
+ CompoundDB,
8
+ )
9
+
10
+ __all__ = ('ReferenceDB', )
11
+
12
+
13
class ReferenceDB(CompoundDB):

    """A database consisting of database referred to in a file"""

    # Configuration
    # Specifies the object database to use for the paths found in the alternates
    # file. If None, it defaults to the GitDB
    ObjectDBCls = None

    def __init__(self, ref_file):
        super().__init__()
        # path to the file (e.g. git's info/alternates) listing one db root per line
        self._ref_file = ref_file

    def _set_cache_(self, attr):
        if attr == '_dbs':
            # populate the sub-database list lazily from the reference file
            self._dbs = list()
            self._update_dbs_from_ref_file()
        else:
            super()._set_cache_(attr)
        # END handle attrs

    def _update_dbs_from_ref_file(self):
        """Synchronize self._dbs with the paths currently listed in the
        reference file: drop databases whose path disappeared, add new ones."""
        dbcls = self.ObjectDBCls
        if dbcls is None:
            # late import to avoid a circular dependency with gitdb.db.git
            from gitdb.db.git import GitDB
            dbcls = GitDB
        # END get db type

        # try to get as many as possible, don't fail if some are unavailable
        ref_paths = list()
        try:
            with codecs.open(self._ref_file, 'r', encoding="utf-8") as f:
                ref_paths = [l.strip() for l in f]
        except OSError:
            # a missing/unreadable reference file simply means no databases
            pass
        # END handle alternates

        ref_paths_set = set(ref_paths)
        cur_ref_paths_set = {db.root_path() for db in self._dbs}

        # remove existing
        for path in (cur_ref_paths_set - ref_paths_set):
            for i, db in enumerate(self._dbs[:]):
                if db.root_path() == path:
                    del(self._dbs[i])
                    continue
            # END del matching db
        # END for each path to remove

        # add new
        # sort them to maintain order
        added_paths = sorted(ref_paths_set - cur_ref_paths_set, key=lambda p: ref_paths.index(p))
        for path in added_paths:
            try:
                db = dbcls(path)
                # force an update to verify path
                if isinstance(db, CompoundDB):
                    db.databases()
                # END verification
                self._dbs.append(db)
            except Exception:
                # ignore invalid paths or issues
                pass
        # END for each path to add

    def update_cache(self, force=False):
        # re-read alternates and update databases
        self._update_dbs_from_ref_file()
        return super().update_cache(force)
parrot/lib/python3.10/site-packages/gitdb/exc.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Module with common exceptions"""
6
+ from gitdb.util import to_hex_sha
7
+
8
+ __all__ = [
9
+ 'AmbiguousObjectName',
10
+ 'BadName',
11
+ 'BadObject',
12
+ 'BadObjectType',
13
+ 'InvalidDBRoot',
14
+ 'ODBError',
15
+ 'ParseError',
16
+ 'UnsupportedOperation',
17
+ 'to_hex_sha',
18
+ ]
19
+
20
class ODBError(Exception):
    """Base class for every error raised by the object database."""
22
+
23
+
24
class InvalidDBRoot(ODBError):
    """Raised when an object database cannot be initialized at the given path."""
26
+
27
+
28
class BadObject(ODBError):
    """Raised when no object with the given SHA exists in the database.
    Instantiate with the sha that failed to resolve."""

    def __str__(self):
        return f"BadObject: {to_hex_sha(self.args[0])}"
34
+
35
+
36
class BadName(ODBError):
    """Raised when a name given to rev_parse could not be understood."""

    def __str__(self):
        return f"Ref '{self.args[0]}' did not resolve to an object"
41
+
42
+
43
class ParseError(ODBError):
    """Raised when parsing a file failed because its format is invalid."""
45
+
46
+
47
class AmbiguousObjectName(ODBError):
    """Raised when a (possibly shortened) name matches more than one object
    in the database instead of exactly one."""
50
+
51
+
52
class BadObjectType(ODBError):
    """Raised when an object carries a type the database does not support."""
54
+
55
+
56
class UnsupportedOperation(ODBError):
    """Raised when the requested operation cannot be performed by this
    object database implementation."""
parrot/lib/python3.10/site-packages/gitdb/fun.py ADDED
@@ -0,0 +1,704 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Contains basic c-functions which usually contain performance critical code
6
+ Keeping this code separate from the beginning makes it easier to out-source
7
+ it into c later, if required"""
8
+
9
+ import zlib
10
+ from gitdb.util import byte_ord
11
+ decompressobj = zlib.decompressobj
12
+
13
+ import mmap
14
+ from itertools import islice
15
+ from functools import reduce
16
+
17
+ from gitdb.const import NULL_BYTE, BYTE_SPACE
18
+ from gitdb.utils.encoding import force_text
19
+ from gitdb.typ import (
20
+ str_blob_type,
21
+ str_commit_type,
22
+ str_tree_type,
23
+ str_tag_type,
24
+ )
25
+
26
+ from io import StringIO
27
+
28
+ # INVARIANTS
29
+ OFS_DELTA = 6
30
+ REF_DELTA = 7
31
+ delta_types = (OFS_DELTA, REF_DELTA)
32
+
33
+ type_id_to_type_map = {
34
+ 0: b'', # EXT 1
35
+ 1: str_commit_type,
36
+ 2: str_tree_type,
37
+ 3: str_blob_type,
38
+ 4: str_tag_type,
39
+ 5: b'', # EXT 2
40
+ OFS_DELTA: "OFS_DELTA", # OFFSET DELTA
41
+ REF_DELTA: "REF_DELTA" # REFERENCE DELTA
42
+ }
43
+
44
+ type_to_type_id_map = {
45
+ str_commit_type: 1,
46
+ str_tree_type: 2,
47
+ str_blob_type: 3,
48
+ str_tag_type: 4,
49
+ "OFS_DELTA": OFS_DELTA,
50
+ "REF_DELTA": REF_DELTA,
51
+ }
52
+
53
+ # used when dealing with larger streams
54
+ chunk_size = 1000 * mmap.PAGESIZE
55
+
56
+ __all__ = ('is_loose_object', 'loose_object_header_info', 'msb_size', 'pack_object_header_info',
57
+ 'write_object', 'loose_object_header', 'stream_copy', 'apply_delta_data',
58
+ 'is_equal_canonical_sha', 'connect_deltas', 'DeltaChunkList', 'create_pack_object_header')
59
+
60
+
61
+ #{ Structures
62
+
63
def _set_delta_rbound(d, size):
    """Clamp the right bound of delta *d* by setting its target size.

    :param size: new size relative to the delta's target offset; may not be 0
        and must be smaller or equal to the current size
    :return: d"""
    d.ts = size
    # NOTE: the attached data is truncated automatically when the delta is
    # applied - it MUST NOT be sliced here.
    return d
73
+
74
+
75
def _move_delta_lbound(d, bytes):
    """Move the delta's left bound right by the given amount of bytes, reducing
    its size so that its right bound stays static.

    :param d: DeltaChunk-like object with to/so/ts/data attributes
    :param bytes: amount of bytes to move, must be smaller than the delta size
    :return: d"""
    if bytes == 0:
        # fix: previously returned None here, violating the documented
        # ':return: d' contract on the no-op path
        return d

    d.to += bytes
    d.so += bytes
    d.ts -= bytes
    if d.data is not None:
        # drop the consumed prefix so the remaining data lines up with the
        # new left bound
        d.data = d.data[bytes:]
    # END handle data

    return d
91
+
92
+
93
def delta_duplicate(src):
    """Return a copy of *src* as a new DeltaChunk with identical target offset,
    target size, source offset and data."""
    clone = DeltaChunk(src.to, src.ts, src.so, src.data)
    return clone
95
+
96
+
97
def delta_chunk_apply(dc, bbuf, write):
    """Apply a single delta chunk to the output.

    :param dc: chunk to apply; a copy-chunk (data is None) copies bytes from
        bbuf, an add-chunk emits its own data
    :param bbuf: buffer providing source bytes for copy operations
    :param write: write method to call with the data to write"""
    if dc.data is None:
        # COPY DATA FROM SOURCE
        write(bbuf[dc.so:dc.so + dc.ts])
        return
    # APPEND DATA, truncated to the chunk's target size if necessary.
    # Data can be larger than 127 bytes, so the extra length check is worth it.
    payload = dc.data
    if dc.ts < len(payload):
        payload = payload[:dc.ts]
    write(payload)
114
+
115
+
116
class DeltaChunk:

    """One piece of a delta: either an add-chunk carrying new bytes, or a
    copy-chunk referencing a range of an existing source buffer."""
    __slots__ = (
        'to',    # start offset in the target buffer in bytes
        'ts',    # size of this chunk in the target buffer in bytes
        'so',    # start offset in the source buffer in bytes or None
        'data',  # chunk of bytes to be added to the target buffer,
                 # DeltaChunkList to use as base, or None
    )

    def __init__(self, to, ts, so, data):
        self.to = to
        self.ts = ts
        self.so = so
        self.data = data

    def __repr__(self):
        return "DeltaChunk(%i, %i, %s, %s)" % (self.to, self.ts, self.so, self.data or "")

    #{ Interface

    def rbound(self):
        """:return: one-past-the-end offset of this chunk in the target buffer"""
        return self.to + self.ts

    def has_data(self):
        """:return: True if the instance has data to add to the target stream"""
        return self.data is not None

    #} END interface
147
+
148
+
149
def _closest_index(dcl, absofs):
    """Binary-search *dcl* for the chunk covering the given absolute offset.

    :return: index of the DeltaChunk whose target-buffer span contains absofs
        or whose start equals it; falls back to the last index if nothing
        matches
    :param dcl: list of DeltaChunk instances sorted by their 'to' offset
    **Note:** global method for performance only, it belongs to DeltaChunkList"""
    lo = 0
    hi = len(dcl)
    while lo < hi:
        # fix: use floor division - '(lo + hi) / 2' yields a float on
        # Python 3, which raises a TypeError when used as a list index
        mid = (lo + hi) // 2
        dc = dcl[mid]
        if dc.to > absofs:
            hi = mid
        elif dc.rbound() > absofs or dc.to == absofs:
            return mid
        else:
            lo = mid + 1
        # END handle bound
    # END for each delta absofs
    return len(dcl) - 1
168
+
169
+
170
def delta_list_apply(dcl, bbuf, write):
    """Apply every chunk of the chain in order, writing the final result
    through the passed write function.

    :param bbuf: base buffer containing the base of all deltas contained in
        this list; it will only be used by chunks that do not have a base chain
    :param write: function taking a string of bytes to write to the output"""
    for chunk in dcl:
        delta_chunk_apply(chunk, bbuf, write)
180
+
181
+
182
def delta_list_slice(dcl, absofs, size, ndcl):
    """Append to *ndcl* the sub-section of *dcl* starting at absolute target
    offset *absofs* and spanning *size* bytes, splitting boundary chunks as
    required.
    :param dcl: source chunk list, sorted by target offset
    :param absofs: absolute offset in the target buffer at which the slice begins
    :param size: length of the slice in bytes
    :param ndcl: output list receiving copies of the (possibly truncated) chunks
    :return: None"""
    cdi = _closest_index(dcl, absofs)  # delta start index
    cd = dcl[cdi]
    slen = len(dcl)
    lappend = ndcl.append

    # the first chunk may begin before absofs: copy it and move its left
    # bound right so it starts exactly at the requested offset
    if cd.to != absofs:
        tcd = DeltaChunk(cd.to, cd.ts, cd.so, cd.data)
        _move_delta_lbound(tcd, absofs - cd.to)
        tcd.ts = min(tcd.ts, size)
        lappend(tcd)
        size -= tcd.ts
        cdi += 1
    # END lbound overlap handling

    while cdi < slen and size:
        # are we larger than the current block
        cd = dcl[cdi]
        if cd.ts <= size:
            # chunk fits entirely - copy it as-is
            lappend(DeltaChunk(cd.to, cd.ts, cd.so, cd.data))
            size -= cd.ts
        else:
            # chunk overshoots the slice - copy and truncate its right bound
            tcd = DeltaChunk(cd.to, cd.ts, cd.so, cd.data)
            tcd.ts = size
            lappend(tcd)
            size -= tcd.ts
            break
        # END handle size
        cdi += 1
    # END for each chunk
215
+
216
+
217
class DeltaChunkList(list):

    """List with special functionality to deal with DeltaChunks.
    There are two types of lists we represent. The one was created bottom-up, working
    towards the latest delta, the other kind was created top-down, working from the
    latest delta down to the earliest ancestor. This attribute is queryable
    after all processing with is_reversed."""

    __slots__ = tuple()

    def rbound(self):
        """:return: rightmost extend in bytes, absolute"""
        if len(self) == 0:
            return 0
        return self[-1].rbound()

    def lbound(self):
        """:return: leftmost byte at which this chunklist starts"""
        if len(self) == 0:
            return 0
        return self[0].to

    def size(self):
        """:return: size of bytes as measured by our delta chunks"""
        return self.rbound() - self.lbound()

    def apply(self, bbuf, write):
        """Apply all chunks against the base buffer *bbuf*, writing the result
        through *write*. Only used by public clients, internally we only use
        the global routines for performance"""
        return delta_list_apply(self, bbuf, write)

    def compress(self):
        """Alter the list to reduce the amount of nodes. Currently we concatenate
        runs of adjacent add-chunks into a single chunk.
        :return: self"""
        # local import: only needed on this path, not among the module imports
        from io import BytesIO
        slen = len(self)
        if slen < 2:
            return self
        i = 0

        first_data_index = None
        while i < slen:
            dc = self[i]
            i += 1
            if dc.data is None:
                # a copy-chunk terminates a run of add-chunks; merge the run
                # if it is long enough to be worth it
                if first_data_index is not None and i - 2 - first_data_index > 1:
                    # fix: use BytesIO instead of StringIO - the chunk data is
                    # bytes, which StringIO refuses to accept on Python 3
                    nd = BytesIO()  # new data
                    so = self[first_data_index].to  # start offset in target buffer
                    for x in range(first_data_index, i - 1):
                        xdc = self[x]
                        nd.write(xdc.data[:xdc.ts])
                    # END collect data

                    del(self[first_data_index:i - 1])
                    buf = nd.getvalue()
                    self.insert(first_data_index, DeltaChunk(so, len(buf), 0, buf))

                    slen = len(self)
                    i = first_data_index + 1

                # END concatenate data
                first_data_index = None
                continue
            # END skip non-data chunks

            if first_data_index is None:
                first_data_index = i - 1
        # END iterate list

        return self

    def check_integrity(self, target_size=-1):
        """Verify the list has non-overlapping chunks only, and the total size matches
        target_size
        :param target_size: if not -1, the total size of the chain must be target_size
        :raise AssertionError: if the size doesn't match"""
        if target_size > -1:
            assert self[-1].rbound() == target_size
            assert reduce(lambda x, y: x + y, (d.ts for d in self), 0) == target_size
        # END target size verification

        if len(self) < 2:
            return

        # check data
        for dc in self:
            assert dc.ts > 0
            if dc.has_data():
                assert len(dc.data) >= dc.ts
        # END for each dc

        left = islice(self, 0, len(self) - 1)
        right = iter(self)
        # fix: iterators have no .next() method on Python 3 - advance past
        # the first element with the builtin next()
        next(right)
        # this is very pythonic - we might have just use index based access here,
        # but this could actually be faster
        for lft, rgt in zip(left, right):
            assert lft.rbound() == rgt.to
            assert lft.to + lft.ts == rgt.to
        # END for each pair
320
+
321
+
322
class TopdownDeltaChunkList(DeltaChunkList):

    """Represents a list which is generated by feeding its ancestor streams one by
    one"""
    __slots__ = tuple()

    def connect_with_next_base(self, bdcl):
        """Connect this chain with the next level of our base delta chunklist.
        The goal in this game is to mark as many of our chunks rigid, hence they
        cannot be changed by any of the upcoming bases anymore. Once all our
        chunks are marked like that, we can stop all processing
        :param bdcl: data chunk list being one of our bases. They must be fed in
            consecutively and in order, towards the earliest ancestor delta
        :return: True if processing was done. Use it to abort processing of
            remaining streams if False is returned"""
        nfc = 0  # number of frozen chunks
        dci = 0  # delta chunk index
        slen = len(self)  # len of self
        ccl = list()  # temporary list
        while dci < slen:
            dc = self[dci]
            dci += 1

            # all add-chunks which are already topmost don't need additional processing
            if dc.data is not None:
                nfc += 1
                continue
            # END skip add chunks

            # copy chunks
            # integrate the portion of the base list into ourselves. Lists
            # dont support efficient insertion ( just one at a time ), but for now
            # we live with it. Internally, its all just a 32/64bit pointer, and
            # the portions of moved memory should be smallish. Maybe we just rebuild
            # ourselves in order to reduce the amount of insertions ...
            del(ccl[:])
            delta_list_slice(bdcl, dc.so, dc.ts, ccl)

            # move the target bounds into place to match with our chunk
            ofs = dc.to - dc.so
            for cdc in ccl:
                cdc.to += ofs
            # END update target bounds

            if len(ccl) == 1:
                # exactly one base chunk covers us - replace in place
                self[dci - 1] = ccl[0]
            else:
                # maybe try to compute the expenses here, and pick the right algorithm
                # It would normally be faster than copying everything physically though
                # TODO: Use a deque here, and decide by the index whether to extend
                # or extend left !
                post_dci = self[dci:]
                del(self[dci - 1:])  # include deletion of dc
                self.extend(ccl)
                self.extend(post_dci)

                slen = len(self)
                dci += len(ccl) - 1  # deleted dc, added rest

            # END handle chunk replacement
        # END for each chunk

        # everything frozen already - tell the caller to stop feeding bases
        if nfc == slen:
            return False
        # END handle completeness
        return True
388
+
389
+
390
+ #} END structures
391
+
392
+ #{ Routines
393
+
394
def is_loose_object(m):
    """
    :return: True if the file contained in memory map m appears to be a loose object.
        Only the first two bytes are needed
    :param m: bytes-like data, at least two bytes long"""
    # fix: iterating a bytes-like object yields ints on Python 3, so the
    # former map(ord, m[:2]) raised a TypeError. bytearray() yields the two
    # leading byte values for any bytes-like input.
    b0, b1 = bytearray(m[:2])
    # loose objects start with a zlib header: 0x78 (deflate, 32K window)
    # followed by a flags byte making the 16-bit word divisible by 31
    word = (b0 << 8) + b1
    return b0 == 0x78 and (word % 31) == 0
401
+
402
+
403
def loose_object_header_info(m):
    """
    :return: tuple(type_string, uncompressed_size_in_bytes) - the object's type
        name and its uncompressed size in bytes
    :param m: memory map from which to read the compressed object data"""
    # decompress just enough to surface the header; 8192 is used in cgit as well
    header = decompressobj().decompress(m, 8192)
    type_name, size = header[:header.find(NULL_BYTE)].split(BYTE_SPACE)
    return type_name, int(size)
413
+
414
+
415
def pack_object_header_info(data):
    """Parse the variable-length pack entry header at the start of *data*.

    :return: tuple(type_id, uncompressed_size_in_bytes, byte_offset)
        The type_id should be interpreted according to the ``type_id_to_type_map`` map
        The byte-offset specifies the start of the actual zlib compressed datastream
    :param data: random-access memory, like a string or memory map"""
    c = byte_ord(data[0])               # first byte
    i = 1                               # next char to read
    type_id = (c >> 4) & 7              # numeric type
    size = c & 15                       # starting size
    s = 4                               # starting bit-shift size
    # each continuation byte (MSB set) contributes 7 more size bits,
    # least-significant group first
    while c & 0x80:
        c = byte_ord(data[i])
        i += 1
        size += (c & 0x7f) << s
        s += 7
    # END character loop
    # end performance at expense of maintenance ...
    return (type_id, size, i)
434
+
435
+
436
def create_pack_object_header(obj_type, obj_size):
    """
    :return: bytearray defining the pack header, comprised of the object type
        and its uncompressed size in bytes

    :param obj_type: pack type_id of the object
    :param obj_size: uncompressed size in bytes of the following object stream"""
    header = bytearray()
    # first byte: type in bits 4-6, low 4 bits of the size
    current = (obj_type << 4) | (obj_size & 0x0f)
    remaining = obj_size >> 4
    # emit continuation bytes (MSB set) of 7 size bits each until consumed
    while remaining:
        header.append(current | 0x80)
        current = remaining & 0x7f
        remaining >>= 7
    # END until size is consumed
    header.append(current)
    return header
456
+
457
+
458
def msb_size(data, offset=0):
    """
    :return: tuple(read_bytes, size) - the MSB-encoded size read from the
        random-access *data*, starting at the given byte offset"""
    total = 0
    consumed = 0
    limit = len(data)
    terminated = False
    while consumed < limit:
        byte = data[consumed + offset]
        # low 7 bits carry the payload, least-significant group first
        total |= (byte & 0x7f) << (consumed * 7)
        consumed += 1
        # a clear MSB marks the final byte of the encoding
        if not byte & 0x80:
            terminated = True
            break
    # END while in range
    if not terminated:
        raise AssertionError("Could not find terminating MSB byte in data stream")
    return consumed + offset, total
479
+
480
+
481
def loose_object_header(type, size):
    """
    :return: bytes representing the loose object header; the content stream of
        *size* bytes follows immediately after it"""
    header = '%s %i\0' % (force_text(type), size)
    return header.encode('ascii')
486
+
487
+
488
def write_object(type, size, read, write, chunk_size=chunk_size):
    """
    Serialize one loose object: the header first, then *size* bytes of content
    copied from the source to the target stream.

    :param type: type string of the object
    :param size: amount of bytes to write from source_stream
    :param read: read method of a stream providing the content data
    :param write: write method of the output stream
    :return: the actual amount of bytes written to the stream, including the
        header"""
    # WRITE HEADER: type SP size NULL
    written = write(loose_object_header(type, size))
    written += stream_copy(read, write, size, chunk_size)
    return written
507
+
508
+
509
def stream_copy(read, write, size, chunk_size):
    """
    Copy up to *size* bytes from *read* to *write*, in pieces of at most
    *chunk_size* bytes.

    **Note:** much like the stream_copy utility, but operates just using methods
    :return: number of data bytes copied, which may be less than *size* if the
        source ran dry"""
    copied = 0  # num data bytes written
    while True:
        want = min(chunk_size, size - copied)
        # NOTE: not every write method returns the amount of written bytes
        # (mmap.write for instance), so we count what we read instead
        data = read(want)
        got = len(data)
        copied += got
        write(data)
        # a short read means the source is exhausted
        if got < want or copied == size:
            break
    # END copy loop
    return copied
534
+
535
+
536
def connect_deltas(dstreams):
    """
    Read the condensed delta chunk information from dstream and merge its information
    into a list of existing delta chunks

    **NOTE(review):** this pure-Python fallback indexes the delta buffer with
    ord(db[i]), which presumes str-like indexing; it is normally shadowed by
    the C implementation imported at the bottom of this module - confirm
    before relying on it with Python 3 bytes buffers.

    :param dstreams: iterable of delta stream objects, the delta to be applied last
        comes first, then all its ancestors in order
    :return: DeltaChunkList, containing all operations to apply"""
    tdcl = None  # topmost dcl

    dcl = tdcl = TopdownDeltaChunkList()
    for dsi, ds in enumerate(dstreams):
        # print "Stream", dsi
        db = ds.read()
        delta_buf_size = ds.size

        # read header: base size, then target size, both MSB-encoded
        i, base_size = msb_size(db)
        i, target_size = msb_size(db, i)

        # interpret opcodes
        tbw = 0  # amount of target bytes written
        while i < delta_buf_size:
            c = ord(db[i])
            i += 1
            if c & 0x80:
                # copy operation: each flag bit pulls one more little-endian
                # byte of the copy offset / copy size from the stream
                cp_off, cp_size = 0, 0
                if (c & 0x01):
                    cp_off = ord(db[i])
                    i += 1
                if (c & 0x02):
                    cp_off |= (ord(db[i]) << 8)
                    i += 1
                if (c & 0x04):
                    cp_off |= (ord(db[i]) << 16)
                    i += 1
                if (c & 0x08):
                    cp_off |= (ord(db[i]) << 24)
                    i += 1
                if (c & 0x10):
                    cp_size = ord(db[i])
                    i += 1
                if (c & 0x20):
                    cp_size |= (ord(db[i]) << 8)
                    i += 1
                if (c & 0x40):
                    cp_size |= (ord(db[i]) << 16)
                    i += 1

                # a zero size encodes the maximum copy span of 64KiB
                if not cp_size:
                    cp_size = 0x10000

                # guard against overflow and out-of-range source spans
                rbound = cp_off + cp_size
                if (rbound < cp_size or
                        rbound > base_size):
                    break

                dcl.append(DeltaChunk(tbw, cp_size, cp_off, None))
                tbw += cp_size
            elif c:
                # NOTE: in C, the data chunks should probably be concatenated here.
                # In python, we do it as a post-process
                dcl.append(DeltaChunk(tbw, c, 0, db[i:i + c]))
                i += c
                tbw += c
            else:
                raise ValueError("unexpected delta opcode 0")
            # END handle command byte
        # END while processing delta data

        dcl.compress()

        # merge the lists !
        if dsi > 0:
            if not tdcl.connect_with_next_base(dcl):
                break
        # END handle merge

        # prepare next base
        dcl = DeltaChunkList()
    # END for each delta stream

    return tdcl
619
+
620
+
621
def apply_delta_data(src_buf, src_buf_size, delta_buf, delta_buf_size, write):
    """
    Reconstruct a target object by replaying the delta operations in
    *delta_buf* against *src_buf*, emitting the result through *write*.

    :param src_buf: random access data from which the delta was created
    :param src_buf_size: size of the source buffer in bytes
    :param delta_buf: random access delta data
    :param delta_buf_size: size for the delta buffer in bytes
    :param write: write method taking a chunk of bytes

    **Note:** transcribed to python from the similar routine in patch-delta.c"""
    pos = 0
    while pos < delta_buf_size:
        opcode = delta_buf[pos]
        pos += 1
        if opcode & 0x80:
            # copy operation: flag bits select which little-endian bytes of
            # the source offset and copy size are present in the stream
            copy_off = 0
            copy_size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if opcode & bit:
                    copy_off |= delta_buf[pos] << shift
                    pos += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if opcode & bit:
                    copy_size |= delta_buf[pos] << shift
                    pos += 1

            if not copy_size:
                # a zero size encodes the maximum copy span of 64KiB
                copy_size = 0x10000

            end = copy_off + copy_size
            # bail out on overflow or an out-of-range source span
            if end < copy_size or end > src_buf_size:
                break
            write(src_buf[copy_off:end])
        elif opcode:
            # add operation: the opcode itself is the number of bytes to insert
            write(delta_buf[pos:pos + opcode])
            pos += opcode
        else:
            raise ValueError("unexpected delta opcode 0")
    # END while processing delta data

    # yes, lets use the exact same error message that git uses :)
    assert pos == delta_buf_size, "delta replay has gone wild"
679
+
680
+
681
def is_equal_canonical_sha(canonical_length, match, sha1):
    """
    :return: True if the partial binary sha *match* agrees with the leading
        bytes of the full sha *sha1*, taking the canonical (hex) length of the
        match into account
    :param canonical_length: length of the match sha in hex characters
    :param match: less than 20 byte sha
    :param sha1: 20 byte sha"""
    binary_length = canonical_length // 2
    if match[:binary_length] != sha1[:binary_length]:
        return False

    # for an uneven canonical length half a byte remains: xor the trailing
    # bytes and require the high nibble to match. For even lengths the xor is
    # zero after the prefix check above, so the condition has no effect.
    if canonical_length - binary_length and \
            (byte_ord(match[-1]) ^ byte_ord(sha1[len(match) - 1])) & 0xf0:
        return False
    # END handle uneven canonical length
    return True
697
+
698
+ #} END routines
699
+
700
+
701
+ try:
702
+ from gitdb_speedups._perf import connect_deltas
703
+ except ImportError:
704
+ pass
parrot/lib/python3.10/site-packages/gitdb/pack.py ADDED
@@ -0,0 +1,1031 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Contains PackIndexFile and PackFile implementations"""
6
+ import zlib
7
+
8
+ from gitdb.exc import (
9
+ BadObject,
10
+ AmbiguousObjectName,
11
+ UnsupportedOperation,
12
+ ParseError
13
+ )
14
+
15
+ from gitdb.util import (
16
+ mman,
17
+ LazyMixin,
18
+ unpack_from,
19
+ bin_to_hex,
20
+ byte_ord,
21
+ )
22
+
23
+ from gitdb.fun import (
24
+ create_pack_object_header,
25
+ pack_object_header_info,
26
+ is_equal_canonical_sha,
27
+ type_id_to_type_map,
28
+ write_object,
29
+ stream_copy,
30
+ chunk_size,
31
+ delta_types,
32
+ OFS_DELTA,
33
+ REF_DELTA,
34
+ msb_size
35
+ )
36
+
37
+ try:
38
+ from gitdb_speedups._perf import PackIndexFile_sha_to_index
39
+ except ImportError:
40
+ pass
41
+ # END try c module
42
+
43
+ from gitdb.base import ( # Amazing !
44
+ OInfo,
45
+ OStream,
46
+ OPackInfo,
47
+ OPackStream,
48
+ ODeltaStream,
49
+ ODeltaPackInfo,
50
+ ODeltaPackStream,
51
+ )
52
+
53
+ from gitdb.stream import (
54
+ DecompressMemMapReader,
55
+ DeltaApplyReader,
56
+ Sha1Writer,
57
+ NullStream,
58
+ FlexibleSha1Writer
59
+ )
60
+
61
+ from struct import pack
62
+ from binascii import crc32
63
+
64
+ from gitdb.const import NULL_BYTE
65
+
66
+ import tempfile
67
+ import array
68
+ import os
69
+ import sys
70
+
71
+ __all__ = ('PackIndexFile', 'PackFile', 'PackEntity')
72
+
73
+
74
+ #{ Utilities
75
+
76
def pack_object_at(cursor, offset, as_stream):
    """
    :return: Tuple(abs_data_offset, PackInfo|PackStream)
        an object of the correct type according to the type_id of the object.
        If as_stream is True, the object will contain a stream, allowing the
        data to be read decompressed.
    :param cursor: memory cursor providing random access to the pack data
    :param offset: offset into the data at which the object information is located
    :param as_stream: if True, a stream object will be returned that can read
        the data, otherwise you receive an info object only"""
    data = cursor.use_region(offset).buffer()
    type_id, uncomp_size, data_rela_offset = pack_object_header_info(data)
    total_rela_offset = None  # set later, actual offset until data stream begins
    delta_info = None

    # OFFSET DELTA
    if type_id == OFS_DELTA:
        # decode the variable-length big-endian base-offset encoding; each
        # continuation step adds an implicit +1 before shifting
        i = data_rela_offset
        c = byte_ord(data[i])
        i += 1
        delta_offset = c & 0x7f
        while c & 0x80:
            c = byte_ord(data[i])
            i += 1
            delta_offset += 1
            delta_offset = (delta_offset << 7) + (c & 0x7f)
        # END character loop
        delta_info = delta_offset
        total_rela_offset = i
    # REF DELTA
    elif type_id == REF_DELTA:
        # the base object is addressed by the 20-byte binary sha that
        # directly follows the header
        total_rela_offset = data_rela_offset + 20
        delta_info = data[data_rela_offset:total_rela_offset]
    # BASE OBJECT
    else:
        # assume its a base object
        total_rela_offset = data_rela_offset
    # END handle type id
    abs_data_offset = offset + total_rela_offset
    if as_stream:
        stream = DecompressMemMapReader(data[total_rela_offset:], False, uncomp_size)
        if delta_info is None:
            return abs_data_offset, OPackStream(offset, type_id, uncomp_size, stream)
        else:
            return abs_data_offset, ODeltaPackStream(offset, type_id, uncomp_size, delta_info, stream)
    else:
        if delta_info is None:
            return abs_data_offset, OPackInfo(offset, type_id, uncomp_size)
        else:
            return abs_data_offset, ODeltaPackInfo(offset, type_id, uncomp_size, delta_info)
    # END handle info
    # END handle stream
128
+
129
+
130
def write_stream_to_pack(read, write, zstream, base_crc=None):
    """Copy a stream as read from the read function, zlib-compress it using
    *zstream*, and write the result, counting bytes on both sides.
    :param read: read method yielding raw chunks
    :param write: write method receiving compressed chunks
    :param zstream: zlib compress object carrying the compression state
    :param base_crc: if not None, the crc will be the base for all compressed data
        we consecutively write and generate a crc32 from. If None, no crc will be generated
    :return: tuple(no bytes read, no bytes written, crc32) crc might be 0 if base_crc
        was None"""
    br = 0  # bytes read
    bw = 0  # bytes written
    want_crc = base_crc is not None
    crc = 0
    if want_crc:
        crc = base_crc
    # END initialize crc

    while True:
        chunk = read(chunk_size)
        br += len(chunk)
        compressed = zstream.compress(chunk)
        bw += len(compressed)
        write(compressed)  # cannot assume return value

        if want_crc:
            crc = crc32(compressed, crc)
        # END handle crc

        # a short read marks the end of the source stream
        if len(chunk) != chunk_size:
            break
    # END copy loop

    # flush whatever remains in the compressor's internal buffer
    compressed = zstream.flush()
    bw += len(compressed)
    write(compressed)
    if want_crc:
        crc = crc32(compressed, crc)
    # END handle crc

    return (br, bw, crc)
168
+
169
+
170
+ #} END utilities
171
+
172
+
173
class IndexWriter:

    """Utility to cache index information, allowing to write all information later
    in one go to the given stream
    **Note:** currently only writes v2 indices"""
    __slots__ = '_objs'

    def __init__(self):
        # list of (binsha, crc, offset) triples; sorted lazily in write()
        self._objs = list()

    def append(self, binsha, crc, offset):
        """Append one piece of object information
        :param binsha: binary sha of the object
        :param crc: crc over the object's pack data
        :param offset: offset of the object within the pack"""
        self._objs.append((binsha, crc, offset))

    def write(self, pack_sha, write):
        """Write the index file using the given write method
        :param pack_sha: binary sha over the whole pack that we index
        :return: sha1 binary sha over all index file contents"""
        # sort for sha1 hash
        self._objs.sort(key=lambda o: o[0])

        sha_writer = FlexibleSha1Writer(write)
        sha_write = sha_writer.write
        sha_write(PackIndexFile.index_v2_signature)
        sha_write(pack(">L", PackIndexFile.index_version_default))

        # fanout table: entry N holds the cumulative count of objects whose
        # first sha byte is <= N
        tmplist = list((0,) * 256)  # fanout or list with 64 bit offsets
        for t in self._objs:
            tmplist[byte_ord(t[0][0])] += 1
        # END prepare fanout
        for i in range(255):
            v = tmplist[i]
            sha_write(pack('>L', v))
            tmplist[i + 1] += v
        # END write each fanout entry
        sha_write(pack('>L', tmplist[255]))

        # sha1 ordered
        # save calls, that is push them into c
        sha_write(b''.join(t[0] for t in self._objs))

        # crc32
        for t in self._objs:
            sha_write(pack('>L', t[1] & 0xffffffff))
        # END for each crc

        tmplist = list()
        # offset 32
        # offsets that do not fit 31 bits are spilled into a trailing 64-bit
        # table; the 32-bit slot then stores the spill index with the MSB set
        for t in self._objs:
            ofs = t[2]
            if ofs > 0x7fffffff:
                tmplist.append(ofs)
                ofs = 0x80000000 + len(tmplist) - 1
            # END handle 64 bit offsets
            sha_write(pack('>L', ofs & 0xffffffff))
        # END for each offset

        # offset 64
        for ofs in tmplist:
            sha_write(pack(">Q", ofs))
        # END for each offset

        # trailer: pack checksum, then the index's own sha over all content
        assert(len(pack_sha) == 20)
        sha_write(pack_sha)
        sha = sha_writer.sha(as_hex=False)
        write(sha)
        return sha
242
+
243
+
244
class PackIndexFile(LazyMixin):

    """A pack index provides offsets into the corresponding pack, allowing to find
    locations for offsets faster."""

    # Dont use slots as we dynamically bind functions for each version, need a dict for this
    # The slots you see here are just to keep track of our instance variables
    # __slots__ = ('_indexpath', '_fanout_table', '_cursor', '_version',
    #               '_sha_list_offset', '_crc_list_offset', '_pack_offset', '_pack_64_offset')

    # used in v2 indices
    _sha_list_offset = 8 + 1024
    index_v2_signature = b'\xfftOc'
    index_version_default = 2

    def __init__(self, indexpath):
        super().__init__()
        self._indexpath = indexpath

    def close(self):
        # drop mapped handles so the file can be removed (required on Windows)
        mman.force_map_handle_removal_win(self._indexpath)
        self._cursor = None

    def _set_cache_(self, attr):
        """Lazily initialize the attribute named by ``attr``."""
        if attr == "_packfile_checksum":
            # second-to-last 20 bytes: sha of the corresponding pack file
            self._packfile_checksum = self._cursor.map()[-40:-20]
        elif attr == "_indexfile_checksum":
            # BUGFIX: this branch previously tested "_packfile_checksum" a second
            # time, making it unreachable and leaving _indexfile_checksum uncached.
            # last 20 bytes: sha over the index file's own contents
            self._indexfile_checksum = self._cursor.map()[-20:]
        elif attr == "_cursor":
            # Note: We don't lock the file when reading as we cannot be sure
            # that we can actually write to the location - it could be a read-only
            # alternate for instance
            self._cursor = mman.make_cursor(self._indexpath).use_region()
            # We will assume that the index will always fully fit into memory !
            if mman.window_size() > 0 and self._cursor.file_size() > mman.window_size():
                raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). This is a limitation of the implementation" % (
                    self._indexpath, self._cursor.file_size(), mman.window_size()))
            # END assert window size
        else:
            # now its time to initialize everything - if we are here, someone wants
            # to access the fanout table or related properties

            # CHECK VERSION
            mmap = self._cursor.map()
            self._version = (mmap[:4] == self.index_v2_signature and 2) or 1
            if self._version == 2:
                version_id = unpack_from(">L", mmap, 4)[0]
                assert version_id == self._version, "Unsupported index version: %i" % version_id
            # END assert version

            # SETUP FUNCTIONS
            # setup our functions according to the actual version
            for fname in ('entry', 'offset', 'sha', 'crc'):
                setattr(self, fname, getattr(self, "_%s_v%i" % (fname, self._version)))
            # END for each function to initialize

            # INITIALIZE DATA
            # byte offset is 8 if version is 2, 0 otherwise
            self._initialize()
        # END handle attributes

    #{ Access V1

    def _entry_v1(self, i):
        """:return: tuple(offset, binsha, 0)"""
        return unpack_from(">L20s", self._cursor.map(), 1024 + i * 24) + (0, )

    def _offset_v1(self, i):
        """see ``_offset_v2``"""
        return unpack_from(">L", self._cursor.map(), 1024 + i * 24)[0]

    def _sha_v1(self, i):
        """see ``_sha_v2``"""
        base = 1024 + (i * 24) + 4
        return self._cursor.map()[base:base + 20]

    def _crc_v1(self, i):
        """unsupported"""
        return 0

    #} END access V1

    #{ Access V2
    def _entry_v2(self, i):
        """:return: tuple(offset, binsha, crc)"""
        return (self._offset_v2(i), self._sha_v2(i), self._crc_v2(i))

    def _offset_v2(self, i):
        """:return: 32 or 64 byte offset into pack files. 64 byte offsets will only
        be returned if the pack is larger than 4 GiB, or 2^32"""
        offset = unpack_from(">L", self._cursor.map(), self._pack_offset + i * 4)[0]

        # if the high-bit is set, this indicates that we have to lookup the offset
        # in the 64 bit region of the file. The current offset ( lower 31 bits )
        # are the index into it
        if offset & 0x80000000:
            offset = unpack_from(">Q", self._cursor.map(), self._pack_64_offset + (offset & ~0x80000000) * 8)[0]
        # END handle 64 bit offset

        return offset

    def _sha_v2(self, i):
        """:return: sha at the given index of this file index instance"""
        base = self._sha_list_offset + i * 20
        return self._cursor.map()[base:base + 20]

    def _crc_v2(self, i):
        """:return: 4 bytes crc for the object at index i"""
        return unpack_from(">L", self._cursor.map(), self._crc_list_offset + i * 4)[0]

    #} END access V2

    #{ Initialization

    def _initialize(self):
        """initialize base data"""
        self._fanout_table = self._read_fanout((self._version == 2) * 8)

        if self._version == 2:
            # derive section offsets from the object count: shas, then crcs,
            # then 32 bit offsets, then the 64 bit offset table
            self._crc_list_offset = self._sha_list_offset + self.size() * 20
            self._pack_offset = self._crc_list_offset + self.size() * 4
            self._pack_64_offset = self._pack_offset + self.size() * 4
        # END setup base

    def _read_fanout(self, byte_offset):
        """Generate a fanout table from our data"""
        d = self._cursor.map()
        out = list()
        append = out.append
        for i in range(256):
            append(unpack_from('>L', d, byte_offset + i * 4)[0])
        # END for each entry
        return out

    #} END initialization

    #{ Properties
    def version(self):
        """:return: version of this index file (1 or 2)"""
        return self._version

    def size(self):
        """:return: amount of objects referred to by this index"""
        return self._fanout_table[255]

    def path(self):
        """:return: path to the packindexfile"""
        return self._indexpath

    def packfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of the pack file"""
        return self._cursor.map()[-40:-20]

    def indexfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of this index file"""
        return self._cursor.map()[-20:]

    def offsets(self):
        """:return: sequence of all offsets in the order in which they were written

        **Note:** return value can be random accessed, but may be immmutable"""
        if self._version == 2:
            # read stream to array, convert to tuple
            a = array.array('I')    # 4 byte unsigned int, long are 8 byte on 64 bit it appears
            a.frombytes(self._cursor.map()[self._pack_offset:self._pack_64_offset])

            # networkbyteorder to something array likes more
            if sys.byteorder == 'little':
                a.byteswap()
            return a
        else:
            return tuple(self.offset(index) for index in range(self.size()))
        # END handle version

    def sha_to_index(self, sha):
        """
        :return: index usable with the ``offset`` or ``entry`` method, or None
            if the sha was not found in this pack index
        :param sha: 20 byte sha to lookup"""
        first_byte = byte_ord(sha[0])
        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # bisect until we have the sha
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if sha < mid_sha:
                hi = mid
            elif sha == mid_sha:
                return mid
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect
        return None

    def partial_sha_to_index(self, partial_bin_sha, canonical_length):
        """
        :return: index as in `sha_to_index` or None if the sha was not found in this
            index file
        :param partial_bin_sha: an at least two bytes of a partial binary sha as bytes
        :param canonical_length: length of the original hexadecimal representation of the
            given partial binary sha
        :raise AmbiguousObjectName:"""
        if len(partial_bin_sha) < 2:
            raise ValueError("Require at least 2 bytes of partial sha")

        assert isinstance(partial_bin_sha, bytes), "partial_bin_sha must be bytes"
        first_byte = byte_ord(partial_bin_sha[0])

        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # fill the partial to full 20 bytes
        filled_sha = partial_bin_sha + NULL_BYTE * (20 - len(partial_bin_sha))

        # find lowest
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if filled_sha < mid_sha:
                hi = mid
            elif filled_sha == mid_sha:
                # perfect match
                lo = mid
                break
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect

        if lo < self.size():
            cur_sha = get_sha(lo)
            if is_equal_canonical_sha(canonical_length, partial_bin_sha, cur_sha):
                next_sha = None
                if lo + 1 < self.size():
                    next_sha = get_sha(lo + 1)
                if next_sha and next_sha == cur_sha:
                    raise AmbiguousObjectName(partial_bin_sha)
                return lo
            # END if we have a match
        # END if we found something
        return None

    if 'PackIndexFile_sha_to_index' in globals():
        # NOTE: Its just about 25% faster, the major bottleneck might be the attr
        # accesses
        def sha_to_index(self, sha):
            return PackIndexFile_sha_to_index(self, sha)
    # END redefine heavy-hitter with c version

    #} END properties
502
+
503
+
504
class PackFile(LazyMixin):

    """A pack is a file written according to the Version 2 for git packs

    As we currently use memory maps, it could be assumed that the maximum size of
    packs therefore is 32 bit on 32 bit systems. On 64 bit systems, this should be
    fine though.

    **Note:** at some point, this might be implemented using streams as well, or
    streams are an alternate path in the case memory maps cannot be created
    for some reason - one clearly doesn't want to read 10GB at once in that
    case"""

    __slots__ = ('_packpath', '_cursor', '_size', '_version')
    pack_signature = 0x5041434b  # 'PACK'
    pack_version_default = 2

    # offset into our data at which the first object starts
    first_object_offset = 3 * 4  # header bytes
    footer_size = 20  # final sha

    def __init__(self, packpath):
        self._packpath = packpath

    def close(self):
        # release mapped handles so the file may be deleted (Windows requirement)
        mman.force_map_handle_removal_win(self._packpath)
        self._cursor = None

    def _set_cache_(self, attr):
        # a single cursor serves every lazy attribute, hence we fill the whole
        # cache no matter which attribute was queried first
        self._cursor = mman.make_cursor(self._packpath).use_region()

        # parse the 12 byte header: signature, version, object count
        type_id, self._version, self._size = unpack_from(">LLL", self._cursor.map(), 0)

        # TODO: figure out whether we should better keep the lock, or maybe
        # add a .keep file instead ?
        if type_id != self.pack_signature:
            raise ParseError("Invalid pack signature: %i" % type_id)

    def _iter_objects(self, start_offset, as_stream=True):
        """Handle the actual iteration of objects within this pack"""
        cursor = self._cursor
        content_size = cursor.file_size() - self.footer_size
        offset = start_offset or self.first_object_offset

        sink = NullStream()
        while offset < content_size:
            data_offset, ostream = pack_object_at(cursor, offset, True)

            # scrub the stream to the end - this decompresses the object, but yields
            # the amount of compressed bytes we need to get to the next offset
            stream_copy(ostream.read, sink.write, ostream.size, chunk_size)
            assert ostream.stream._br == ostream.size
            offset += (data_offset - ostream.pack_offset) + ostream.stream.compressed_bytes_read()

            # if a stream is requested, reset it beforehand
            # Otherwise return the Stream object directly, its derived from the
            # info object
            if as_stream:
                ostream.stream.seek(0)
            yield ostream
        # END until we have read everything

    #{ Pack Information

    def size(self):
        """:return: The amount of objects stored in this pack"""
        return self._size

    def version(self):
        """:return: the version of this pack"""
        return self._version

    def data(self):
        """
        :return: read-only data of this pack. It provides random access and usually
            is a memory map.
        :note: This method is unsafe as it returns a window into a file which might be larger than than the actual window size"""
        # can use map as we are starting at offset 0. Otherwise we would have to use buffer()
        return self._cursor.use_region().map()

    def checksum(self):
        """:return: 20 byte sha1 hash on all object sha's contained in this file"""
        return self._cursor.use_region(self._cursor.file_size() - 20).buffer()[:]

    def path(self):
        """:return: path to the packfile"""
        return self._packpath
    #} END pack information

    #{ Pack Specific

    def collect_streams(self, offset):
        """
        :return: list of pack streams which are required to build the object
            at the given offset. The first entry of the list is the object at offset,
            the last one is either a full object, or a REF_Delta stream. The latter
            type needs its reference object to be locked up in an ODB to form a valid
            delta chain.
            If the object at offset is no delta, the size of the list is 1.
        :param offset: specifies the first byte of the object within this pack"""
        streams = []
        cursor = self._cursor
        while True:
            ostream = pack_object_at(cursor, offset, True)[1]
            streams.append(ostream)
            if ostream.type_id != OFS_DELTA:
                # the only thing we can lookup are OFFSET deltas. Everything
                # else is either an object, or a ref delta, in the latter
                # case someone else has to find it
                break
            # follow the chain backwards: delta_info holds the negative distance
            offset = ostream.pack_offset - ostream.delta_info
        # END while chaining streams
        return streams

    #} END pack specific

    #{ Read-Database like Interface

    def info(self, offset):
        """Retrieve information about the object at the given file-absolute offset

        :param offset: byte offset
        :return: OPackInfo instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, False)[1]

    def stream(self, offset):
        """Retrieve an object at the given file-relative offset as stream along with its information

        :param offset: byte offset
        :return: OPackStream instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, True)[1]

    def stream_iter(self, start_offset=0):
        """
        :return: iterator yielding OPackStream compatible instances, allowing
            to access the data in the pack directly.
        :param start_offset: offset to the first object to iterate. If 0, iteration
            starts at the very first object in the pack.

        **Note:** Iterating a pack directly is costly as the datastream has to be decompressed
        to determine the bounds between the objects"""
        return self._iter_objects(start_offset, as_stream=True)

    #} END Read-Database like Interface
652
+
653
+
654
class PackEntity(LazyMixin):

    """Combines the PackIndexFile and the PackFile into one, allowing the
    actual objects to be resolved and iterated"""

    __slots__ = ('_index',       # our index file
                 '_pack',        # our pack file
                 '_offset_map'   # on demand dict mapping one offset to the next consecutive one
                 )

    IndexFileCls = PackIndexFile
    PackFileCls = PackFile

    def __init__(self, pack_or_index_path):
        """Initialize ourselves with the path to the respective pack or index file"""
        basename = os.path.splitext(pack_or_index_path)[0]
        self._index = self.IndexFileCls("%s.idx" % basename)        # PackIndexFile instance
        self._pack = self.PackFileCls("%s.pack" % basename)         # corresponding PackFile instance

    def close(self):
        """Close both the index and the pack file, releasing their resources"""
        self._index.close()
        self._pack.close()

    def _set_cache_(self, attr):
        # currently this can only be _offset_map
        # TODO: make this a simple sorted offset array which can be bisected
        # to find the respective entry, from which we can take a +1 easily
        # This might be slower, but should also be much lighter in memory !
        offsets_sorted = sorted(self._index.offsets())
        last_offset = len(self._pack.data()) - self._pack.footer_size
        assert offsets_sorted, "Cannot handle empty indices"

        offset_map = None
        if len(offsets_sorted) == 1:
            offset_map = {offsets_sorted[0]: last_offset}
        else:
            # pair each offset with its successor
            iter_offsets = iter(offsets_sorted)
            iter_offsets_plus_one = iter(offsets_sorted)
            next(iter_offsets_plus_one)
            consecutive = zip(iter_offsets, iter_offsets_plus_one)

            offset_map = dict(consecutive)

            # the last offset is not yet set
            offset_map[offsets_sorted[-1]] = last_offset
        # END handle offset amount
        self._offset_map = offset_map

    def _sha_to_index(self, sha):
        """:return: index for the given sha, or raise
        :raise BadObject: if the sha is not contained in our index"""
        index = self._index.sha_to_index(sha)
        if index is None:
            raise BadObject(sha)
        return index

    def _iter_objects(self, as_stream):
        """Iterate over all objects in our index and yield their OInfo or OStream instances"""
        _sha = self._index.sha
        _object = self._object
        for index in range(self._index.size()):
            yield _object(_sha(index), as_stream, index)
        # END for each index

    def _object(self, sha, as_stream, index=-1):
        """:return: OInfo or OStream object providing information about the given sha
        :param index: if not -1, its assumed to be the sha's index in the IndexFile"""
        # its a little bit redundant here, but it needs to be efficient
        if index < 0:
            index = self._sha_to_index(sha)
        if sha is None:
            sha = self._index.sha(index)
        # END assure sha is present ( in output )
        offset = self._index.offset(index)
        type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer())
        if as_stream:
            if type_id not in delta_types:
                packstream = self._pack.stream(offset)
                return OStream(sha, packstream.type, packstream.size, packstream.stream)
            # END handle non-deltas

            # produce a delta stream containing all info
            # To prevent it from applying the deltas when querying the size,
            # we extract it from the delta stream ourselves
            streams = self.collect_streams_at_offset(offset)
            dstream = DeltaApplyReader.new(streams)

            return ODeltaStream(sha, dstream.type, None, dstream)
        else:
            if type_id not in delta_types:
                return OInfo(sha, type_id_to_type_map[type_id], uncomp_size)
            # END handle non-deltas

            # deltas are a little tougher - unpack the first bytes to obtain
            # the actual target size, as opposed to the size of the delta data
            streams = self.collect_streams_at_offset(offset)
            buf = streams[0].read(512)
            offset, src_size = msb_size(buf)
            offset, target_size = msb_size(buf, offset)

            # collect the streams to obtain the actual object type
            if streams[-1].type_id in delta_types:
                raise BadObject(sha, "Could not resolve delta object")
            return OInfo(sha, streams[-1].type, target_size)
        # END handle stream

    #{ Read-Database like Interface

    def info(self, sha):
        """Retrieve information about the object identified by the given sha

        :param sha: 20 byte sha1
        :raise BadObject:
        :return: OInfo instance, with 20 byte sha"""
        return self._object(sha, False)

    def stream(self, sha):
        """Retrieve an object stream along with its information as identified by the given sha

        :param sha: 20 byte sha1
        :raise BadObject:
        :return: OStream instance, with 20 byte sha"""
        return self._object(sha, True)

    def info_at_index(self, index):
        """As ``info``, but uses a PackIndexFile compatible index to refer to the object"""
        return self._object(None, False, index)

    def stream_at_index(self, index):
        """As ``stream``, but uses a PackIndexFile compatible index to refer to the
        object"""
        return self._object(None, True, index)

    #} END Read-Database like Interface

    #{ Interface

    def pack(self):
        """:return: the underlying pack file instance"""
        return self._pack

    def index(self):
        """:return: the underlying pack index file instance"""
        return self._index

    def is_valid_stream(self, sha, use_crc=False):
        """
        Verify that the stream at the given sha is valid.

        :param use_crc: if True, the index' crc is run over the compressed stream of
            the object, which is much faster than checking the sha1. It is also
            more prone to unnoticed corruption or manipulation.
        :param sha: 20 byte sha1 of the object whose stream to verify
            whether the compressed stream of the object is valid. If it is
            a delta, this only verifies that the delta's data is valid, not the
            data of the actual undeltified object, as it depends on more than
            just this stream.
            If False, the object will be decompressed and the sha generated. It must
            match the given sha

        :return: True if the stream is valid
        :raise UnsupportedOperation: If the index is version 1 only
        :raise BadObject: sha was not found"""
        if use_crc:
            if self._index.version() < 2:
                raise UnsupportedOperation("Version 1 indices do not contain crc's, verify by sha instead")
            # END handle index version

            index = self._sha_to_index(sha)
            offset = self._index.offset(index)
            next_offset = self._offset_map[offset]
            crc_value = self._index.crc(index)

            # create the current crc value, on the compressed object data
            # Read it in chunks, without copying the data
            crc_update = zlib.crc32
            pack_data = self._pack.data()
            cur_pos = offset
            this_crc_value = 0
            while cur_pos < next_offset:
                rbound = min(cur_pos + chunk_size, next_offset)
                size = rbound - cur_pos
                this_crc_value = crc_update(pack_data[cur_pos:cur_pos + size], this_crc_value)
                cur_pos += size
            # END window size loop

            # crc returns signed 32 bit numbers, the AND op forces it into unsigned
            # mode ... wow, sneaky, from dulwich.
            return (this_crc_value & 0xffffffff) == crc_value
        else:
            shawriter = Sha1Writer()
            stream = self._object(sha, as_stream=True)
            # write a loose object, which is the basis for the sha
            write_object(stream.type, stream.size, stream.read, shawriter.write)

            # BUGFIX: previously this asserted the comparison before returning it,
            # which raised AssertionError instead of returning False (and the
            # assert is stripped entirely under `python -O`), contradicting the
            # documented boolean return value.
            return shawriter.sha(as_hex=False) == sha
        # END handle crc/sha verification

    def info_iter(self):
        """
        :return: Iterator over all objects in this pack. The iterator yields
            OInfo instances"""
        return self._iter_objects(as_stream=False)

    def stream_iter(self):
        """
        :return: iterator over all objects in this pack. The iterator yields
            OStream instances"""
        return self._iter_objects(as_stream=True)

    def collect_streams_at_offset(self, offset):
        """
        As the version in the PackFile, but can resolve REF deltas within this pack
        For more info, see ``collect_streams``

        :param offset: offset into the pack file at which the object can be found"""
        streams = self._pack.collect_streams(offset)

        # try to resolve the last one if needed. It is assumed to be either
        # a REF delta, or a base object, as OFFSET deltas are resolved by the pack
        if streams[-1].type_id == REF_DELTA:
            stream = streams[-1]
            while stream.type_id in delta_types:
                if stream.type_id == REF_DELTA:
                    # smmap can return memory view objects, which can't be compared as buffers/bytes can ...
                    if isinstance(stream.delta_info, memoryview):
                        sindex = self._index.sha_to_index(stream.delta_info.tobytes())
                    else:
                        sindex = self._index.sha_to_index(stream.delta_info)
                    if sindex is None:
                        break
                    stream = self._pack.stream(self._index.offset(sindex))
                    streams.append(stream)
                else:
                    # must be another OFS DELTA - this could happen if a REF
                    # delta we resolve previously points to an OFS delta. Who
                    # would do that ;) ? We can handle it though
                    stream = self._pack.stream(stream.delta_info)
                    streams.append(stream)
                # END handle ref delta
            # END resolve ref streams
        # END resolve streams

        return streams

    def collect_streams(self, sha):
        """
        As ``PackFile.collect_streams``, but takes a sha instead of an offset.
        Additionally, ref_delta streams will be resolved within this pack.
        If this is not possible, the stream will be left alone, hence it is adivsed
        to check for unresolved ref-deltas and resolve them before attempting to
        construct a delta stream.

        :param sha: 20 byte sha1 specifying the object whose related streams you want to collect
        :return: list of streams, first being the actual object delta, the last being
            a possibly unresolved base object.
        :raise BadObject:"""
        return self.collect_streams_at_offset(self._index.offset(self._sha_to_index(sha)))

    @classmethod
    def write_pack(cls, object_iter, pack_write, index_write=None,
                   object_count=None, zlib_compression=zlib.Z_BEST_SPEED):
        """
        Create a new pack by putting all objects obtained by the object_iterator
        into a pack which is written using the pack_write method.
        The respective index is produced as well if index_write is not None.

        :param object_iter: iterator yielding odb output objects
        :param pack_write: function to receive strings to write into the pack stream
        :param index_write: if not None, the function writes the index file corresponding
            to the pack.
        :param object_count: if you can provide the amount of objects in your iteration,
            this would be the place to put it. Otherwise we have to pre-iterate and store
            all items into a list to get the number, which uses more memory than necessary.
        :param zlib_compression: the zlib compression level to use
        :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack
            and over all contents of the index. If index_write was None, index_binsha will be None

        **Note:** The destination of the write functions is up to the user. It could
        be a socket, or a file for instance

        **Note:** writes only undeltified objects"""
        objs = object_iter
        if not object_count:
            if not isinstance(object_iter, (tuple, list)):
                objs = list(object_iter)
            # END handle list type
            object_count = len(objs)
        # END handle object

        pack_writer = FlexibleSha1Writer(pack_write)
        pwrite = pack_writer.write
        ofs = 0                                         # current offset into the pack file
        index = None
        wants_index = index_write is not None

        # write header
        pwrite(pack('>LLL', PackFile.pack_signature, PackFile.pack_version_default, object_count))
        ofs += 12

        if wants_index:
            index = IndexWriter()
        # END handle index header

        actual_count = 0
        for obj in objs:
            actual_count += 1
            crc = 0

            # object header
            hdr = create_pack_object_header(obj.type_id, obj.size)
            # CONSISTENCY: use wants_index here as well instead of testing
            # index_write's truthiness a second time
            if wants_index:
                crc = crc32(hdr)
            else:
                crc = None
            # END handle crc
            pwrite(hdr)

            # data stream
            zstream = zlib.compressobj(zlib_compression)
            ostream = obj.stream
            br, bw, crc = write_stream_to_pack(ostream.read, pwrite, zstream, base_crc=crc)
            assert(br == obj.size)
            if wants_index:
                index.append(obj.binsha, crc, ofs)
            # END handle index

            ofs += len(hdr) + bw
            if actual_count == object_count:
                break
            # END abort once we are done
        # END for each object

        if actual_count != object_count:
            raise ValueError(
                "Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count))
        # END count assertion

        # write footer
        pack_sha = pack_writer.sha(as_hex=False)
        assert len(pack_sha) == 20
        pack_write(pack_sha)
        ofs += len(pack_sha)                            # just for completeness ;)

        index_sha = None
        if wants_index:
            index_sha = index.write(pack_sha, index_write)
        # END handle index

        return pack_sha, index_sha

    @classmethod
    def create(cls, object_iter, base_dir, object_count=None, zlib_compression=zlib.Z_BEST_SPEED):
        """Create a new on-disk entity comprised of a properly named pack file and a properly named
        and corresponding index file. The pack contains all OStream objects contained in object iter.

        :param base_dir: directory which is to contain the files
        :return: PackEntity instance initialized with the new pack

        **Note:** for more information on the other parameters see the write_pack method"""
        pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir)
        index_fd, index_path = tempfile.mkstemp('', 'index', base_dir)
        pack_write = lambda d: os.write(pack_fd, d)
        index_write = lambda d: os.write(index_fd, d)

        pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression)
        # close before renaming so the files can be moved on all platforms
        os.close(pack_fd)
        os.close(index_fd)

        fmt = "pack-%s.%s"
        new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'pack'))
        new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'idx'))
        os.rename(pack_path, new_pack_path)
        os.rename(index_path, new_index_path)

        return cls(new_pack_path)

    #} END interface
parrot/lib/python3.10/site-packages/gitdb/stream.py ADDED
@@ -0,0 +1,730 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+
6
+ from io import BytesIO
7
+
8
+ import mmap
9
+ import os
10
+ import sys
11
+ import zlib
12
+
13
+ from gitdb.fun import (
14
+ msb_size,
15
+ stream_copy,
16
+ apply_delta_data,
17
+ connect_deltas,
18
+ delta_types
19
+ )
20
+
21
+ from gitdb.util import (
22
+ allocate_memory,
23
+ LazyMixin,
24
+ make_sha,
25
+ write,
26
+ close,
27
+ )
28
+
29
+ from gitdb.const import NULL_BYTE, BYTE_SPACE
30
+ from gitdb.utils.encoding import force_bytes
31
+
32
+ has_perf_mod = False
33
+ try:
34
+ from gitdb_speedups._perf import apply_delta as c_apply_delta
35
+ has_perf_mod = True
36
+ except ImportError:
37
+ pass
38
+
39
+ __all__ = ('DecompressMemMapReader', 'FDCompressedSha1Writer', 'DeltaApplyReader',
40
+ 'Sha1Writer', 'FlexibleSha1Writer', 'ZippedStoreShaWriter', 'FDCompressedSha1Writer',
41
+ 'FDStream', 'NullStream')
42
+
43
+
44
+ #{ RO Streams
45
+
46
+ class DecompressMemMapReader(LazyMixin):
47
+
48
+ """Reads data in chunks from a memory map and decompresses it. The client sees
49
+ only the uncompressed data, respective file-like read calls are handling on-demand
50
+ buffered decompression accordingly
51
+
52
+ A constraint on the total size of bytes is activated, simulating
53
+ a logical file within a possibly larger physical memory area
54
+
55
+ To read efficiently, you clearly don't want to read individual bytes, instead,
56
+ read a few kilobytes at least.
57
+
58
+ **Note:** The chunk-size should be carefully selected as it will involve quite a bit
59
+ of string copying due to the way the zlib is implemented. It's very wasteful,
60
+ hence we try to find a good tradeoff between allocation time and number of
61
+ times we actually allocate. An own zlib implementation would be good here
62
+ to better support streamed reading - it would only need to keep the mmap
63
+ and decompress it into chunks, that's all ... """
64
+ __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close',
65
+ '_cbr', '_phi')
66
+
67
+ max_read_size = 512 * 1024 # currently unused
68
+
69
+ def __init__(self, m, close_on_deletion, size=None):
70
+ """Initialize with mmap for stream reading
71
+ :param m: must be content data - use new if you have object data and no size"""
72
+ self._m = m
73
+ self._zip = zlib.decompressobj()
74
+ self._buf = None # buffer of decompressed bytes
75
+ self._buflen = 0 # length of bytes in buffer
76
+ if size is not None:
77
+ self._s = size # size of uncompressed data to read in total
78
+ self._br = 0 # num uncompressed bytes read
79
+ self._cws = 0 # start byte of compression window
80
+ self._cwe = 0 # end byte of compression window
81
+ self._cbr = 0 # number of compressed bytes read
82
+ self._phi = False # is True if we parsed the header info
83
+ self._close = close_on_deletion # close the memmap on deletion ?
84
+
85
+ def _set_cache_(self, attr):
86
+ assert attr == '_s'
87
+ # only happens for size, which is a marker to indicate we still
88
+ # have to parse the header from the stream
89
+ self._parse_header_info()
90
+
91
+ def __del__(self):
92
+ self.close()
93
+
94
+ def _parse_header_info(self):
95
+ """If this stream contains object data, parse the header info and skip the
96
+ stream to a point where each read will yield object content
97
+
98
+ :return: parsed type_string, size"""
99
+ # read header
100
+ # should really be enough, cgit uses 8192 I believe
101
+ # And for good reason !! This needs to be that high for the header to be read correctly in all cases
102
+ maxb = 8192
103
+ self._s = maxb
104
+ hdr = self.read(maxb)
105
+ hdrend = hdr.find(NULL_BYTE)
106
+ typ, size = hdr[:hdrend].split(BYTE_SPACE)
107
+ size = int(size)
108
+ self._s = size
109
+
110
+ # adjust internal state to match actual header length that we ignore
111
+ # The buffer will be depleted first on future reads
112
+ self._br = 0
113
+ hdrend += 1
114
+ self._buf = BytesIO(hdr[hdrend:])
115
+ self._buflen = len(hdr) - hdrend
116
+
117
+ self._phi = True
118
+
119
+ return typ, size
120
+
121
+ #{ Interface
122
+
123
+ @classmethod
124
+ def new(self, m, close_on_deletion=False):
125
+ """Create a new DecompressMemMapReader instance for acting as a read-only stream
126
+ This method parses the object header from m and returns the parsed
127
+ type and size, as well as the created stream instance.
128
+
129
+ :param m: memory map on which to operate. It must be object data ( header + contents )
130
+ :param close_on_deletion: if True, the memory map will be closed once we are
131
+ being deleted"""
132
+ inst = DecompressMemMapReader(m, close_on_deletion, 0)
133
+ typ, size = inst._parse_header_info()
134
+ return typ, size, inst
135
+
136
+ def data(self):
137
+ """:return: random access compatible data we are working on"""
138
+ return self._m
139
+
140
+ def close(self):
141
+ """Close our underlying stream of compressed bytes if this was allowed during initialization
142
+ :return: True if we closed the underlying stream
143
+ :note: can be called safely
144
+ """
145
+ if self._close:
146
+ if hasattr(self._m, 'close'):
147
+ self._m.close()
148
+ self._close = False
149
+ # END handle resource freeing
150
+
151
+ def compressed_bytes_read(self):
152
+ """
153
+ :return: number of compressed bytes read. This includes the bytes it
154
+ took to decompress the header ( if there was one )"""
155
+ # ABSTRACT: When decompressing a byte stream, it can be that the first
156
+ # x bytes which were requested match the first x bytes in the loosely
157
+ # compressed datastream. This is the worst-case assumption that the reader
158
+ # does, it assumes that it will get at least X bytes from X compressed bytes
159
+ # in all cases.
160
+ # The caveat is that the object, according to our known uncompressed size,
161
+ # is already complete, but there are still some bytes left in the compressed
162
+ # stream that contribute to the amount of compressed bytes.
163
+ # How can we know that we are truly done, and have read all bytes we need
164
+ # to read ?
165
+ # Without help, we cannot know, as we need to obtain the status of the
166
+ # decompression. If it is not finished, we need to decompress more data
167
+ # until it is finished, to yield the actual number of compressed bytes
168
+ # belonging to the decompressed object
169
+ # We are using a custom zlib module for this, if its not present,
170
+ # we try to put in additional bytes up for decompression if feasible
171
+ # and check for the unused_data.
172
+
173
+ # Only scrub the stream forward if we are officially done with the
174
+ # bytes we were to have.
175
+ if self._br == self._s and not self._zip.unused_data:
176
+ # manipulate the bytes-read to allow our own read method to continue
177
+ # but keep the window at its current position
178
+ self._br = 0
179
+ if hasattr(self._zip, 'status'):
180
+ while self._zip.status == zlib.Z_OK:
181
+ self.read(mmap.PAGESIZE)
182
+ # END scrub-loop custom zlib
183
+ else:
184
+ # pass in additional pages, until we have unused data
185
+ while not self._zip.unused_data and self._cbr != len(self._m):
186
+ self.read(mmap.PAGESIZE)
187
+ # END scrub-loop default zlib
188
+ # END handle stream scrubbing
189
+
190
+ # reset bytes read, just to be sure
191
+ self._br = self._s
192
+ # END handle stream scrubbing
193
+
194
+ # unused data ends up in the unconsumed tail, which was removed
195
+ # from the count already
196
+ return self._cbr
197
+
198
+ #} END interface
199
+
200
+ def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
201
+ """Allows to reset the stream to restart reading
202
+ :raise ValueError: If offset and whence are not 0"""
203
+ if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
204
+ raise ValueError("Can only seek to position 0")
205
+ # END handle offset
206
+
207
+ self._zip = zlib.decompressobj()
208
+ self._br = self._cws = self._cwe = self._cbr = 0
209
+ if self._phi:
210
+ self._phi = False
211
+ del(self._s) # trigger header parsing on first access
212
+ # END skip header
213
+
214
+ def read(self, size=-1):
215
+ if size < 1:
216
+ size = self._s - self._br
217
+ else:
218
+ size = min(size, self._s - self._br)
219
+ # END clamp size
220
+
221
+ if size == 0:
222
+ return b''
223
+ # END handle depletion
224
+
225
+ # deplete the buffer, then just continue using the decompress object
226
+ # which has an own buffer. We just need this to transparently parse the
227
+ # header from the zlib stream
228
+ dat = b''
229
+ if self._buf:
230
+ if self._buflen >= size:
231
+ # have enough data
232
+ dat = self._buf.read(size)
233
+ self._buflen -= size
234
+ self._br += size
235
+ return dat
236
+ else:
237
+ dat = self._buf.read() # ouch, duplicates data
238
+ size -= self._buflen
239
+ self._br += self._buflen
240
+
241
+ self._buflen = 0
242
+ self._buf = None
243
+ # END handle buffer len
244
+ # END handle buffer
245
+
246
+ # decompress some data
247
+ # Abstract: zlib needs to operate on chunks of our memory map ( which may
248
+ # be large ), as it will otherwise and always fill in the 'unconsumed_tail'
249
+ # attribute which possibly reads our whole map to the end, forcing
250
+ # everything to be read from disk even though just a portion was requested.
251
+ # As this would be a nogo, we workaround it by passing only chunks of data,
252
+ # moving the window into the memory map along as we decompress, which keeps
253
+ # the tail smaller than our chunk-size. This causes 'only' the chunk to be
254
+ # copied once, and another copy of a part of it when it creates the unconsumed
255
+ # tail. We have to use it to hand in the appropriate amount of bytes during
256
+ # the next read.
257
+ tail = self._zip.unconsumed_tail
258
+ if tail:
259
+ # move the window, make it as large as size demands. For code-clarity,
260
+ # we just take the chunk from our map again instead of reusing the unconsumed
261
+ # tail. The latter one would save some memory copying, but we could end up
262
+ # with not getting enough data uncompressed, so we had to sort that out as well.
263
+ # Now we just assume the worst case, hence the data is uncompressed and the window
264
+ # needs to be as large as the uncompressed bytes we want to read.
265
+ self._cws = self._cwe - len(tail)
266
+ self._cwe = self._cws + size
267
+ else:
268
+ cws = self._cws
269
+ self._cws = self._cwe
270
+ self._cwe = cws + size
271
+ # END handle tail
272
+
273
+ # if window is too small, make it larger so zip can decompress something
274
+ if self._cwe - self._cws < 8:
275
+ self._cwe = self._cws + 8
276
+ # END adjust winsize
277
+
278
+ # takes a slice, but doesn't copy the data, it says ...
279
+ indata = self._m[self._cws:self._cwe]
280
+
281
+ # get the actual window end to be sure we don't use it for computations
282
+ self._cwe = self._cws + len(indata)
283
+ dcompdat = self._zip.decompress(indata, size)
284
+ # update the amount of compressed bytes read
285
+ # We feed possibly overlapping chunks, which is why the unconsumed tail
286
+ # has to be taken into consideration, as well as the unused data
287
+ # if we hit the end of the stream
288
+ # NOTE: Behavior changed in PY2.7 onward, which requires special handling to make the tests work properly.
289
+ # They are thorough, and I assume it is truly working.
290
+ # Why is this logic as convoluted as it is ? Please look at the table in
291
+ # https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results.
292
+ # Basically, on py2.6, you want to use branch 1, whereas on all other python version, the second branch
293
+ # will be the one that works.
294
+ # However, the zlib VERSIONs as well as the platform check is used to further match the entries in the
295
+ # table in the github issue. This is it ... it was the only way I could make this work everywhere.
296
+ # IT's CERTAINLY GOING TO BITE US IN THE FUTURE ... .
297
+ if getattr(zlib, 'ZLIB_RUNTIME_VERSION', zlib.ZLIB_VERSION) in ('1.2.7', '1.2.5') and not sys.platform == 'darwin':
298
+ unused_datalen = len(self._zip.unconsumed_tail)
299
+ else:
300
+ unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
301
+ # # end handle very special case ...
302
+
303
+ self._cbr += len(indata) - unused_datalen
304
+ self._br += len(dcompdat)
305
+
306
+ if dat:
307
+ dcompdat = dat + dcompdat
308
+ # END prepend our cached data
309
+
310
+ # it can happen, depending on the compression, that we get less bytes
311
+ # than ordered as it needs the final portion of the data as well.
312
+ # Recursively resolve that.
313
+ # Note: dcompdat can be empty even though we still appear to have bytes
314
+ # to read, if we are called by compressed_bytes_read - it manipulates
315
+ # us to empty the stream
316
+ if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s:
317
+ dcompdat += self.read(size - len(dcompdat))
318
+ # END handle special case
319
+ return dcompdat
320
+
321
+
322
+ class DeltaApplyReader(LazyMixin):
323
+
324
+ """A reader which dynamically applies pack deltas to a base object, keeping the
325
+ memory demands to a minimum.
326
+
327
+ The size of the final object is only obtainable once all deltas have been
328
+ applied, unless it is retrieved from a pack index.
329
+
330
+ The uncompressed Delta has the following layout (MSB being a most significant
331
+ bit encoded dynamic size):
332
+
333
+ * MSB Source Size - the size of the base against which the delta was created
334
+ * MSB Target Size - the size of the resulting data after the delta was applied
335
+ * A list of one byte commands (cmd) which are followed by a specific protocol:
336
+
337
+ * cmd & 0x80 - copy delta_data[offset:offset+size]
338
+
339
+ * Followed by an encoded offset into the delta data
340
+ * Followed by an encoded size of the chunk to copy
341
+
342
+ * cmd & 0x7f - insert
343
+
344
+ * insert cmd bytes from the delta buffer into the output stream
345
+
346
+ * cmd == 0 - invalid operation ( or error in delta stream )
347
+ """
348
+ __slots__ = (
349
+ "_bstream", # base stream to which to apply the deltas
350
+ "_dstreams", # tuple of delta stream readers
351
+ "_mm_target", # memory map of the delta-applied data
352
+ "_size", # actual number of bytes in _mm_target
353
+ "_br" # number of bytes read
354
+ )
355
+
356
+ #{ Configuration
357
+ k_max_memory_move = 250 * 1000 * 1000
358
+ #} END configuration
359
+
360
+ def __init__(self, stream_list):
361
+ """Initialize this instance with a list of streams, the first stream being
362
+ the delta to apply on top of all following deltas, the last stream being the
363
+ base object onto which to apply the deltas"""
364
+ assert len(stream_list) > 1, "Need at least one delta and one base stream"
365
+
366
+ self._bstream = stream_list[-1]
367
+ self._dstreams = tuple(stream_list[:-1])
368
+ self._br = 0
369
+
370
+ def _set_cache_too_slow_without_c(self, attr):
371
+ # the direct algorithm is fastest and most direct if there is only one
372
+ # delta. Also, the extra overhead might not be worth it for items smaller
373
+ # than X - definitely the case in python, every function call costs
374
+ # huge amounts of time
375
+ # if len(self._dstreams) * self._bstream.size < self.k_max_memory_move:
376
+ if len(self._dstreams) == 1:
377
+ return self._set_cache_brute_(attr)
378
+
379
+ # Aggregate all deltas into one delta in reverse order. Hence we take
380
+ # the last delta, and reverse-merge its ancestor delta, until we receive
381
+ # the final delta data stream.
382
+ dcl = connect_deltas(self._dstreams)
383
+
384
+ # call len directly, as the (optional) c version doesn't implement the sequence
385
+ # protocol
386
+ if dcl.rbound() == 0:
387
+ self._size = 0
388
+ self._mm_target = allocate_memory(0)
389
+ return
390
+ # END handle empty list
391
+
392
+ self._size = dcl.rbound()
393
+ self._mm_target = allocate_memory(self._size)
394
+
395
+ bbuf = allocate_memory(self._bstream.size)
396
+ stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE)
397
+
398
+ # APPLY CHUNKS
399
+ write = self._mm_target.write
400
+ dcl.apply(bbuf, write)
401
+
402
+ self._mm_target.seek(0)
403
+
404
+ def _set_cache_brute_(self, attr):
405
+ """If we are here, we apply the actual deltas"""
406
+ # TODO: There should be a special case if there is only one stream
407
+ # Then the default-git algorithm should perform a tad faster, as the
408
+ # delta is not peeked into, causing less overhead.
409
+ buffer_info_list = list()
410
+ max_target_size = 0
411
+ for dstream in self._dstreams:
412
+ buf = dstream.read(512) # read the header information + X
413
+ offset, src_size = msb_size(buf)
414
+ offset, target_size = msb_size(buf, offset)
415
+ buffer_info_list.append((buf[offset:], offset, src_size, target_size))
416
+ max_target_size = max(max_target_size, target_size)
417
+ # END for each delta stream
418
+
419
+ # sanity check - the first delta to apply should have the same source
420
+ # size as our actual base stream
421
+ base_size = self._bstream.size
422
+ target_size = max_target_size
423
+
424
+ # if we have more than 1 delta to apply, we will swap buffers, hence we must
425
+ # assure that all buffers we use are large enough to hold all the results
426
+ if len(self._dstreams) > 1:
427
+ base_size = target_size = max(base_size, max_target_size)
428
+ # END adjust buffer sizes
429
+
430
+ # Allocate private memory map big enough to hold the first base buffer
431
+ # We need random access to it
432
+ bbuf = allocate_memory(base_size)
433
+ stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE)
434
+
435
+ # allocate memory map large enough for the largest (intermediate) target
436
+ # We will use it as scratch space for all delta ops. If the final
437
+ # target buffer is smaller than our allocated space, we just use parts
438
+ # of it upon return.
439
+ tbuf = allocate_memory(target_size)
440
+
441
+ # for each delta to apply, memory map the decompressed delta and
442
+ # work on the op-codes to reconstruct everything.
443
+ # For the actual copying, we use a seek and write pattern of buffer
444
+ # slices.
445
+ final_target_size = None
446
+ for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)):
447
+ # allocate a buffer to hold all delta data - fill in the data for
448
+ # fast access. We do this as we know that reading individual bytes
449
+ # from our stream would be slower than necessary ( although possible )
450
+ # The dbuf buffer contains commands after the first two MSB sizes, the
451
+ # offset specifies the amount of bytes read to get the sizes.
452
+ ddata = allocate_memory(dstream.size - offset)
453
+ ddata.write(dbuf)
454
+ # read the rest from the stream. The size we give is larger than necessary
455
+ stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE)
456
+
457
+ #######################################################################
458
+ if 'c_apply_delta' in globals():
459
+ c_apply_delta(bbuf, ddata, tbuf)
460
+ else:
461
+ apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write)
462
+ #######################################################################
463
+
464
+ # finally, swap out source and target buffers. The target is now the
465
+ # base for the next delta to apply
466
+ bbuf, tbuf = tbuf, bbuf
467
+ bbuf.seek(0)
468
+ tbuf.seek(0)
469
+ final_target_size = target_size
470
+ # END for each delta to apply
471
+
472
+ # its already seeked to 0, constrain it to the actual size
473
+ # NOTE: in the end of the loop, it swaps buffers, hence our target buffer
474
+ # is not tbuf, but bbuf !
475
+ self._mm_target = bbuf
476
+ self._size = final_target_size
477
+
478
+ #{ Configuration
479
+ if not has_perf_mod:
480
+ _set_cache_ = _set_cache_brute_
481
+ else:
482
+ _set_cache_ = _set_cache_too_slow_without_c
483
+
484
+ #} END configuration
485
+
486
+ def read(self, count=0):
487
+ bl = self._size - self._br # bytes left
488
+ if count < 1 or count > bl:
489
+ count = bl
490
+ # NOTE: we could check for certain size limits, and possibly
491
+ # return buffers instead of strings to prevent byte copying
492
+ data = self._mm_target.read(count)
493
+ self._br += len(data)
494
+ return data
495
+
496
+ def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
497
+ """Allows to reset the stream to restart reading
498
+
499
+ :raise ValueError: If offset and whence are not 0"""
500
+ if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
501
+ raise ValueError("Can only seek to position 0")
502
+ # END handle offset
503
+ self._br = 0
504
+ self._mm_target.seek(0)
505
+
506
+ #{ Interface
507
+
508
+ @classmethod
509
+ def new(cls, stream_list):
510
+ """
511
+ Convert the given list of streams into a stream which resolves deltas
512
+ when reading from it.
513
+
514
+ :param stream_list: two or more stream objects, first stream is a Delta
515
+ to the object that you want to resolve, followed by N additional delta
516
+ streams. The list's last stream must be a non-delta stream.
517
+
518
+ :return: Non-Delta OPackStream object whose stream can be used to obtain
519
+ the decompressed resolved data
520
+ :raise ValueError: if the stream list cannot be handled"""
521
+ if len(stream_list) < 2:
522
+ raise ValueError("Need at least two streams")
523
+ # END single object special handling
524
+
525
+ if stream_list[-1].type_id in delta_types:
526
+ raise ValueError(
527
+ "Cannot resolve deltas if there is no base object stream, last one was type: %s" % stream_list[-1].type)
528
+ # END check stream
529
+ return cls(stream_list)
530
+
531
+ #} END interface
532
+
533
+ #{ OInfo like Interface
534
+
535
+ @property
536
+ def type(self):
537
+ return self._bstream.type
538
+
539
+ @property
540
+ def type_id(self):
541
+ return self._bstream.type_id
542
+
543
+ @property
544
+ def size(self):
545
+ """:return: number of uncompressed bytes in the stream"""
546
+ return self._size
547
+
548
+ #} END oinfo like interface
549
+
550
+
551
+ #} END RO streams
552
+
553
+
554
+ #{ W Streams
555
+
556
+ class Sha1Writer:
557
+
558
+ """Simple stream writer which produces a sha whenever you like as it digests
559
+ everything it is supposed to write"""
560
+ __slots__ = "sha1"
561
+
562
+ def __init__(self):
563
+ self.sha1 = make_sha()
564
+
565
+ #{ Stream Interface
566
+
567
+ def write(self, data):
568
+ """:raise IOError: If not all bytes could be written
569
+ :param data: byte object
570
+ :return: length of incoming data"""
571
+
572
+ self.sha1.update(data)
573
+
574
+ return len(data)
575
+
576
+ # END stream interface
577
+
578
+ #{ Interface
579
+
580
+ def sha(self, as_hex=False):
581
+ """:return: sha so far
582
+ :param as_hex: if True, sha will be hex-encoded, binary otherwise"""
583
+ if as_hex:
584
+ return self.sha1.hexdigest()
585
+ return self.sha1.digest()
586
+
587
+ #} END interface
588
+
589
+
590
+ class FlexibleSha1Writer(Sha1Writer):
591
+
592
+ """Writer producing a sha1 while passing on the written bytes to the given
593
+ write function"""
594
+ __slots__ = 'writer'
595
+
596
+ def __init__(self, writer):
597
+ Sha1Writer.__init__(self)
598
+ self.writer = writer
599
+
600
+ def write(self, data):
601
+ Sha1Writer.write(self, data)
602
+ self.writer(data)
603
+
604
+
605
+ class ZippedStoreShaWriter(Sha1Writer):
606
+
607
+ """Remembers everything someone writes to it and generates a sha"""
608
+ __slots__ = ('buf', 'zip')
609
+
610
+ def __init__(self):
611
+ Sha1Writer.__init__(self)
612
+ self.buf = BytesIO()
613
+ self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)
614
+
615
+ def __getattr__(self, attr):
616
+ return getattr(self.buf, attr)
617
+
618
+ def write(self, data):
619
+ alen = Sha1Writer.write(self, data)
620
+ self.buf.write(self.zip.compress(data))
621
+
622
+ return alen
623
+
624
+ def close(self):
625
+ self.buf.write(self.zip.flush())
626
+
627
+ def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
628
+ """Seeking currently only supports to rewind written data
629
+ Multiple writes are not supported"""
630
+ if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
631
+ raise ValueError("Can only seek to position 0")
632
+ # END handle offset
633
+ self.buf.seek(0)
634
+
635
+ def getvalue(self):
636
+ """:return: string value from the current stream position to the end"""
637
+ return self.buf.getvalue()
638
+
639
+
640
+ class FDCompressedSha1Writer(Sha1Writer):
641
+
642
+ """Digests data written to it, making the sha available, then compress the
643
+ data and write it to the file descriptor
644
+
645
+ **Note:** operates on raw file descriptors
646
+ **Note:** for this to work, you have to use the close-method of this instance"""
647
+ __slots__ = ("fd", "sha1", "zip")
648
+
649
+ # default exception
650
+ exc = IOError("Failed to write all bytes to filedescriptor")
651
+
652
+ def __init__(self, fd):
653
+ super().__init__()
654
+ self.fd = fd
655
+ self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)
656
+
657
+ #{ Stream Interface
658
+
659
+ def write(self, data):
660
+ """:raise IOError: If not all bytes could be written
661
+ :return: length of incoming data"""
662
+ self.sha1.update(data)
663
+ cdata = self.zip.compress(data)
664
+ bytes_written = write(self.fd, cdata)
665
+
666
+ if bytes_written != len(cdata):
667
+ raise self.exc
668
+
669
+ return len(data)
670
+
671
+ def close(self):
672
+ remainder = self.zip.flush()
673
+ if write(self.fd, remainder) != len(remainder):
674
+ raise self.exc
675
+ return close(self.fd)
676
+
677
+ #} END stream interface
678
+
679
+
680
+ class FDStream:
681
+
682
+ """A simple wrapper providing the most basic functions on a file descriptor
683
+ with the fileobject interface. Cannot use os.fdopen as the resulting stream
684
+ takes ownership"""
685
+ __slots__ = ("_fd", '_pos')
686
+
687
+ def __init__(self, fd):
688
+ self._fd = fd
689
+ self._pos = 0
690
+
691
+ def write(self, data):
692
+ self._pos += len(data)
693
+ os.write(self._fd, data)
694
+
695
+ def read(self, count=0):
696
+ if count == 0:
697
+ count = os.path.getsize(self._filepath)
698
+ # END handle read everything
699
+
700
+ bytes = os.read(self._fd, count)
701
+ self._pos += len(bytes)
702
+ return bytes
703
+
704
+ def fileno(self):
705
+ return self._fd
706
+
707
+ def tell(self):
708
+ return self._pos
709
+
710
+ def close(self):
711
+ close(self._fd)
712
+
713
+
714
+ class NullStream:
715
+
716
+ """A stream that does nothing but providing a stream interface.
717
+ Use it like /dev/null"""
718
+ __slots__ = tuple()
719
+
720
+ def read(self, size=0):
721
+ return ''
722
+
723
+ def close(self):
724
+ pass
725
+
726
+ def write(self, data):
727
+ return len(data)
728
+
729
+
730
+ #} END W streams
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (162 Bytes). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/lib.cpython-310.pyc ADDED
Binary file (5.62 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_base.cpython-310.pyc ADDED
Binary file (1.95 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_example.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_pack.cpython-310.pyc ADDED
Binary file (5.79 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_stream.cpython-310.pyc ADDED
Binary file (4.1 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/__pycache__/test_util.cpython-310.pyc ADDED
Binary file (2.15 kB). View file
 
parrot/lib/python3.10/site-packages/gitdb/test/lib.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Utilities used in ODB testing"""
6
+ from gitdb import OStream
7
+
8
+ import sys
9
+ import random
10
+ from array import array
11
+
12
+ from io import BytesIO
13
+
14
+ import glob
15
+ import unittest
16
+ import tempfile
17
+ import shutil
18
+ import os
19
+ import gc
20
+ import logging
21
+ from functools import wraps
22
+
23
+
24
+ #{ Bases
25
+
26
+ class TestBase(unittest.TestCase):
27
+ """Base class for all tests
28
+
29
+ TestCase providing access to readonly repositories using the following member variables.
30
+
31
+ * gitrepopath
32
+
33
+ * read-only base path of the git source repository, i.e. .../git/.git
34
+ """
35
+
36
+ #{ Invariants
37
+ k_env_git_repo = "GITDB_TEST_GIT_REPO_BASE"
38
+ #} END invariants
39
+
40
+ @classmethod
41
+ def setUpClass(cls):
42
+ try:
43
+ super().setUpClass()
44
+ except AttributeError:
45
+ pass
46
+
47
+ cls.gitrepopath = os.environ.get(cls.k_env_git_repo)
48
+ if not cls.gitrepopath:
49
+ logging.info(
50
+ "You can set the %s environment variable to a .git repository of your choice - defaulting to the gitdb repository", cls.k_env_git_repo)
51
+ ospd = os.path.dirname
52
+ cls.gitrepopath = os.path.join(ospd(ospd(ospd(__file__))), '.git')
53
+ # end assure gitrepo is set
54
+ assert cls.gitrepopath.endswith('.git')
55
+
56
+
57
+ #} END bases
58
+
59
+ #{ Decorators
60
+
61
+ def with_rw_directory(func):
62
+ """Create a temporary directory which can be written to, remove it if the
63
+ test succeeds, but leave it otherwise to aid additional debugging"""
64
+
65
+ def wrapper(self):
66
+ path = tempfile.mktemp(prefix=func.__name__)
67
+ os.mkdir(path)
68
+ keep = False
69
+ try:
70
+ try:
71
+ return func(self, path)
72
+ except Exception:
73
+ sys.stderr.write(f"Test {type(self).__name__}.{func.__name__} failed, output is at {path!r}\n")
74
+ keep = True
75
+ raise
76
+ finally:
77
+ # Need to collect here to be sure all handles have been closed. It appears
78
+ # a windows-only issue. In fact things should be deleted, as well as
79
+ # memory maps closed, once objects go out of scope. For some reason
80
+ # though this is not the case here unless we collect explicitly.
81
+ if not keep:
82
+ gc.collect()
83
+ shutil.rmtree(path)
84
+ # END handle exception
85
+ # END wrapper
86
+
87
+ wrapper.__name__ = func.__name__
88
+ return wrapper
89
+
90
+
91
+ def with_packs_rw(func):
92
+ """Function that provides a path into which the packs for testing should be
93
+ copied. Will pass on the path to the actual function afterwards"""
94
+
95
+ def wrapper(self, path):
96
+ src_pack_glob = fixture_path('packs/*')
97
+ copy_files_globbed(src_pack_glob, path, hard_link_ok=True)
98
+ return func(self, path)
99
+ # END wrapper
100
+
101
+ wrapper.__name__ = func.__name__
102
+ return wrapper
103
+
104
+ #} END decorators
105
+
106
+ #{ Routines
107
+
108
+
109
+ def fixture_path(relapath=''):
110
+ """:return: absolute path into the fixture directory
111
+ :param relapath: relative path into the fixtures directory, or ''
112
+ to obtain the fixture directory itself"""
113
+ return os.path.join(os.path.dirname(__file__), 'fixtures', relapath)
114
+
115
+
116
+ def copy_files_globbed(source_glob, target_dir, hard_link_ok=False):
117
+ """Copy all files found according to the given source glob into the target directory
118
+ :param hard_link_ok: if True, hard links will be created if possible. Otherwise
119
+ the files will be copied"""
120
+ for src_file in glob.glob(source_glob):
121
+ if hard_link_ok and hasattr(os, 'link'):
122
+ target = os.path.join(target_dir, os.path.basename(src_file))
123
+ try:
124
+ os.link(src_file, target)
125
+ except OSError:
126
+ shutil.copy(src_file, target_dir)
127
+ # END handle cross device links ( and resulting failure )
128
+ else:
129
+ shutil.copy(src_file, target_dir)
130
+ # END try hard link
131
+ # END for each file to copy
132
+
133
+
134
+ def make_bytes(size_in_bytes, randomize=False):
135
+ """:return: string with given size in bytes
136
+ :param randomize: try to produce a very random stream"""
137
+ actual_size = size_in_bytes // 4
138
+ producer = range(actual_size)
139
+ if randomize:
140
+ producer = list(producer)
141
+ random.shuffle(producer)
142
+ # END randomize
143
+ a = array('i', producer)
144
+ return a.tobytes()
145
+
146
+
147
+ def make_object(type, data):
148
+ """:return: bytes resembling an uncompressed object"""
149
+ odata = "blob %i\0" % len(data)
150
+ return odata.encode("ascii") + data
151
+
152
+
153
+ def make_memory_file(size_in_bytes, randomize=False):
154
+ """:return: tuple(size_of_stream, stream)
155
+ :param randomize: try to produce a very random stream"""
156
+ d = make_bytes(size_in_bytes, randomize)
157
+ return len(d), BytesIO(d)
158
+
159
+ #} END routines
160
+
161
+ #{ Stream Utilities
162
+
163
+
164
+ class DummyStream:
165
+
166
+ def __init__(self):
167
+ self.was_read = False
168
+ self.bytes = 0
169
+ self.closed = False
170
+
171
+ def read(self, size):
172
+ self.was_read = True
173
+ self.bytes = size
174
+
175
+ def close(self):
176
+ self.closed = True
177
+
178
+ def _assert(self):
179
+ assert self.was_read
180
+
181
+
182
+ class DeriveTest(OStream):
183
+
184
+ def __init__(self, sha, type, size, stream, *args, **kwargs):
185
+ self.myarg = kwargs.pop('myarg')
186
+ self.args = args
187
+
188
+ def _assert(self):
189
+ assert self.args
190
+ assert self.myarg
191
+
192
+ #} END stream utilities
parrot/lib/python3.10/site-packages/gitdb/test/test_base.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
2
+ #
3
+ # This module is part of GitDB and is released under
4
+ # the New BSD License: https://opensource.org/license/bsd-3-clause/
5
+ """Test for object db"""
6
+ from gitdb.test.lib import (
7
+ TestBase,
8
+ DummyStream,
9
+ DeriveTest,
10
+ )
11
+
12
+ from gitdb import (
13
+ OInfo,
14
+ OPackInfo,
15
+ ODeltaPackInfo,
16
+ OStream,
17
+ OPackStream,
18
+ ODeltaPackStream,
19
+ IStream
20
+ )
21
+ from gitdb.util import (
22
+ NULL_BIN_SHA
23
+ )
24
+
25
+ from gitdb.typ import (
26
+ str_blob_type
27
+ )
28
+
29
+
30
class TestBaseTypes(TestBase):
    """Exercises the plain info/stream value types exported by gitdb."""

    def test_streams(self):
        # test info
        sha = NULL_BIN_SHA
        s = 20
        blob_id = 3

        info = OInfo(sha, str_blob_type, s)
        assert info.binsha == sha
        assert info.type == str_blob_type
        assert info.type_id == blob_id
        assert info.size == s

        # test pack info
        # provides type_id
        pinfo = OPackInfo(0, blob_id, s)
        assert pinfo.type == str_blob_type
        assert pinfo.type_id == blob_id
        assert pinfo.pack_offset == 0

        dpinfo = ODeltaPackInfo(0, blob_id, s, sha)
        assert dpinfo.type == str_blob_type
        assert dpinfo.type_id == blob_id
        assert dpinfo.delta_info == sha
        assert dpinfo.pack_offset == 0

        # test ostream
        stream = DummyStream()
        ostream = OStream(*(info + (stream, )))
        assert ostream.stream is stream
        ostream.read(15)
        stream._assert()
        assert stream.bytes == 15
        ostream.read(20)
        assert stream.bytes == 20

        # test packstream
        postream = OPackStream(*(pinfo + (stream, )))
        assert postream.stream is stream
        postream.read(10)
        stream._assert()
        assert stream.bytes == 10

        # test deltapackstream
        dpostream = ODeltaPackStream(*(dpinfo + (stream, )))
        # was a bare expression before and tested nothing - now asserted
        assert dpostream.stream is stream
        dpostream.read(5)
        stream._assert()
        assert stream.bytes == 5

        # derive with own args
        DeriveTest(sha, str_blob_type, s, stream, 'mine', myarg=3)._assert()

        # test istream
        istream = IStream(str_blob_type, s, stream)
        assert istream.binsha is None
        istream.binsha = sha
        assert istream.binsha == sha

        assert len(istream.binsha) == 20
        assert len(istream.hexsha) == 40

        assert istream.size == s
        istream.size = s * 2
        # the comparison below lacked its assert before and tested nothing
        assert istream.size == s * 2
        assert istream.type == str_blob_type
        istream.type = "something"
        assert istream.type == "something"
        assert istream.stream is stream
        istream.stream = None
        assert istream.stream is None

        assert istream.error is None
        istream.error = Exception()
        assert isinstance(istream.error, Exception)